diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE index d3c3a2d262f57..9d5156b2e2ebd 100644 --- a/.github/ISSUE_TEMPLATE +++ b/.github/ISSUE_TEMPLATE @@ -1,24 +1,36 @@ -Please answer these questions before submitting your issue. Thanks! - + ### What version of Go are you using (`go version`)? +
+$ go version
+
+
### Does this issue reproduce with the latest release? + ### What operating system and processor architecture are you using (`go env`)? +
go env Output
+$ go env
+
+
### What did you do? + + ### What did you expect to see? + ### What did you see instead? diff --git a/AUTHORS b/AUTHORS index 8f0a20a0d7601..8b8105b1ade37 100644 --- a/AUTHORS +++ b/AUTHORS @@ -418,7 +418,7 @@ Eivind Uggedal Elbert Fliek Eldar Rakhimberdin Elena Grahovac -Elias Naur +Elias Naur Elliot Morrison-Reed Emerson Lin Emil Hessman @@ -1304,6 +1304,7 @@ Sven Almgren Sylvain Zimmer Syohei YOSHIDA Szabolcs Nagy +Taavi Kivisik Tad Fisher Tad Glines Taj Khattra @@ -1404,6 +1405,7 @@ Vladimir Mihailenco Vladimir Nikishenko Vladimir Stefanovic Vladimir Varankin +VMware, Inc. Volker Dobler W. Trevor King Wade Simmons diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 333dff7aa3a66..b201301a850eb 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -25,6 +25,7 @@ # Please keep the list sorted. Aamir Khan +Aaron Cannon Aaron France Aaron Jacobs Aaron Kemp @@ -32,6 +33,7 @@ Aaron Stein Aaron Torres Aaron Zinman Aarti Parikh +Abdullah Al Maruf Abe Haskins Abhinav Gupta Adam Azarchs @@ -66,6 +68,7 @@ Aishraj Dahal Akhil Indurti Akihiro Suda Akshat Kumar +Alan Braithwaite Alan Donovan Alan Shreve Albert Nigmatzianov @@ -74,8 +77,10 @@ Albert Yu Alberto Bertogli Alberto Donizetti Alberto García Hierro +Aleksa Sarai Aleksandar Dezelin Aleksandr Lukinykh +Aleksandr Razumov Alekseev Artem Alessandro Arzilli Alessandro Baffa @@ -85,6 +90,7 @@ Alex Bramley Alex Browne Alex Carol Alex Jin +Alex Kohler Alex Myasoedov Alex Plugaru Alex Schroeder @@ -106,15 +112,19 @@ Alexander Polcyn Alexander Reece Alexander Surma Alexander Zhavnerchik +Alexander Zillion Alexander Zolotov Alexandre Cesaro Alexandre Fiori +Alexandre Maari Alexandre Normand Alexandre Parentea Alexandre Viau Alexandru Moșoi Alexei Sholik +Alexey Alexandrov Alexey Borzenkov +Alexey Naidonov Alexey Neganov Alexey Palazhchenko Alexis Hildebrandt @@ -133,14 +143,17 @@ Anand K. 
Mistry Anders Pearson André Carvalho Andre Nathan +Andrea Nodari Andrea Spadaccini Andreas Auernhammer Andreas Jellinghaus Andreas Litt +Andrei Gherzan Andrei Korzhevskii Andrei Tudor Călin Andrei Vieru Andres Erbsen +Andres Lowrie Andrew Austin Andrew Balholm Andrew Benton @@ -155,9 +168,11 @@ Andrew Jackura Andrew Lutomirski Andrew Pilloud Andrew Pogrebnoy +Andrew Poydence Andrew Pritchard Andrew Radev Andrew Skiba +Andrew Stribblehill Andrew Szeto Andrew Werner Andrew Wilkins @@ -174,21 +189,26 @@ Andy Finkenstadt Andy Lindeman Andy Maloney Andy Walker +Andzej Maciusovic Anfernee Yongkun Gui Angelo Bulfone Anh Hai Trinh Anit Gandhi +Ankit Goyal Anmol Sethi Anschel Schaffer-Cohen Anthony Alves Anthony Canino Anthony Eufemio +Anthony Fok Anthony Martin Anthony Sottile Anthony Starks Anthony Voutas Anthony Woods +Antoine GIRARD Antoine Martin +Anton Gyllenberg Antonin Amand Antonio Antelo Antonio Bibiano @@ -204,6 +224,7 @@ Arnaud Ysmal Arne Hormann Arnout Engelen Aron Nopanen +Arthur Fabre Arthur Khashaev Artyom Pervukhin Arvindh Rajesh Tamilmani @@ -217,6 +238,7 @@ Augusto Roman Aulus Egnatius Varialus Aurélien Rainone Austin Clements +Avi Flax awaw fumin Awn Umar Axel Wagner @@ -224,6 +246,7 @@ Ayanamist Yang Aymerick Jéhanne Azat Kaumov Baiju Muthukadan +Balaram Makam Balazs Lecz Baokun Lee Bartosz Grzybowski @@ -233,6 +256,7 @@ Ben Burkert Ben Eitzen Ben Fried Ben Haines +Ben Hoyt Ben Laurie Ben Lubar Ben Lynn @@ -263,6 +287,7 @@ Blake Mizerany Blixt Bob Briski Bob Potter +Bobby DeSimone Bobby Powers Boris Nagaev Borja Clemente @@ -313,6 +338,7 @@ Carlo Alberto Ferraris Carlos Castillo Carlos Cirello Carlos Eduardo Seo +Carlos Souza Carolyn Van Slyck Cary Hull Case Nelson @@ -324,7 +350,9 @@ Cedric Staub Cezar Sá Espinola Chad Rosier ChaiShushan +Channing Kimble-Brown Charles Fenwick Elliott +Charles Kenney Charles L. Dorian Charles Lee Charles Weill @@ -355,6 +383,7 @@ Christian Alexander Christian Couder Christian Himpel Christian Pellegrin +Christian R. 
Petrin Christine Hansmann Christoffer Buchholz Christoph Blecker @@ -371,12 +400,14 @@ Christopher Wedgwood Christos Zoulas Christy Perez CL Sung +Clément Chigot Clement Skau Cody Oss Colby Ranger Colin Cross Colin Edwards Colin Kennedy +Colin Nelson Colin Rice Conrad Irwin Conrad Meyer @@ -401,10 +432,13 @@ Dan Caddigan Dan Callahan Dan Harrington Dan Jacques +Dan Johnson Dan Peterson Dan Pupius Dan Sinclair +Daniël de Kok Daniel Fleischman +Daniel Ingram Daniel Johansson Daniel Kerwin Daniel Krech @@ -421,6 +455,7 @@ Daniel Upton Daniela Petruzalek Danny Rosseau Daria Kolistratova +Darien Raymond Darren Elwood Darshan Parajuli Datong Sun @@ -445,12 +480,15 @@ David du Colombier <0intro@gmail.com> David Forsythe David G. Andersen David Glasser +David Heuschmann David Howden David Hubbard David Jakob Fritz +David Jones David Lazar David Leon Gil David McLeish +David Ndungu David NewHamlet David Presotto David R. Jenni @@ -458,7 +496,9 @@ David Sansome David Stainton David Symonds David Thomas +David Timm David Titarenco +David Tolpin David Url David Volquartz Lebech David Wimmer @@ -471,6 +511,7 @@ Denis Brandolini Denis Nagorny Dennis Kuhnert Denys Honsiorovskyi +Denys Smirnov Derek Buitenhuis Derek Che Derek McGowan @@ -485,9 +526,11 @@ Dhiru Kholia Dhruvdutt Jadhav Di Xiao Didier Spezia +Diego Siqueira Dieter Plaetinck Dimitri Sokolyuk Dimitri Tcaciuc +Dina Garmash Diogo Pinela Dirk Gadsden Diwaker Gupta @@ -499,16 +542,21 @@ Dmitriy Shelenin Dmitriy Vyukov Dmitry Chestnykh Dmitry Doroginin +Dmitry Neverov Dmitry Savintsev Dmitry Yakunin +Domen Ipavec Dominic Green Dominik Honnef Dominik Vogt +Don Byington Donald Huang Dong-hee Na Donovan Hide Doug Anderson Doug Fawley +Douglas Danger Manley +Drew Flower Drew Hintz Duncan Holm Dustin Carlino @@ -520,6 +568,7 @@ Dvir Volk Dylan Waits Edan Bedrik <3d4nb3@gmail.com> Eden Li +Eduard Urbach Eduardo Ramalho Edward Muller Egon Elbre @@ -529,7 +578,7 @@ Eivind Uggedal Elbert Fliek Eldar Rakhimberdin Elena Grahovac 
-Elias Naur +Elias Naur Elliot Morrison-Reed Emerson Lin Emil Hessman @@ -547,6 +596,7 @@ Eric Koleda Eric Lagergren Eric Milliken Eric Pauley +Eric Ponce Eric Rescorla Eric Roshan-Eisner Eric Rykwalder @@ -555,6 +605,7 @@ Erik Dubbelboer Erik St. Martin Erik Staab Erik Westrup +Erin Masatsugu Ernest Chiang Erwin Oegema Esko Luontola @@ -566,6 +617,7 @@ Evan Broder Evan Brown Evan Hicks Evan Jones +Evan Klitzke Evan Kroske Evan Martin Evan Phoenix @@ -584,6 +636,7 @@ Fannie Zhang Fatih Arslan Fazal Majid Fazlul Shahriar +Federico Bond Federico Simoncelli Fedor Indutny Felipe Oliveira @@ -591,8 +644,10 @@ Felix Geisendörfer Felix Kollmann Filip Gruszczyński Filip Haglund +Filip Stanis Filippo Valsorda Firmansyah Adiputra +Florian Forster Florian Uekermann Florian Weimer Florin Patan @@ -610,9 +665,11 @@ Frederik Ring Fredrik Enestad Fredrik Forsmo Fredrik Wallgren +Frew Schmidt Frithjof Schulze Frits van Bommel Fumitoshi Ukai +G. Hussain Chinoy Gaal Yahas Gabríel Arthúr Pétursson Gabriel Aszalos @@ -627,6 +684,7 @@ Gaurish Sharma Gautham Thambidorai Gauthier Jolly Geert-Johan Riemer +Genevieve Luyt Gengliang Wang Geoff Berry Geoffroy Lorieux @@ -634,24 +692,41 @@ Geon Kim Georg Reinke George Gkirtsou George Shammas +Gerasimos (Makis) Maropoulos Gerasimos Dimitriadis +Gergely Brautigam Getulio Sánchez +Gianguido Sora` Gideon Jan-Wessel Redelinghuys Giles Lean Giovanni Bajo GitHub User @ajnirp (1688456) +GitHub User @andrius4669 (4699695) GitHub User @as (8127015) GitHub User @bgadrian (830001) GitHub User @bontequero (2674999) GitHub User @cch123 (384546) GitHub User @chanxuehong (3416908) +GitHub User @dupoxy (1143957) GitHub User @erifan (31343225) +GitHub User @esell (9735165) +GitHub User @itchyny (375258) +GitHub User @kc1212 (1093806) GitHub User @Kropekk (13366453) +GitHub User @LotusFenn (13775899) GitHub User @madiganz (18340029) +GitHub User @mkishere (224617) <224617+mkishere@users.noreply.github.com> +GitHub User @OlgaVlPetrova (44112727) GitHub User 
@pityonline (438222) GitHub User @pytimer (17105586) +GitHub User @saitarunreddy (21041941) GitHub User @shogo-ma (9860598) +GitHub User @tell-k (26263) +GitHub User @uhei (2116845) +GitHub User @uropek (39370426) Giulio Iotti +Giulio Micheloni +Giuseppe Valente Gleb Stepanov Glenn Brown Glenn Lewis @@ -660,14 +735,17 @@ Graham King Graham Miller Grant Griffiths Greg Poirier +Greg Steuck Greg Ward Grégoire Delattre Gregory Man +Guilherme Caruso Guilherme Garnier Guilherme Goncalves Guilherme Rezende Guillaume J. Charmes Guobiao Mei +Guoliang Wang Gustav Paul Gustav Westling Gustavo Franco @@ -702,6 +780,7 @@ Henry Clifford Herbert Georg Fischer Herbie Ong Heschi Kreinick +Hidetatsu Yaginuma Hilko Bengen Hiroaki Nakamura Hironao OTSUBO @@ -715,11 +794,16 @@ Hsin Tsao Hsin-Ho Yeh Hu Keping Hugues Bruant +Huy Le Hyang-Ah Hana Kim Ian Cottrell +Ian Davis Ian Gudger +Ian Haken Ian Kent Ian Lance Taylor +Ian Leue +Ian Zapolsky Ibrahim AshShohail Icarus Sparry Iccha Sethi @@ -727,6 +811,7 @@ Idora Shinatose Igor Bernstein Igor Dolzhikov Igor Vashyst +Igor Zhilianin Ilya Tocar INADA Naoki Inanc Gumus @@ -743,9 +828,12 @@ Issac Trotts Ivan Babrou Ivan Bertona Ivan Krasin +Ivan Kutuzov Ivan Markin Ivan Moscoso +Ivan Sharavuev Ivan Ukhov +Ivy Evans Jaana Burcu Dogan Jack Britton Jack Lindamood @@ -753,6 +841,7 @@ Jacob Baskin Jacob H. 
Haven Jacob Hoffman-Andrews Jae Kwon +Jake B Jakob Borg Jakob Weisblat Jakub Čajka @@ -762,6 +851,7 @@ James Bardin James Chacon James Clarke James Cowgill +James Craig Burley James David Chalfant James Fysh James Gray @@ -804,12 +894,15 @@ Jason Buberel Jason Chu Jason Del Ponte Jason Hall +Jason Keene +Jason LeBrun Jason Smale Jason Travis Jason Wangsadinata Javier Kohen Javier Segura Jay Conrod +Jay Taylor Jay Weisskopf Jean de Klerk Jean-André Santoni @@ -831,6 +924,8 @@ Jeffrey H Jelte Fennema Jens Frederich Jeremiah Harmsen +Jeremy Banks <_@jeremy.ca> +Jeremy Canady Jeremy Jackins Jeremy Schlatter Jeroen Bobbeldijk @@ -854,6 +949,7 @@ Jiong Du Jirka Daněk Jiulong Wang Joakim Sernbrant +Joe Bowbeer Joe Cortopassi Joe Farrell Joe Harrison @@ -877,6 +973,7 @@ John C Barstow John DeNero John Dethridge John Gibb +John Gilik John Graham-Cumming John Howard Palevich John Jeffery @@ -910,6 +1007,7 @@ Joonas Kuorilehto Joop Kiefte Jordan Krage Jordan Lewis +Jordan Rhee Jos Visser Jose Luis Vázquez González Joseph Bonneau @@ -930,12 +1028,15 @@ Jostein Stuhaug JP Sugarbroad JT Olds Juan Carlos +Juan Pablo Civile Jude Pereira Jukka-Pekka Kekkonen Julia Hansbrough Julian Kornberger Julian Pastarmov Julian Phillips +Julie Qiu +Julien Kauffmann Julien Salleyron Julien Schmidt Julio Montes @@ -961,10 +1062,12 @@ Karoly Negyesi Karsten Köhler Kashav Madan Kate Manson +Katie Hockman Kato Kazuyoshi Katrina Owen Kaviraj Kanagaraj Kay Zhu +Kazuhiro Sera KB Sriram Keegan Carruthers-Smith Kei Son @@ -989,6 +1092,7 @@ Kevin Klues Kevin Malachowski Kevin Ruffin Kevin Vu +Kevin Zita Kieran Colford Kim Shrier Kim Yongbin @@ -1000,6 +1104,7 @@ Klaus Post Kodie Goodwin Koichi Shiraishi Koki Ide +Komu Wairagu Konstantin Konstantin Shaposhnikov Kris Kwiatkowski @@ -1015,13 +1120,16 @@ Kyle Jones Kyle Lemons Kyle Shannon Kyle Spiers +Kyle Wood Kyohei Kadota Kyrylo Silin L Campbell Lai Jiangshan +Lajos Papp Lakshay Garg Lann Martin Lanre Adelowo +Larry Clapp Larry Hosken Lars Jeppesen Lars 
Lehtonen @@ -1066,9 +1174,11 @@ Luuk van Dijk Lyle Franklin Lynn Boger Ma Peiqi +Maarten Bezemer Maciej Dębski Magnus Hiie Maicon Costa +Mak Kolybabi Maksym Trykur Mal Curtis Manfred Touron @@ -1086,6 +1196,7 @@ Marcel van Lohuizen Marcelo Cantos Marcelo E. Magallon Marco Hennings +Marcus Willock Marga Manterola Marin Bašić Mario Arranz @@ -1102,12 +1213,14 @@ Mark Theunissen Mark Wolfe Mark Zavislak Marko Juhani Silokunnas +Marko Kevac Marko Mikulicic Marko Mudrinic Marko Tiikkaja Markus Duft Markus Sonderegger Markus Zimmermann +Marten Seemann Martin Bertschler Martin Garton Martin Habbecke @@ -1122,6 +1235,7 @@ Martin Olsen Martin Olsson Martin Probst Martin Sucha +Martin Tournoij Martins Sipenko Martynas Budriūnas Marvin Stenger @@ -1165,31 +1279,37 @@ Matthew Dempsky Matthew Denton Matthew Holt Matthew Horsnell +Matthew Waters Matthieu Hauglustaine Matthieu Olivier Matthijs Kooijman Max Riveiro Max Schmitt +Max Ushakov Maxim Khitrov Maxim Pimenov Maxim Ushakov Maxime de Roucy Máximo Cuadros Ortiz Maxwell Krohn +Maya Rashish Mayank Kumar Meir Fischer Meng Zhuo Mhd Sulhan Micah Stetson +Michael Anthony Knyszek Michael Brandenburg Michael Chaten Michael Darakananda Michael Dorner Michael Edwards Michael Elkins +Michael Ellis Michael Fraenkel Michael Fromberger Michael Gehring +Michael Henderson Michael Hendricks Michael Hoisie Michael Hudson-Doyle @@ -1214,18 +1334,21 @@ Michael Stapelberg Michael Steinert Michael T. 
Jones Michael Teichgräber +Michael Traver Michael Vetter Michal Bohuslávek Michal Cierniak Michał Derkacz Michal Franc Michal Pristas +Michal Rostecki Michalis Kargakis Michel Lespinasse Miek Gieben Miguel Mendez Miguel Molina Mihai Borobocea +Mihai Todor Mihail Minaev Mikael Tillenius Mike Andrews @@ -1244,6 +1367,7 @@ Mikhail Panchenko Miki Tebeka Mikio Hara Mikkel Krautz +Mikołaj Baranowski Milan Knezevic Milutin Jovanović MinJae Kwon @@ -1286,6 +1410,7 @@ Niall Sheridan Nic Day Nicholas Katsaros Nicholas Maniscalco +Nicholas Ng Nicholas Presta Nicholas Sullivan Nicholas Waples @@ -1326,12 +1451,15 @@ Oleg Vakheta Oleku Konko Oling Cat Oliver Hookins +Oliver Stenbom Oliver Tonnhofer Olivier Antoine Olivier Duperray Olivier Poitrey Olivier Saingre Omar Jarjur +Oryan Moshe +Osamu TONOMORI Özgür Kesim Pablo Lalloni Pablo Rozas Larraondo @@ -1341,6 +1469,7 @@ Pallat Anchaleechamaikorn Paolo Giarrusso Paolo Martini Parker Moore +Parminder Singh Pascal S. de Kloe Pat Moroney Patrick Crosby @@ -1360,6 +1489,7 @@ Paul Hammond Paul Hankin Paul Jolly Paul Lalonde +Paul M Furley Paul Marks Paul Meyer Paul Nasrat @@ -1386,8 +1516,10 @@ Peter Armitage Peter Bourgon Peter Collingbourne Peter Conerly +Peter Dotchev Peter Froehlich Peter Gonda +Peter Hoyes Peter Kleiweg Peter McKenzie Peter Moody @@ -1421,11 +1553,13 @@ Piers Pieter Droogendijk Pietro Gagliardi Piyush Mishra +Plekhanov Maxim Pontus Leitzler Prasanna Swaminathan Prashant Varanasi Pravendra Singh Preetam Jinka +Qais Patankar Qiuxuan Zhu Quan Tran Quan Yong Zhai @@ -1433,10 +1567,12 @@ Quentin Perez Quentin Renard Quentin Smith Quinn Slack +Quinten Yearsley Quoc-Viet Nguyen Radek Sohlich Radu Berinde Rafal Jeczalik +Raghavendra Nagaraj Rahul Chaudhry Raif S. 
Naffah Rajat Goel @@ -1470,6 +1606,7 @@ Richard Musiol Rick Arnold Rick Hudson Rick Sayre +Rijnard van Tonder Riku Voipio Risto Jaakko Saarelma Rob Earhart @@ -1488,22 +1625,28 @@ Robert Snedegar Robert Stepanek Robert-André Mauchin Roberto Clapis +Roberto Selbach Robin Eklind Rodolfo Carvalho +Rodolfo Rodriguez Rodrigo Moraes de Oliveira Rodrigo Rafael Monti Kochenburger Roger Pau Monné Roger Peppe +Roland Illig Roland Shoemaker Roman Budnikov +Roman Shchekin Ron Hashimoto Ron Minnich Ross Chater Ross Light +Ross Smith II Rowan Marshall Rowan Worth Rudi Kramer Rui Ueyama +Ruslan Nigmatullin Russ Cox Russell Haering Ryan Bagwell @@ -1511,6 +1654,7 @@ Ryan Barrett Ryan Boehning Ryan Brown Ryan Canty +Ryan Dahl Ryan Hitchman Ryan Lower Ryan Roden-Corrent @@ -1534,9 +1678,11 @@ Sam Whited Sameer Ajmani Sami Commerot Sami Pönkänen +Samuel Kelemen Samuel Tan Samuele Pedroni Sanjay Menakuru +Santhosh Kumar Tekuri Sarah Adams Sascha Brawer Sasha Lionheart @@ -1550,13 +1696,17 @@ Scott Mansfield Scott Schwartz Scott Van Woudenberg Sean Burford +Sean Chen Sean Chittenden Sean Christopherson Sean Dolphin Sean Harger Sean Rees +Sebastiaan van Stijn +Sebastian Schmidt Sebastien Binet Sébastien Paolacci +Sebastien Williams-Wynn Seiji Takahashi Sergei Skorobogatov Sergey 'SnakE' Gromov @@ -1568,6 +1718,7 @@ Sergey Mudrik Sergey Semin Sergio Luis O. B. 
Correia Sergiusz Bazanski +Serhii Aheienko Seth Hoenig Seth Vargo Shahar Kohanim @@ -1581,9 +1732,11 @@ Shawn Walker-Salas Shenghou Ma Shengyu Zhang Shi Han Ng +Shijie Hao Shinji Tanaka Shintaro Kaneko Shivakumar GN +Shivansh Rai Shun Fan Silvan Jegen Simon Jefford @@ -1603,6 +1756,7 @@ Stan Schwertly Stanislav Afanasev Steeve Morin Stefan Nilsson +Stepan Shabalin Stephan Renatus Stéphane Travostino Stephen Lewis @@ -1613,6 +1767,7 @@ Stephen Searles Stephen Weinberg Steve Francia Steve Gilbert +Steve LoFurno Steve McCoy Steve Newman Steve Phillips @@ -1621,7 +1776,10 @@ Steven Buss Steven Elliot Harris Steven Erenst Steven Hartland +Steven Littiebrant Steven Wilkin +Stuart Jansen +Sue Spence Sugu Sougoumarane Suharsh Sivakumar Sukrit Handa @@ -1634,8 +1792,11 @@ Sven Blumenstein Sylvain Zimmer Syohei YOSHIDA Szabolcs Nagy +Taavi Kivisik Tad Fisher Tad Glines +Tadas Valiukas +Taesu Pyo Taj Khattra Takashi Matsuo Takayoshi Nishida @@ -1644,11 +1805,14 @@ Takuto Ikuta Takuya Ueda Tal Shprecher Tamir Duberstein +Tao Shen Tao Wang Tarmigan Casebolt Taro Aoki Taru Karttunen Tatsuhiro Tsujikawa +Tatsuya Kaneko +Taufiq Rahman Teague Cole Ted Kornish Tejasvi Nareddy @@ -1664,6 +1828,7 @@ Thomas Alan Copeland Thomas Bonfort Thomas Bouldin Thomas Bruyelle +Thomas Bushnell, BSG Thomas de Zeeuw Thomas Desrosiers Thomas Habets @@ -1682,6 +1847,7 @@ Tim Henderson Tim Hockin Tim Swast Tim Wright +Tim Xu Timo Savola Timo Truyts Timothy Studd @@ -1705,6 +1871,7 @@ Tom Wilkie Tommy Schaefer Tomoya Ishizaki Tonis Tiigi +Tony Reix Tony Walker Tor Andersson Tormod Erevik Lea @@ -1732,13 +1899,17 @@ Tzu-Jung Lee Ugorji Nwoke Ulf Holm Nielsen Ulrich Kunitz +Umang Parmar Uriel Mangado +Urvil Patel Uttam C Pawar Vadim Grek Vadim Vygonets Val Polouchkine Vega Garcia Luis Alfonso +Venil Noronha Veselkov Konstantin +Viacheslav Poturaev Victor Chudnovsky Victor Vrantchan Vignesh Ramachandra @@ -1749,8 +1920,10 @@ Vincent Vanackere Vinu Rajashekhar Vish Subramanian Vishvananda Ishaya 
+Visweswara R Vitor De Mario Vlad Krasnov +Vladimir Kovpak Vladimir Kuzmin Vladimir Mihailenco Vladimir Nikishenko @@ -1762,17 +1935,22 @@ W. Trevor King Wade Simmons Walter Poupore Wander Lairson Costa +Warren Fernandes Wayne Ashley Berry Wedson Almeida Filho +Weerasak Chongnguluam Wèi Cōngruì Wei Fu Wei Guangjing Wei Xiao Weichao Tang Wembley G. Leach, Jr +Wil Selwood Wilfried Teiken +Will Beason Will Chan Will Faught +Will Morrow Will Norris Will Storey Willem van der Schyff @@ -1805,6 +1983,7 @@ Yestin Sun Yesudeep Mangalapilly Yissakhar Z. Beck Yo-An Lin +Yohei Takeda Yongjian Xu Yorman Arias Yoshiyuki Kanno @@ -1813,6 +1992,7 @@ Yosuke Akatsuka Yu Heng Zhang Yu Xuan Zhang Yuji Yaginuma +Yuki OKUSHI Yuki Yugui Sonoda Yukihiro Nishinaka <6elpinal@gmail.com> Yury Smolsky @@ -1823,12 +2003,14 @@ Yves Junqueira Zac Bergquist Zach Bintliff Zach Gershman +Zachary Amsden Zachary Gershman Zak Zakatell Kanda Zellyn Hunter Zev Goldstein Zheng Dayu +Zheng Xu Zhengyu He Zhongpeng Lin Zhongtao Chen diff --git a/api/except.txt b/api/except.txt index 46dbb458923c1..637be18135917 100644 --- a/api/except.txt +++ b/api/except.txt @@ -3,8 +3,11 @@ pkg math/big, const MaxBase = 36 pkg math/big, type Word uintptr pkg net, func ListenUnixgram(string, *UnixAddr) (*UDPConn, error) pkg os, const ModeType = 2399141888 +pkg os, const ModeType = 2399666176 pkg os (linux-arm), const O_SYNC = 4096 pkg os (linux-arm-cgo), const O_SYNC = 4096 +pkg os (linux-arm), const O_SYNC = 1052672 +pkg os (linux-arm-cgo), const O_SYNC = 1052672 pkg syscall (darwin-386), const ImplementsGetwd = false pkg syscall (darwin-386), func Fchflags(string, int) error pkg syscall (darwin-386-cgo), const ImplementsGetwd = false @@ -370,6 +373,7 @@ pkg syscall (windows-386), type CertContext struct, CertInfo uintptr pkg syscall (windows-386), type CertRevocationInfo struct, CrlInfo uintptr pkg syscall (windows-386), type CertRevocationInfo struct, OidSpecificInfo uintptr pkg syscall (windows-386), type 
CertSimpleChain struct, TrustListInfo uintptr +pkg syscall (windows-386), type RawSockaddrAny struct, Pad [96]int8 pkg syscall (windows-amd64), const TOKEN_ALL_ACCESS = 983295 pkg syscall (windows-amd64), type AddrinfoW struct, Addr uintptr pkg syscall (windows-amd64), type CertChainPolicyPara struct, ExtraPolicyPara uintptr @@ -378,3 +382,78 @@ pkg syscall (windows-amd64), type CertContext struct, CertInfo uintptr pkg syscall (windows-amd64), type CertRevocationInfo struct, CrlInfo uintptr pkg syscall (windows-amd64), type CertRevocationInfo struct, OidSpecificInfo uintptr pkg syscall (windows-amd64), type CertSimpleChain struct, TrustListInfo uintptr +pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [96]int8 +pkg syscall (freebsd-386), func Mknod(string, uint32, int) error +pkg syscall (freebsd-386), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-386), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-386), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-386), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-386), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-386), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-386), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-386), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-386), type Stat_t struct, Pad_cgo_0 [8]uint8 +pkg syscall (freebsd-386), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino 
uint32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Pad_cgo_0 [8]uint8 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-amd64), func Mknod(string, uint32, int) error +pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-amd64), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-amd64), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, 
Mntonname [88]int8 +pkg syscall (freebsd-arm), func Mknod(string, uint32, int) error +pkg syscall (freebsd-arm), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-arm), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-arm), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-arm), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [88]int8 +pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, int) error +pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint32 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint8 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Lspare int32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Nlink uint16 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint32 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [88]int8 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [88]int8 diff --git a/api/go1.11.txt b/api/go1.11.txt index 4c0bcc74795d0..863e1f162551f 100644 --- a/api/go1.11.txt +++ b/api/go1.11.txt @@ -1,7 +1,7 @@ pkg crypto/cipher, func NewGCMWithTagSize(Block, int) (AEAD, error) pkg crypto/rsa, method (*PrivateKey) Size() int pkg crypto/rsa, method (*PublicKey) Size() int -pkg crypto/tls, type ConnectionState struct, ExportKeyingMaterial func(string, []uint8, int) ([]uint8, bool) +pkg 
crypto/tls, method (*ConnectionState) ExportKeyingMaterial(string, []uint8, int) ([]uint8, error) pkg database/sql, method (IsolationLevel) String() string pkg database/sql, type DBStats struct, Idle int pkg database/sql, type DBStats struct, InUse int diff --git a/api/go1.12.txt b/api/go1.12.txt new file mode 100644 index 0000000000000..7d525cb86eef0 --- /dev/null +++ b/api/go1.12.txt @@ -0,0 +1,228 @@ +pkg bytes, func ReplaceAll([]uint8, []uint8, []uint8) []uint8 +pkg crypto/tls, const TLS_AES_128_GCM_SHA256 = 4865 +pkg crypto/tls, const TLS_AES_128_GCM_SHA256 uint16 +pkg crypto/tls, const TLS_AES_256_GCM_SHA384 = 4866 +pkg crypto/tls, const TLS_AES_256_GCM_SHA384 uint16 +pkg crypto/tls, const TLS_CHACHA20_POLY1305_SHA256 = 4867 +pkg crypto/tls, const TLS_CHACHA20_POLY1305_SHA256 uint16 +pkg crypto/tls, const VersionTLS13 = 772 +pkg crypto/tls, const VersionTLS13 ideal-int +pkg crypto/tls, type RecordHeaderError struct, Conn net.Conn +pkg debug/elf, const R_RISCV_32_PCREL = 57 +pkg debug/elf, const R_RISCV_32_PCREL R_RISCV +pkg debug/pe, const IMAGE_FILE_MACHINE_ARMNT = 452 +pkg debug/pe, const IMAGE_FILE_MACHINE_ARMNT ideal-int +pkg expvar, method (*Map) Delete(string) +pkg go/doc, const PreserveAST = 4 +pkg go/doc, const PreserveAST Mode +pkg go/importer, func ForCompiler(*token.FileSet, string, Lookup) types.Importer +pkg go/token, method (*File) LineStart(int) Pos +pkg io, type StringWriter interface { WriteString } +pkg io, type StringWriter interface, WriteString(string) (int, error) +pkg log, method (*Logger) Writer() io.Writer +pkg math/bits, func Add(uint, uint, uint) (uint, uint) +pkg math/bits, func Add32(uint32, uint32, uint32) (uint32, uint32) +pkg math/bits, func Add64(uint64, uint64, uint64) (uint64, uint64) +pkg math/bits, func Div(uint, uint, uint) (uint, uint) +pkg math/bits, func Div32(uint32, uint32, uint32) (uint32, uint32) +pkg math/bits, func Div64(uint64, uint64, uint64) (uint64, uint64) +pkg math/bits, func Mul(uint, uint) (uint, uint) 
+pkg math/bits, func Mul32(uint32, uint32) (uint32, uint32) +pkg math/bits, func Mul64(uint64, uint64) (uint64, uint64) +pkg math/bits, func Sub(uint, uint, uint) (uint, uint) +pkg math/bits, func Sub32(uint32, uint32, uint32) (uint32, uint32) +pkg math/bits, func Sub64(uint64, uint64, uint64) (uint64, uint64) +pkg net/http, const StatusTooEarly = 425 +pkg net/http, const StatusTooEarly ideal-int +pkg net/http, method (*Client) CloseIdleConnections() +pkg os, const ModeType = 2401763328 +pkg os, func UserHomeDir() (string, error) +pkg os, method (*File) SyscallConn() (syscall.RawConn, error) +pkg os, method (*ProcessState) ExitCode() int +pkg os/exec, method (ExitError) ExitCode() int +pkg reflect, method (*MapIter) Key() Value +pkg reflect, method (*MapIter) Next() bool +pkg reflect, method (*MapIter) Value() Value +pkg reflect, method (Value) MapRange() *MapIter +pkg reflect, type MapIter struct +pkg runtime/debug, func ReadBuildInfo() (*BuildInfo, bool) +pkg runtime/debug, type BuildInfo struct +pkg runtime/debug, type BuildInfo struct, Deps []*Module +pkg runtime/debug, type BuildInfo struct, Main Module +pkg runtime/debug, type BuildInfo struct, Path string +pkg runtime/debug, type Module struct +pkg runtime/debug, type Module struct, Path string +pkg runtime/debug, type Module struct, Replace *Module +pkg runtime/debug, type Module struct, Sum string +pkg runtime/debug, type Module struct, Version string +pkg strings, func ReplaceAll(string, string, string) string +pkg strings, method (*Builder) Cap() int +pkg syscall (freebsd-386), const S_IRWXG = 56 +pkg syscall (freebsd-386), const S_IRWXG ideal-int +pkg syscall (freebsd-386), const S_IRWXO = 7 +pkg syscall (freebsd-386), const S_IRWXO ideal-int +pkg syscall (freebsd-386), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-386), func Mknod(string, uint32, uint64) error +pkg syscall (freebsd-386), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-386), type Dirent struct, Namlen 
uint16 +pkg syscall (freebsd-386), type Dirent struct, Off int64 +pkg syscall (freebsd-386), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-386), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-386), type Stat_t struct, Atim_ext int32 +pkg syscall (freebsd-386), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-386), type Stat_t struct, Btim_ext int32 +pkg syscall (freebsd-386), type Stat_t struct, Ctim_ext int32 +pkg syscall (freebsd-386), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-386), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-386), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-386), type Stat_t struct, Mtim_ext int32 +pkg syscall (freebsd-386), type Stat_t struct, Nlink uint64 +pkg syscall (freebsd-386), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-386), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-386), type Stat_t struct, Rdev uint64 +pkg syscall (freebsd-386), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-386), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-386), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (freebsd-386-cgo), const S_IRWXG = 56 +pkg syscall (freebsd-386-cgo), const S_IRWXG ideal-int +pkg syscall (freebsd-386-cgo), const S_IRWXO = 7 +pkg syscall (freebsd-386-cgo), const S_IRWXO ideal-int +pkg syscall (freebsd-386-cgo), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-386-cgo), func Mknod(string, uint32, uint64) error +pkg syscall (freebsd-386-cgo), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-386-cgo), type Dirent struct, Namlen uint16 +pkg syscall (freebsd-386-cgo), type Dirent struct, Off int64 +pkg syscall (freebsd-386-cgo), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-386-cgo), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Atim_ext int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-386-cgo), type Stat_t 
struct, Btim_ext int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Ctim_ext int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Mtim_ext int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Nlink uint64 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Rdev uint64 +pkg syscall (freebsd-386-cgo), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-386-cgo), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (freebsd-amd64), const S_IRWXG = 56 +pkg syscall (freebsd-amd64), const S_IRWXG ideal-int +pkg syscall (freebsd-amd64), const S_IRWXO = 7 +pkg syscall (freebsd-amd64), const S_IRWXO ideal-int +pkg syscall (freebsd-amd64), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-amd64), func Mknod(string, uint32, uint64) error +pkg syscall (freebsd-amd64), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-amd64), type Dirent struct, Namlen uint16 +pkg syscall (freebsd-amd64), type Dirent struct, Off int64 +pkg syscall (freebsd-amd64), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-amd64), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-amd64), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-amd64), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-amd64), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-amd64), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-amd64), type Stat_t struct, Nlink uint64 +pkg syscall (freebsd-amd64), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-amd64), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-amd64), type Stat_t struct, Rdev uint64 +pkg 
syscall (freebsd-amd64), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-amd64), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (freebsd-amd64-cgo), const S_IRWXG = 56 +pkg syscall (freebsd-amd64-cgo), const S_IRWXG ideal-int +pkg syscall (freebsd-amd64-cgo), const S_IRWXO = 7 +pkg syscall (freebsd-amd64-cgo), const S_IRWXO ideal-int +pkg syscall (freebsd-amd64-cgo), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-amd64-cgo), func Mknod(string, uint32, uint64) error +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Namlen uint16 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Off int64 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-amd64-cgo), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Nlink uint64 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Rdev uint64 +pkg syscall (freebsd-amd64-cgo), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-amd64-cgo), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (freebsd-arm), const S_IRWXG = 56 +pkg syscall (freebsd-arm), const S_IRWXG ideal-int +pkg syscall (freebsd-arm), const S_IRWXO = 7 +pkg syscall (freebsd-arm), const S_IRWXO ideal-int +pkg syscall (freebsd-arm), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-arm), func Mknod(string, 
uint32, uint64) error +pkg syscall (freebsd-arm), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-arm), type Dirent struct, Namlen uint16 +pkg syscall (freebsd-arm), type Dirent struct, Off int64 +pkg syscall (freebsd-arm), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-arm), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-arm), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-arm), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-arm), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-arm), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-arm), type Stat_t struct, Nlink uint64 +pkg syscall (freebsd-arm), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-arm), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-arm), type Stat_t struct, Rdev uint64 +pkg syscall (freebsd-arm), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-arm), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (freebsd-arm-cgo), const S_IRWXG = 56 +pkg syscall (freebsd-arm-cgo), const S_IRWXG ideal-int +pkg syscall (freebsd-arm-cgo), const S_IRWXO = 7 +pkg syscall (freebsd-arm-cgo), const S_IRWXO ideal-int +pkg syscall (freebsd-arm-cgo), func Fstatat(int, string, *Stat_t, int) error +pkg syscall (freebsd-arm-cgo), func Mknod(string, uint32, uint64) error +pkg syscall (freebsd-arm-cgo), type Dirent struct, Fileno uint64 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Namlen uint16 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Off int64 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Pad0 uint8 +pkg syscall (freebsd-arm-cgo), type Dirent struct, Pad1 uint16 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Blksize int32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Dev uint64 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Gen uint64 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Ino uint64 +pkg syscall (freebsd-arm-cgo), type 
Stat_t struct, Nlink uint64 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Padding0 int16 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Padding1 int32 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Rdev uint64 +pkg syscall (freebsd-arm-cgo), type Stat_t struct, Spare [10]uint64 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntfromname [1024]int8 +pkg syscall (freebsd-arm-cgo), type Statfs_t struct, Mntonname [1024]int8 +pkg syscall (openbsd-386), const S_IRWXG = 56 +pkg syscall (openbsd-386), const S_IRWXG ideal-int +pkg syscall (openbsd-386), const S_IRWXO = 7 +pkg syscall (openbsd-386), const S_IRWXO ideal-int +pkg syscall (openbsd-386-cgo), const S_IRWXG = 56 +pkg syscall (openbsd-386-cgo), const S_IRWXG ideal-int +pkg syscall (openbsd-386-cgo), const S_IRWXO = 7 +pkg syscall (openbsd-386-cgo), const S_IRWXO ideal-int +pkg syscall (openbsd-amd64), const S_IRWXG = 56 +pkg syscall (openbsd-amd64), const S_IRWXG ideal-int +pkg syscall (openbsd-amd64), const S_IRWXO = 7 +pkg syscall (openbsd-amd64), const S_IRWXO ideal-int +pkg syscall (openbsd-amd64-cgo), const S_IRWXG = 56 +pkg syscall (openbsd-amd64-cgo), const S_IRWXG ideal-int +pkg syscall (openbsd-amd64-cgo), const S_IRWXO = 7 +pkg syscall (openbsd-amd64-cgo), const S_IRWXO ideal-int +pkg syscall (windows-386), const UNIX_PATH_MAX = 108 +pkg syscall (windows-386), const UNIX_PATH_MAX ideal-int +pkg syscall (windows-386), func Syscall18(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno) +pkg syscall (windows-386), type RawSockaddrAny struct, Pad [100]int8 +pkg syscall (windows-386), type RawSockaddrUnix struct, Family uint16 +pkg syscall (windows-386), type RawSockaddrUnix struct, Path [108]int8 +pkg syscall (windows-amd64), const UNIX_PATH_MAX = 108 +pkg syscall (windows-amd64), const UNIX_PATH_MAX ideal-int +pkg syscall 
(windows-amd64), func Syscall18(uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr, uintptr) (uintptr, uintptr, Errno) +pkg syscall (windows-amd64), type RawSockaddrAny struct, Pad [100]int8 +pkg syscall (windows-amd64), type RawSockaddrUnix struct, Family uint16 +pkg syscall (windows-amd64), type RawSockaddrUnix struct, Path [108]int8 +pkg syscall, type RawSockaddrUnix struct diff --git a/doc/asm.html b/doc/asm.html index f2f8fad5766c2..debb1e2fc6a1d 100644 --- a/doc/asm.html +++ b/doc/asm.html @@ -740,6 +740,7 @@

ARM64

R18 is the "platform register", reserved on the Apple platform. +To prevent accidental misuse, the register is named R18_PLATFORM. R27 and R28 are reserved by the compiler and linker. R29 is the frame pointer. R30 is the link register. diff --git a/doc/codewalk/codewalk.js b/doc/codewalk/codewalk.js index abc59373a0a51..4f59a8fc89afd 100644 --- a/doc/codewalk/codewalk.js +++ b/doc/codewalk/codewalk.js @@ -276,7 +276,7 @@ CodewalkViewer.prototype.changeSelectedComment = function(target) { } // Force original file even if user hasn't changed comments since they may - // have nagivated away from it within the iframe without us knowing. + // have navigated away from it within the iframe without us knowing. this.navigateToCode(currentFile); }; diff --git a/doc/contrib.html b/doc/contrib.html index df53d480d3856..b4b19a6af75ed 100644 --- a/doc/contrib.html +++ b/doc/contrib.html @@ -34,6 +34,7 @@

Release History

A summary of the changes between Go releases. Notes for the major releases:

    +
  • Go 1.11 (August 2018)
  • Go 1.10 (February 2018)
  • Go 1.9 (August 2017)
  • Go 1.8 (February 2017)
  • @@ -59,6 +60,15 @@

    Developer Resources

    Source Code

    Check out the Go source code.

    +

    Discussion Mailing List

    +

    +A mailing list for general discussion of Go programming. +

    +

    +Questions about using Go or announcements relevant to other Go users should be sent to +golang-nuts. +

    +

    Developer and Code Review Mailing List

    The golang-dev @@ -66,9 +76,6 @@

    Develop The golang-codereviews mailing list is for actual reviewing of the code changes (CLs).

    -

    For general discussion of Go programming, see golang-nuts.

    -

    Checkins Mailing List

    A mailing list that receives a message summarizing each checkin to the Go repository.

    @@ -116,7 +123,7 @@

    Contributing code

    guidelines for information on design, testing, and our code review process.

    -Check the tracker for +Check the tracker for open issues that interest you. Those labeled help wanted are particularly in need of outside help. diff --git a/doc/contribute.html b/doc/contribute.html index 5dc8a0044d42c..68b2387d35f86 100644 --- a/doc/contribute.html +++ b/doc/contribute.html @@ -393,8 +393,8 @@

    Sending a change via Gerrit

    It is not possible to fully sync Gerrit and GitHub, at least at the moment, so we recommend learning Gerrit. -It's different but powerful and familiarity -with help you understand the flow. +It's different but powerful and familiarity with it will help you understand +the flow.

    Overview

    @@ -405,7 +405,7 @@

    Overview

    • -Step 1: Clone the Go source code from go.googlesource.com +Step 1: Clone the Go source code from go.googlesource.com and make sure it's stable by compiling and testing it once:
       $ git clone https://go.googlesource.com/go
      @@ -469,12 +469,11 @@ 

      Step 1: Clone the Go source code

      checked out from the correct repository. You can check out the Go source repo onto your local file system anywhere you want as long as it's outside your GOPATH. -Either clone from -go.googlesource.com or from GitHub: +Clone from go.googlesource.com (not GitHub):

      -$ git clone https://github.com/golang/go   # or https://go.googlesource.com/go
      +$ git clone https://go.googlesource.com/go
       $ cd go
       
      @@ -697,7 +696,7 @@

      Main content

      Add any relevant information, such as benchmark data if the change affects performance. -The benchcmp +The benchstat tool is conventionally used to format benchmark data for change descriptions.

      @@ -923,13 +922,13 @@

      -// Copyright 2018 The Go Authors. All rights reserved.
      +// Copyright 2019 The Go Authors. All rights reserved.
       // Use of this source code is governed by a BSD-style
       // license that can be found in the LICENSE file.
       

      -(Use the current year if you're reading this in 2019 or beyond.) +(Use the current year if you're reading this in 2020 or beyond.) Files in the repository are copyrighted the year they are added. Do not update the copyright year on files that you change.

      diff --git a/doc/debugging_with_gdb.html b/doc/debugging_with_gdb.html index f3b4e37a2827e..3899ac92d5082 100644 --- a/doc/debugging_with_gdb.html +++ b/doc/debugging_with_gdb.html @@ -179,7 +179,15 @@

      Known Issues

      "fmt.Print" as an unstructured literal with a "." that needs to be quoted. It objects even more strongly to method names of the form pkg.(*MyType).Meth. -
    • All global variables are lumped into package "main".
    • +
• As of Go 1.11, debug information is compressed by default. +Older versions of gdb, such as the one available by default on macOS, +do not understand the compression. +You can generate uncompressed debug information by using go +build -ldflags=-compressdwarf=false. +(For convenience you can put the -ldflags option in +the GOFLAGS +environment variable so that you don't have to specify it each time.) +
    • Tutorial

      @@ -248,7 +256,7 @@

      Inspecting the source

      -List a specific part of the source parametrizing "list" with a +List a specific part of the source parameterizing "list" with a function name (it must be qualified with its package name).

      diff --git a/doc/devel/release.html b/doc/devel/release.html index 584340b005f9b..73f7a0e304710 100644 --- a/doc/devel/release.html +++ b/doc/devel/release.html @@ -23,6 +23,49 @@

      Release Policy

      (for example, Go 1.6.1, Go 1.6.2, and so on).

      +

      go1.11 (released 2018/08/24)

      + +

      +Go 1.11 is a major release of Go. +Read the Go 1.11 Release Notes for more information. +

      + +

      Minor revisions

      + +

      +go1.11.1 (released 2018/10/01) includes fixes to the compiler, documentation, go +command, runtime, and the crypto/x509, encoding/json, +go/types, net, net/http, and +reflect packages. +See the Go +1.11.1 milestone on our issue tracker for details. +

      + +

      +go1.11.2 (released 2018/11/02) includes fixes to the compiler, linker, +documentation, go command, and the database/sql and +go/types packages. +See the Go +1.11.2 milestone on our issue tracker for details. +

      + +

      +go1.11.3 (released 2018/12/12) includes three security fixes to "go get" and +the crypto/x509 package. +See the Go +1.11.3 milestone on our issue tracker for details. +

      + +

      +go1.11.4 (released 2018/12/14) includes fixes to cgo, the compiler, linker, +runtime, documentation, go command, and the net/http and +go/types packages. +It includes a fix to a bug introduced in Go 1.11.3 that broke go +get for import path patterns containing "...". +See the Go +1.11.4 milestone on our issue tracker for details. +

      +

      go1.10 (released 2018/02/16)

      @@ -57,6 +100,37 @@

      Minor revisions

      1.10.3 milestone on our issue tracker for details.

      +

      +go1.10.4 (released 2018/08/24) includes fixes to the go command, linker, and the +net/http, mime/multipart, ld/macho, +bytes, and strings packages. +See the Go +1.10.4 milestone on our issue tracker for details. +

      + +

      +go1.10.5 (released 2018/11/02) includes fixes to the go command, linker, runtime +and the database/sql package. +See the Go +1.10.5 milestone on our issue tracker for details. +

      + +

      +go1.10.6 (released 2018/12/12) includes three security fixes to "go get" and +the crypto/x509 package. +It contains the same fixes as Go 1.11.3 and was released at the same time. +See the Go +1.10.6 milestone on our issue tracker for details. +

      + +

      +go1.10.7 (released 2018/12/14) includes a fix to a bug introduced in Go 1.10.6 +that broke go get for import path patterns containing +"...". +See the +Go 1.10.7 milestone on our issue tracker for details. +

      +

      go1.9 (released 2017/08/24)

      diff --git a/doc/diagnostics.html b/doc/diagnostics.html index 0a7847744b6e5..478611c15c0f6 100644 --- a/doc/diagnostics.html +++ b/doc/diagnostics.html @@ -456,3 +456,15 @@

      GODEBUG

      and the length of the pause.
    • GODEBUG=schedtrace=X prints scheduling events every X milliseconds.
    + +

The GODEBUG environment variable can be used to disable use of +instruction set extensions in the standard library and runtime.

    + +
      +
    • GODEBUG=cpu.all=off disables the use of all optional +instruction set extensions.
    • +
    • GODEBUG=cpu.extension=off disables use of instructions from the +specified instruction set extension.
      +extension is the lower case name for the instruction set extension +such as sse41 or avx.
    • +
    diff --git a/doc/docs.html b/doc/docs.html index 955eb3044e92f..8f79d3a77067b 100644 --- a/doc/docs.html +++ b/doc/docs.html @@ -50,10 +50,10 @@

    online or{{end}} install it locally with:

    -$ go get golang.org/x/tour/gotour
    +$ go get golang.org/x/tour
     

    -This will place the gotour binary in your workspace's bin directory. +This will place the tour binary in your workspace's bin directory.

    How to write Go code

    diff --git a/doc/editors.html b/doc/editors.html index 6f787864c60af..4ff35a58fc1ca 100644 --- a/doc/editors.html +++ b/doc/editors.html @@ -28,7 +28,7 @@

    Options

-Note that these are only a few top solutions; a more comphensive +Note that these are only a few top solutions; a more comprehensive community-maintained list of IDEs and text editor plugins is available at the Wiki. diff --git a/doc/effective_go.html b/doc/effective_go.html index 89c1d08782a13..34131868a4c75 100644 --- a/doc/effective_go.html +++ b/doc/effective_go.html @@ -246,14 +246,16 @@

Commentary

If every doc comment begins with the name of the item it describes, -the output of godoc can usefully be run through grep. +you can use the doc +subcommand of the go tool +and run the output through grep. Imagine you couldn't remember the name "Compile" but were looking for the parsing function for regular expressions, so you ran the command,

-$ godoc regexp | grep -i parse
+$ go doc -all regexp | grep -i parse
 

@@ -264,10 +266,10 @@

Commentary

-$ godoc regexp | grep parse
+$ go doc -all regexp | grep -i parse
     Compile parses a regular expression and returns, if successful, a Regexp
+    MustCompile is like Compile but panics if the expression cannot be parsed.
     parsed. It simplifies safe initialization of global variables holding
-    cannot be parsed. It simplifies safe initialization of global variables
 $
 
@@ -1402,11 +1404,11 @@

Slices

var err error for i := 0; i < 32; i++ { nbytes, e := f.Read(buf[i:i+1]) // Read one byte. + n += nbytes if nbytes == 0 || e != nil { err = e break } - n += nbytes }

@@ -1708,7 +1710,7 @@

Printing

&{7 -2.35 abc def} &{a:7 b:-2.35 c:abc def} &main.T{a:7, b:-2.35, c:"abc\tdef"} -map[string] int{"CST":-21600, "PST":-28800, "EST":-18000, "UTC":0, "MST":-25200} +map[string]int{"CST":-21600, "PST":-28800, "EST":-18000, "UTC":0, "MST":-25200}

(Note the ampersands.) @@ -1731,7 +1733,7 @@

Printing

prints

-map[string] int
+map[string]int
 

If you want to control the default format for a custom type, all that's required is to define @@ -2104,12 +2106,14 @@

Conversions

The String method of Sequence is recreating the -work that Sprint already does for slices. We can share the -effort if we convert the Sequence to a plain +work that Sprint already does for slices. +(It also has complexity O(N²), which is poor.) We can share the +effort (and also speed it up) if we convert the Sequence to a plain []int before calling Sprint.

 func (s Sequence) String() string {
+    s = s.Copy()
     sort.Sort(s)
     return fmt.Sprint([]int(s))
 }
@@ -2136,6 +2140,7 @@ 

Conversions

// Method for printing - sorts the elements before printing func (s Sequence) String() string { + s = s.Copy() sort.IntSlice(s).Sort() return fmt.Sprint([]int(s)) } @@ -2762,7 +2767,7 @@

Embedding

}

-The Job type now has the Log, Logf +The Job type now has the Print, Printf, Println and other methods of *log.Logger. We could have given the Logger a field name, of course, but it's not necessary to do so. And now, once @@ -2770,7 +2775,7 @@

Embedding

log to the Job:

-job.Log("starting now...")
+job.Println("starting now...")
 

The Logger is a regular field of the Job struct, @@ -2797,8 +2802,8 @@

Embedding

which would be useful if we wanted to refine the methods of Logger.

-func (job *Job) Logf(format string, args ...interface{}) {
-    job.Logger.Logf("%q: %s", job.Command, fmt.Sprintf(format, args...))
+func (job *Job) Printf(format string, args ...interface{}) {
+    job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
 }
 

diff --git a/doc/go1.11.html b/doc/go1.11.html index fae1c5ff145a6..1d85be9fe4e53 100644 --- a/doc/go1.11.html +++ b/doc/go1.11.html @@ -15,14 +15,7 @@ ul li { margin: 0.5em 0; } -

DRAFT RELEASE NOTES - Introduction to Go 1.11

- -

- - Go 1.11 is not yet released. These are work-in-progress - release notes. Go 1.11 is expected to be released in August 2018. - -

+

Introduction to Go 1.11

The latest Go release, version 1.11, arrives six months after Go 1.10. @@ -95,7 +88,8 @@

Ports

WebAssembly

- Go 1.11 adds an experimental port to WebAssembly (js/wasm). + Go 1.11 adds an experimental port to WebAssembly + (js/wasm).

Go programs currently compile to one WebAssembly module that @@ -116,6 +110,10 @@

WebAssembly

tools except when those GOOS/GOARCH values are being used. If you have existing filenames matching those patterns, you will need to rename them.

+

+ More information can be found on the + WebAssembly wiki page. +

RISC-V GOARCH values reserved

@@ -350,6 +348,20 @@

Cgo

details.

+

Go command

+ +

+ The environment variable GOFLAGS may now be used + to set default flags for the go command. + This is useful in certain situations. + Linking can be noticeably slower on underpowered systems due to DWARF, + and users may want to set -ldflags=-w by default. + For modules, some users and CI systems will want vendoring always, + so they should set -mod=vendor by default. + For more information, see the go + command documentation. +

+

Godoc

@@ -384,10 +396,20 @@

Gofmt

time. In general, systems that need consistent formatting of Go source code should use a specific version of the gofmt binary. - See the go/format package godoc for more + See the go/format package documentation for more information.

+

Run

+ +

+ + The go run + command now allows a single import path, a directory name or a + pattern matching a single package. + This allows go run pkg or go run dir, most importantly go run . +

+

Runtime

@@ -398,7 +420,7 @@

Runtime

- On macOS and iOS, the runtime now uses libSystem.so instead of + On macOS and iOS, the runtime now uses libSystem.dylib instead of calling the kernel directly. This should make Go binaries more compatible with future versions of macOS and iOS. The syscall package still makes direct @@ -500,7 +522,8 @@

Minor changes to the library

ConnectionState's new - ExportKeyingMaterial field allows exporting keying material bound to the + ExportKeyingMaterial + method allows exporting keying material bound to the connection according to RFC 5705.

diff --git a/doc/go1.12.html b/doc/go1.12.html new file mode 100644 index 0000000000000..3b086e700106a --- /dev/null +++ b/doc/go1.12.html @@ -0,0 +1,922 @@ + + + + + + +

DRAFT RELEASE NOTES - Introduction to Go 1.12

+ +

+ + Go 1.12 is not yet released. These are work-in-progress + release notes. Go 1.12 is expected to be released in February 2019. + +

+ +

+ The latest Go release, version 1.12, arrives six months after Go 1.11. + Most of its changes are in the implementation of the toolchain, runtime, and libraries. + As always, the release maintains the Go 1 promise of compatibility. + We expect almost all Go programs to continue to compile and run as before. +

+ +

Changes to the language

+ +

+ There are no changes to the language specification. +

+ +

Ports

+ +

+ The race detector is now supported on linux/arm64. +

+ +

+ Go 1.12 is the last release that is supported on FreeBSD 10.x, which has + already reached end-of-life. Go 1.13 will require FreeBSD 11.2+ or FreeBSD + 12.0+. + FreeBSD 12.0+ requires a kernel with the COMPAT_FREEBSD11 option set (this is the default). +

+ +

+ cgo is now supported on linux/ppc64. +

+ +

+ hurd is now a recognized value for GOOS, reserved + for the GNU/Hurd system for use with gccgo. +

+ +

Windows

+ +

+ Go's new windows/arm port supports running Go on Windows 10 + IoT Core on 32-bit ARM chips such as the Raspberry Pi 3. +

+ +

AIX

+ +

+ Go now supports AIX 7.2 and later on POWER8 architectures (aix/ppc64). External linking, cgo, pprof and the race detector aren't yet supported. +

+ +

Darwin

+ +

+ Go 1.12 is the last release that will run on macOS 10.10 Yosemite. + Go 1.13 will require macOS 10.11 El Capitan or later. +

+ +

+ libSystem is now used when making syscalls on Darwin, + ensuring forward-compatibility with future versions of macOS and iOS. + + The switch to libSystem triggered additional App Store + checks for private API usage. Since it is considered private, + syscall.Getdirentries now always fails with + ENOSYS on iOS. +

+ +

Tools

+ +

go tool vet no longer supported

+ +

+ The go vet command has been rewritten to serve as the + base for a range of different source code analysis tools. See + the golang.org/x/tools/go/analysis + package for details. A side-effect is that go tool vet + is no longer supported. External tools that use go tool + vet must be changed to use go + vet. Using go vet instead of go tool + vet should work with all supported versions of Go. +

+ +

+ As part of this change, the experimental -shadow option + is no longer available with go vet. Checking for + variable shadowing may now be done using +

+go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow
+go vet -vettool=$(which shadow)
+
+

+ +

Tour

+ +

+The Go tour is no longer included in the main binary distribution. To +run the tour locally, instead of running go tool tour, +manually install it: +

+go install golang.org/x/tour
+tour
+
+

+ +

Build cache requirement

+ +

+ The build cache is now + required as a step toward eliminating + $GOPATH/pkg. Setting the environment variable + GOCACHE=off will cause go commands that write to the + cache to fail. +

+ +

Binary-only packages

+ +

+ Go 1.12 is the last release that will support binary-only packages. +

+ +

Cgo

+ +

+ Go 1.12 will translate the C type EGLDisplay to the Go type uintptr. + This change is similar to how Go 1.10 and newer treats Darwin's CoreFoundation + and Java's JNI types. See the + cgo documentation + for more information. +

+ +

+ Mangled C names are no longer accepted in packages that use Cgo. Use the Cgo + names instead. For example, use the documented cgo name C.char + rather than the mangled name _Ctype_char that cgo generates. +

+ +

Modules

+ +

+ When GO111MODULE is set to on, the go + command now supports module-aware operations outside of a module directory, + provided that those operations do not need to resolve import paths relative to + the current directory or explicitly edit the go.mod file. + Commands such as go get, + go list, and + go mod download behave as if in a + module with initially-empty requirements. + In this mode, go env GOMOD reports + the system's null device (/dev/null or NUL). +

+ +

+ go commands that download and extract modules are now safe to + invoke concurrently. + The module cache (GOPATH/pkg/mod) must reside in a filesystem that + supports file locking. +

+ +

+ The go directive in a go.mod file now indicates the + version of the language used by the files within that module. + It will be set to the current release + (go 1.12) if no existing version is + present. + If the go directive for a module specifies a + version newer than the toolchain in use, the go command + will attempt to build the packages regardless, and will note the mismatch only if + that build fails. +

+ +

+ When an import cannot be resolved using the active modules, + the go command will now try to use the modules mentioned in the + main module's replace directives before consulting the module + cache and the usual network sources. + If a matching replacement is found but the replace directive does + not specify a version, the go command uses a pseudo-version + derived from the zero time.Time (such + as v0.0.0-00010101000000-000000000000). +

+ +

Compiler toolchain

+ +

+ The compiler's live variable analysis has improved. This may mean that + finalizers will be executed sooner in this release than in previous + releases. If that is a problem, consider the appropriate addition of a + runtime.KeepAlive call. +

+ +

+ More functions are now eligible for inlining by default, including + functions that do nothing but call another function. + This extra inlining makes it additionally important to use + runtime.CallersFrames + instead of iterating over the result of + runtime.Callers directly. +

+// Old code which no longer works correctly (it will miss inlined call frames).
+var pcs [10]uintptr
+n := runtime.Callers(1, pcs[:])
+for _, pc := range pcs[:n] {
+	f := runtime.FuncForPC(pc)
+	if f != nil {
+		fmt.Println(f.Name())
+	}
+}
+
+
+// New code which will work correctly.
+var pcs [10]uintptr
+n := runtime.Callers(1, pcs[:])
+frames := runtime.CallersFrames(pcs[:n])
+for {
+	frame, more := frames.Next()
+	fmt.Println(frame.Function)
+	if !more {
+		break
+	}
+}
+
+

+ +

+ Wrappers generated by the compiler to implement method expressions + are no longer reported + by runtime.CallersFrames + and runtime.Stack. They + are also not printed in panic stack traces. + + This change aligns the gc toolchain to match + the gccgo toolchain, which already elided such wrappers + from stack traces. + + Clients of these APIs might need to adjust for the missing + frames. For code that must interoperate between 1.11 and 1.12 + releases, you can replace the method expression x.M + with the function literal func (...) { x.M(...) } . +

+ +

+ The compiler now accepts a -lang flag to set the Go language + version to use. For example, -lang=go1.8 causes the compiler to + emit an error if the program uses type aliases, which were added in Go 1.9. + Language changes made before Go 1.12 are not consistently enforced. +

+ +

+ The compiler toolchain now uses different conventions to call Go + functions and assembly functions. This should be invisible to users, + except for calls that simultaneously cross between Go and + assembly and cross a package boundary. If linking results + in an error like "relocation target not defined for ABIInternal (but + is defined for ABI0)", please refer to the + compatibility section + of the ABI design document. +

+ +

+ There have been many improvements to the DWARF debug information + produced by the compiler, including improvements to argument + printing and variable location information. +

+ +

+ Go programs now also maintain stack frame pointers on linux/arm64 + for the benefit of profiling tools like perf. The frame pointer + maintenance has a small run-time overhead that varies but averages around 3%. + To build a toolchain that does not use frame pointers, set + GOEXPERIMENT=noframepointer when running make.bash. +

+ +

+ The obsolete "safe" compiler mode (enabled by the -u gcflag) has been removed. +

+ +

godoc and go doc

+ +

+ In Go 1.12, godoc no longer has a command-line interface and + is only a web server. Users should use go doc + for command-line help output instead. +

+ +

+ go doc now supports the -all flag, + which will cause it to print all exported APIs and their documentation, + as the godoc command line used to do. +

+ +

+ go doc also now includes the -src flag, + which will show the target's source code. +

+ +

Trace

+ +

+ The trace tool now supports plotting mutator utilization curves, + including cross-references to the execution trace. These are useful + for analyzing the impact of the garbage collector on application + latency and throughput. +

+ +

Assembler

+ +

+ On arm64, the platform register was renamed from + R18 to R18_PLATFORM to prevent accidental + use, as the OS could choose to reserve this register. +

+ +

Runtime

+ +

+ Go 1.12 significantly improves the performance of sweeping when a + large fraction of the heap remains live. This reduces allocation + latency immediately following a garbage collection. +

+ +

+ The Go runtime now releases memory back to the operating system more + aggressively, particularly in response to large allocations that + can't reuse existing heap space. +

+ +

+ The Go runtime's timer and deadline code is faster and scales better + with higher numbers of CPUs. In particular, this improves the + performance of manipulating network connection deadlines. +

+ +

+ On Linux, the runtime now uses MADV_FREE to release unused + memory. This is more efficient but may result in higher reported + RSS. The kernel will reclaim the unused data when it is needed. + To revert to the Go 1.11 behavior (MADV_DONTNEED), set the + environment variable GODEBUG=madvdontneed=1. +

+ +

+ Adding cpu.extension=off to the + GODEBUG environment + variable now disables the use of optional CPU instruction + set extensions in the standard library and runtime. This is not + yet supported on Windows. +

+ +

+ Go 1.12 improves the accuracy of memory profiles by fixing + overcounting of large heap allocations. +

+ +

+ Tracebacks, runtime.Caller, + and runtime.Callers no longer include + compiler-generated initialization functions. Doing a traceback + during the initialization of a global variable will now show a + function named PKG.init.ializers. +

+ +

Core library

+ +

TLS 1.3

+ +

+ Go 1.12 adds opt-in support for TLS 1.3 in the crypto/tls package as + specified by RFC 8446. It can + be enabled by adding the value tls13=1 to the GODEBUG + environment variable. It will be enabled by default in Go 1.13. +

+ +

+ To negotiate TLS 1.3, make sure you do not set an explicit MaxVersion in + Config and run your program with + the environment variable GODEBUG=tls13=1 set. +

+ +

+ All TLS 1.2 features except TLSUnique in + ConnectionState + and renegotiation are available in TLS 1.3 and provide equivalent or + better security and performance. Note that even though TLS 1.3 is backwards + compatible with previous versions, certain legacy systems might not work + correctly when attempting to negotiate it. RSA certificate keys too small + to be secure (including 512-bit keys) will not work with TLS 1.3. +

+ +

+ TLS 1.3 cipher suites are not configurable. All supported cipher suites are + safe, and if PreferServerCipherSuites is set in + Config the preference order + is based on the available hardware. +

+ +

+ Early data (also called "0-RTT mode") is not currently supported as a + client or server. Additionally, a Go 1.12 server does not support skipping + unexpected early data if a client sends it. Since TLS 1.3 0-RTT mode + involves clients keeping state regarding which servers support 0-RTT, + a Go 1.12 server cannot be part of a load-balancing pool where some other + servers do support 0-RTT. If switching a domain from a server that supported + 0-RTT to a Go 1.12 server, 0-RTT would have to be disabled for at least the + lifetime of the issued session tickets before the switch to ensure + uninterrupted operation. +

+ +

+ In TLS 1.3 the client is the last one to speak in the handshake, so if it causes + an error to occur on the server, it will be returned on the client by the first + Read, not by + Handshake. For + example, that will be the case if the server rejects the client certificate. + Similarly, session tickets are now post-handshake messages, so are only + received by the client upon its first + Read. +

+ +

Minor changes to the library

+ +

+ As always, there are various minor changes and updates to the library, + made with the Go 1 promise of compatibility + in mind. +

+ + + +
bufio
+
+

+ Reader's UnreadRune and + UnreadByte methods will now return an error + if they are called after Peek. +

+ +
+ +
bytes
+
+

+ The new function ReplaceAll returns a copy of + a byte slice with all non-overlapping instances of a value replaced by another. +

+ +

+ A pointer to a zero-value Reader is now + functionally equivalent to NewReader(nil). + Prior to Go 1.12, the former could not be used as a substitute for the latter in all cases. +

+ +
+ +
crypto/rand
+
+

+ A warning will now be printed to standard error the first time + Reader.Read is blocked for more than 60 seconds waiting + to read entropy from the kernel. +

+ +

+ On FreeBSD, Reader now uses the getrandom + system call if available, /dev/urandom otherwise. +

+ +
+ +
crypto/rc4
+
+

+ This release removes the optimized assembly implementations. RC4 is insecure + and should only be used for compatibility with legacy systems. +

+ +
+ +
crypto/tls
+
+

+ If a client sends an initial message that does not look like TLS, the server + will no longer reply with an alert, and it will expose the underlying + net.Conn in the new field Conn of + RecordHeaderError. +

+ +
+ +
database/sql
+
+

+ A query cursor can now be obtained by passing a + *Rows + value to the Row.Scan method. +

+ +
+ +
expvar
+
+

+ The new Delete method allows + for deletion of key/value pairs from a Map. +

+ +
+ +
fmt
+
+

+ Maps are now printed in key-sorted order to ease testing. The ordering rules are: +

    +
  • When applicable, nil compares low +
  • ints, floats, and strings order by < +
  • NaN compares less than non-NaN floats +
  • bool compares false before true +
  • Complex compares real, then imaginary +
  • Pointers compare by machine address +
  • Channel values compare by machine address +
  • Structs compare each field in turn +
  • Arrays compare each element in turn +
  • Interface values compare first by reflect.Type describing the concrete type + and then by concrete value as described in the previous rules. +
+

+ +

+ When printing maps, non-reflexive key values like NaN were previously + displayed as <nil>. As of this release, the correct values are printed. +

+ +
+ +
go/doc
+
+

+ To address some outstanding issues in cmd/doc, + this package has a new Mode bit, + PreserveAST, which controls whether AST data is cleared. +

+ +
+ +
go/token
+
+

+ The File type has a new + LineStart field, + which returns the position of the start of a given line. This is especially useful + in programs that occasionally handle non-Go files, such as assembly, but wish to use + the token.Pos mechanism to identify file positions. +

+ +
+ +
image
+
+

+ The RegisterFormat function is now safe for concurrent use. +

+ +
+ +
image/png
+
+

+ Paletted images with fewer than 16 colors now encode to smaller outputs. +

+ +
+ +
io
+
+

+ The new StringWriter interface wraps the + WriteString function. +

+ +
+ +
lib/time
+
+

+ The time zone database in $GOROOT/lib/time/zoneinfo.zip + has been updated to version 2018i. Note that this ZIP file is + only used if a time zone database is not provided by the operating + system. +

+ +
+ +
math
+
+

+ The functions + Sin, + Cos, + Tan, + and Sincos now + apply Payne-Hanek range reduction to huge arguments. This + produces more accurate answers, but they will not be bit-for-bit + identical with the results in earlier releases. +

+
+ +
math/bits
+
+

+ New extended precision operations Add, Sub, Mul, and Div are available in uint, uint32, and uint64 versions. +

+ +
+ +
net
+
+

+ The + Dialer.DualStack setting is now ignored and deprecated; + RFC 6555 Fast Fallback ("Happy Eyeballs") is now enabled by default. To disable, set + Dialer.FallbackDelay to a negative value. +

+ +

+ Similarly, TCP keep-alives are now enabled by default if + Dialer.KeepAlive is zero. + To disable, set it to a negative value. +

+ +

+ On Linux, the splice system call is now used when copying from a + UnixConn to a + TCPConn. +

+
+ +
net/http
+
+

+ The HTTP server now rejects misdirected HTTP requests to HTTPS servers with a plaintext "400 Bad Request" response. +

+ +

+ The new Client.CloseIdleConnections + method calls the Client's underlying Transport's CloseIdleConnections + if it has one. +

+ +

+ The Transport no longer rejects HTTP responses which declare + HTTP Trailers but don't use chunked encoding. Instead, the declared trailers are now just ignored. +

+ +

+ The Transport no longer handles MAX_CONCURRENT_STREAMS values + advertised from HTTP/2 servers as strictly as it did during Go 1.10 and Go 1.11. The default behavior is now back + to how it was in Go 1.9: each connection to a server can have up to MAX_CONCURRENT_STREAMS requests + active and then new TCP connections are created as needed. In Go 1.10 and Go 1.11 the http2 package + would block and wait for requests to finish instead of creating new connections. + To get the stricter behavior back, import the + golang.org/x/net/http2 package + directly and set + Transport.StrictMaxConcurrentStreams to + true. +

+ +
+ +
net/http/httputil
+
+

+ The ReverseProxy now automatically + proxies WebSocket requests. +

+ +
+ +
os
+
+

+ The new ProcessState.ExitCode method + returns the process's exit code. +

+ +

+ ModeCharDevice has been added to the ModeType bitmask, allowing for + ModeDevice | ModeCharDevice to be recovered when masking a + FileMode with ModeType. +

+ +

+ The new function UserHomeDir returns the + current user's home directory. +

+ +

+ RemoveAll now supports paths longer than 4096 characters + on most Unix systems. +

+ +

+ File.Sync now uses F_FULLFSYNC on macOS + to correctly flush the file contents to permanent storage. + This may cause the method to run more slowly than in previous releases. +

+ +

+ File now supports + a SyscallConn + method returning + a syscall.RawConn + interface value. This may be used to invoke system-specific + operations on the underlying file descriptor. +

+ +
+ +
path/filepath
+
+

+ The IsAbs function now returns true when passed + a reserved filename on Windows such as NUL. + See the list of reserved names. +

+ +
+ +
reflect
+
+

+ A new MapIter type is + an iterator for ranging over a map. This type is exposed through the + Value type's new + MapRange method. + This follows the same iteration semantics as a range statement, with Next + to advance the iterator, and Key/Value to access each entry. +

+ +
+ +
regexp
+
+

+ Copy is no longer necessary + to avoid lock contention, so it has been given a partial deprecation comment. + Copy + may still be appropriate if the reason for its use is to make two copies with + different Longest settings. +

+ +
+ +
runtime/debug
+
+

+ A new BuildInfo type + exposes the build information read from the running binary, available only in + binaries built with module support. This includes the main package path, main + module information, and the module dependencies. This type is made available through the + ReadBuildInfo function + on BuildInfo. +

+ +
+ +
strings
+
+

+ The new function ReplaceAll returns a copy of + a string with all non-overlapping instances of a value replaced by another. +

+ +

+ A pointer to a zero-value Reader is now + functionally equivalent to NewReader(nil). + Prior to Go 1.12, the former could not be used as a substitute for the latter in all cases. +

+ +

+ The new Builder.Cap method returns the capacity of the builder's underlying byte slice. +

+ +

+ The character mapping functions Map, + Title, + ToLower, + ToLowerSpecial, + ToTitle, + ToTitleSpecial, + ToUpper, and + ToUpperSpecial + now always guarantee to return valid UTF-8. In earlier releases, if the input was invalid UTF-8 but no character replacements + needed to be applied, these routines incorrectly returned the invalid UTF-8 unmodified. +

+ +
+ +
syscall
+
+

+ 64-bit inodes are now supported on FreeBSD 12. Some types have been adjusted accordingly. +

+ +

+ The Unix socket + (AF_UNIX) + address family is now supported for compatible versions of Windows. +

+ +

+ The new function Syscall18 + has been introduced for Windows, allowing for calls with up to 18 arguments. +

+ +
+ +
syscall/js
+
+

+

+ The Callback type and NewCallback function have been renamed; + they are now called + Func and + FuncOf, respectively. + This is a breaking change, but WebAssembly support is still experimental + and not yet subject to the + Go 1 compatibility promise. Any code using the + old names will need to be updated. +

+ +

+ If a type implements the new + Wrapper + interface, + ValueOf + will use it to return the JavaScript value for that type. +

+ +

+ The meaning of the zero + Value + has changed. It now represents the JavaScript undefined value + instead of the number zero. + This is a breaking change, but WebAssembly support is still experimental + and not yet subject to the + Go 1 compatibility promise. Any code relying on + the zero Value + to mean the number zero will need to be updated. +

+ +

+ The new + Value.Truthy + method reports the + JavaScript "truthiness" + of a given value. +

+ +
+ +
testing
+
+

+ The -benchtime flag now supports setting an explicit iteration count instead of a time when the value ends with an "x". For example, -benchtime=100x runs the benchmark 100 times. +

+ +
+ +
text/template
+
+

+ When executing a template, long context values are no longer truncated in errors. +

+

+ executing "tmpl" at <.very.deep.context.v...>: map has no entry for key "notpresent" +

+

+ is now +

+

+ executing "tmpl" at <.very.deep.context.value.notpresent>: map has no entry for key "notpresent" +

+ +
+

+ If a user-defined function called by a template panics, the + panic is now caught and returned as an error by + the Execute or ExecuteTemplate method. +

+
+ +
unsafe
+
+

+ It is invalid to convert a nil unsafe.Pointer to uintptr and back with arithmetic. + (This was already invalid, but will now cause the compiler to misbehave.) +

+ +
diff --git a/doc/go1.3.html b/doc/go1.3.html index 18c638a1b5e31..feed6480c726a 100644 --- a/doc/go1.3.html +++ b/doc/go1.3.html @@ -525,7 +525,7 @@

Minor changes to the library

The net/http package's Request.ParseMultipartForm method will now return an error if the body's Content-Type -is not mutipart/form-data. +is not multipart/form-data. Prior to Go 1.3 it would silently fail and return nil. Code that relies on the previous behavior should be updated. diff --git a/doc/go_faq.html b/doc/go_faq.html index b1c15295d62e2..305878f2378f1 100644 --- a/doc/go_faq.html +++ b/doc/go_faq.html @@ -108,6 +108,26 @@

He has unique features; he's the Go gopher, not just any old gopher.

+

+Is the language called Go or Golang?

+ +

+The language is called Go. +The "golang" moniker arose because the web site is +golang.org, not +go.org, which was not available to us. +Many use the golang name, though, and it is handy as +a label. +For instance, the Twitter tag for the language is "#golang". +The language's name is just plain Go, regardless. +

+ +

+A side note: Although the +official logo +has two capital letters, the language name is written Go, not GO. +

+

Why did you create a new language?

@@ -784,7 +804,7 @@

A type must then implement the ImplementsFooer method to be a Fooer, clearly documenting the fact and announcing it in -godoc's output. +go doc's output.

@@ -1749,7 +1769,7 @@ 

type *T consists of all methods with receiver *T or T. That means the method set of *T -includes that of T), +includes that of T, but not the reverse.

@@ -2438,7 +2458,7 @@

Work continues to refine the algorithm, reduce overhead and latency further, and to explore new approaches. The 2018 -ISMM keynote +ISMM keynote by Rick Hudson of the Go team describes the progress so far and suggests some future approaches.

diff --git a/doc/go_mem.html b/doc/go_mem.html index 143f3b2ff22e9..d355bebaed881 100644 --- a/doc/go_mem.html +++ b/doc/go_mem.html @@ -418,8 +418,12 @@

Once

-calling twoprint causes "hello, world" to be printed twice. -The first call to doprint runs setup once. +calling twoprint will call setup exactly +once. +The setup function will complete before either call +of print. +The result will be that "hello, world" will be printed +twice.

Incorrect synchronization

diff --git a/doc/go_spec.html b/doc/go_spec.html index f70ff7a02feee..dcc81ed628717 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -811,7 +811,7 @@

Numeric types

types and thus distinct except byte, which is an alias for uint8, and rune, which is an alias for int32. -Conversions +Explicit conversions are required when different numeric types are mixed in an expression or assignment. For instance, int32 and int are not the same type even though they may have the same size on a @@ -823,6 +823,7 @@

String types

A string type represents the set of string values. A string value is a (possibly empty) sequence of bytes. +The number of bytes is called the length of the string and is never negative. Strings are immutable: once created, it is impossible to change the contents of a string. The predeclared string type is string; @@ -830,7 +831,7 @@

String types

-The length of a string s (its size in bytes) can be discovered using +The length of a string s can be discovered using the built-in function len. The length is a compile-time constant if the string is a constant. A string's bytes can be accessed by integer indices @@ -846,8 +847,7 @@

Array types

An array is a numbered sequence of elements of a single type, called the element type. -The number of elements is called the length and is never -negative. +The number of elements is called the length of the array and is never negative.

@@ -883,6 +883,7 @@ 

Slice types

A slice is a descriptor for a contiguous segment of an underlying array and provides access to a numbered sequence of elements from that array. A slice type denotes the set of all slices of arrays of its element type. +The number of elements is called the length of the slice and is never negative. The value of an uninitialized slice is nil.

@@ -891,8 +892,7 @@

Slice types

-Like arrays, slices are indexable and have a length. The length of a -slice s can be discovered by the built-in function +The length of a slice s can be discovered by the built-in function len; unlike with arrays it may change during execution. The elements can be addressed by integer indices 0 through len(s)-1. The slice index of a @@ -1349,7 +1349,8 @@

Channel types

send or receive. If no direction is given, the channel is bidirectional. A channel may be constrained only to send or only to receive by -conversion or assignment. +assignment or +explicit conversion.

@@ -2069,9 +2070,9 @@ 

Variable declarations

If a type is present, each variable is given that type. Otherwise, each variable is given the type of the corresponding initialization value in the assignment. -If that value is an untyped constant, it is first +If that value is an untyped constant, it is first implicitly converted to its default type; -if it is an untyped boolean value, it is first converted to type bool. +if it is an untyped boolean value, it is first implicitly converted to type bool. The predeclared value nil cannot be used to initialize a variable with no explicit type.

@@ -2112,8 +2113,8 @@

Short variable declarations

i, j := 0, 10 f := func() int { return 7 } ch := make(chan int) -r, w := os.Pipe(fd) // os.Pipe() returns two values -_, y, _ := coord(p) // coord() returns three values; only interested in y coordinate +r, w, _ := os.Pipe() // os.Pipe() returns a connected pair of Files and an error, if any +_, y, _ := coord(p) // coord() returns three values; only interested in y coordinate

@@ -2202,11 +2203,11 @@

Method declarations

The receiver is specified via an extra parameter section preceding the method name. That parameter section must declare a single non-variadic parameter, the receiver. -Its type must be of the form T or *T (possibly using -parentheses) where T is a type name. The type denoted by T is called -the receiver base type; it must not be a pointer or interface type and -it must be defined in the same package as the method. -The method is said to be bound to the base type and the method name +Its type must be a defined type T or a +pointer to a defined type T. T is called the receiver +base type. A receiver base type cannot be a pointer or interface type and +it must be defined in the same package as the method. +The method is said to be bound to its receiver base type and the method name is visible only within selectors for type T or *T.

@@ -2226,7 +2227,7 @@

Method declarations

-Given type Point, the declarations +Given defined type Point, the declarations

@@ -3260,7 +3261,7 @@ 

Type assertions

yields an additional untyped boolean value. The value of ok is true if the assertion holds. Otherwise it is false and the value of v is the zero value for type T. -No run-time panic occurs in this case. +No run-time panic occurs in this case.

@@ -3433,7 +3434,7 @@

Operators

Except for shift operations, if one operand is an untyped constant -and the other operand is not, the constant is converted +and the other operand is not, the constant is implicitly converted to the type of the other operand.

@@ -3442,7 +3443,7 @@

Operators

or be an untyped constant representable by a value of type uint. If the left operand of a non-constant shift expression is an untyped constant, -it is first converted to the type it would assume if the shift expression were +it is first implicitly converted to the type it would assume if the shift expression were replaced by its left operand alone.

@@ -3624,7 +3625,7 @@

Integer overflow

-, *, /, and << may legally overflow and the resulting value exists and is deterministically defined by the signed integer representation, the operation, and its operands. -No exception is raised as a result of overflow. +Overflow does not cause a run-time panic. A compiler may not optimize code under the assumption that overflow does not occur. For instance, it may not assume that x < x + 1 is always true.

@@ -3645,7 +3646,7 @@

Floating-point operators

An implementation may combine multiple floating-point operations into a single fused operation, possibly across statements, and produce a result that differs from the value obtained by executing and rounding the instructions individually. -A floating-point type conversion explicitly rounds to +An explicit floating-point type conversion rounds to the precision of the target type, preventing fusion that would discard that rounding.

@@ -3907,7 +3908,14 @@

Receive operator

Conversions

-Conversions are expressions of the form T(x) +A conversion changes the type of an expression +to the type specified by the conversion. +A conversion may appear literally in the source, or it may be implied +by the context in which an expression appears. +

+ +

+An explicit conversion is an expression of the form T(x) where T is a type and x is an expression that can be converted to type T.

@@ -3938,7 +3946,7 @@

Conversions

A constant value x can be converted to type T if x is representable by a value of T. -As a special case, an integer constant x can be converted to a +As a special case, an integer constant x can be explicitly converted to a string type using the same rule as for non-constant x. @@ -4672,13 +4680,13 @@

Assignments

  • If an untyped constant is assigned to a variable of interface type or the blank identifier, - the constant is first converted to its + the constant is first implicitly converted to its default type.
  • If an untyped boolean value is assigned to a variable of interface type or - the blank identifier, it is first converted to type bool. + the blank identifier, it is first implicitly converted to type bool.
  • @@ -4764,14 +4772,14 @@

    Expression switches

    -If the switch expression evaluates to an untyped constant, it is first +If the switch expression evaluates to an untyped constant, it is first implicitly converted to its default type; -if it is an untyped boolean value, it is first converted to type bool. +if it is an untyped boolean value, it is first implicitly converted to type bool. The predeclared untyped value nil cannot be used as a switch expression.

    -If a case expression is untyped, it is first converted +If a case expression is untyped, it is first implicitly converted to the type of the switch expression. For each (possibly converted) case expression x and the value t of the switch expression, x == t must be a valid comparison. @@ -5546,7 +5554,10 @@

    Defer statements

    and saved anew but the actual function is not invoked. Instead, deferred functions are invoked immediately before the surrounding function returns, in the reverse order -they were deferred. +they were deferred. That is, if the surrounding function +returns through an explicit return statement, +deferred functions are executed after any result parameters are set +by that return statement but before the function returns to its caller. If a deferred function value evaluates to nil, execution panics when the function is invoked, not when the "defer" statement is executed. @@ -5572,12 +5583,13 @@

    Defer statements

    defer fmt.Print(i) } -// f returns 1 +// f returns 42 func f() (result int) { defer func() { - result++ + // result is accessed after it was set to 6 by the return statement + result *= 7 }() - return 0 + return 6 } @@ -5877,7 +5889,7 @@

    Manipulating complex numbers

    with the corresponding floating-point constituents: complex64 for float32 arguments, and complex128 for float64 arguments. -If one of the arguments evaluates to an untyped constant, it is first +If one of the arguments evaluates to an untyped constant, it is first implicitly converted to the type of the other argument. If both arguments evaluate to untyped constants, they must be non-complex numbers or their imaginary parts must be zero, and the return value of diff --git a/doc/help.html b/doc/help.html index f668196871d0b..f11e286904657 100644 --- a/doc/help.html +++ b/doc/help.html @@ -27,6 +27,11 @@

    Go Forum

    forum for Go programmers.

    +

    Gophers Discord

    +

    +Get live support and talk with other gophers on the Go Discord. +

    +

    Gopher Slack

    Get live support from other users in the Go slack channel.

    diff --git a/doc/install-source.html b/doc/install-source.html index f6d9473d9be55..6d416d33f16fe 100644 --- a/doc/install-source.html +++ b/doc/install-source.html @@ -639,14 +639,10 @@

    Optional environment variables

    -
  • $GOMIPS (for mips and mipsle only) +
  • $GOMIPS (for mips and mipsle only)
    $GOMIPS64 (for mips64 and mips64le only)

    -This sets whether to use floating point instructions. + These variables set whether to use floating point instructions. Set to "hardfloat" to use floating point instructions; this is the default. Set to "softfloat" to use soft floating point.

    -
      -
    • GOMIPS=hardfloat: use floating point instructions (the default)
    • -
    • GOMIPS=softfloat: use soft floating point
    • -
  • @@ -670,7 +666,6 @@

    Optional environment variables

    -export GOROOT=$HOME/go1.X
     export GOARCH=amd64
     export GOOS=linux
     
    diff --git a/doc/install.html b/doc/install.html index 2e0c7f859d82c..a41c60ba6c8b2 100644 --- a/doc/install.html +++ b/doc/install.html @@ -171,11 +171,6 @@

    Zip archive

    Download the zip file and extract it into the directory of your choice (we suggest c:\Go).

    -

    -If you chose a directory other than c:\Go, -you must set the GOROOT environment variable to your chosen path. -

    -

    Add the bin subdirectory of your Go root (for example, c:\Go\bin) to your PATH environment variable.

    @@ -271,6 +266,39 @@

    Test your installation

    +

    Installing extra Go versions

    + +

    +It may be useful to have multiple Go versions installed on the same machine, for +example, to ensure that a package's tests pass on multiple Go versions. +Once you have one Go version installed, you can install another (such as 1.10.7) +as follows: +

    + +
    +$ go get golang.org/dl/go1.10.7
    +$ go1.10.7 download
    +
    + +

    +The newly downloaded version can be used like go: +

    + +
    +$ go1.10.7 version
    +go version go1.10.7 linux/amd64
    +
    + +

    +All Go versions available via this method are listed on +the download page. +You can find where each of these extra Go versions is installed by looking +at its GOROOT; for example, go1.10.7 env GOROOT. +To uninstall a downloaded version, just remove its GOROOT directory +and the goX.Y.Z binary. +

    + +

    Uninstalling Go

    diff --git a/doc/progs/eff_sequence.go b/doc/progs/eff_sequence.go index 11c885abf82a3..ab1826b6ee3b9 100644 --- a/doc/progs/eff_sequence.go +++ b/doc/progs/eff_sequence.go @@ -28,11 +28,18 @@ func (s Sequence) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// Copy returns a copy of the Sequence. +func (s Sequence) Copy() Sequence { + copy := make(Sequence, 0, len(s)) + return append(copy, s...) +} + // Method for printing - sorts the elements before printing. func (s Sequence) String() string { + s = s.Copy() // Make a copy; don't overwrite argument. sort.Sort(s) str := "[" - for i, elem := range s { + for i, elem := range s { // Loop is O(N²); will fix that in next example. if i > 0 { str += " " } diff --git a/doc/security.html b/doc/security.html index c305ae03c1a0f..b334963222ac0 100644 --- a/doc/security.html +++ b/doc/security.html @@ -70,7 +70,7 @@

    Disclosure Process

  • Code is audited to find any potential similar problems.
  • If it is determined, in consultation with the submitter, that a CVE-ID is required, the primary handler obtains one via email to -oss-distros.
  • +oss-distros.
  • Fixes are prepared for the two most recent major releases and the head/master revision. These fixes are not yet committed to the public repository.
  • A notification is sent to the diff --git a/lib/time/update.bash b/lib/time/update.bash index 629e74fce84f8..8d6785b9af6d2 100755 --- a/lib/time/update.bash +++ b/lib/time/update.bash @@ -8,8 +8,8 @@ # Consult https://www.iana.org/time-zones for the latest versions. # Versions to use. -CODE=2018e -DATA=2018e +CODE=2018i +DATA=2018i set -e rm -rf work diff --git a/lib/time/zoneinfo.zip b/lib/time/zoneinfo.zip index 08dca214181a8..bacb724322bcd 100644 Binary files a/lib/time/zoneinfo.zip and b/lib/time/zoneinfo.zip differ diff --git a/misc/cgo/errors/errors_test.go b/misc/cgo/errors/errors_test.go index 118187f23b8e7..59054f4703a49 100644 --- a/misc/cgo/errors/errors_test.go +++ b/misc/cgo/errors/errors_test.go @@ -121,12 +121,19 @@ func TestReportsTypeErrors(t *testing.T) { "issue16591.go", "issue18452.go", "issue18889.go", + "issue26745.go", + "issue28721.go", } { check(t, file) } if sizeofLongDouble(t) > 8 { - check(t, "err4.go") + for _, file := range []string{ + "err4.go", + "issue28069.go", + } { + check(t, file) + } } } diff --git a/misc/cgo/errors/ptr_test.go b/misc/cgo/errors/ptr_test.go index fe8dfff1d891b..254671f179eb7 100644 --- a/misc/cgo/errors/ptr_test.go +++ b/misc/cgo/errors/ptr_test.go @@ -357,6 +357,73 @@ var ptrTests = []ptrTest{ body: `r, _, _ := os.Pipe(); r.SetDeadline(time.Now().Add(C.US * time.Microsecond))`, fail: false, }, + { + // Test for double evaluation of channel receive. + name: "chan-recv", + c: `void f(char** p) {}`, + imports: []string{"time"}, + body: `c := make(chan []*C.char, 2); c <- make([]*C.char, 1); go func() { time.Sleep(10 * time.Second); panic("received twice from chan") }(); C.f(&(<-c)[0]);`, + fail: false, + }, + { + // Test that converting the address of a struct field + // to unsafe.Pointer still just checks that field. + // Issue #25941. 
+ name: "struct-field", + c: `void f(void* p) {}`, + imports: []string{"unsafe"}, + support: `type S struct { p *int; a [8]byte; u uintptr }`, + body: `s := &S{p: new(int)}; C.f(unsafe.Pointer(&s.a))`, + fail: false, + }, + { + // Test that converting multiple struct field + // addresses to unsafe.Pointer still just checks those + // fields. Issue #25941. + name: "struct-field-2", + c: `void f(void* p, int r, void* s) {}`, + imports: []string{"unsafe"}, + support: `type S struct { a [8]byte; p *int; b int64; }`, + body: `s := &S{p: new(int)}; C.f(unsafe.Pointer(&s.a), 32, unsafe.Pointer(&s.b))`, + fail: false, + }, + { + // Test that second argument to cgoCheckPointer is + // evaluated when a deferred function is deferred, not + // when it is run. + name: "defer2", + c: `void f(char **pc) {}`, + support: `type S1 struct { s []*C.char }; type S2 struct { ps *S1 }`, + body: `p := &S2{&S1{[]*C.char{nil}}}; defer C.f(&p.ps.s[0]); p.ps = nil`, + fail: false, + }, + { + // Test that indexing into a function call still + // examines only the slice being indexed. + name: "buffer", + c: `void f(void *p) {}`, + imports: []string{"bytes", "unsafe"}, + body: `var b bytes.Buffer; b.WriteString("a"); C.f(unsafe.Pointer(&b.Bytes()[0]))`, + fail: false, + }, + { + // Test that bgsweep releasing a finalizer is OK. + name: "finalizer", + c: `// Nothing to declare.`, + imports: []string{"os"}, + support: `func open() { os.Open(os.Args[0]) }; var G [][]byte`, + body: `for i := 0; i < 10000; i++ { G = append(G, make([]byte, 4096)); if i % 100 == 0 { G = nil; open() } }`, + fail: false, + }, + { + // Test that converting generated struct to interface is OK. 
+ name: "structof", + c: `// Nothing to declare.`, + imports: []string{"reflect"}, + support: `type MyInt int; func (i MyInt) Get() int { return int(i) }; type Getter interface { Get() int }`, + body: `t := reflect.StructOf([]reflect.StructField{{Name: "MyInt", Type: reflect.TypeOf(MyInt(0)), Anonymous: true}}); v := reflect.New(t).Elem(); v.Interface().(Getter).Get()`, + fail: false, + }, } func TestPointerChecks(t *testing.T) { @@ -429,7 +496,7 @@ func testOne(t *testing.T, pt ptrTest) { cmd := exec.Command("go", "build") cmd.Dir = src - cmd.Env = addEnv("GOPATH", gopath) + cmd.Env = append(os.Environ(), "GOPATH="+gopath) buf, err := cmd.CombinedOutput() if err != nil { t.Logf("%#q:\n%s", args(cmd), buf) @@ -501,16 +568,5 @@ func testOne(t *testing.T, pt ptrTest) { } func cgocheckEnv(val string) []string { - return addEnv("GODEBUG", "cgocheck="+val) -} - -func addEnv(key, val string) []string { - env := []string{key + "=" + val} - look := key + "=" - for _, e := range os.Environ() { - if !strings.HasPrefix(e, look) { - env = append(env, e) - } - } - return env + return append(os.Environ(), "GODEBUG=cgocheck="+val) } diff --git a/misc/cgo/errors/src/issue26745.go b/misc/cgo/errors/src/issue26745.go new file mode 100644 index 0000000000000..0e224538db629 --- /dev/null +++ b/misc/cgo/errors/src/issue26745.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// int a; +// void CF(int i) {} +import "C" + +func F1(i int) int { + return C.a + 1 // ERROR HERE: :13 +} + +func F2(i int) { + C.CF(i) // ERROR HERE: :6 +} diff --git a/misc/cgo/errors/src/issue28069.go b/misc/cgo/errors/src/issue28069.go new file mode 100644 index 0000000000000..e19a3b45bd58c --- /dev/null +++ b/misc/cgo/errors/src/issue28069.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that the error message for an unrepresentable typedef in a +// union appears on the right line. This test is only run if the size +// of long double is larger than 64. + +package main + +/* +typedef long double Float128; + +typedef struct SV { + union { + Float128 float128; + } value; +} SV; +*/ +import "C" + +type ts struct { + tv *C.SV // ERROR HERE +} + +func main() {} diff --git a/misc/cgo/errors/src/issue28721.go b/misc/cgo/errors/src/issue28721.go new file mode 100644 index 0000000000000..0eb2a9271c29d --- /dev/null +++ b/misc/cgo/errors/src/issue28721.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cgo should reject the use of mangled C names. + +package main + +/* +typedef struct a { + int i; +} a; +void fn(void) {} +*/ +import "C" + +type B _Ctype_struct_a // ERROR HERE + +var a _Ctype_struct_a // ERROR HERE + +type A struct { + a *_Ctype_struct_a // ERROR HERE +} + +var notExist _Ctype_NotExist // ERROR HERE + +func main() { + _Cfunc_fn() // ERROR HERE +} diff --git a/misc/cgo/test/callback.go b/misc/cgo/test/callback.go index b88bf134bc1e4..4fc6b39ffa627 100644 --- a/misc/cgo/test/callback.go +++ b/misc/cgo/test/callback.go @@ -179,7 +179,6 @@ func testCallbackCallers(t *testing.T) { pc := make([]uintptr, 100) n := 0 name := []string{ - "runtime.call16", "runtime.cgocallbackg1", "runtime.cgocallbackg", "runtime.cgocallback_gofunc", @@ -193,9 +192,6 @@ func testCallbackCallers(t *testing.T) { "testing.tRunner", "runtime.goexit", } - if unsafe.Sizeof((*byte)(nil)) == 8 { - name[0] = "runtime.call32" - } nestedCall(func() { n = runtime.Callers(4, pc) }) @@ -295,7 +291,7 @@ func goWithString(s string) { } func testCallbackStack(t *testing.T) { - // Make cgo call and callback with different amount of stack 
stack available. + // Make cgo call and callback with different amount of stack available. // We do not do any explicit checks, just ensure that it does not crash. for _, f := range splitTests { f() diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go index ccacc50fe1a4f..2cb93d9c2eb28 100644 --- a/misc/cgo/test/cgo_test.go +++ b/misc/cgo/test/cgo_test.go @@ -92,6 +92,9 @@ func Test25143(t *testing.T) { test25143(t) } func Test23356(t *testing.T) { test23356(t) } func Test26066(t *testing.T) { test26066(t) } func Test26213(t *testing.T) { test26213(t) } +func Test27660(t *testing.T) { test27660(t) } +func Test28896(t *testing.T) { test28896(t) } +func Test30065(t *testing.T) { test30065(t) } func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) } func BenchmarkGoString(b *testing.B) { benchGoString(b) } diff --git a/misc/cgo/test/issue27054/egl.h b/misc/cgo/test/issue27054/egl.h new file mode 100644 index 0000000000000..33a759ea2a831 --- /dev/null +++ b/misc/cgo/test/issue27054/egl.h @@ -0,0 +1,7 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is the relevant part of EGL/egl.h. + +typedef void *EGLDisplay; diff --git a/misc/cgo/test/issue27054/test27054.go b/misc/cgo/test/issue27054/test27054.go new file mode 100644 index 0000000000000..186f5bd602078 --- /dev/null +++ b/misc/cgo/test/issue27054/test27054.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue27054 + +/* +#include "egl.h" +*/ +import "C" +import ( + "testing" +) + +func Test27054(t *testing.T) { + var _ C.EGLDisplay = 0 // Note: 0, not nil. That makes sure we use uintptr for this type. 
+} diff --git a/misc/cgo/test/issue27340.go b/misc/cgo/test/issue27340.go new file mode 100644 index 0000000000000..f8c8a87f20110 --- /dev/null +++ b/misc/cgo/test/issue27340.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Failed to resolve typedefs consistently. +// No runtime test; just make sure it compiles. + +package cgotest + +import "./issue27340" + +var issue27340Var = issue27340.Issue27340GoFunc diff --git a/misc/cgo/test/issue27340/a.go b/misc/cgo/test/issue27340/a.go new file mode 100644 index 0000000000000..f5b120c1fd824 --- /dev/null +++ b/misc/cgo/test/issue27340/a.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Failed to resolve typedefs consistently. +// No runtime test; just make sure it compiles. +// In separate directory to isolate #pragma GCC diagnostic. + +package issue27340 + +// We use the #pragma to avoid a compiler warning about incompatible +// pointer types, because we generate code passing a struct ptr rather +// than using the typedef. This warning is expected and does not break +// a normal build. +// We can only disable -Wincompatible-pointer-types starting with GCC 5. 
+ +// #if __GNU_MAJOR__ >= 5 +// +// #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" +// +// typedef struct { +// int a; +// } issue27340Struct, *issue27340Ptr; +// +// static void issue27340CFunc(issue27340Ptr p) {} +// +// #else /* _GNU_MAJOR_ < 5 */ +// +// typedef struct { +// int a; +// } issue27340Struct; +// +// static issue27340Struct* issue27340Ptr(issue27340Struct* p) { return p; } +// +// static void issue27340CFunc(issue27340Struct *p) {} +// #endif /* _GNU_MAJOR_ < 5 */ +import "C" + +func Issue27340GoFunc() { + var s C.issue27340Struct + C.issue27340CFunc(C.issue27340Ptr(&s)) +} diff --git a/misc/cgo/test/issue28545.go b/misc/cgo/test/issue28545.go new file mode 100644 index 0000000000000..8419b89c0afbe --- /dev/null +++ b/misc/cgo/test/issue28545.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Failed to add type conversion for negative constant. +// Issue 28772: Failed to add type conversion for Go constant set to C constant. +// No runtime test; just make sure it compiles. + +package cgotest + +/* +#include + +#define issue28772Constant 1 + +static void issue28545F(char **p, int n, complex double a) {} +*/ +import "C" + +const issue28772Constant = C.issue28772Constant + +func issue28545G(p **C.char) { + C.issue28545F(p, -1, (0)) + C.issue28545F(p, 2+3, complex(1, 1)) + C.issue28545F(p, issue28772Constant, issue28772Constant2) +} diff --git a/misc/cgo/test/issue28772.go b/misc/cgo/test/issue28772.go new file mode 100644 index 0000000000000..bed786bf30600 --- /dev/null +++ b/misc/cgo/test/issue28772.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// Constants didn't work if defined in different source file. 
+ +// #define issue28772Constant2 2 +import "C" + +const issue28772Constant2 = C.issue28772Constant2 diff --git a/misc/cgo/test/issue28896.go b/misc/cgo/test/issue28896.go new file mode 100644 index 0000000000000..8796040f18e42 --- /dev/null +++ b/misc/cgo/test/issue28896.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cgo was incorrectly adding padding after a packed struct. + +package cgotest + +/* +#include +#include +#include + +typedef struct { + void *f1; + uint32_t f2; +} __attribute__((__packed__)) innerPacked; + +typedef struct { + innerPacked g1; + uint64_t g2; +} outerPacked; + +typedef struct { + void *f1; + uint32_t f2; +} innerUnpacked; + +typedef struct { + innerUnpacked g1; + uint64_t g2; +} outerUnpacked; + +size_t offset(int x) { + switch (x) { + case 0: + return offsetof(innerPacked, f2); + case 1: + return offsetof(outerPacked, g2); + case 2: + return offsetof(innerUnpacked, f2); + case 3: + return offsetof(outerUnpacked, g2); + default: + abort(); + } +} +*/ +import "C" + +import ( + "testing" + "unsafe" +) + +func offset(i int) uintptr { + var pi C.innerPacked + var po C.outerPacked + var ui C.innerUnpacked + var uo C.outerUnpacked + switch i { + case 0: + return unsafe.Offsetof(pi.f2) + case 1: + return unsafe.Offsetof(po.g2) + case 2: + return unsafe.Offsetof(ui.f2) + case 3: + return unsafe.Offsetof(uo.g2) + default: + panic("can't happen") + } +} + +func test28896(t *testing.T) { + for i := 0; i < 4; i++ { + c := uintptr(C.offset(C.int(i))) + g := offset(i) + if c != g { + t.Errorf("%d: C: %d != Go %d", i, c, g) + } + } +} diff --git a/misc/cgo/test/issue29383.go b/misc/cgo/test/issue29383.go new file mode 100644 index 0000000000000..462c9a37df293 --- /dev/null +++ b/misc/cgo/test/issue29383.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cgo's /*line*/ comments failed when inserted after '/', +// because the result looked like a "//" comment. +// No runtime test; just make sure it compiles. + +package cgotest + +// #include +import "C" + +func Issue29383(n, size uint) int { + if ^C.size_t(0)/C.size_t(n) < C.size_t(size) { + return 0 + } + return 0 +} diff --git a/misc/cgo/test/issue29748.go b/misc/cgo/test/issue29748.go new file mode 100644 index 0000000000000..8229b3bcf08d4 --- /dev/null +++ b/misc/cgo/test/issue29748.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Error handling a struct initializer that requires pointer checking. +// Compilation test only, nothing to run. + +package cgotest + +// typedef struct { char **p; } S29748; +// static int f29748(S29748 *p) { return 0; } +import "C" + +var Vissue29748 = C.f29748(&C.S29748{ + nil, +}) + +func Fissue299748() { + C.f29748(&C.S29748{ + nil, + }) +} diff --git a/misc/cgo/test/issue29781.go b/misc/cgo/test/issue29781.go new file mode 100644 index 0000000000000..0fd8c08b8eb0e --- /dev/null +++ b/misc/cgo/test/issue29781.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Error with newline inserted into constant expression. +// Compilation test only, nothing to run. 
+ +package cgotest + +// static void issue29781F(char **p, int n) {} +// #define ISSUE29781C 0 +import "C" + +func issue29781G() { + var p *C.char + C.issue29781F(&p, C.ISSUE29781C+1) +} diff --git a/misc/cgo/test/issue30065.go b/misc/cgo/test/issue30065.go new file mode 100644 index 0000000000000..396d437f7ab3e --- /dev/null +++ b/misc/cgo/test/issue30065.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Don't make a private copy of an array when taking the address of an +// element. + +package cgotest + +// #include +import "C" + +import ( + "testing" + "unsafe" +) + +func test30065(t *testing.T) { + var a [256]byte + b := []byte("a") + C.memcpy(unsafe.Pointer(&a), unsafe.Pointer(&b[0]), 1) + if a[0] != 'a' { + t.Errorf("&a failed: got %c, want %c", a[0], 'a') + } + + b = []byte("b") + C.memcpy(unsafe.Pointer(&a[0]), unsafe.Pointer(&b[0]), 1) + if a[0] != 'b' { + t.Errorf("&a[0] failed: got %c, want %c", a[0], 'b') + } + + d := make([]byte, 256) + b = []byte("c") + C.memcpy(unsafe.Pointer(&d[0]), unsafe.Pointer(&b[0]), 1) + if d[0] != 'c' { + t.Errorf("&d[0] failed: got %c, want %c", d[0], 'c') + } +} diff --git a/misc/cgo/test/issue4339.go b/misc/cgo/test/issue4339.go index 4fa4b2bbd7e3c..3715fde57573a 100644 --- a/misc/cgo/test/issue4339.go +++ b/misc/cgo/test/issue4339.go @@ -5,7 +5,8 @@ package cgotest /* -#include "issue4339.h" +// We've historically permitted #include <>, so test it here. Issue 29333. +#include */ import "C" diff --git a/misc/cgo/test/issue9026/issue9026.go b/misc/cgo/test/issue9026/issue9026.go index 0af86e64da487..149c26562ad4d 100644 --- a/misc/cgo/test/issue9026/issue9026.go +++ b/misc/cgo/test/issue9026/issue9026.go @@ -29,7 +29,7 @@ func Test(t *testing.T) { // Brittle: the assertion may fail spuriously when the algorithm // changes, but should remain stable otherwise. 
got := fmt.Sprintf("%T %T", in, opts) - want := "issue9026._Ctype_struct___0 *issue9026._Ctype_struct___1" + want := "issue9026._Ctype_struct___0 *issue9026._Ctype_struct___0" if got != want { t.Errorf("Non-deterministic type names: got %s, want %s", got, want) } diff --git a/misc/cgo/test/issue9400_linux.go b/misc/cgo/test/issue9400_linux.go index 34eb4983a4161..7719535d251fa 100644 --- a/misc/cgo/test/issue9400_linux.go +++ b/misc/cgo/test/issue9400_linux.go @@ -41,7 +41,7 @@ func test9400(t *testing.T) { // Grow the stack and put down a test pattern const pattern = 0x123456789abcdef - var big [1024]uint64 // len must match assmebly + var big [1024]uint64 // len must match assembly for i := range big { big[i] = pattern } diff --git a/misc/cgo/test/test27660.go b/misc/cgo/test/test27660.go new file mode 100644 index 0000000000000..8c23b7dc58f23 --- /dev/null +++ b/misc/cgo/test/test27660.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stress the interaction between the race detector and cgo in an +// attempt to reproduce the memory corruption described in #27660. +// The bug was very timing sensitive; at the time of writing this +// test would only trigger the bug about once out of every five runs. + +package cgotest + +// #include +import "C" + +import ( + "context" + "math/rand" + "runtime" + "sync" + "testing" + "time" +) + +func test27660(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ints := make([]int, 100) + locks := make([]sync.Mutex, 100) + // Slowly create threads so that ThreadSanitizer is forced to + // frequently resize its SyncClocks. + for i := 0; i < 100; i++ { + go func() { + for ctx.Err() == nil { + // Sleep in C for long enough that it is likely that the runtime + // will retake this goroutine's currently wired P. 
+ C.usleep(1000 /* 1ms */) + runtime.Gosched() // avoid starvation (see #28701) + } + }() + go func() { + // Trigger lots of synchronization and memory reads/writes to + // increase the likelihood that the race described in #27660 + // results in corruption of ThreadSanitizer's internal state + // and thus an assertion failure or segfault. + for ctx.Err() == nil { + j := rand.Intn(100) + locks[j].Lock() + ints[j]++ + locks[j].Unlock() + } + }() + time.Sleep(time.Millisecond) + } +} diff --git a/misc/cgo/test/twoargs.go b/misc/cgo/test/twoargs.go new file mode 100644 index 0000000000000..ca0534ca31007 --- /dev/null +++ b/misc/cgo/test/twoargs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Crash from call with two arguments that need pointer checking. +// No runtime test; just make sure it compiles. + +package cgotest + +/* +static void twoargs1(void *p, int n) {} +static void *twoargs2() { return 0; } +static int twoargs3(void * p) { return 0; } +*/ +import "C" + +import "unsafe" + +func twoargsF() { + v := []string{} + C.twoargs1(C.twoargs2(), C.twoargs3(unsafe.Pointer(&v))) +} diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go index 71232305f60b8..457ac0db091af 100644 --- a/misc/cgo/testcarchive/carchive_test.go +++ b/misc/cgo/testcarchive/carchive_test.go @@ -14,6 +14,7 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" "strings" "syscall" "testing" @@ -83,13 +84,17 @@ func init() { cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...) 
} libgodir = GOOS + "_" + GOARCH - switch GOOS { - case "darwin": - if GOARCH == "arm" || GOARCH == "arm64" { + if runtime.Compiler == "gccgo" { + libgodir = "gccgo_" + libgodir + "_fPIC" + } else { + switch GOOS { + case "darwin": + if GOARCH == "arm" || GOARCH == "arm64" { + libgodir += "_shared" + } + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": libgodir += "_shared" } - case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - libgodir += "_shared" } cc = append(cc, "-I", filepath.Join("pkg", libgodir)) @@ -155,6 +160,9 @@ func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) { } else { ccArgs = append(ccArgs, "main_unix.c", libgoa) } + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } t.Log(ccArgs) if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) @@ -163,7 +171,11 @@ func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) { defer os.Remove(exe) binArgs := append(cmdToRun(exe), "arg1", "arg2") - if out, err := exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput(); err != nil { + cmd = exec.Command(binArgs[0], binArgs[1:]...) 
+ if runtime.Compiler == "gccgo" { + cmd.Env = append(os.Environ(), "GCCGO=1") + } + if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) } @@ -194,8 +206,13 @@ func checkLineComments(t *testing.T, hdrname string) { func TestInstall(t *testing.T) { defer os.RemoveAll("pkg") + libgoa := "libgo.a" + if runtime.Compiler == "gccgo" { + libgoa = "liblibgo.a" + } + testInstall(t, "./testp1"+exeSuffix, - filepath.Join("pkg", libgodir, "libgo.a"), + filepath.Join("pkg", libgodir, libgoa), filepath.Join("pkg", libgodir, "libgo.h"), "go", "install", "-i", "-buildmode=c-archive", "libgo") @@ -235,6 +252,9 @@ func TestEarlySignalHandler(t *testing.T) { checkLineComments(t, "libgo2.h") ccArgs := append(cc, "-o", "testp"+exeSuffix, "main2.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -265,6 +285,9 @@ func TestSignalForwarding(t *testing.T) { checkLineComments(t, "libgo2.h") ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -306,6 +329,9 @@ func TestSignalForwardingExternal(t *testing.T) { checkLineComments(t, "libgo2.h") ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -419,6 +445,9 @@ func TestOsSignal(t *testing.T) { checkLineComments(t, "libgo3.h") ccArgs := append(cc, "-o", "testp"+exeSuffix, "main3.c", "libgo3.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { 
t.Logf("%s", out) t.Fatal(err) @@ -452,6 +481,9 @@ func TestSigaltstack(t *testing.T) { checkLineComments(t, "libgo4.h") ccArgs := append(cc, "-o", "testp"+exeSuffix, "main4.c", "libgo4.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -476,6 +508,9 @@ func TestExtar(t *testing.T) { case "windows": t.Skip("skipping signal test on Windows") } + if runtime.Compiler == "gccgo" { + t.Skip("skipping -extar test when using gccgo") + } defer func() { os.Remove("libgo4.a") @@ -530,14 +565,26 @@ func TestPIE(t *testing.T) { t.Fatal(err) } - ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join("pkg", libgodir, "libgo.a")) + libgoa := "libgo.a" + if runtime.Compiler == "gccgo" { + libgoa = "liblibgo.a" + } + + ccArgs := append(cc, "-fPIE", "-pie", "-o", "testp"+exeSuffix, "main.c", "main_unix.c", filepath.Join("pkg", libgodir, libgoa)) + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) } binArgs := append(bin, "arg1", "arg2") - if out, err := exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput(); err != nil { + cmd = exec.Command(binArgs[0], binArgs[1:]...) 
+ if runtime.Compiler == "gccgo" { + cmd.Env = append(os.Environ(), "GCCGO=1") + } + if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) } @@ -605,6 +652,9 @@ func TestSIGPROF(t *testing.T) { checkLineComments(t, "libgo6.h") ccArgs := append(cc, "-o", "testp6"+exeSuffix, "main6.c", "libgo6.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -648,6 +698,9 @@ func TestCompileWithoutShared(t *testing.T) { // In some cases, -no-pie is needed here, but not accepted everywhere. First try // if -no-pie is accepted. See #22126. ccArgs := append(cc, "-o", exe, "-no-pie", "main5.c", "libgo2.a") + if runtime.Compiler == "gccgo" { + ccArgs = append(ccArgs, "-lgo") + } t.Log(ccArgs) out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() diff --git a/misc/cgo/testcarchive/main_unix.c b/misc/cgo/testcarchive/main_unix.c index 4d9d16f03b034..b23ac1c2428ba 100644 --- a/misc/cgo/testcarchive/main_unix.c +++ b/misc/cgo/testcarchive/main_unix.c @@ -5,6 +5,7 @@ #include #include #include +#include #include struct sigaction sa; @@ -30,7 +31,12 @@ int install_handler() { perror("sigaction"); return 2; } - if (osa.sa_handler == SIG_DFL || (osa.sa_flags&SA_ONSTACK) == 0) { + if (osa.sa_handler == SIG_DFL) { + fprintf(stderr, "Go runtime did not install signal handler\n"); + return 2; + } + // gccgo does not set SA_ONSTACK for SIGSEGV. 
+ if (getenv("GCCGO") == "" && (osa.sa_flags&SA_ONSTACK) == 0) { fprintf(stderr, "Go runtime did not install signal handler\n"); return 2; } diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go index 89b19d653a3d8..e5b90ff194ce1 100644 --- a/misc/cgo/testcshared/cshared_test.go +++ b/misc/cgo/testcshared/cshared_test.go @@ -602,3 +602,55 @@ func copyFile(t *testing.T, dst, src string) { t.Fatal(err) } } + +func TestGo2C2Go(t *testing.T) { + switch GOOS { + case "darwin": + // Darwin shared libraries don't support the multiple + // copies of the runtime package implied by this test. + t.Skip("linking c-shared into Go programs not supported on Darwin; issue 29061") + case "android": + t.Skip("test fails on android; issue 29087") + } + + t.Parallel() + + tmpdir, err := ioutil.TempDir("", "cshared-TestGo2C2Go") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + shlib := filepath.Join(tmpdir, "libtestgo2c2go."+libSuffix) + run(t, gopathEnv, "go", "build", "-buildmode=c-shared", "-o", shlib, "go2c2go/go") + + cgoCflags := os.Getenv("CGO_CFLAGS") + if cgoCflags != "" { + cgoCflags += " " + } + cgoCflags += "-I" + tmpdir + + cgoLdflags := os.Getenv("CGO_LDFLAGS") + if cgoLdflags != "" { + cgoLdflags += " " + } + cgoLdflags += "-L" + tmpdir + " -ltestgo2c2go" + + goenv := append(gopathEnv[:len(gopathEnv):len(gopathEnv)], "CGO_CFLAGS="+cgoCflags, "CGO_LDFLAGS="+cgoLdflags) + + ldLibPath := os.Getenv("LD_LIBRARY_PATH") + if ldLibPath != "" { + ldLibPath += ":" + } + ldLibPath += tmpdir + + runenv := append(gopathEnv[:len(gopathEnv):len(gopathEnv)], "LD_LIBRARY_PATH="+ldLibPath) + + bin := filepath.Join(tmpdir, "m1") + exeSuffix + run(t, goenv, "go", "build", "-o", bin, "go2c2go/m1") + runExe(t, runenv, bin) + + bin = filepath.Join(tmpdir, "m2") + exeSuffix + run(t, goenv, "go", "build", "-o", bin, "go2c2go/m2") + runExe(t, runenv, bin) +} diff --git a/misc/cgo/testcshared/src/go2c2go/go/shlib.go 
b/misc/cgo/testcshared/src/go2c2go/go/shlib.go new file mode 100644 index 0000000000000..76a5323ad2d55 --- /dev/null +++ b/misc/cgo/testcshared/src/go2c2go/go/shlib.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "C" + +//export GoFunc +func GoFunc() int { return 1 } + +func main() {} diff --git a/misc/cgo/testcshared/src/go2c2go/m1/c.c b/misc/cgo/testcshared/src/go2c2go/m1/c.c new file mode 100644 index 0000000000000..0e8fac4cf36f5 --- /dev/null +++ b/misc/cgo/testcshared/src/go2c2go/m1/c.c @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "libtestgo2c2go.h" + +int CFunc(void) { + return (GoFunc() << 8) + 2; +} diff --git a/misc/cgo/testcshared/src/go2c2go/m1/main.go b/misc/cgo/testcshared/src/go2c2go/m1/main.go new file mode 100644 index 0000000000000..17ba1eb0a72e5 --- /dev/null +++ b/misc/cgo/testcshared/src/go2c2go/m1/main.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// extern int CFunc(void); +import "C" + +import ( + "fmt" + "os" +) + +func main() { + got := C.CFunc() + const want = (1 << 8) | 2 + if got != want { + fmt.Printf("got %#x, want %#x\n", got, want) + os.Exit(1) + } +} diff --git a/misc/cgo/testcshared/src/go2c2go/m2/main.go b/misc/cgo/testcshared/src/go2c2go/m2/main.go new file mode 100644 index 0000000000000..91bf308057caa --- /dev/null +++ b/misc/cgo/testcshared/src/go2c2go/m2/main.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +// #include "libtestgo2c2go.h" +import "C" + +import ( + "fmt" + "os" +) + +func main() { + got := C.GoFunc() + const want = 1 + if got != want { + fmt.Printf("got %#x, want %#x\n", got, want) + os.Exit(1) + } +} diff --git a/misc/cgo/testplugin/src/checkdwarf/main.go b/misc/cgo/testplugin/src/checkdwarf/main.go new file mode 100644 index 0000000000000..7886c834e7ca2 --- /dev/null +++ b/misc/cgo/testplugin/src/checkdwarf/main.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Usage: +// +// checkdwarf +// +// Opens , which must be an executable or a library and checks that +// there is an entry in .debug_info whose name ends in + +package main + +import ( + "debug/dwarf" + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "os" + "strings" +) + +func usage() { + fmt.Fprintf(os.Stderr, "checkdwarf executable-or-library DIE-suffix\n") +} + +type dwarfer interface { + DWARF() (*dwarf.Data, error) +} + +func openElf(path string) dwarfer { + exe, err := elf.Open(path) + if err != nil { + return nil + } + return exe +} + +func openMacho(path string) dwarfer { + exe, err := macho.Open(path) + if err != nil { + return nil + } + return exe +} + +func openPE(path string) dwarfer { + exe, err := pe.Open(path) + if err != nil { + return nil + } + return exe +} + +func main() { + if len(os.Args) != 3 { + usage() + } + + exePath := os.Args[1] + dieSuffix := os.Args[2] + + var exe dwarfer + + for _, openfn := range []func(string) dwarfer{openMacho, openPE, openElf} { + exe = openfn(exePath) + if exe != nil { + break + } + } + + if exe == nil { + fmt.Fprintf(os.Stderr, "could not open %s\n", exePath) + os.Exit(1) + } + + data, err := exe.DWARF() + if err != nil { + fmt.Fprintf(os.Stderr, "%s: error opening DWARF: %v\n", exePath, err) + os.Exit(1) + } + + rdr := data.Reader() + for { + e, err := rdr.Next() + if err != nil { + 
fmt.Fprintf(os.Stderr, "%s: error reading DWARF: %v\n", exePath, err) + os.Exit(1) + } + if e == nil { + break + } + name, hasname := e.Val(dwarf.AttrName).(string) + if !hasname { + continue + } + if strings.HasSuffix(name, dieSuffix) { + // found + os.Exit(0) + } + } + + fmt.Fprintf(os.Stderr, "%s: no entry with a name ending in %q was found\n", exePath, dieSuffix) + os.Exit(1) +} diff --git a/misc/cgo/testplugin/test.bash b/misc/cgo/testplugin/test.bash index bf8ed3cd191ea..1b94bc4badbcf 100755 --- a/misc/cgo/testplugin/test.bash +++ b/misc/cgo/testplugin/test.bash @@ -32,6 +32,14 @@ GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed1.so u GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed2.so unnamed2/main.go GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" host +# test that DWARF sections are emitted for plugins and programs importing "plugin" +if [ $GOOS != "darwin" ]; then + # On macOS, for some reason, the linker doesn't add debug sections to .so, + # see issue #27502. + go run src/checkdwarf/main.go plugin2.so plugin2.UnexportedNameReuse +fi +go run src/checkdwarf/main.go host main.main + LD_LIBRARY_PATH=$(pwd) ./host # Test that types and itabs get properly uniqified. diff --git a/misc/cgo/testplugin/unnamed1/main.go b/misc/cgo/testplugin/unnamed1/main.go index 5c1df086d76df..caf09c9e890b9 100644 --- a/misc/cgo/testplugin/unnamed1/main.go +++ b/misc/cgo/testplugin/unnamed1/main.go @@ -9,7 +9,7 @@ import "C" func FuncInt() int { return 1 } -// Add a recursive type to to check that type equality across plugins doesn't +// Add a recursive type to check that type equality across plugins doesn't // crash. 
See https://golang.org/issues/19258 func FuncRecursive() X { return X{} } diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go index f09ad52ceeebe..218e2254295ba 100644 --- a/misc/cgo/testsanitizers/cc_test.go +++ b/misc/cgo/testsanitizers/cc_test.go @@ -374,7 +374,7 @@ func (c *config) checkRuntime() (skip bool, err error) { } // libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler. - // Dump the preprocessor defines to check that that works. + // Dump the preprocessor defines to check that works. // (Sometimes it doesn't: see https://golang.org/issue/15983.) cmd, err := cc(c.cFlags...) if err != nil { diff --git a/misc/cgo/testsanitizers/tsan_test.go b/misc/cgo/testsanitizers/tsan_test.go index 314b5072f360e..1d769a98b6e26 100644 --- a/misc/cgo/testsanitizers/tsan_test.go +++ b/misc/cgo/testsanitizers/tsan_test.go @@ -5,9 +5,9 @@ package sanitizers_test import ( + "runtime" "strings" "testing" - "runtime" ) func TestTSAN(t *testing.T) { diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go index 846a27173e311..41a24efe22c0e 100644 --- a/misc/cgo/testshared/shared_test.go +++ b/misc/cgo/testshared/shared_test.go @@ -560,7 +560,7 @@ func TestNotes(t *testing.T) { abiHashNoteFound = true case 3: // ELF_NOTE_GODEPS_TAG if depsNoteFound { - t.Error("multiple depedency list notes") + t.Error("multiple dependency list notes") } testDepsNote(t, f, note) depsNoteFound = true @@ -578,7 +578,7 @@ func TestNotes(t *testing.T) { } // Build a GOPATH package (depBase) into a shared library that links against the goroot -// runtime, another package (dep2) that links against the first, and and an +// runtime, another package (dep2) that links against the first, and an // executable that links against dep2. 
func TestTwoGopathShlibs(t *testing.T) { goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase") @@ -911,3 +911,9 @@ func TestGlobal(t *testing.T) { func TestTestInstalledShared(t *testing.T) { goCmd(nil, "test", "-linkshared", "-test.short", "sync/atomic") } + +// Test generated pointer method with -linkshared. +// Issue 25065. +func TestGeneratedMethod(t *testing.T) { + goCmd(t, "install", "-buildmode=shared", "-linkshared", "issue25065") +} diff --git a/misc/cgo/testshared/src/issue25065/a.go b/misc/cgo/testshared/src/issue25065/a.go new file mode 100644 index 0000000000000..979350ff24c9c --- /dev/null +++ b/misc/cgo/testshared/src/issue25065/a.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package issue25065 has a type with a method that is +// 1) referenced in a method expression +// 2) not called +// 3) not converted to an interface +// 4) is a value method but the reference is to the pointer method +// These cases avoid the call to makefuncsym from typecheckfunc, but we +// still need to call makefuncsym somehow or the symbol will not be defined. 
+package issue25065 + +type T int + +func (t T) M() {} + +func F() func(*T) { + return (*T).M +} diff --git a/misc/ios/detect.go b/misc/ios/detect.go index 2594185c11e86..1d47e47c86097 100644 --- a/misc/ios/detect.go +++ b/misc/ios/detect.go @@ -33,9 +33,9 @@ func main() { fail("did not find mobile provision matching device udids %q", udids) } - fmt.Println("Available provisioning profiles below.") - fmt.Println("NOTE: Any existing app on the device with the app id specified by GOIOS_APP_ID") - fmt.Println("will be overwritten when running Go programs.") + fmt.Println("# Available provisioning profiles below.") + fmt.Println("# NOTE: Any existing app on the device with the app id specified by GOIOS_APP_ID") + fmt.Println("# will be overwritten when running Go programs.") for _, mp := range mps { fmt.Println() f, err := ioutil.TempFile("", "go_ios_detect_") diff --git a/misc/nacl/README b/misc/nacl/README index 99b94dc90ae42..179e526d89aa2 100644 --- a/misc/nacl/README +++ b/misc/nacl/README @@ -26,7 +26,7 @@ scheme. # Download NaCl Download nacl_sdk.zip file from - https://developers.google.com/native-client/dev/sdk/download + https://developer.chrome.com/native-client/sdk/download and unpack it. I chose /opt/nacl_sdk. # Update @@ -37,7 +37,7 @@ sdk. These are released every 6-8 weeks, in line with Chrome releases. % cd /opt/nacl_sdk % ./naclsdk update -At this time pepper_40 is the stable version. The NaCl port needs at least pepper_39 +At this time pepper_49 is the stable version. The NaCl port needs at least pepper_39 to work. If naclsdk downloads a later version, please adjust accordingly. The cmd/go helper scripts expect that the loaders sel_ldr_{x86_{32,64},arm} and diff --git a/misc/nacl/testzip.proto b/misc/nacl/testzip.proto index f15a2ab224636..d05219364dcb7 100644 --- a/misc/nacl/testzip.proto +++ b/misc/nacl/testzip.proto @@ -47,6 +47,9 @@ go src=.. google pprof internal + binutils + testdata + + driver testdata + @@ -151,6 +154,9 @@ go src=.. 
trace testdata + + xcoff + testdata + + io + mime @@ -177,6 +183,8 @@ go src=.. strconv testdata + + testdata + + text template testdata diff --git a/misc/sortac/sortac.go b/misc/sortac/sortac.go deleted file mode 100644 index f61aa9617e9f6..0000000000000 --- a/misc/sortac/sortac.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Sortac sorts the AUTHORS and CONTRIBUTORS files. -// -// Usage: -// -// sortac [file...] -// -// Sortac sorts the named files in place. -// If given no arguments, it sorts standard input to standard output. -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - - "golang.org/x/text/collate" - "golang.org/x/text/language" -) - -func main() { - log.SetFlags(0) - log.SetPrefix("sortac: ") - flag.Parse() - - args := flag.Args() - if len(args) == 0 { - os.Stdout.Write(sortAC(os.Stdin)) - } else { - for _, arg := range args { - f, err := os.Open(arg) - if err != nil { - log.Fatal(err) - } - sorted := sortAC(f) - f.Close() - if err := ioutil.WriteFile(arg, sorted, 0644); err != nil { - log.Fatal(err) - } - } - } -} - -func sortAC(r io.Reader) []byte { - bs := bufio.NewScanner(r) - var header []string - var lines []string - for bs.Scan() { - t := bs.Text() - lines = append(lines, t) - if t == "# Please keep the list sorted." 
{ - header = lines - lines = nil - continue - } - } - if err := bs.Err(); err != nil { - log.Fatal(err) - } - - var out bytes.Buffer - c := collate.New(language.Und, collate.Loose) - c.SortStrings(lines) - for _, l := range header { - fmt.Fprintln(&out, l) - } - for _, l := range lines { - fmt.Fprintln(&out, l) - } - return out.Bytes() -} diff --git a/misc/wasm/wasm_exec.html b/misc/wasm/wasm_exec.html index cc37ea73ce5a7..72e64473eb588 100644 --- a/misc/wasm/wasm_exec.html +++ b/misc/wasm/wasm_exec.html @@ -12,6 +12,11 @@ + + +function hideBlock(el) { + var es = el.parentNode.parentNode.getElementsByClassName("ssa-value-list"); + if (es.length===0) + return; + var e = es[0]; + if (e.style.display === 'block' || e.style.display === '') { + e.style.display = 'none'; + el.innerHTML = '+'; + } else { + e.style.display = 'block'; + el.innerHTML = '-'; + } +} + +// TODO: scale the graph with the viewBox attribute. +function graphReduce(id) { + var node = document.getElementById(id); + if (node) { + node.width.baseVal.value *= 0.9; + node.height.baseVal.value *= 0.9; + } + return false; +} + +function graphEnlarge(id) { + var node = document.getElementById(id); + if (node) { + node.width.baseVal.value *= 1.1; + node.height.baseVal.value *= 1.1; + } + return false; +} + +function makeDraggable(event) { + var svg = event.target; + if (window.PointerEvent) { + svg.addEventListener('pointerdown', startDrag); + svg.addEventListener('pointermove', drag); + svg.addEventListener('pointerup', endDrag); + svg.addEventListener('pointerleave', endDrag); + } else { + svg.addEventListener('mousedown', startDrag); + svg.addEventListener('mousemove', drag); + svg.addEventListener('mouseup', endDrag); + svg.addEventListener('mouseleave', endDrag); + } + + var point = svg.createSVGPoint(); + var isPointerDown = false; + var pointerOrigin; + var viewBox = svg.viewBox.baseVal; + + function getPointFromEvent (event) { + point.x = event.clientX; + point.y = event.clientY; + + // We get the 
current transformation matrix of the SVG and we inverse it + var invertedSVGMatrix = svg.getScreenCTM().inverse(); + return point.matrixTransform(invertedSVGMatrix); + } + + function startDrag(event) { + isPointerDown = true; + pointerOrigin = getPointFromEvent(event); + } + + function drag(event) { + if (!isPointerDown) { + return; + } + event.preventDefault(); + + var pointerPosition = getPointFromEvent(event); + viewBox.x -= (pointerPosition.x - pointerOrigin.x); + viewBox.y -= (pointerPosition.y - pointerOrigin.y); + } + + function endDrag(event) { + isPointerDown = false; + } +} `) w.WriteString("") @@ -378,7 +592,7 @@ function toggle_visibility(id) { w.WriteString(html.EscapeString(name)) w.WriteString("") w.WriteString(` -help +help

    @@ -396,6 +610,11 @@ Faded out values and blocks are dead code that has not been eliminated. Values printed in italics have a dependency cycle.

    +

    +CFG: Dashed edge is for unlikely branches. Blue color is for backward edges. +Edge with a dot means that this edge follows the order in which blocks were laidout. +

    +
    `) w.WriteString("") @@ -411,15 +630,125 @@ func (w *HTMLWriter) Close() { io.WriteString(w.w, "") io.WriteString(w.w, "") w.w.Close() + fmt.Printf("dumped SSA to %v\n", w.path) } // WriteFunc writes f in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. func (w *HTMLWriter) WriteFunc(phase, title string, f *Func) { if w == nil { return // avoid generating HTML just to discard it } - w.WriteColumn(phase, title, "", f.HTML()) - // TODO: Add visual representation of f's CFG. + //w.WriteColumn(phase, title, "", f.HTML()) + w.WriteColumn(phase, title, "", f.HTML(phase, w.dot)) +} + +// FuncLines contains source code for a function to be displayed +// in sources column. +type FuncLines struct { + Filename string + StartLineno uint + Lines []string +} + +// ByTopo sorts topologically: target function is on top, +// followed by inlined functions sorted by filename and line numbers. +type ByTopo []*FuncLines + +func (x ByTopo) Len() int { return len(x) } +func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x ByTopo) Less(i, j int) bool { + a := x[i] + b := x[j] + if a.Filename == b.Filename { + return a.StartLineno < b.StartLineno + } + return a.Filename < b.Filename +} + +// WriteSources writes lines as source code in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. +func (w *HTMLWriter) WriteSources(phase string, all []*FuncLines) { + if w == nil { + return // avoid generating HTML just to discard it + } + var buf bytes.Buffer + fmt.Fprint(&buf, "
    ") + filename := "" + for _, fl := range all { + fmt.Fprint(&buf, "
     
    ") + if filename != fl.Filename { + fmt.Fprint(&buf, "
     
    ") + filename = fl.Filename + } + for i := range fl.Lines { + ln := int(fl.StartLineno) + i + fmt.Fprintf(&buf, "
    %v
    ", ln, ln) + } + } + fmt.Fprint(&buf, "
    ")
    +	filename = ""
    +	for _, fl := range all {
    +		fmt.Fprint(&buf, "
     
    ") + if filename != fl.Filename { + fmt.Fprintf(&buf, "
    %v
    ", fl.Filename) + filename = fl.Filename + } + for i, line := range fl.Lines { + ln := int(fl.StartLineno) + i + var escaped string + if strings.TrimSpace(line) == "" { + escaped = " " + } else { + escaped = html.EscapeString(line) + } + fmt.Fprintf(&buf, "
    %v
    ", ln, escaped) + } + } + fmt.Fprint(&buf, "
    ") + w.WriteColumn(phase, phase, "allow-x-scroll", buf.String()) +} + +func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) { + if w == nil { + return // avoid generating HTML just to discard it + } + lines := strings.Split(buf.String(), "\n") + var out bytes.Buffer + + fmt.Fprint(&out, "
    ") + for _, l := range lines { + l = strings.TrimSpace(l) + var escaped string + var lineNo string + if l == "" { + escaped = " " + } else { + if strings.HasPrefix(l, "buildssa") { + escaped = fmt.Sprintf("%v", l) + } else { + // Parse the line number from the format l(123). + idx := strings.Index(l, " l(") + if idx != -1 { + subl := l[idx+3:] + idxEnd := strings.Index(subl, ")") + if idxEnd != -1 { + if _, err := strconv.Atoi(subl[:idxEnd]); err == nil { + lineNo = subl[:idxEnd] + } + } + } + escaped = html.EscapeString(l) + } + } + if lineNo != "" { + fmt.Fprintf(&out, "
    %v
    ", lineNo, escaped) + } else { + fmt.Fprintf(&out, "
    %v
    ", escaped) + } + } + fmt.Fprint(&out, "
    ") + w.WriteColumn(phase, phase, "allow-x-scroll", out.String()) } // WriteColumn writes raw HTML in a column headed by title. @@ -470,9 +799,9 @@ func (v *Value) LongHTML() string { // maybe we could replace some of that with formatting. s := fmt.Sprintf("", v.String()) - linenumber := "(?)" + linenumber := "(?)" if v.Pos.IsKnown() { - linenumber = fmt.Sprintf("(%s)", v.Pos.LineNumberHTML()) + linenumber = fmt.Sprintf("(%s)", v.Pos.LineNumber(), v.Pos.LineNumberHTML()) } s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String()) @@ -536,22 +865,147 @@ func (b *Block) LongHTML() string { if b.Pos.IsKnown() { // TODO does not begin to deal with the full complexity of line numbers. // Maybe we want a string/slice instead, of outer-inner when inlining. - s += fmt.Sprintf(" (line %s)", b.Pos.LineNumberHTML()) + s += fmt.Sprintf(" (%s)", b.Pos.LineNumber(), b.Pos.LineNumberHTML()) } return s } -func (f *Func) HTML() string { - var buf bytes.Buffer - fmt.Fprint(&buf, "") - p := htmlFuncPrinter{w: &buf} +func (f *Func) HTML(phase string, dot *dotWriter) string { + buf := new(bytes.Buffer) + if dot != nil { + dot.writeFuncSVG(buf, phase, f) + } + fmt.Fprint(buf, "") + p := htmlFuncPrinter{w: buf} fprintFunc(p, f) // fprintFunc(&buf, f) // TODO: HTML, not text,
    for line breaks, etc. - fmt.Fprint(&buf, "
    ") + fmt.Fprint(buf, "
    ") return buf.String() } +func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) { + if d.broken { + return + } + if _, ok := d.phases[phase]; !ok { + return + } + cmd := exec.Command(d.path, "-Tsvg") + pipe, err := cmd.StdinPipe() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + buf := new(bytes.Buffer) + cmd.Stdout = buf + bufErr := new(bytes.Buffer) + cmd.Stderr = bufErr + err = cmd.Start() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + fmt.Fprint(pipe, `digraph "" { margin=0; size="4,40"; ranksep=.2; `) + id := strings.Replace(phase, " ", "-", -1) + fmt.Fprintf(pipe, `id="g_graph_%s";`, id) + fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`) + fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`) + for i, b := range f.Blocks { + if b.Kind == BlockInvalid { + continue + } + layout := "" + if f.laidout { + layout = fmt.Sprintf(" #%d", i) + } + fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v",tooltip="%v"];`, b, b, layout, b.Kind.String(), id, b, b.LongString()) + } + indexOf := make([]int, f.NumBlocks()) + for i, b := range f.Blocks { + indexOf[b.ID] = i + } + layoutDrawn := make([]bool, f.NumBlocks()) + + ponums := make([]int32, f.NumBlocks()) + _ = postorderWithNumbering(f, ponums) + isBackEdge := func(from, to ID) bool { + return ponums[from] <= ponums[to] + } + + for _, b := range f.Blocks { + for i, s := range b.Succs { + style := "solid" + color := "black" + arrow := "vee" + if b.unlikelyIndex() == i { + style = "dashed" + } + if f.laidout && indexOf[s.b.ID] == indexOf[b.ID]+1 { + // Red color means ordered edge. It overrides other colors. 
+ arrow = "dotvee" + layoutDrawn[s.b.ID] = true + } else if isBackEdge(b.ID, s.b.ID) { + color = "blue" + } + fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow) + } + } + if f.laidout { + fmt.Fprintln(pipe, `edge[constraint=false,color=gray,style=solid,arrowhead=dot];`) + colors := [...]string{"#eea24f", "#f38385", "#f4d164", "#ca89fc", "gray"} + ci := 0 + for i := 1; i < len(f.Blocks); i++ { + if layoutDrawn[f.Blocks[i].ID] { + continue + } + fmt.Fprintf(pipe, `%s -> %s [color="%s"];`, f.Blocks[i-1], f.Blocks[i], colors[ci]) + ci = (ci + 1) % len(colors) + } + } + fmt.Fprint(pipe, "}") + pipe.Close() + err = cmd.Wait() + if err != nil { + d.broken = true + fmt.Printf("dot: %v\n%v\n", err, bufErr.String()) + return + } + + svgID := "svg_graph_" + id + fmt.Fprintf(w, `
    `, svgID, svgID) + // For now, an awful hack: edit the html as it passes through + // our fingers, finding ' 0 { + io.WriteString(p.w, ``) + } io.WriteString(p.w, "") if len(b.Values) > 0 { // start list of values io.WriteString(p.w, "
  • ") @@ -589,7 +1045,6 @@ func (p htmlFuncPrinter) endBlock(b *Block) { fmt.Fprint(p.w, b.LongHTML()) io.WriteString(p.w, "
  • ") io.WriteString(p.w, "") - // io.WriteString(p.w, "
    ") } func (p htmlFuncPrinter) value(v *Value, live bool) { @@ -617,3 +1072,63 @@ func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) { } fmt.Fprintf(p.w, "") } + +type dotWriter struct { + path string + broken bool + phases map[string]bool // keys specify phases with CFGs +} + +// newDotWriter returns non-nil value when mask is valid. +// dotWriter will generate SVGs only for the phases specifed in the mask. +// mask can contain following patterns and combinations of them: +// * - all of them; +// x-y - x through y, inclusive; +// x,y - x and y, but not the passes between. +func newDotWriter(mask string) *dotWriter { + if mask == "" { + return nil + } + // User can specify phase name with _ instead of spaces. + mask = strings.Replace(mask, "_", " ", -1) + ph := make(map[string]bool) + ranges := strings.Split(mask, ",") + for _, r := range ranges { + spl := strings.Split(r, "-") + if len(spl) > 2 { + fmt.Printf("range is not valid: %v\n", mask) + return nil + } + var first, last int + if mask == "*" { + first = 0 + last = len(passes) - 1 + } else { + first = passIdxByName(spl[0]) + last = passIdxByName(spl[len(spl)-1]) + } + if first < 0 || last < 0 || first > last { + fmt.Printf("range is not valid: %v\n", r) + return nil + } + for p := first; p <= last; p++ { + ph[passes[p].name] = true + } + } + + path, err := exec.LookPath("dot") + if err != nil { + fmt.Println(err) + return nil + } + return &dotWriter{path: path, phases: ph} +} + +func passIdxByName(name string) int { + for i, p := range passes { + if p.name == name { + return i + } + } + return -1 +} diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go index 15e111ae7c4e4..338cd91c47feb 100644 --- a/src/cmd/compile/internal/ssa/layout.go +++ b/src/cmd/compile/internal/ssa/layout.go @@ -12,7 +12,7 @@ func layout(f *Func) { } // Register allocation may use a different order which has constraints -// imposed by the linear-scan algorithm. 
Note that that f.pass here is +// imposed by the linear-scan algorithm. Note that f.pass here is // regalloc, so the switch is conditional on -d=ssa/regalloc/test=N func layoutRegallocOrder(f *Func) []*Block { @@ -143,5 +143,7 @@ blockloop: } } } + f.laidout = true return order + //f.Blocks = order } diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go index 24f927f144edb..ab0fa803bfdea 100644 --- a/src/cmd/compile/internal/ssa/lower.go +++ b/src/cmd/compile/internal/ssa/lower.go @@ -21,7 +21,7 @@ func checkLower(f *Func) { continue // lowered } switch v.Op { - case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpConvert: + case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpConvert, OpInlMark: continue // ok not to lower case OpGetG: if f.Config.hasGReg { diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go index 0457e90b53c93..12044111ea8b3 100644 --- a/src/cmd/compile/internal/ssa/magic.go +++ b/src/cmd/compile/internal/ssa/magic.go @@ -83,7 +83,7 @@ import "math/big" // a+b has n+1 bits in it. Nevertheless, can be done // in 2 instructions on x86.) -// umagicOK returns whether we should strength reduce a n-bit divide by c. +// umagicOK reports whether we should strength reduce a n-bit divide by c. func umagicOK(n uint, c int64) bool { // Convert from ConstX auxint values to the real uint64 constant they represent. 
d := uint64(c) << (64 - n) >> (64 - n) diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 0359e25c98735..5f58e2d7ec1ef 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/internal/objabi" "cmd/internal/src" ) @@ -47,7 +48,8 @@ func nilcheckelim(f *Func) { // a value resulting from taking the address of a // value, or a value constructed from an offset of a // non-nil ptr (OpAddPtr) implies it is non-nil - if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr { + // We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180. + if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 { nonNilValues[v.ID] = true } } @@ -182,6 +184,9 @@ func nilcheckelim(f *Func) { // This should agree with minLegalPointer in the runtime. const minZeroPage = 4096 +// faultOnLoad is true if a load to an address below minZeroPage will trigger a SIGSEGV. +var faultOnLoad = objabi.GOOS != "aix" + // nilcheckelim2 eliminates unnecessary nil checks. // Runs after lowering and scheduling. func nilcheckelim2(f *Func) { @@ -224,12 +229,16 @@ func nilcheckelim2(f *Func) { // Find any pointers that this op is guaranteed to fault on if nil. var ptrstore [2]*Value ptrs := ptrstore[:0] - if opcodeTable[v.Op].faultOnNilArg0 { + if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) { + // On AIX, only writing will fault. ptrs = append(ptrs, v.Args[0]) } - if opcodeTable[v.Op].faultOnNilArg1 { + if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) { + // On AIX, only writing will fault. + // LoweredMove is a special case because it's considered as a "mem" as it stores on arg0 but arg1 is accessed as a load and should be checked. 
ptrs = append(ptrs, v.Args[1]) } + for _, ptr := range ptrs { // Check to make sure the offset is small. switch opcodeTable[v.Op].auxType { @@ -281,6 +290,6 @@ func nilcheckelim2(f *Func) { b.Values = b.Values[:i] // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find - // more unnecessary nil checks. Would fix test/nilptr3_ssa.go:157. + // more unnecessary nil checks. Would fix test/nilptr3.go:159. } } diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index 815c4a5047289..b2f5cae08818f 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -87,7 +87,7 @@ func TestNilcheckSimple(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -124,7 +124,7 @@ func TestNilcheckDomOrder(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -157,7 +157,7 @@ func TestNilcheckAddr(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -191,7 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -235,7 +235,7 @@ func TestNilcheckPhi(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -276,7 +276,7 @@ func TestNilcheckKeepRemove(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -323,7 +323,7 @@ func TestNilcheckInFalseBranch(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -374,7 +374,7 @@ func TestNilcheckUser(t 
*testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) @@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) { nilcheckelim(fun.f) // clean up the removed nil check - fuse(fun.f) + fusePlain(fun.f) deadcode(fun.f) CheckFunc(fun.f) diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go index 662f58e4b5766..3e14b9e3df38a 100644 --- a/src/cmd/compile/internal/ssa/numberlines.go +++ b/src/cmd/compile/internal/ssa/numberlines.go @@ -20,7 +20,7 @@ func isPoorStatementOp(op Op) bool { return false } -// LosesStmtMark returns whether a prog with op as loses its statement mark on the way to DWARF. +// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF. // The attributes from some opcodes are lost in translation. // TODO: this is an artifact of how funcpctab combines information for instructions at a single PC. // Should try to fix it there. diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 610921808e723..43f5c59591a00 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -50,9 +50,17 @@ type outputInfo struct { } type regInfo struct { - inputs []inputInfo // ordered in register allocation order + // inputs encodes the register restrictions for an instruction's inputs. + // Each entry specifies an allowed register set for a particular input. + // They are listed in the order in which regalloc should pick a register + // from the register set (most constrained first). + // Inputs which do not need registers are not listed. + inputs []inputInfo + // clobbers encodes the set of registers that are overwritten by + // the instruction (other than the output registers). clobbers regMask - outputs []outputInfo // ordered in register allocation order + // outputs is the same as inputs, but for the outputs of the instruction. 
+ outputs []outputInfo } type auxType int8 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index b479bca7ff470..2278407a260e7 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -22,6 +22,8 @@ const ( Block386LE Block386GT Block386GE + Block386OS + Block386OC Block386ULT Block386ULE Block386UGT @@ -37,6 +39,8 @@ const ( BlockAMD64LE BlockAMD64GT BlockAMD64GE + BlockAMD64OS + BlockAMD64OC BlockAMD64ULT BlockAMD64ULE BlockAMD64UGT @@ -130,6 +134,8 @@ var blockString = [...]string{ Block386LE: "LE", Block386GT: "GT", Block386GE: "GE", + Block386OS: "OS", + Block386OC: "OC", Block386ULT: "ULT", Block386ULE: "ULE", Block386UGT: "UGT", @@ -145,6 +151,8 @@ var blockString = [...]string{ BlockAMD64LE: "LE", BlockAMD64GT: "GT", BlockAMD64GE: "GE", + BlockAMD64OS: "OS", + BlockAMD64OC: "OC", BlockAMD64ULT: "ULT", BlockAMD64ULE: "ULE", BlockAMD64UGT: "UGT", @@ -262,6 +270,8 @@ const ( Op386SUBSDload Op386MULSSload Op386MULSDload + Op386DIVSSload + Op386DIVSDload Op386ADDL Op386ADDLconst Op386ADDLcarry @@ -276,6 +286,7 @@ const ( Op386SBBLconst Op386MULL Op386MULLconst + Op386MULLU Op386HMULL Op386HMULLU Op386MULLQU @@ -300,6 +311,12 @@ const ( Op386CMPLconst Op386CMPWconst Op386CMPBconst + Op386CMPLload + Op386CMPWload + Op386CMPBload + Op386CMPLconstload + Op386CMPWconstload + Op386CMPBconstload Op386UCOMISS Op386UCOMISD Op386TESTL @@ -327,9 +344,16 @@ const ( Op386ROLBconst Op386ADDLload Op386SUBLload + Op386MULLload Op386ANDLload Op386ORLload Op386XORLload + Op386ADDLloadidx4 + Op386SUBLloadidx4 + Op386MULLloadidx4 + Op386ANDLloadidx4 + Op386ORLloadidx4 + Op386XORLloadidx4 Op386NEGL Op386NOTL Op386BSFL @@ -349,6 +373,7 @@ const ( Op386SETBE Op386SETA Op386SETAE + Op386SETO Op386SETEQF Op386SETNEF Op386SETORD @@ -385,6 +410,19 @@ const ( Op386ANDLmodify Op386ORLmodify Op386XORLmodify + Op386ADDLmodifyidx4 + Op386SUBLmodifyidx4 + Op386ANDLmodifyidx4 + Op386ORLmodifyidx4 + 
Op386XORLmodifyidx4 + Op386ADDLconstmodify + Op386ANDLconstmodify + Op386ORLconstmodify + Op386XORLconstmodify + Op386ADDLconstmodifyidx4 + Op386ANDLconstmodifyidx4 + Op386ORLconstmodifyidx4 + Op386XORLconstmodifyidx4 Op386MOVBloadidx1 Op386MOVWloadidx1 Op386MOVWloadidx2 @@ -456,6 +494,8 @@ const ( OpAMD64SUBSDload OpAMD64MULSSload OpAMD64MULSDload + OpAMD64DIVSSload + OpAMD64DIVSDload OpAMD64ADDQ OpAMD64ADDL OpAMD64ADDQconst @@ -470,6 +510,8 @@ const ( OpAMD64MULL OpAMD64MULQconst OpAMD64MULLconst + OpAMD64MULLU + OpAMD64MULQU OpAMD64HMULQ OpAMD64HMULL OpAMD64HMULQU @@ -481,20 +523,35 @@ const ( OpAMD64DIVQU OpAMD64DIVLU OpAMD64DIVWU + OpAMD64NEGLflags + OpAMD64ADDQcarry + OpAMD64ADCQ + OpAMD64ADDQconstcarry + OpAMD64ADCQconst + OpAMD64SUBQborrow + OpAMD64SBBQ + OpAMD64SUBQconstborrow + OpAMD64SBBQconst OpAMD64MULQU2 OpAMD64DIVQU2 OpAMD64ANDQ OpAMD64ANDL OpAMD64ANDQconst OpAMD64ANDLconst + OpAMD64ANDQconstmodify + OpAMD64ANDLconstmodify OpAMD64ORQ OpAMD64ORL OpAMD64ORQconst OpAMD64ORLconst + OpAMD64ORQconstmodify + OpAMD64ORLconstmodify OpAMD64XORQ OpAMD64XORL OpAMD64XORQconst OpAMD64XORLconst + OpAMD64XORQconstmodify + OpAMD64XORLconstmodify OpAMD64CMPQ OpAMD64CMPL OpAMD64CMPW @@ -529,6 +586,18 @@ const ( OpAMD64BTRQconst OpAMD64BTSLconst OpAMD64BTSQconst + OpAMD64BTCQmodify + OpAMD64BTCLmodify + OpAMD64BTSQmodify + OpAMD64BTSLmodify + OpAMD64BTRQmodify + OpAMD64BTRLmodify + OpAMD64BTCQconstmodify + OpAMD64BTCLconstmodify + OpAMD64BTSQconstmodify + OpAMD64BTSLconstmodify + OpAMD64BTRQconstmodify + OpAMD64BTRLconstmodify OpAMD64TESTQ OpAMD64TESTL OpAMD64TESTW @@ -579,6 +648,16 @@ const ( OpAMD64ORLload OpAMD64XORQload OpAMD64XORLload + OpAMD64ADDQmodify + OpAMD64SUBQmodify + OpAMD64ANDQmodify + OpAMD64ORQmodify + OpAMD64XORQmodify + OpAMD64ADDLmodify + OpAMD64SUBLmodify + OpAMD64ANDLmodify + OpAMD64ORLmodify + OpAMD64XORLmodify OpAMD64NEGQ OpAMD64NEGL OpAMD64NOTQ @@ -647,6 +726,7 @@ const ( OpAMD64SETBE OpAMD64SETA OpAMD64SETAE + OpAMD64SETO OpAMD64SETEQstore 
OpAMD64SETNEstore OpAMD64SETLstore @@ -1076,6 +1156,7 @@ const ( OpARM64LoweredMuluhilo OpARM64MVN OpARM64NEG + OpARM64FABSD OpARM64FNEGS OpARM64FNEGD OpARM64FSQRTD @@ -1098,12 +1179,18 @@ const ( OpARM64FMSUBD OpARM64FNMSUBS OpARM64FNMSUBD + OpARM64MADD + OpARM64MADDW + OpARM64MSUB + OpARM64MSUBW OpARM64SLL OpARM64SLLconst OpARM64SRL OpARM64SRLconst OpARM64SRA OpARM64SRAconst + OpARM64ROR + OpARM64RORW OpARM64RORconst OpARM64RORWconst OpARM64EXTRconst @@ -1122,6 +1209,12 @@ const ( OpARM64TSTWconst OpARM64FCMPS OpARM64FCMPD + OpARM64MVNshiftLL + OpARM64MVNshiftRL + OpARM64MVNshiftRA + OpARM64NEGshiftLL + OpARM64NEGshiftRL + OpARM64NEGshiftRA OpARM64ADDshiftLL OpARM64ADDshiftRL OpARM64ADDshiftRA @@ -1149,6 +1242,12 @@ const ( OpARM64CMPshiftLL OpARM64CMPshiftRL OpARM64CMPshiftRA + OpARM64CMNshiftLL + OpARM64CMNshiftRL + OpARM64CMNshiftRA + OpARM64TSTshiftLL + OpARM64TSTshiftRL + OpARM64TSTshiftRA OpARM64BFI OpARM64BFXIL OpARM64SBFIZ @@ -1175,6 +1274,8 @@ const ( OpARM64MOVHUloadidx OpARM64MOVBloadidx OpARM64MOVBUloadidx + OpARM64FMOVSloadidx + OpARM64FMOVDloadidx OpARM64MOVHloadidx2 OpARM64MOVHUloadidx2 OpARM64MOVWloadidx4 @@ -1191,6 +1292,8 @@ const ( OpARM64MOVHstoreidx OpARM64MOVWstoreidx OpARM64MOVDstoreidx + OpARM64FMOVSstoreidx + OpARM64FMOVDstoreidx OpARM64MOVHstoreidx2 OpARM64MOVWstoreidx4 OpARM64MOVDstoreidx8 @@ -1208,6 +1311,8 @@ const ( OpARM64MOVDstorezeroidx8 OpARM64FMOVDgpfp OpARM64FMOVDfpgp + OpARM64FMOVSgpfp + OpARM64FMOVSfpgp OpARM64MOVBreg OpARM64MOVBUreg OpARM64MOVHreg @@ -1236,6 +1341,7 @@ const ( OpARM64FCVTDS OpARM64FRINTAD OpARM64FRINTMD + OpARM64FRINTND OpARM64FRINTPD OpARM64FRINTZD OpARM64CSEL @@ -1512,6 +1618,7 @@ const ( OpPPC64MULHW OpPPC64MULHDU OpPPC64MULHWU + OpPPC64LoweredMuluhilo OpPPC64FMUL OpPPC64FMULS OpPPC64FMADD @@ -1556,10 +1663,13 @@ const ( OpPPC64MTVSRD OpPPC64AND OpPPC64ANDN + OpPPC64ANDCC OpPPC64OR OpPPC64ORN + OpPPC64ORCC OpPPC64NOR OpPPC64XOR + OpPPC64XORCC OpPPC64EQV OpPPC64NEG OpPPC64FNEG @@ -1591,6 +1701,17 @@ const 
( OpPPC64MOVDBRload OpPPC64MOVWBRload OpPPC64MOVHBRload + OpPPC64MOVBZloadidx + OpPPC64MOVHloadidx + OpPPC64MOVHZloadidx + OpPPC64MOVWloadidx + OpPPC64MOVWZloadidx + OpPPC64MOVDloadidx + OpPPC64MOVHBRloadidx + OpPPC64MOVWBRloadidx + OpPPC64MOVDBRloadidx + OpPPC64FMOVDloadidx + OpPPC64FMOVSloadidx OpPPC64MOVDBRstore OpPPC64MOVWBRstore OpPPC64MOVHBRstore @@ -1602,6 +1723,15 @@ const ( OpPPC64MOVDstore OpPPC64FMOVDstore OpPPC64FMOVSstore + OpPPC64MOVBstoreidx + OpPPC64MOVHstoreidx + OpPPC64MOVWstoreidx + OpPPC64MOVDstoreidx + OpPPC64FMOVDstoreidx + OpPPC64FMOVSstoreidx + OpPPC64MOVHBRstoreidx + OpPPC64MOVWBRstoreidx + OpPPC64MOVDBRstoreidx OpPPC64MOVBstorezero OpPPC64MOVHstorezero OpPPC64MOVWstorezero @@ -1755,6 +1885,8 @@ const ( OpS390XSRAW OpS390XSRADconst OpS390XSRAWconst + OpS390XRLLG + OpS390XRLL OpS390XRLLGconst OpS390XRLLconst OpS390XNEG @@ -1863,6 +1995,10 @@ const ( OpS390XLoweredAtomicExchange32 OpS390XLoweredAtomicExchange64 OpS390XFLOGR + OpS390XPOPCNT + OpS390XSumBytes2 + OpS390XSumBytes4 + OpS390XSumBytes8 OpS390XSTMG2 OpS390XSTMG3 OpS390XSTMG4 @@ -1972,6 +2108,8 @@ const ( OpHmul64u OpMul32uhilo OpMul64uhilo + OpMul32uover + OpMul64uover OpAvg32u OpAvg64u OpDiv8 @@ -2147,6 +2285,10 @@ const ( OpPopCount16 OpPopCount32 OpPopCount64 + OpRotateLeft8 + OpRotateLeft16 + OpRotateLeft32 + OpRotateLeft64 OpSqrt OpFloor OpCeil @@ -2256,6 +2398,7 @@ const ( OpVarKill OpVarLive OpKeepAlive + OpInlMark OpInt64Make OpInt64Hi OpInt64Lo @@ -2263,6 +2406,8 @@ const ( OpAdd32withcarry OpSub32carry OpSub32withcarry + OpAdd64carry + OpSub64borrow OpSignmask OpZeromask OpSlicemask @@ -2279,15 +2424,18 @@ const ( OpAtomicLoad32 OpAtomicLoad64 OpAtomicLoadPtr + OpAtomicLoadAcq32 OpAtomicStore32 OpAtomicStore64 OpAtomicStorePtrNoWB + OpAtomicStoreRel32 OpAtomicExchange32 OpAtomicExchange64 OpAtomicAdd32 OpAtomicAdd64 OpAtomicCompareAndSwap32 OpAtomicCompareAndSwap64 + OpAtomicCompareAndSwapRel32 OpAtomicAnd8 OpAtomicOr8 OpAtomicAdd32Variant @@ -2738,6 +2886,42 @@ var 
opcodeTable = [...]opInfo{ }, }, }, + { + name: "DIVSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "DIVSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, { name: "ADDL", argLen: 2, @@ -2963,6 +3147,24 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MULLU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 255}, // AX CX DX BX SP BP SI DI + }, + clobbers: 4, // DX + outputs: []outputInfo{ + {1, 0}, + {0, 1}, // AX + }, + }, + }, { name: "HMULL", argLen: 2, @@ -3032,6 +3234,7 @@ var opcodeTable = [...]opInfo{ }, { name: "DIVL", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVL, @@ -3048,6 +3251,7 @@ var opcodeTable = [...]opInfo{ }, { name: "DIVW", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVW, @@ -3096,6 +3300,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MODL", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVL, @@ -3112,6 +3317,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MODW", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVW, @@ -3323,6 +3529,87 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMPLload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX 
BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPWload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPBload", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPLconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPWconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "CMPBconstload", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: x86.ACMPB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, { name: "UCOMISS", argLen: 2, @@ -3726,6 +4013,25 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MULLload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "ANDLload", auxType: auxSymOff, @@ -3784,14 +4090,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "NEGL", - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: x86.ANEGL, + name: "ADDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + 
clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3799,14 +4110,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "NOTL", - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: x86.ANOTL, + name: "SUBLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3814,13 +4130,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BSFL", - argLen: 1, - clobberFlags: true, - asm: x86.ABSFL, + name: "MULLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AIMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3828,13 +4150,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BSFW", - argLen: 1, - clobberFlags: true, - asm: x86.ABSFW, + name: "ANDLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP 
SI DI @@ -3842,13 +4170,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BSRL", - argLen: 1, - clobberFlags: true, - asm: x86.ABSRL, + name: "ORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3856,13 +4190,19 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BSRW", - argLen: 1, - clobberFlags: true, - asm: x86.ABSRW, + name: "XORLloadidx4", + auxType: auxSymOff, + argLen: 4, + resultInArg0: true, + clobberFlags: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 239}, // AX CX DX BX BP SI DI + {0, 239}, // AX CX DX BX BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {1, 65791}, // AX CX DX BX SP BP SI DI SB }, outputs: []outputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3870,11 +4210,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "BSWAPL", + name: "NEGL", argLen: 1, resultInArg0: true, clobberFlags: true, - asm: x86.ABSWAPL, + asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -3885,21 +4225,107 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "SQRTSD", - argLen: 1, - asm: x86.ASQRTSD, + name: "NOTL", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ANOTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 239}, // AX CX DX BX BP SI DI }, outputs: []outputInfo{ - {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + {0, 239}, // AX CX DX BX BP SI DI }, }, }, { - name: "SBBLcarrymask", - argLen: 1, + name: "BSFL", + argLen: 1, + clobberFlags: true, + asm: x86.ABSFL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: 
[]outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSFW", + argLen: 1, + clobberFlags: true, + asm: x86.ABSFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSRL", + argLen: 1, + clobberFlags: true, + asm: x86.ABSRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSRW", + argLen: 1, + clobberFlags: true, + asm: x86.ABSRW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "BSWAPL", + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABSWAPL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "SQRTSD", + argLen: 1, + asm: x86.ASQRTSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + outputs: []outputInfo{ + {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7 + }, + }, + }, + { + name: "SBBLcarrymask", + argLen: 1, asm: x86.ASBBL, reg: regInfo{ outputs: []outputInfo{ @@ -4007,6 +4433,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SETO", + argLen: 1, + asm: x86.ASETOS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "SETEQF", argLen: 1, @@ -4508,182 +4944,378 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVBLZX, + name: "ADDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX 
DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, }, }, { - name: "MOVWloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVWLZX, + name: "SUBLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, }, }, { - name: "MOVWloadidx2", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AMOVWLZX, + name: "ANDLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, }, }, { - name: "MOVLloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVL, + name: "ORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, }, }, { - name: "MOVLloadidx4", - auxType: auxSymOff, - argLen: 3, - symEffect: SymRead, - asm: x86.AMOVL, + name: "XORLmodifyidx4", + auxType: auxSymOff, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX 
SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, - outputs: []outputInfo{ - {0, 239}, // AX CX DX BX BP SI DI - }, }, }, { - name: "MOVBstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVB, + name: "ADDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ - {1, 255}, // AX CX DX BX SP BP SI DI - {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVWstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVW, + name: "ANDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {1, 255}, // AX CX DX BX SP BP SI DI - {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVWstoreidx2", - auxType: auxSymOff, - argLen: 4, - symEffect: SymWrite, - asm: x86.AMOVW, + name: "ORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {1, 255}, // AX CX DX BX SP BP SI DI - {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVLstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVL, + name: "XORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {1, 255}, // AX CX DX BX SP BP SI DI - {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVLstoreidx4", - auxType: auxSymOff, - argLen: 4, - symEffect: 
SymWrite, - asm: x86.AMOVL, + name: "ADDLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI - {2, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVBstoreconst", + name: "ANDLconstmodifyidx4", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: x86.AMOVB, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI {0, 65791}, // AX CX DX BX SP BP SI DI SB }, }, }, { - name: "MOVWstoreconst", + name: "ORLconstmodifyidx4", auxType: auxSymValAndOff, - argLen: 2, + argLen: 3, + clobberFlags: true, faultOnNilArg0: true, - symEffect: SymWrite, - asm: x86.AMOVW, - reg: regInfo{ + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "XORLconstmodifyidx4", + auxType: auxSymValAndOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVBloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVBLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX 
BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVWloadidx2", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVWLZX, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLloadidx1", + auxType: auxSymOff, + argLen: 3, + commutative: true, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVLloadidx4", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "MOVBstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreidx1", + auxType: auxSymOff, + argLen: 4, + commutative: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreidx2", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreidx1", + auxType: auxSymOff, + argLen: 4, + 
commutative: true, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVLstoreidx4", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 255}, // AX CX DX BX SP BP SI DI + {2, 255}, // AX CX DX BX SP BP SI DI + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVBstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65791}, // AX CX DX BX SP BP SI DI SB + }, + }, + }, + { + name: "MOVWstoreconst", + auxType: auxSymValAndOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AMOVW, + reg: regInfo{ inputs: []inputInfo{ {0, 65791}, // AX CX DX BX SP BP SI DI SB }, @@ -5450,6 +6082,42 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "DIVSSload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, + { + name: "DIVSDload", + auxType: auxSymOff, + argLen: 3, + resultInArg0: true, + faultOnNilArg1: true, + symEffect: SymRead, + asm: x86.ADIVSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + outputs: []outputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, { name: "ADDQ", argLen: 2, @@ -5669,45 
+6337,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "HMULQ", + name: "MULLU", argLen: 2, commutative: true, clobberFlags: true, - asm: x86.AIMULQ, + asm: x86.AMULL, reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, - clobbers: 1, // AX + clobbers: 4, // DX outputs: []outputInfo{ - {0, 4}, // DX + {1, 0}, + {0, 1}, // AX }, }, }, { - name: "HMULL", + name: "MULQU", argLen: 2, commutative: true, clobberFlags: true, - asm: x86.AIMULL, + asm: x86.AMULQ, reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, - clobbers: 1, // AX + clobbers: 4, // DX outputs: []outputInfo{ - {0, 4}, // DX + {1, 0}, + {0, 1}, // AX }, }, }, { - name: "HMULQU", + name: "HMULQ", argLen: 2, commutative: true, clobberFlags: true, - asm: x86.AMULQ, + asm: x86.AIMULQ, reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX @@ -5720,11 +6390,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "HMULLU", + name: "HMULL", argLen: 2, commutative: true, clobberFlags: true, - asm: x86.AMULL, + asm: x86.AIMULL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULQU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1}, // AX + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + clobbers: 1, // AX + outputs: []outputInfo{ + {0, 4}, // DX + }, + }, + }, + { + name: "HMULLU", + argLen: 2, + commutative: true, + clobberFlags: true, + asm: x86.AMULL, reg: regInfo{ inputs: []inputInfo{ {0, 1}, // AX @@ -5754,6 +6458,7 @@ var opcodeTable = [...]opInfo{ }, { name: "DIVQ", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVQ, @@ -5770,6 +6475,7 @@ var opcodeTable = [...]opInfo{ }, { name: "DIVL", + 
auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVL, @@ -5786,6 +6492,7 @@ var opcodeTable = [...]opInfo{ }, { name: "DIVW", + auxType: auxBool, argLen: 2, clobberFlags: true, asm: x86.AIDIVW, @@ -5848,6 +6555,151 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "NEGLflags", + argLen: 1, + resultInArg0: true, + asm: x86.ANEGL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADDQcarry", + argLen: 2, + commutative: true, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADCQ", + argLen: 3, + commutative: true, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADDQconstcarry", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "ADCQconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: x86.AADCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 
R12 R13 R14 R15 + }, + }, + }, + { + name: "SUBQborrow", + argLen: 2, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SBBQ", + argLen: 3, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SUBQconstborrow", + auxType: auxInt32, + argLen: 1, + resultInArg0: true, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "SBBQconst", + auxType: auxInt32, + argLen: 2, + resultInArg0: true, + asm: x86.ASBBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {1, 0}, + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "MULQU2", argLen: 2, @@ -5948,6 +6800,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ANDQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ANDLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: 
x86.AANDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "ORQ", argLen: 2, @@ -6014,6 +6894,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ORQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "XORQ", argLen: 2, @@ -6080,6 +6988,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "XORQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "XORLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "CMPQ", argLen: 2, @@ -6495,42 +7431,216 @@ var opcodeTable = [...]opInfo{ asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + 
}, + }, + { + name: "BTSLconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "BTSQconst", + auxType: auxInt8, + argLen: 1, + resultInArg0: true, + clobberFlags: true, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "BTCQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTCQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTCLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTCL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTSQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTSLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTSL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX 
SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTRQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTRQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTRLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTRL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTCQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTCQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTCLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTCL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTSQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "BTSLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX 
CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, }, { - name: "BTSLconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: x86.ABTSL, + name: "BTRQconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, }, { - name: "BTSQconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - clobberFlags: true, - asm: x86.ABTSQ, + name: "BTRLconstmodify", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ABTRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - outputs: []outputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB }, }, }, @@ -7328,6 +8438,156 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ADDQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SUBQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { 
+ name: "ANDQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ORQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "XORQmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORQ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ADDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SUBLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.ASUBL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ANDLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AANDL, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "XORLmodify", + auxType: auxSymOff, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymRead | SymWrite, + asm: x86.AXORL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "NEGQ", argLen: 1, @@ -8282,6 +9542,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SETO", + argLen: 1, + asm: x86.ASETOS, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "SETEQstore", auxType: auxSymOff, @@ -14145,6 +15415,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FABSD", + argLen: 1, + asm: arm64.AFABSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "FNEGS", argLen: 1, @@ -14445,7 +15728,67 @@ var opcodeTable = [...]opInfo{ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, outputs: []outputInfo{ - {0, 9223372034707292160}, // 
F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MADD", + argLen: 3, + asm: arm64.AMADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MADDW", + argLen: 3, + asm: arm64.AMADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MSUB", + argLen: 3, + asm: arm64.AMSUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: 
[]outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MSUBW", + argLen: 3, + asm: arm64.AMSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, }, }, @@ -14533,6 +15876,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ROR", + argLen: 2, + asm: arm64.AROR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "RORW", + argLen: 2, + asm: arm64.ARORW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, { name: "RORconst", auxType: auxInt64, @@ -14636,9 +16007,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMN", - argLen: 2, - asm: arm64.ACMN, + name: "CMN", + argLen: 2, 
+ commutative: true, + asm: arm64.ACMN, reg: regInfo{ inputs: []inputInfo{ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -14658,9 +16030,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMNW", - argLen: 2, - asm: arm64.ACMNW, + name: "CMNW", + argLen: 2, + commutative: true, + asm: arm64.ACMNW, reg: regInfo{ inputs: []inputInfo{ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -14680,9 +16053,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TST", - argLen: 2, - asm: arm64.ATST, + name: "TST", + argLen: 2, + commutative: true, + asm: arm64.ATST, reg: regInfo{ inputs: []inputInfo{ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -14702,9 +16076,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TSTW", - argLen: 2, - asm: arm64.ATSTW, + name: "TSTW", + argLen: 2, + commutative: true, + asm: arm64.ATSTW, reg: regInfo{ inputs: []inputInfo{ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 @@ -14745,6 +16120,90 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MVNshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVNshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "MVNshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.AMVN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftLL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftRL", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, + { + name: "NEGshiftRA", + auxType: auxInt64, + argLen: 1, + asm: arm64.ANEG, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, { name: "ADDshiftLL", auxType: auxInt64, @@ -15141,6 +16600,78 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMNshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 
R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "CMNshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftRL", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt64, + argLen: 2, + asm: arm64.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 
R21 R22 R23 R24 R25 R26 g R30 + }, + }, + }, { name: "BFI", auxType: auxInt64, @@ -15523,6 +17054,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FMOVSloadidx", + argLen: 3, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDloadidx", + argLen: 3, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "MOVHloadidx2", argLen: 3, @@ -15740,6 +17299,30 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FMOVSstoreidx", + argLen: 4, + asm: arm64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstoreidx", + argLen: 4, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "MOVHstoreidx2", argLen: 4, @@ -15908,20 +17491,46 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDstorezeroidx8", - argLen: 3, - asm: arm64.AMOVD, + name: "MOVDstorezeroidx8", + argLen: 3, + asm: arm64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, + { + name: "FMOVDgpfp", + argLen: 1, + asm: arm64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDfpgp", + argLen: 1, + asm: arm64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ - {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 - {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 }, 
}, }, { - name: "FMOVDgpfp", + name: "FMOVSgpfp", argLen: 1, - asm: arm64.AFMOVD, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 @@ -15932,9 +17541,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "FMOVDfpgp", + name: "FMOVSfpgp", argLen: 1, - asm: arm64.AFMOVD, + asm: arm64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -16308,6 +17917,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FRINTND", + argLen: 1, + asm: arm64.AFRINTND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "FRINTPD", argLen: 1, @@ -19962,6 +21584,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredMuluhilo", + argLen: 2, + resultNotInArgs: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "FMUL", argLen: 2, @@ -20569,6 +22206,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ANDCC", + argLen: 2, + commutative: true, + asm: 
ppc64.AANDCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "OR", argLen: 2, @@ -20598,6 +22250,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ORCC", + argLen: 2, + commutative: true, + asm: ppc64.AORCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "NOR", argLen: 2, @@ -20628,6 +22295,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "XORCC", + argLen: 2, + commutative: true, + asm: ppc64.AXORCC, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "EQV", argLen: 2, @@ -21005,9 +22687,193 @@ var opcodeTable = [...]opInfo{ argLen: 2, faultOnNilArg0: true, symEffect: SymRead, - asm: ppc64.AMOVD, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 
R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVBZloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVBZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + 
outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHZloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWZloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: 
ppc64.AMOVWZ, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRloadidx", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymRead, + asm: ppc64.AMOVWBR, reg: regInfo{ inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 
R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -21016,14 +22882,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDBRload", + name: "MOVDBRloadidx", auxType: auxSymOff, - argLen: 2, + argLen: 3, faultOnNilArg0: true, symEffect: SymRead, asm: ppc64.AMOVDBR, reg: regInfo{ inputs: []inputInfo{ + {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ @@ -21032,34 +22899,36 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWBRload", + name: "FMOVDloadidx", auxType: auxSymOff, - argLen: 2, + argLen: 3, faultOnNilArg0: true, symEffect: SymRead, - asm: ppc64.AMOVWBR, + asm: ppc64.AFMOVD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 }, }, }, { - name: "MOVHBRload", + name: "FMOVSloadidx", auxType: auxSymOff, - argLen: 2, + argLen: 3, faultOnNilArg0: true, symEffect: SymRead, - asm: ppc64.AMOVHBR, + asm: ppc64.AFMOVS, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 }, outputs: []outputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 
576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 }, }, }, @@ -21221,6 +23090,141 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: 
ppc64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVDstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "FMOVSstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AFMOVS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVHBRstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVHBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 
1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVWBRstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVWBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "MOVDBRstoreidx", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: ppc64.AMOVDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "MOVBstorezero", auxType: auxSymOff, @@ -21653,6 +23657,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicStore32", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, @@ -21665,6 +23670,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicStore64", + auxType: auxInt64, argLen: 3, faultOnNilArg0: true, hasSideEffects: true, @@ -21677,6 +23683,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicLoad32", + auxType: auxInt64, argLen: 2, clobberFlags: true, faultOnNilArg0: true, @@ -21691,6 +23698,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicLoad64", + auxType: auxInt64, argLen: 2, clobberFlags: true, faultOnNilArg0: true, @@ -21705,6 +23713,7 @@ var 
opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicLoadPtr", + auxType: auxInt64, argLen: 2, clobberFlags: true, faultOnNilArg0: true, @@ -21787,6 +23796,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicCas64", + auxType: auxInt64, argLen: 4, resultNotInArgs: true, clobberFlags: true, @@ -21805,6 +23815,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredAtomicCas32", + auxType: auxInt64, argLen: 4, resultNotInArgs: true, clobberFlags: true, @@ -23364,6 +25375,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "RLLG", + argLen: 2, + asm: s390x.ARLLG, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "RLL", + argLen: 2, + asm: s390x.ARLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, { name: "RLLGconst", auxType: auxInt8, @@ -24920,6 +26959,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "POPCNT", + argLen: 1, + clobberFlags: true, + asm: s390x.APOPCNT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + outputs: []outputInfo{ + {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 + }, + }, + }, + { + name: "SumBytes2", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes4", + argLen: 1, + reg: regInfo{}, + }, + { + name: "SumBytes8", + argLen: 1, + reg: regInfo{}, + }, { name: "STMG2", auxType: auxSymOff, @@ -26142,6 +28210,18 @@ var opcodeTable = [...]opInfo{ commutative: true, generic: true, }, + { + name: "Mul32uover", + argLen: 2, + commutative: true, + generic: true, + }, + { + name: "Mul64uover", + argLen: 2, + commutative: true, + generic: 
true, + }, { name: "Avg32u", argLen: 2, @@ -26164,6 +28244,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Div16", + auxType: auxBool, argLen: 2, generic: true, }, @@ -26174,6 +28255,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Div32", + auxType: auxBool, argLen: 2, generic: true, }, @@ -26184,6 +28266,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Div64", + auxType: auxBool, argLen: 2, generic: true, }, @@ -26209,6 +28292,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Mod16", + auxType: auxBool, argLen: 2, generic: true, }, @@ -26219,6 +28303,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Mod32", + auxType: auxBool, argLen: 2, generic: true, }, @@ -26229,6 +28314,7 @@ var opcodeTable = [...]opInfo{ }, { name: "Mod64", + auxType: auxBool, argLen: 2, generic: true, }, @@ -27095,6 +29181,26 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "RotateLeft8", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft16", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft32", + argLen: 2, + generic: true, + }, + { + name: "RotateLeft64", + argLen: 2, + generic: true, + }, { name: "Sqrt", argLen: 1, @@ -27691,6 +29797,12 @@ var opcodeTable = [...]opInfo{ zeroWidth: true, generic: true, }, + { + name: "InlMark", + auxType: auxInt32, + argLen: 1, + generic: true, + }, { name: "Int64Make", argLen: 2, @@ -27728,6 +29840,17 @@ var opcodeTable = [...]opInfo{ argLen: 3, generic: true, }, + { + name: "Add64carry", + argLen: 3, + commutative: true, + generic: true, + }, + { + name: "Sub64borrow", + argLen: 3, + generic: true, + }, { name: "Signmask", argLen: 1, @@ -27810,6 +29933,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AtomicLoadAcq32", + argLen: 2, + generic: true, + }, { name: "AtomicStore32", argLen: 3, @@ -27828,6 +29956,12 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, generic: true, }, + { + name: "AtomicStoreRel32", + argLen: 3, + hasSideEffects: true, + generic: true, + }, { name: 
"AtomicExchange32", argLen: 3, @@ -27864,6 +29998,12 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, generic: true, }, + { + name: "AtomicCompareAndSwapRel32", + argLen: 4, + hasSideEffects: true, + generic: true, + }, { name: "AtomicAnd8", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go index 37b607977c31c..4ebfb89e52b1e 100644 --- a/src/cmd/compile/internal/ssa/poset.go +++ b/src/cmd/compile/internal/ssa/poset.go @@ -114,7 +114,7 @@ type posetNode struct { // given that non-equality is not transitive, the only effect is that a later call // to SetEqual for the same values will fail. NonEqual checks whether it is known that // the nodes are different, either because SetNonEqual was called before, or because -// we know that that they are strictly ordered. +// we know that they are strictly ordered. // // It is implemented as a forest of DAGs; in each DAG, if node A dominates B, // it means that A 2 { + parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r) + } // No need to do anything else if we already found unsat. 
if ft.unsat { return @@ -234,6 +237,9 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { panic("unknown relation") } if !ok { + if parent.Func.pass.debug > 2 { + parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r) + } ft.unsat = true return } @@ -260,6 +266,9 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { ft.facts[p] = oldR & r // If this relation is not satisfiable, mark it and exit right away if oldR&r == 0 { + if parent.Func.pass.debug > 2 { + parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r) + } ft.unsat = true return } @@ -361,7 +370,7 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { lim = old.intersect(lim) ft.limits[v.ID] = lim if v.Block.Func.pass.debug > 2 { - v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s", parent, v, w, lim.String()) + v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s %s", parent, v, w, r, lim.String()) } if lim.min > lim.max || lim.umin > lim.umax { ft.unsat = true @@ -425,13 +434,13 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { // // Useful for i > 0; s[i-1]. 
lim, ok := ft.limits[x.ID] - if ok && lim.min > opMin[v.Op] { + if ok && ((d == signed && lim.min > opMin[v.Op]) || (d == unsigned && lim.umin > 0)) { ft.update(parent, x, w, d, gt) } } else if x, delta := isConstDelta(w); x != nil && delta == 1 { // v >= x+1 && x < max ⇒ v > x lim, ok := ft.limits[x.ID] - if ok && lim.max < opMax[w.Op] { + if ok && ((d == signed && lim.max < opMax[w.Op]) || (d == unsigned && lim.umax < opUMax[w.Op])) { ft.update(parent, v, x, d, gt) } } @@ -442,7 +451,7 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { if r == gt || r == gt|eq { if x, delta := isConstDelta(v); x != nil && d == signed { if parent.Func.pass.debug > 1 { - parent.Func.Warnl(parent.Pos, "x+d >= w; x:%v %v delta:%v w:%v d:%v", x, parent.String(), delta, w.AuxInt, d) + parent.Func.Warnl(parent.Pos, "x+d %s w; x:%v %v delta:%v w:%v d:%v", r, x, parent.String(), delta, w.AuxInt, d) } if !w.isGenericIntConst() { // If we know that x+delta > w but w is not constant, we can derive: @@ -503,8 +512,10 @@ func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) { // the other must be true if l, has := ft.limits[x.ID]; has { if l.max <= min { - // x>min is impossible, so it must be x<=max - ft.update(parent, vmax, x, d, r|eq) + if r&eq == 0 || l.max < min { + // x>min (x>=min) is impossible, so it must be x<=max + ft.update(parent, vmax, x, d, r|eq) + } } else if l.min > max { // x<=max is impossible, so it must be x>min ft.update(parent, x, vmin, d, r) @@ -527,6 +538,11 @@ var opMax = map[Op]int64{ OpAdd32: math.MaxInt32, OpSub32: math.MaxInt32, } +var opUMax = map[Op]uint64{ + OpAdd64: math.MaxUint64, OpSub64: math.MaxUint64, + OpAdd32: math.MaxUint32, OpSub32: math.MaxUint32, +} + // isNonNegative reports whether v is known to be non-negative. func (ft *factsTable) isNonNegative(v *Value) bool { if isNonNegative(v) { @@ -620,7 +636,7 @@ var ( // For example: // OpLess8: {signed, lt}, // v1 = (OpLess8 v2 v3). 
- // If v1 branch is taken than we learn that the rangeMaks + // If v1 branch is taken then we learn that the rangeMask // can be at most lt. domainRelationTable = map[Op]struct { d domain @@ -1071,6 +1087,13 @@ func addLocalInductiveFacts(ft *factsTable, b *Block) { } var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero} +var mostNegativeDividend = map[Op]int64{ + OpDiv16: -1 << 15, + OpMod16: -1 << 15, + OpDiv32: -1 << 31, + OpMod32: -1 << 31, + OpDiv64: -1 << 63, + OpMod64: -1 << 63} // simplifyBlock simplifies some constant values in b and evaluates // branches to non-uniquely dominated successors of b. @@ -1142,6 +1165,22 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op) } } + case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64: + // On amd64 and 386 fix-up code can be avoided if we know + // the divisor is not -1 or the dividend > MinIntNN. + divr := v.Args[1] + divrLim, divrLimok := ft.limits[divr.ID] + divd := v.Args[0] + divdLim, divdLimok := ft.limits[divd.ID] + if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) || + (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) { + v.AuxInt = 1 // see NeedsFixUp in genericOps - v.AuxInt = 0 means we have not proved + // that the divisor is not -1 and the dividend is not the most negative, + // so we need to add fix-up code. 
+ if b.Func.pass.debug > 0 { + b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op) + } + } } } diff --git a/src/cmd/compile/internal/ssa/redblack32_test.go b/src/cmd/compile/internal/ssa/redblack32_test.go index 6d72a3eee5f80..1ec29760728da 100644 --- a/src/cmd/compile/internal/ssa/redblack32_test.go +++ b/src/cmd/compile/internal/ssa/redblack32_test.go @@ -175,8 +175,6 @@ func allRBT32Ops(te *testing.T, x []int32) { if s != "" { te.Errorf("Tree consistency problem at %v", s) return - } else { - // fmt.Printf("%s", t.DebugString()) } } diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index bbf1932981458..8946cf6b5c083 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -119,6 +119,7 @@ import ( "cmd/internal/src" "cmd/internal/sys" "fmt" + "math/bits" "unsafe" ) @@ -149,6 +150,8 @@ type register uint8 const noRegister register = 255 +// A regMask encodes a set of machine registers. +// TODO: regMask -> regSet? type regMask uint64 func (m regMask) String() string { @@ -183,26 +186,16 @@ func (s *regAllocState) RegMaskString(m regMask) string { // countRegs returns the number of set bits in the register mask. func countRegs(r regMask) int { - n := 0 - for r != 0 { - n += int(r & 1) - r >>= 1 - } - return n + return bits.OnesCount64(uint64(r)) } // pickReg picks an arbitrary register from the register mask. 
func pickReg(r regMask) register { - // pick the lowest one if r == 0 { panic("can't pick a register from an empty set") } - for i := register(0); ; i++ { - if r&1 != 0 { - return i - } - r >>= 1 - } + // pick the lowest one + return register(bits.TrailingZeros64(uint64(r))) } type use struct { diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 5e151b5213d29..a154249371a29 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -7,7 +7,9 @@ package ssa import ( "cmd/compile/internal/types" "cmd/internal/obj" + "cmd/internal/objabi" "cmd/internal/src" + "encoding/binary" "fmt" "io" "math" @@ -175,23 +177,11 @@ func canMergeSym(x, y interface{}) bool { return x == nil || y == nil } -// canMergeLoad reports whether the load can be merged into target without +// canMergeLoadClobber reports whether the load can be merged into target without // invalidating the schedule. // It also checks that the other non-load argument x is something we -// are ok with clobbering (all our current load+op instructions clobber -// their input register). -func canMergeLoad(target, load, x *Value) bool { - if target.Block.ID != load.Block.ID { - // If the load is in a different block do not merge it. - return false - } - - // We can't merge the load into the target if the load - // has more than one use. - if load.Uses != 1 { - return false - } - +// are ok with clobbering. +func canMergeLoadClobber(target, load, x *Value) bool { // The register containing x is going to get clobbered. // Don't merge if we still need the value of x. // We don't have liveness information here, but we can @@ -206,6 +196,22 @@ func canMergeLoad(target, load, x *Value) bool { if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) { return false } + return canMergeLoad(target, load) +} + +// canMergeLoad reports whether the load can be merged into target without +// invalidating the schedule. 
+func canMergeLoad(target, load *Value) bool { + if target.Block.ID != load.Block.ID { + // If the load is in a different block do not merge it. + return false + } + + // We can't merge the load into the target if the load + // has more than one use. + if load.Uses != 1 { + return false + } mem := load.MemoryArg() @@ -234,7 +240,6 @@ func canMergeLoad(target, load, x *Value) bool { // memPreds contains memory states known to be predecessors of load's // memory state. It is lazily initialized. var memPreds map[*Value]bool -search: for i := 0; len(args) > 0; i++ { const limit = 100 if i >= limit { @@ -246,19 +251,33 @@ search: if target.Block.ID != v.Block.ID { // Since target and load are in the same block // we can stop searching when we leave the block. - continue search + continue } if v.Op == OpPhi { // A Phi implies we have reached the top of the block. // The memory phi, if it exists, is always // the first logical store in the block. - continue search + continue } if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { // We could handle this situation however it is likely // to be very rare. return false } + if v.Op.SymEffect()&SymAddr != 0 { + // This case prevents an operation that calculates the + // address of a local variable from being forced to schedule + // before its corresponding VarDef. + // See issue 28445. + // v1 = LOAD ... + // v2 = VARDEF + // v3 = LEAQ + // v4 = CMPQ v1 v3 + // We don't want to combine the CMPQ with the load, because + // that would force the CMPQ to schedule before the VARDEF, which + // in turn requires the LEAQ to schedule before the VARDEF. + return false + } if v.Type.IsMemory() { if memPreds == nil { // Initialise a map containing memory states @@ -296,14 +315,14 @@ search: // load = read ... mem // target = add x load if memPreds[v] { - continue search + continue } return false } if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem { // If v takes mem as an input then we know mem // is valid at this point. 
- continue search + continue } for _, a := range v.Args { if target.Block.ID == a.Block.ID { @@ -315,7 +334,7 @@ search: return true } -// isSameSym returns whether sym is the same as the given named symbol +// isSameSym reports whether sym is the same as the given named symbol func isSameSym(sym interface{}, name string) bool { s, ok := sym.(fmt.Stringer) return ok && s.String() == name @@ -418,22 +437,69 @@ func shiftIsBounded(v *Value) bool { return v.AuxInt != 0 } +// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern +// of the mantissa. It will panic if the truncation results in lost information. +func truncate64Fto32F(f float64) float32 { + if !isExactFloat32(f) { + panic("truncate64Fto32F: truncation is not exact") + } + if !math.IsNaN(f) { + return float32(f) + } + // NaN bit patterns aren't necessarily preserved across conversion + // instructions so we need to do the conversion manually. + b := math.Float64bits(f) + m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand) + // | sign | exponent | mantissa | + r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23))) + return math.Float32frombits(r) +} + +// extend32Fto64F converts a float32 value to a float64 value preserving the bit +// pattern of the mantissa. +func extend32Fto64F(f float32) float64 { + if !math.IsNaN(float64(f)) { + return float64(f) + } + // NaN bit patterns aren't necessarily preserved across conversion + // instructions so we need to do the conversion manually. + b := uint64(math.Float32bits(f)) + // | sign | exponent | mantissa | + r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23)) + return math.Float64frombits(r) +} + +// NeedsFixUp reports whether the division needs fix-up code. +func NeedsFixUp(v *Value) bool { + return v.AuxInt == 0 +} + // i2f is used in rules for converting from an AuxInt to a float. 
func i2f(i int64) float64 { return math.Float64frombits(uint64(i)) } -// i2f32 is used in rules for converting from an AuxInt to a float32. -func i2f32(i int64) float32 { - return float32(math.Float64frombits(uint64(i))) +// auxFrom64F encodes a float64 value so it can be stored in an AuxInt. +func auxFrom64F(f float64) int64 { + return int64(math.Float64bits(f)) } -// f2i is used in the rules for storing a float in AuxInt. -func f2i(f float64) int64 { - return int64(math.Float64bits(f)) +// auxFrom32F encodes a float32 value so it can be stored in an AuxInt. +func auxFrom32F(f float32) int64 { + return int64(math.Float64bits(extend32Fto64F(f))) } -// uaddOvf returns true if unsigned a+b would overflow. +// auxTo32F decodes a float32 from the AuxInt value provided. +func auxTo32F(i int64) float32 { + return truncate64Fto32F(math.Float64frombits(uint64(i))) +} + +// auxTo64F decodes a float64 from the AuxInt value provided. +func auxTo64F(i int64) float64 { + return math.Float64frombits(uint64(i)) +} + +// uaddOvf reports whether unsigned a+b would overflow. func uaddOvf(a, b int64) bool { return uint64(a)+uint64(b) < uint64(a) } @@ -478,6 +544,13 @@ func isSamePtr(p1, p2 *Value) bool { return false } +func isStackPtr(v *Value) bool { + for v.Op == OpOffPtr || v.Op == OpAddPtr { + v = v.Args[0] + } + return v.Op == OpSP || v.Op == OpLocalAddr +} + // disjoint reports whether the memory region specified by [p1:p1+n1) // does not overlap with [p2:p2+n2). // A return value of false does not imply the regions overlap. @@ -490,7 +563,7 @@ func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool { } baseAndOffset := func(ptr *Value) (base *Value, offset int64) { base, offset = ptr, 0 - if base.Op == OpOffPtr { + for base.Op == OpOffPtr { offset += base.AuxInt base = base.Args[0] } @@ -614,11 +687,11 @@ func noteRule(s string) bool { return true } -// warnRule generates a compiler debug output with string s when -// cond is true and the rule is fired. 
+// warnRule generates compiler debug output with string s when +// v is not in autogenerated code, cond is true and the rule has fired. func warnRule(cond bool, v *Value, s string) bool { - if cond { - v.Block.Func.Warnl(v.Pos, s) + if pos := v.Pos; pos.Line() > 1 && cond { + v.Block.Func.Warnl(pos, s) } return true } @@ -894,6 +967,54 @@ func zeroUpper32Bits(x *Value, depth int) bool { return false } +// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits +func zeroUpper48Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2: + return true + case OpArg: + return x.Type.Width == 2 + case OpPhi, OpSelect0, OpSelect1: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. + if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper48Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + +// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits +func zeroUpper56Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1: + return true + case OpArg: + return x.Type.Width == 1 + case OpPhi, OpSelect0, OpSelect1: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. + if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper56Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + // isInlinableMemmove reports whether the given arch performs a Move of the given size // faster than memmove. It will only return true if replacing the memmove with a Move is // safe, either because Move is small or because the arguments are disjoint. @@ -905,7 +1026,7 @@ func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool { // have fast Move ops. 
switch c.arch { case "amd64", "amd64p32": - return sz <= 16 + return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz)) case "386", "ppc64", "ppc64le", "arm64": return sz <= 8 case "s390x": @@ -978,3 +1099,93 @@ func registerizable(b *Block, t interface{}) bool { } return false } + +// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed. +func needRaceCleanup(sym interface{}, v *Value) bool { + f := v.Block.Func + if !f.Config.Race { + return false + } + if !isSameSym(sym, "runtime.racefuncenter") && !isSameSym(sym, "runtime.racefuncexit") { + return false + } + for _, b := range f.Blocks { + for _, v := range b.Values { + switch v.Op { + case OpStaticCall: + switch v.Aux.(fmt.Stringer).String() { + case "runtime.racefuncenter", "runtime.racefuncexit", "runtime.panicindex", + "runtime.panicslice", "runtime.panicdivide", "runtime.panicwrap": + // Check for racefuncenter will encounter racefuncexit and vice versa. + // Allow calls to panic* + default: + // If we encountered any call, we need to keep racefunc*, + // for accurate stacktraces. + return false + } + case OpClosureCall, OpInterCall: + // We must keep the race functions if there are any other call types. + return false + } + } + } + return true +} + +// symIsRO reports whether sym is a read-only global. +func symIsRO(sym interface{}) bool { + lsym := sym.(*obj.LSym) + return lsym.Type == objabi.SRODATA && len(lsym.R) == 0 +} + +// read8 reads one byte from the read-only global sym at offset off. +func read8(sym interface{}, off int64) uint8 { + lsym := sym.(*obj.LSym) + if off >= int64(len(lsym.P)) { + // Invalid index into the global sym. + // This can happen in dead code, so we don't want to panic. + // Just return any value, it will eventually get ignored. + // See issue 29215. + return 0 + } + return lsym.P[off] +} + +// read16 reads two bytes from the read-only global sym at offset off. 
+func read16(sym interface{}, off int64, bigEndian bool) uint16 { + lsym := sym.(*obj.LSym) + if off >= int64(len(lsym.P))-1 { + return 0 + } + if bigEndian { + return binary.BigEndian.Uint16(lsym.P[off:]) + } else { + return binary.LittleEndian.Uint16(lsym.P[off:]) + } +} + +// read32 reads four bytes from the read-only global sym at offset off. +func read32(sym interface{}, off int64, bigEndian bool) uint32 { + lsym := sym.(*obj.LSym) + if off >= int64(len(lsym.P))-3 { + return 0 + } + if bigEndian { + return binary.BigEndian.Uint32(lsym.P[off:]) + } else { + return binary.LittleEndian.Uint32(lsym.P[off:]) + } +} + +// read64 reads eight bytes from the read-only global sym at offset off. +func read64(sym interface{}, off int64, bigEndian bool) uint64 { + lsym := sym.(*obj.LSym) + if off >= int64(len(lsym.P))-7 { + return 0 + } + if bigEndian { + return binary.BigEndian.Uint64(lsym.P[off:]) + } else { + return binary.LittleEndian.Uint64(lsym.P[off:]) + } +} diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index a204d48d073bb..75b6de8055f3c 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -23,10 +25,18 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386ADDLcarry_0(v) case Op386ADDLconst: return rewriteValue386_Op386ADDLconst_0(v) + case Op386ADDLconstmodify: + return rewriteValue386_Op386ADDLconstmodify_0(v) + case Op386ADDLconstmodifyidx4: + return rewriteValue386_Op386ADDLconstmodifyidx4_0(v) case Op386ADDLload: return rewriteValue386_Op386ADDLload_0(v) + case Op386ADDLloadidx4: + return 
rewriteValue386_Op386ADDLloadidx4_0(v) case Op386ADDLmodify: return rewriteValue386_Op386ADDLmodify_0(v) + case Op386ADDLmodifyidx4: + return rewriteValue386_Op386ADDLmodifyidx4_0(v) case Op386ADDSD: return rewriteValue386_Op386ADDSD_0(v) case Op386ADDSDload: @@ -39,22 +49,44 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386ANDL_0(v) case Op386ANDLconst: return rewriteValue386_Op386ANDLconst_0(v) + case Op386ANDLconstmodify: + return rewriteValue386_Op386ANDLconstmodify_0(v) + case Op386ANDLconstmodifyidx4: + return rewriteValue386_Op386ANDLconstmodifyidx4_0(v) case Op386ANDLload: return rewriteValue386_Op386ANDLload_0(v) + case Op386ANDLloadidx4: + return rewriteValue386_Op386ANDLloadidx4_0(v) case Op386ANDLmodify: return rewriteValue386_Op386ANDLmodify_0(v) + case Op386ANDLmodifyidx4: + return rewriteValue386_Op386ANDLmodifyidx4_0(v) case Op386CMPB: return rewriteValue386_Op386CMPB_0(v) case Op386CMPBconst: return rewriteValue386_Op386CMPBconst_0(v) + case Op386CMPBload: + return rewriteValue386_Op386CMPBload_0(v) case Op386CMPL: return rewriteValue386_Op386CMPL_0(v) case Op386CMPLconst: - return rewriteValue386_Op386CMPLconst_0(v) + return rewriteValue386_Op386CMPLconst_0(v) || rewriteValue386_Op386CMPLconst_10(v) + case Op386CMPLload: + return rewriteValue386_Op386CMPLload_0(v) case Op386CMPW: return rewriteValue386_Op386CMPW_0(v) case Op386CMPWconst: return rewriteValue386_Op386CMPWconst_0(v) + case Op386CMPWload: + return rewriteValue386_Op386CMPWload_0(v) + case Op386DIVSD: + return rewriteValue386_Op386DIVSD_0(v) + case Op386DIVSDload: + return rewriteValue386_Op386DIVSDload_0(v) + case Op386DIVSS: + return rewriteValue386_Op386DIVSS_0(v) + case Op386DIVSSload: + return rewriteValue386_Op386DIVSSload_0(v) case Op386LEAL: return rewriteValue386_Op386LEAL_0(v) case Op386LEAL1: @@ -76,13 +108,13 @@ func rewriteValue386(v *Value) bool { case Op386MOVBloadidx1: return rewriteValue386_Op386MOVBloadidx1_0(v) case Op386MOVBstore: - return 
rewriteValue386_Op386MOVBstore_0(v) + return rewriteValue386_Op386MOVBstore_0(v) || rewriteValue386_Op386MOVBstore_10(v) case Op386MOVBstoreconst: return rewriteValue386_Op386MOVBstoreconst_0(v) case Op386MOVBstoreconstidx1: return rewriteValue386_Op386MOVBstoreconstidx1_0(v) case Op386MOVBstoreidx1: - return rewriteValue386_Op386MOVBstoreidx1_0(v) || rewriteValue386_Op386MOVBstoreidx1_10(v) + return rewriteValue386_Op386MOVBstoreidx1_0(v) || rewriteValue386_Op386MOVBstoreidx1_10(v) || rewriteValue386_Op386MOVBstoreidx1_20(v) case Op386MOVLload: return rewriteValue386_Op386MOVLload_0(v) case Op386MOVLloadidx1: @@ -90,7 +122,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVLloadidx4: return rewriteValue386_Op386MOVLloadidx4_0(v) case Op386MOVLstore: - return rewriteValue386_Op386MOVLstore_0(v) || rewriteValue386_Op386MOVLstore_10(v) + return rewriteValue386_Op386MOVLstore_0(v) || rewriteValue386_Op386MOVLstore_10(v) || rewriteValue386_Op386MOVLstore_20(v) case Op386MOVLstoreconst: return rewriteValue386_Op386MOVLstoreconst_0(v) case Op386MOVLstoreconstidx1: @@ -100,7 +132,7 @@ func rewriteValue386(v *Value) bool { case Op386MOVLstoreidx1: return rewriteValue386_Op386MOVLstoreidx1_0(v) case Op386MOVLstoreidx4: - return rewriteValue386_Op386MOVLstoreidx4_0(v) + return rewriteValue386_Op386MOVLstoreidx4_0(v) || rewriteValue386_Op386MOVLstoreidx4_10(v) case Op386MOVSDconst: return rewriteValue386_Op386MOVSDconst_0(v) case Op386MOVSDload: @@ -157,6 +189,10 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386MULL_0(v) case Op386MULLconst: return rewriteValue386_Op386MULLconst_0(v) || rewriteValue386_Op386MULLconst_10(v) || rewriteValue386_Op386MULLconst_20(v) || rewriteValue386_Op386MULLconst_30(v) + case Op386MULLload: + return rewriteValue386_Op386MULLload_0(v) + case Op386MULLloadidx4: + return rewriteValue386_Op386MULLloadidx4_0(v) case Op386MULSD: return rewriteValue386_Op386MULSD_0(v) case Op386MULSDload: @@ -173,10 +209,18 @@ func 
rewriteValue386(v *Value) bool { return rewriteValue386_Op386ORL_0(v) || rewriteValue386_Op386ORL_10(v) || rewriteValue386_Op386ORL_20(v) || rewriteValue386_Op386ORL_30(v) || rewriteValue386_Op386ORL_40(v) || rewriteValue386_Op386ORL_50(v) case Op386ORLconst: return rewriteValue386_Op386ORLconst_0(v) + case Op386ORLconstmodify: + return rewriteValue386_Op386ORLconstmodify_0(v) + case Op386ORLconstmodifyidx4: + return rewriteValue386_Op386ORLconstmodifyidx4_0(v) case Op386ORLload: return rewriteValue386_Op386ORLload_0(v) + case Op386ORLloadidx4: + return rewriteValue386_Op386ORLloadidx4_0(v) case Op386ORLmodify: return rewriteValue386_Op386ORLmodify_0(v) + case Op386ORLmodifyidx4: + return rewriteValue386_Op386ORLmodifyidx4_0(v) case Op386ROLBconst: return rewriteValue386_Op386ROLBconst_0(v) case Op386ROLLconst: @@ -243,8 +287,12 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386SUBLconst_0(v) case Op386SUBLload: return rewriteValue386_Op386SUBLload_0(v) + case Op386SUBLloadidx4: + return rewriteValue386_Op386SUBLloadidx4_0(v) case Op386SUBLmodify: return rewriteValue386_Op386SUBLmodify_0(v) + case Op386SUBLmodifyidx4: + return rewriteValue386_Op386SUBLmodifyidx4_0(v) case Op386SUBSD: return rewriteValue386_Op386SUBSD_0(v) case Op386SUBSDload: @@ -257,10 +305,18 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386XORL_0(v) || rewriteValue386_Op386XORL_10(v) case Op386XORLconst: return rewriteValue386_Op386XORLconst_0(v) + case Op386XORLconstmodify: + return rewriteValue386_Op386XORLconstmodify_0(v) + case Op386XORLconstmodifyidx4: + return rewriteValue386_Op386XORLconstmodifyidx4_0(v) case Op386XORLload: return rewriteValue386_Op386XORLload_0(v) + case Op386XORLloadidx4: + return rewriteValue386_Op386XORLloadidx4_0(v) case Op386XORLmodify: return rewriteValue386_Op386XORLmodify_0(v) + case Op386XORLmodifyidx4: + return rewriteValue386_Op386XORLmodifyidx4_0(v) case OpAdd16: return rewriteValue386_OpAdd16_0(v) case OpAdd32: @@ 
-583,6 +639,10 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_OpRsh8x64_0(v) case OpRsh8x8: return rewriteValue386_OpRsh8x8_0(v) + case OpSelect0: + return rewriteValue386_OpSelect0_0(v) + case OpSelect1: + return rewriteValue386_OpSelect1_0(v) case OpSignExt16to32: return rewriteValue386_OpSignExt16to32_0(v) case OpSignExt8to16: @@ -1239,7 +1299,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1253,7 +1313,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLload) @@ -1265,7 +1325,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { return true } // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1279,7 +1339,7 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ADDLload) @@ -1290,6 +1350,62 @@ func rewriteValue386_Op386ADDL_20(v *Value) bool { v.AddArg(mem) return true } + // match: (ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + 
break + } + v.reset(Op386ADDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ADDLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ADDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (ADDL x (NEGL y)) // cond: // result: (SUBL x y) @@ -1541,76 +1657,72 @@ func rewriteValue386_Op386ADDLconst_0(v *Value) bool { } return false } -func rewriteValue386_Op386ADDLload_0(v *Value) bool { +func rewriteValue386_Op386ADDLconstmodify_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem) - // cond: is32Bit(off1+off2) - // result: (ADDLload [off1+off2] {sym} val base mem) + // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym := v.Aux - _ = v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - off2 := v_1.AuxInt - base := v_1.Args[0] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + off2 := v_0.AuxInt + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2)) { break } - v.reset(Op386ADDLload) - v.AuxInt = off1 + off2 + v.reset(Op386ADDLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(val) v.AddArg(base) 
v.AddArg(mem) return true } - // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386LEAL { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - base := v_1.Args[0] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ADDLload) - v.AuxInt = off1 + off2 + v.reset(Op386ADDLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) v.AddArg(base) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { +func rewriteValue386_Op386ADDLconstmodifyidx4_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) - // cond: is32Bit(off1+off2) - // result: (ADDLmodify [off1+off2] {sym} base val mem) + // match: (ADDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym := v.Aux 
_ = v.Args[2] v_0 := v.Args[0] @@ -1619,24 +1731,50 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + idx := v.Args[1] mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + if !(ValAndOff(valoff1).canAdd(off2)) { break } - v.reset(Op386ADDLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym v.AddArg(base) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // match: (ADDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) + // cond: ValAndOff(valoff1).canAdd(off2*4) + // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2 * 4)) { + break + } + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2 * 4) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ADDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem) + for { + valoff1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] v_0 := v.Args[0] @@ -1646,92 +1784,123 @@ func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + idx := v.Args[1] mem := v.Args[2] - if 
!(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ADDLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) v.AddArg(base) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ADDSD_0(v *Value) bool { +func rewriteValue386_Op386ADDLload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSDload x [off] {sym} ptr mem) + // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (ADDLload [off1+off2] {sym} val base mem) for { - _ = v.Args[1] - x := v.Args[0] - l := v.Args[1] - if l.Op != Op386MOVSDload { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - off := l.AuxInt - sym := l.Aux - _ = l.Args[1] - ptr := l.Args[0] - mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - v.reset(Op386ADDSDload) - v.AuxInt = off + v.reset(Op386ADDLload) + v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) + v.AddArg(val) + v.AddArg(base) v.AddArg(mem) return true } - // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSDload x [off] {sym} ptr mem) + // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLload [off1+off2] 
{mergeSym(sym1,sym2)} val base mem) for { - _ = v.Args[1] - l := v.Args[0] - if l.Op != Op386MOVSDload { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - off := l.AuxInt - sym := l.Aux - _ = l.Args[1] - ptr := l.Args[0] - mem := l.Args[1] - x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ADDSDload) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) + v.reset(Op386ADDLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (ADDLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL4 { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr := v_1.Args[0] + idx := v_1.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386ADDLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ADDSDload_0(v *Value) bool { +func rewriteValue386_Op386ADDLloadidx4_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // match: (ADDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) // cond: is32Bit(off1+off2) - // result: (ADDSDload [off1+off2] {sym} val base mem) + // result: (ADDLloadidx4 [off1+off2] {sym} val base idx mem) for { off1 := v.AuxInt 
sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { @@ -1739,25 +1908,55 @@ func rewriteValue386_Op386ADDSDload_0(v *Value) bool { } off2 := v_1.AuxInt base := v_1.Args[0] - mem := v.Args[2] + idx := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386ADDSDload) + v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(val) v.AddArg(base) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // match: (ADDLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) + // cond: is32Bit(off1+off2*4) + // result: (ADDLloadidx4 [off1+off2*4] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + base := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386ADDLconst { + break + } + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386ADDLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ADDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // result: (ADDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386LEAL { @@ -1766,33 +1965,213 @@ func rewriteValue386_Op386ADDSDload_0(v *Value) bool { off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - mem := v.Args[2] + idx := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ADDSDload) + v.reset(Op386ADDLloadidx4) v.AuxInt = off1 + off2 v.Aux 
= mergeSym(sym1, sym2) v.AddArg(val) v.AddArg(base) + v.AddArg(idx) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ADDSS_0(v *Value) bool { +func rewriteValue386_Op386ADDLmodify_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSSload x [off] {sym} ptr mem) + // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (ADDLmodify [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ADDLmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ADDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) + // cond: is32Bit(off1+off2) + // result: (ADDLmodifyidx4 
[off1+off2] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) + // cond: is32Bit(off1+off2*4) + // result: (ADDLmodifyidx4 [off1+off2*4] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] 
{sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386MOVLconst { + break + } + c := v_2.AuxInt + mem := v.Args[3] + if !(validValAndOff(c, off)) { + break + } + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ADDSD_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] x := v.Args[0] l := v.Args[1] - if l.Op != Op386MOVSSload { + if l.Op != Op386MOVSDload { break } off := l.AuxInt @@ -1800,10 +2179,10 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } - v.reset(Op386ADDSSload) + v.reset(Op386ADDSDload) v.AuxInt = off v.Aux = sym v.AddArg(x) @@ -1811,13 +2190,13 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) - // result: (ADDSSload x [off] {sym} ptr mem) + // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] l := v.Args[0] - if l.Op != Op386MOVSSload { + if l.Op != Op386MOVSDload { break } off := l.AuxInt @@ -1826,10 +2205,10 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && 
!config.use387 && clobber(l)) { break } - v.reset(Op386ADDSSload) + v.reset(Op386ADDSDload) v.AuxInt = off v.Aux = sym v.AddArg(x) @@ -1839,14 +2218,14 @@ func rewriteValue386_Op386ADDSS_0(v *Value) bool { } return false } -func rewriteValue386_Op386ADDSSload_0(v *Value) bool { +func rewriteValue386_Op386ADDSDload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem) // cond: is32Bit(off1+off2) - // result: (ADDSSload [off1+off2] {sym} val base mem) + // result: (ADDSDload [off1+off2] {sym} val base mem) for { off1 := v.AuxInt sym := v.Aux @@ -1862,7 +2241,7 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { if !(is32Bit(off1 + off2)) { break } - v.reset(Op386ADDSSload) + v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(val) @@ -1870,9 +2249,9 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -1889,7 +2268,7 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ADDSSload) + v.reset(Op386ADDSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(val) @@ -1899,47 +2278,19 @@ func rewriteValue386_Op386ADDSSload_0(v *Value) bool { } return false } -func rewriteValue386_Op386ANDL_0(v *Value) bool { - // match: (ANDL x (MOVLconst [c])) - // cond: - // result: (ANDLconst [c] x) - for { - _ = 
v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - v.reset(Op386ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDL (MOVLconst [c]) x) - // cond: - // result: (ANDLconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(Op386ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (ANDLload x [off] {sym} ptr mem) +func rewriteValue386_Op386ADDSS_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] x := v.Args[0] l := v.Args[1] - if l.Op != Op386MOVLload { + if l.Op != Op386MOVSSload { break } off := l.AuxInt @@ -1947,10 +2298,10 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } - v.reset(Op386ANDLload) + v.reset(Op386ADDSSload) v.AuxInt = off v.Aux = sym v.AddArg(x) @@ -1958,13 +2309,13 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (ANDLload x [off] {sym} ptr mem) + // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] l := v.Args[0] - if l.Op != Op386MOVLload { + if l.Op != Op386MOVSSload { break } off := l.AuxInt @@ -1973,10 +2324,10 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { ptr := l.Args[0] mem 
:= l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } - v.reset(Op386ANDLload) + v.reset(Op386ADDSSload) v.AuxInt = off v.Aux = sym v.AddArg(x) @@ -1984,25 +2335,228 @@ func rewriteValue386_Op386ANDL_0(v *Value) bool { v.AddArg(mem) return true } - // match: (ANDL x x) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } return false } -func rewriteValue386_Op386ANDLconst_0(v *Value) bool { - // match: (ANDLconst [c] (ANDLconst [d] x)) - // cond: +func rewriteValue386_Op386ADDSSload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (ADDSSload [off1+off2] {sym} val base mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ADDSSload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ADDSSload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + 
v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ANDL_0(v *Value) bool { + // match: (ANDL x (MOVLconst [c])) + // cond: + // result: (ANDLconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { + break + } + c := v_1.AuxInt + v.reset(Op386ANDLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ANDL (MOVLconst [c]) x) + // cond: + // result: (ANDLconst [c] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(Op386ANDLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLload x [off] {sym} ptr mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLload { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ANDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLload x [off] {sym} ptr mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLload { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ANDLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := 
l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ANDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ANDLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ANDLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ANDL x x) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386ANDLconst_0(v *Value) bool { + // match: (ANDLconst [c] (ANDLconst [d] x)) + // cond: // result: (ANDLconst [c & d] x) for { c := v.AuxInt @@ -2059,76 +2613,72 @@ func rewriteValue386_Op386ANDLconst_0(v *Value) bool { } return false } -func rewriteValue386_Op386ANDLload_0(v *Value) bool { +func rewriteValue386_Op386ANDLconstmodify_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem) - // cond: is32Bit(off1+off2) - // result: (ANDLload [off1+off2] {sym} val base mem) + // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym := v.Aux - _ = v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] - if 
v_1.Op != Op386ADDLconst { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - off2 := v_1.AuxInt - base := v_1.Args[0] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + off2 := v_0.AuxInt + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2)) { break } - v.reset(Op386ANDLload) - v.AuxInt = off1 + off2 + v.reset(Op386ANDLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym - v.AddArg(val) v.AddArg(base) v.AddArg(mem) return true } - // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] - val := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386LEAL { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - base := v_1.Args[0] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ANDLload) - v.AuxInt = off1 + off2 + v.reset(Op386ANDLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) - v.AddArg(val) v.AddArg(base) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { +func rewriteValue386_Op386ANDLconstmodifyidx4_0(v *Value) bool { b := 
v.Block _ = b config := b.Func.Config _ = config - // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem) - // cond: is32Bit(off1+off2) - // result: (ANDLmodify [off1+off2] {sym} base val mem) + // match: (ANDLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt sym := v.Aux _ = v.Args[2] v_0 := v.Args[0] @@ -2137,24 +2687,50 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { } off2 := v_0.AuxInt base := v_0.Args[0] - val := v.Args[1] + idx := v.Args[1] mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + if !(ValAndOff(valoff1).canAdd(off2)) { break } - v.reset(Op386ANDLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386ANDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = sym v.AddArg(base) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // match: (ANDLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) + // cond: ValAndOff(valoff1).canAdd(off2*4) + // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem) for { - off1 := v.AuxInt + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2 * 4)) { + break + } + v.reset(Op386ANDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2 * 4) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ANDLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) + // cond: 
ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem) + for { + valoff1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] v_0 := v.Args[0] @@ -2164,215 +2740,387 @@ func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] + idx := v.Args[1] mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ANDLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386ANDLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) v.Aux = mergeSym(sym1, sym2) v.AddArg(base) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386CMPB_0(v *Value) bool { +func rewriteValue386_Op386ANDLload_0(v *Value) bool { b := v.Block _ = b - // match: (CMPB x (MOVLconst [c])) - // cond: - // result: (CMPBconst x [int64(int8(c))]) + config := b.Func.Config + _ = config + // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (ANDLload [off1+off2] {sym} val base mem) for { - _ = v.Args[1] - x := v.Args[0] + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { + if v_1.Op != Op386ADDLconst { break } - c := v_1.AuxInt - v.reset(Op386CMPBconst) - v.AuxInt = int64(int8(c)) - v.AddArg(x) - return true - } - // match: (CMPB (MOVLconst [c]) x) - // cond: - // result: (InvertFlags (CMPBconst x [int64(int8(c))])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - c := v_0.AuxInt - x := v.Args[1] - 
v.reset(Op386InvertFlags) - v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) - v0.AuxInt = int64(int8(c)) - v0.AddArg(x) - v.AddArg(v0) + v.reset(Op386ANDLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386CMPBconst_0(v *Value) bool { - // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)==int8(y) - // result: (FlagEQ) + // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - x := v_0.AuxInt - if !(int8(x) == int8(y)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386FlagEQ) + v.reset(Op386ANDLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)uint8(y) - // result: (FlagLT_UGT) + return false +} +func rewriteValue386_Op386ANDLloadidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ANDLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) + // cond: is32Bit(off1+off2) + // result: (ANDLloadidx4 [off1+off2] {sym} val base idx mem) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - x := v_0.AuxInt - if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { + off2 := v_1.AuxInt + base := 
v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { break } - v.reset(Op386FlagLT_UGT) + v.reset(Op386ANDLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)>int8(y) && uint8(x) int8(y) && uint8(x) < uint8(y)) { + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { break } - v.reset(Op386FlagGT_ULT) + v.reset(Op386ANDLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (CMPBconst (MOVLconst [x]) [y]) - // cond: int8(x)>int8(y) && uint8(x)>uint8(y) - // result: (FlagGT_UGT) + // match: (ANDLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - x := v_0.AuxInt - if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386FlagGT_UGT) + v.reset(Op386ANDLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (CMPBconst (ANDLconst _ [m]) [n]) - // cond: 0 <= int8(m) && int8(m) < int8(n) - // result: (FlagLT_ULT) + return false +} +func rewriteValue386_Op386ANDLmodify_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ANDLmodify [off1] {sym} 
(ADDLconst [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (ANDLmodify [off1+off2] {sym} base val mem) for { - n := v.AuxInt + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { + if v_0.Op != Op386ADDLconst { break } - m := v_0.AuxInt - if !(0 <= int8(m) && int8(m) < int8(n)) { + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - v.reset(Op386FlagLT_ULT) + v.reset(Op386ANDLmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (CMPBconst (ANDL x y) [0]) - // cond: - // result: (TESTB x y) + // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - if v.AuxInt != 0 { - break - } + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != Op386ANDL { + if v_0.Op != Op386LEAL { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(y) + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (CMPBconst (ANDLconst [c] x) [0]) - // cond: - // result: (TESTBconst [int64(int8(c))] x) + return false +} +func rewriteValue386_Op386ANDLmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ANDLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) + // cond: is32Bit(off1+off2) + // result: (ANDLmodifyidx4 [off1+off2] {sym} base idx val mem) 
for { - if v.AuxInt != 0 { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ANDLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) + // cond: is32Bit(off1+off2*4) + // result: (ANDLmodifyidx4 [off1+off2*4] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } + off2 := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ANDLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ANDLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { + if v_0.Op != Op386LEAL { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(Op386TESTBconst) - v.AuxInt = int64(int8(c)) - v.AddArg(x) + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (CMPBconst x [0]) - // cond: - // 
result: (TESTB x x) + // match: (ANDLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) for { - if v.AuxInt != 0 { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386MOVLconst { break } - x := v.Args[0] - v.reset(Op386TESTB) - v.AddArg(x) - v.AddArg(x) + c := v_2.AuxInt + mem := v.Args[3] + if !(validValAndOff(c, off)) { + break + } + v.reset(Op386ANDLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValue386_Op386CMPL_0(v *Value) bool { +func rewriteValue386_Op386CMPB_0(v *Value) bool { b := v.Block _ = b - // match: (CMPL x (MOVLconst [c])) + // match: (CMPB x (MOVLconst [c])) // cond: - // result: (CMPLconst x [c]) + // result: (CMPBconst x [int64(int8(c))]) for { _ = v.Args[1] x := v.Args[0] @@ -2381,14 +3129,14 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(Op386CMPLconst) - v.AuxInt = c + v.reset(Op386CMPBconst) + v.AuxInt = int64(int8(c)) v.AddArg(x) return true } - // match: (CMPL (MOVLconst [c]) x) + // match: (CMPB (MOVLconst [c]) x) // cond: - // result: (InvertFlags (CMPLconst x [c])) + // result: (InvertFlags (CMPBconst x [int64(int8(c))])) for { _ = v.Args[1] v_0 := v.Args[0] @@ -2398,17 +3146,73 @@ func rewriteValue386_Op386CMPL_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(Op386InvertFlags) - v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags) - v0.AuxInt = c + v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags) + v0.AuxInt = int64(int8(c)) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (CMPBload {sym} [off] ptr x mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVBload { + 
break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386CMPBload) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (CMPB x l:(MOVBload {sym} [off] ptr mem)) + // cond: canMergeLoad(v, l) && clobber(l) + // result: (InvertFlags (CMPBload {sym} [off] ptr x mem)) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVBload { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoad(v, l) && clobber(l)) { + break + } + v.reset(Op386InvertFlags) + v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) v0.AddArg(x) + v0.AddArg(mem) v.AddArg(v0) return true } return false } -func rewriteValue386_Op386CMPLconst_0(v *Value) bool { - // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)==int32(y) +func rewriteValue386_Op386CMPBconst_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)==int8(y) // result: (FlagEQ) for { y := v.AuxInt @@ -2417,14 +3221,14 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(int32(x) == int32(y)) { + if !(int8(x) == int8(y)) { break } v.reset(Op386FlagEQ) return true } - // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)uint32(y) + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)uint8(y) // result: (FlagLT_UGT) for { y := v.AuxInt @@ -2449,14 +3253,14 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + if !(int8(x) < int8(y) && uint8(x) > uint8(y)) { break } v.reset(Op386FlagLT_UGT) return true } - // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)int8(y) && uint8(x) int32(y) && uint32(x) < 
uint32(y)) { + if !(int8(x) > int8(y) && uint8(x) < uint8(y)) { break } v.reset(Op386FlagGT_ULT) return true } - // match: (CMPLconst (MOVLconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)>uint32(y) + // match: (CMPBconst (MOVLconst [x]) [y]) + // cond: int8(x)>int8(y) && uint8(x)>uint8(y) // result: (FlagGT_UGT) for { y := v.AuxInt @@ -2481,102 +3285,146 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + if !(int8(x) > int8(y) && uint8(x) > uint8(y)) { break } v.reset(Op386FlagGT_UGT) return true } - // match: (CMPLconst (SHRLconst _ [c]) [n]) - // cond: 0 <= n && 0 < c && c <= 32 && (1<uint16(y) + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: int32(x)uint32(y) // result: (FlagLT_UGT) for { y := v.AuxInt @@ -2653,14 +3555,14 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { + if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { break } v.reset(Op386FlagLT_UGT) return true } - // match: (CMPWconst (MOVLconst [x]) [y]) - // cond: int16(x)>int16(y) && uint16(x)int32(y) && uint32(x) int16(y) && uint16(x) < uint16(y)) { + if !(int32(x) > int32(y) && uint32(x) < uint32(y)) { break } v.reset(Op386FlagGT_ULT) return true } - // match: (CMPWconst (MOVLconst [x]) [y]) - // cond: int16(x)>int16(y) && uint16(x)>uint16(y) + // match: (CMPLconst (MOVLconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x)>uint32(y) // result: (FlagGT_UGT) for { y := v.AuxInt @@ -2685,14 +3587,30 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { + if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { break } v.reset(Op386FlagGT_UGT) return true } - // match: (CMPWconst (ANDLconst _ [m]) [n]) - // cond: 0 <= int16(m) && int16(m) < int16(n) + // match: (CMPLconst (SHRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 
&& (1<uint16(y) + // result: (FlagLT_UGT) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { + x := v_0.AuxInt + if !(int16(x) < int16(y) && uint16(x) > uint16(y)) { break } - v.reset(Op386LEAL1) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.reset(Op386FlagLT_UGT) return true } - // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) - // cond: - // result: (LEAL2 [c] {s} x y) + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>int16(y) && uint16(x) int16(y) && uint16(x) < uint16(y)) { break } - y := v_1.Args[0] - v.reset(Op386LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.reset(Op386FlagGT_ULT) return true } - // match: (LEAL1 [c] {s} (SHLLconst [1] y) x) - // cond: - // result: (LEAL2 [c] {s} x y) + // match: (CMPWconst (MOVLconst [x]) [y]) + // cond: int16(x)>int16(y) && uint16(x)>uint16(y) + // result: (FlagGT_UGT) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { + if v_0.Op != Op386MOVLconst { break } - if v_0.AuxInt != 1 { + x := v_0.AuxInt + if !(int16(x) > int16(y) && uint16(x) > uint16(y)) { break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL2) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.reset(Op386FlagGT_UGT) return true } - // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) - // cond: - // result: (LEAL4 [c] {s} x y) + // match: (CMPWconst (ANDLconst _ [m]) [n]) + // cond: 0 <= int16(m) && int16(m) < int16(n) + // result: (FlagLT_ULT) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + n := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386ANDLconst { break } - if v_1.AuxInt != 2 { + m := v_0.AuxInt + if !(0 <= int16(m) && int16(m) < int16(n)) { 
break } - y := v_1.Args[0] - v.reset(Op386LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.reset(Op386FlagLT_ULT) return true } - // match: (LEAL1 [c] {s} (SHLLconst [2] y) x) - // cond: - // result: (LEAL4 [c] {s} x y) + // match: (CMPWconst l:(ANDL x y) [0]) + // cond: l.Uses==1 + // result: (TESTW x y) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { + if v.AuxInt != 0 { break } - if v_0.AuxInt != 2 { + l := v.Args[0] + if l.Op != Op386ANDL { break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL4) - v.AuxInt = c - v.Aux = s + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTW) v.AddArg(x) v.AddArg(y) return true } - // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) - // cond: - // result: (LEAL8 [c] {s} x y) + // match: (CMPWconst l:(ANDLconst [c] x) [0]) + // cond: l.Uses==1 + // result: (TESTWconst [int64(int16(c))] x) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + if v.AuxInt != 0 { break } - if v_1.AuxInt != 3 { + l := v.Args[0] + if l.Op != Op386ANDLconst { break } - y := v_1.Args[0] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + v.reset(Op386TESTWconst) + v.AuxInt = int64(int16(c)) v.AddArg(x) - v.AddArg(y) return true } - // match: (LEAL1 [c] {s} (SHLLconst [3] y) x) + // match: (CMPWconst x [0]) // cond: - // result: (LEAL8 [c] {s} x y) + // result: (TESTW x x) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 3 { + if v.AuxInt != 0 { break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s + x := v.Args[0] + v.reset(Op386TESTW) + v.AddArg(x) v.AddArg(x) - v.AddArg(y) return true } - // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) 
&& canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) + // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c]) + // cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l) + // result: @l.Block (CMPWconstload {sym} [makeValAndOff(c,off)] ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + c := v.AuxInt + l := v.Args[0] + if l.Op != Op386MOVWload { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) { break } - v.reset(Op386LEAL1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + b = l.Block + v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = makeValAndOff(c, off) + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (LEAL1 [off1] {sym1} y (LEAL [off2] {sym2} x)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) + return false +} +func rewriteValue386_Op386CMPWload_0(v *Value) bool { + // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem) + // cond: validValAndOff(int64(int16(c)),off) + // result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - y := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386LEAL { + if v_1.Op != Op386MOVLconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - x := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + c := v_1.AuxInt + mem := v.Args[2] + if !(validValAndOff(int64(int16(c)), off)) { break } - 
v.reset(Op386LEAL1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.reset(Op386CMPWconstload) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } return false } -func rewriteValue386_Op386LEAL2_0(v *Value) bool { - // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAL2 [c+d] {s} x y) +func rewriteValue386_Op386DIVSD_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (DIVSDload x [off] {sym} ptr mem) for { - c := v.AuxInt - s := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVSDload { break } - d := v_0.AuxInt - x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(c+d) && x.Op != OpSB) { + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } - v.reset(Op386LEAL2) - v.AuxInt = c + d - v.Aux = s + v.reset(Op386DIVSDload) + v.AuxInt = off + v.Aux = sym v.AddArg(x) - v.AddArg(y) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+2*d) && y.Op != OpSB - // result: (LEAL2 [c+2*d] {s} x y) + return false +} +func rewriteValue386_Op386DIVSDload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (DIVSDload [off1+off2] {sym} val base mem) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - d := v_1.AuxInt - y := v_1.Args[0] - if 
!(is32Bit(c+2*d) && y.Op != OpSB) { + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - v.reset(Op386LEAL2) - v.AuxInt = c + 2*d - v.Aux = s - v.AddArg(x) - v.AddArg(y) + v.reset(Op386DIVSDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) - // cond: - // result: (LEAL4 [c] {s} x y) + // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + if v_1.Op != Op386LEAL { break } - if v_1.AuxInt != 1 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - y := v_1.Args[0] - v.reset(Op386LEAL4) - v.AuxInt = c - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) - // cond: - // result: (LEAL8 [c] {s} x y) + v.reset(Op386DIVSDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386DIVSS_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) + // result: (DIVSSload x [off] {sym} ptr mem) for { - c := v.AuxInt - s := v.Aux _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + l := v.Args[1] + if l.Op != Op386MOVSSload { break } - if v_1.AuxInt != 2 { + off := 
l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } - y := v_1.Args[0] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s + v.reset(Op386DIVSSload) + v.AuxInt = off + v.Aux = sym v.AddArg(x) - v.AddArg(y) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) + return false +} +func rewriteValue386_Op386DIVSSload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (DIVSSload [off1+off2] {sym} val base mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386DIVSSload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - 
v.reset(Op386LEAL2) + v.reset(Op386DIVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } return false } -func rewriteValue386_Op386LEAL4_0(v *Value) bool { - // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAL4 [c+d] {s} x y) +func rewriteValue386_Op386LEAL_0(v *Value) bool { + // match: (LEAL [c] {s} (ADDLconst [d] x)) + // cond: is32Bit(c+d) + // result: (LEAL [c+d] {s} x) for { c := v.AuxInt s := v.Aux - _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(c+d) && x.Op != OpSB) { + if !(is32Bit(c + d)) { break } - v.reset(Op386LEAL4) + v.reset(Op386LEAL) v.AuxInt = c + d v.Aux = s v.AddArg(x) - v.AddArg(y) return true } - // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+4*d) && y.Op != OpSB - // result: (LEAL4 [c+4*d] {s} x y) + // match: (LEAL [c] {s} (ADDL x y)) + // cond: x.Op != OpSB && y.Op != OpSB + // result: (LEAL1 [c] {s} x y) for { c := v.AuxInt s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - d := v_1.AuxInt - y := v_1.Args[0] - if !(is32Bit(c+4*d) && y.Op != OpSB) { + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + if !(x.Op != OpSB && y.Op != OpSB) { break } - v.reset(Op386LEAL4) - v.AuxInt = c + 4*d + v.reset(Op386LEAL1) + v.AuxInt = c v.Aux = s v.AddArg(x) v.AddArg(y) return true } - // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) - // cond: - // result: (LEAL8 [c] {s} x y) + // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + off1 := v.AuxInt + sym1 := v.Aux + 
v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - if v_1.AuxInt != 1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - y := v_1.Args[0] - v.reset(Op386LEAL8) - v.AuxInt = c - v.Aux = s + v.reset(Op386LEAL) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(x) - v.AddArg(y) return true } - // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) + // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + if v_0.Op != Op386LEAL1 { break } off2 := v_0.AuxInt sym2 := v_0.Aux + _ = v_0.Args[1] x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + y := v_0.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386LEAL4) + v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(x) v.AddArg(y) return true } - return false -} -func rewriteValue386_Op386LEAL8_0(v *Value) bool { - // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) - // cond: is32Bit(c+d) && x.Op != OpSB - // result: (LEAL8 [c+d] {s} x y) + // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] + off1 := v.AuxInt + sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + if v_0.Op != Op386LEAL2 { break } - d := v_0.AuxInt + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(c+d) && x.Op != OpSB) { + y := v_0.Args[1] + if !(is32Bit(off1+off2) && 
canMergeSym(sym1, sym2)) { break } - v.reset(Op386LEAL8) - v.AuxInt = c + d - v.Aux = s + v.reset(Op386LEAL2) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(x) v.AddArg(y) return true } - // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) - // cond: is32Bit(c+8*d) && y.Op != OpSB - // result: (LEAL8 [c+8*d] {s} x y) + // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) for { - c := v.AuxInt - s := v.Aux - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { break } - d := v_1.AuxInt - y := v_1.Args[0] - if !(is32Bit(c+8*d) && y.Op != OpSB) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386LEAL8) - v.AuxInt = c + 8*d - v.Aux = s + v.reset(Op386LEAL4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(x) v.AddArg(y) return true } - // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + if v_0.Op != Op386LEAL8 { break } off2 := v_0.AuxInt sym2 := v_0.Aux + _ = v_0.Args[1] x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + y := v_0.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } v.reset(Op386LEAL8) @@ -3464,241 +4401,196 @@ func rewriteValue386_Op386LEAL8_0(v *Value) bool { } return false } -func rewriteValue386_Op386MOVBLSX_0(v *Value) bool { - b := v.Block - _ = b - // 
match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBLSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != Op386MOVBload { - break - } - off := x.AuxInt - sym := x.Aux - _ = x.Args[1] - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVBLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVBLSX (ANDLconst [c] x)) - // cond: c & 0x80 == 0 - // result: (ANDLconst [c & 0x7f] x) +func rewriteValue386_Op386LEAL1_0(v *Value) bool { + // match: (LEAL1 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (LEAL1 [c+d] {s} x y) for { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { + if v_0.Op != Op386ADDLconst { break } - c := v_0.AuxInt + d := v_0.AuxInt x := v_0.Args[0] - if !(c&0x80 == 0) { + y := v.Args[1] + if !(is32Bit(c+d) && x.Op != OpSB) { break } - v.reset(Op386ANDLconst) - v.AuxInt = c & 0x7f + v.reset(Op386LEAL1) + v.AuxInt = c + d + v.Aux = s v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVBLSX x) + // match: (LEAL1 [c] {s} y (ADDLconst [d] x)) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (LEAL1 [c+d] {s} x y) for { - off := v.AuxInt - sym := v.Aux + c := v.AuxInt + s := v.Aux _ = v.Args[1] - ptr := v.Args[0] + y := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVBstore { + if v_1.Op != Op386ADDLconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && 
isSamePtr(ptr, ptr2)) { + d := v_1.AuxInt + x := v_1.Args[0] + if !(is32Bit(c+d) && x.Op != OpSB) { break } - v.reset(Op386MOVBLSX) + v.reset(Op386LEAL1) + v.AuxInt = c + d + v.Aux = s v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // match: (LEAL1 [c] {s} x (SHLLconst [1] y)) + // cond: + // result: (LEAL2 [c] {s} x y) for { - off1 := v.AuxInt - sym1 := v.Aux + c := v.AuxInt + s := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if v_1.AuxInt != 1 { break } - v.reset(Op386MOVBLSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + y := v_1.Args[0] + v.reset(Op386LEAL2) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { - b := v.Block - _ = b - // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBload [off] {sym} ptr mem) + // match: (LEAL1 [c] {s} (SHLLconst [1] y) x) + // cond: + // result: (LEAL2 [c] {s} x y) for { - x := v.Args[0] - if x.Op != Op386MOVBload { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { break } - off := x.AuxInt - sym := x.Aux - _ = x.Args[1] - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { + if v_0.AuxInt != 1 { break } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVBload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - 
v0.AddArg(ptr) - v0.AddArg(mem) + y := v_0.Args[0] + x := v.Args[1] + v.reset(Op386LEAL2) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) + // match: (LEAL1 [c] {s} x (SHLLconst [2] y)) + // cond: + // result: (LEAL4 [c] {s} x y) for { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] x := v.Args[0] - if x.Op != Op386MOVBloadidx1 { + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - off := x.AuxInt - sym := x.Aux - _ = x.Args[2] - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + if v_1.AuxInt != 2 { break } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBLZX (ANDLconst [c] x)) + // match: (LEAL1 [c] {s} (SHLLconst [2] y) x) // cond: - // result: (ANDLconst [c & 0xff] x) + // result: (LEAL4 [c] {s} x y) for { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { + if v_0.Op != Op386SHLLconst { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(Op386ANDLconst) - v.AuxInt = c & 0xff + if v_0.AuxInt != 2 { + break + } + y := v_0.Args[0] + x := v.Args[1] + v.reset(Op386LEAL4) + v.AuxInt = c + v.Aux = s v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValue386_Op386MOVBload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVBLZX x) + // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) + // cond: + // result: (LEAL8 [c] {s} x y) for { - off := 
v.AuxInt - sym := v.Aux + c := v.AuxInt + s := v.Aux _ = v.Args[1] - ptr := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVBstore { + if v_1.Op != Op386SHLLconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if v_1.AuxInt != 3 { break } - v.reset(Op386MOVBLZX) + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = c + v.Aux = s v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVBload [off1+off2] {sym} ptr mem) + // match: (LEAL1 [c] {s} (SHLLconst [3] y) x) + // cond: + // result: (LEAL8 [c] {s} x y) for { - off1 := v.AuxInt - sym := v.Aux + c := v.AuxInt + s := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + if v_0.Op != Op386SHLLconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { + if v_0.AuxInt != 3 { break } - v.reset(Op386MOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + y := v_0.Args[0] + x := v.Args[1] + v.reset(Op386LEAL8) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux @@ -3709,537 +4601,632 @@ func rewriteValue386_Op386MOVBload_0(v *Value) bool { } off2 := v_0.AuxInt sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + 
x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - v.reset(Op386MOVBload) + v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (LEAL1 [off1] {sym1} y (LEAL [off2] {sym2} x)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + x := v_1.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - v.reset(Op386MOVBloadidx1) + v.reset(Op386LEAL1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBload [off] {sym} (ADDL ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVBloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVBloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.AddArg(x) + v.AddArg(y) return true } return false } -func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { - // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) - // cond: - // 
result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) +func rewriteValue386_Op386LEAL2_0(v *Value) bool { + // match: (LEAL2 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (LEAL2 [c+d] {s} x y) for { c := v.AuxInt - sym := v.Aux - _ = v.Args[2] + s := v.Aux + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - // cond: - // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - _ = v.Args[2] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(c+d) && x.Op != OpSB) { break } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(Op386LEAL2) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) - // cond: - // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + // match: (LEAL2 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(c+2*d) && y.Op != OpSB + // result: (LEAL2 [c+2*d] {s} x y) for { c := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + s := v.Aux + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) - // cond: - // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) - 
for { - c := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + y := v_1.Args[0] + if !(is32Bit(c+2*d) && y.Op != OpSB) { break } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(Op386LEAL2) + v.AuxInt = c + 2*d + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - return false -} -func rewriteValue386_Op386MOVBstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem) + // match: (LEAL2 [c] {s} x (SHLLconst [1] y)) // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + // result: (LEAL4 [c] {s} x y) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVBLSX { + if v_1.Op != Op386SHLLconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) + if v_1.AuxInt != 1 { + break + } + y := v_1.Args[0] + v.reset(Op386LEAL4) + v.AuxInt = c + v.Aux = s v.AddArg(x) - v.AddArg(mem) + v.AddArg(y) return true } - // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem) + // match: (LEAL2 [c] {s} x (SHLLconst [2] y)) // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + // result: (LEAL8 [c] {s} x y) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVBLZX { + if v_1.Op != Op386SHLLconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) + if v_1.AuxInt != 2 { + break + } + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = c + v.Aux = s v.AddArg(x) - v.AddArg(mem) + v.AddArg(y) return true } - // match: 
(MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVBstore [off1+off2] {sym} ptr val mem) + // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt - sym := v.Aux - _ = v.Args[2] + sym1 := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - v.reset(Op386MOVBstore) + v.reset(Op386LEAL2) v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + return false +} +func rewriteValue386_Op386LEAL4_0(v *Value) bool { + // match: (LEAL4 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (LEAL4 [c+d] {s} x y) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(c+d) && x.Op != OpSB) { + break + } + v.reset(Op386LEAL4) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (LEAL4 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(c+4*d) && y.Op != OpSB + // result: (LEAL4 [c+4*d] {s} x y) + for { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { + if v_1.Op != Op386ADDLconst { break } - c := v_1.AuxInt 
- mem := v.Args[2] - if !(validOff(off)) { + d := v_1.AuxInt + y := v_1.Args[0] + if !(is32Bit(c+4*d) && y.Op != OpSB) { break } - v.reset(Op386MOVBstoreconst) - v.AuxInt = makeValAndOff(int64(int8(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(Op386LEAL4) + v.AuxInt = c + 4*d + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // match: (LEAL4 [c] {s} x (SHLLconst [1] y)) + // cond: + // result: (LEAL8 [c] {s} x y) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if v_1.AuxInt != 1 { break } - v.reset(Op386MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) + y := v_1.Args[0] + v.reset(Op386LEAL8) + v.AuxInt = c + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { + if v_0.Op != Op386LEAL { break } off2 := v_0.AuxInt sym2 := v_0.Aux - _ = v_0.Args[1] - 
ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - v.reset(Op386MOVBstoreidx1) + v.reset(Op386LEAL4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) + return false +} +func rewriteValue386_Op386LEAL8_0(v *Value) bool { + // match: (LEAL8 [c] {s} (ADDLconst [d] x) y) + // cond: is32Bit(c+d) && x.Op != OpSB + // result: (LEAL8 [c+d] {s} x y) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] + c := v.AuxInt + s := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386ADDL { + if v_0.Op != Op386ADDLconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(c+d) && x.Op != OpSB) { break } - v.reset(Op386MOVBstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.reset(Op386LEAL8) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-1] {s} p w mem) + // match: (LEAL8 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(c+8*d) && y.Op != OpSB + // result: (LEAL8 [c+8*d] {s} x y) for { - i := v.AuxInt + c := v.AuxInt s := v.Aux - _ = v.Args[2] - p := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - if v_1.AuxInt != 8 { + if v_1.Op != Op386ADDLconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op 
!= Op386MOVBstore { + d := v_1.AuxInt + y := v_1.Args[0] + if !(is32Bit(c+8*d) && y.Op != OpSB) { break } - if x.AuxInt != i-1 { + v.reset(Op386LEAL8) + v.AuxInt = c + 8*d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - if x.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - _ = x.Args[2] - if p != x.Args[0] { + v.reset(Op386LEAL8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValue386_Op386MOVBLSX_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBLSXload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != Op386MOVBload { break } - if w != x.Args[1] { + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBLSX (ANDLconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDLconst [c & 0x7f] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386ANDLconst { break } - v.reset(Op386MOVWstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(Op386ANDLconst) + v.AuxInt = c & 0x7f + v.AddArg(x) return true } - // match: 
(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-1] {s} p w0 mem) + return false +} +func rewriteValue386_Op386MOVBLSXload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBLSX x) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - p := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != Op386MOVBstore { + if v_1.Op != Op386MOVBstore { break } - if x.AuxInt != i-1 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if x.Aux != s { + v.reset(Op386MOVBLSX) + v.AddArg(x) + return true + } + // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - _ = x.Args[2] - if p != x.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - w0 := x.Args[1] - if w0.Op != Op386SHRLconst { + v.reset(Op386MOVBLSXload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVBLZX_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem)) + // cond: 
x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != Op386MOVBload { break } - if w0.AuxInt != j-8 { + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if w != w0.Args[0] { + b = x.Block + v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBLZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != Op386MOVBloadidx1 { break } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) + b = x.Block + v0 := b.NewValue0(v.Pos, Op386MOVBloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVBLZX (ANDLconst [c] x)) + // cond: + // result: (ANDLconst [c & 0xff] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386ANDLconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(Op386ANDLconst) + v.AuxInt = c & 0xff + v.AddArg(x) return true } return false } -func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { +func rewriteValue386_Op386MOVBload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVBLZX x) for { - sc := v.AuxInt - s := 
v.Aux + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVBstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(Op386MOVBLZX) + v.AddArg(x) + return true + } + // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - off := v_0.AuxInt + off2 := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { + if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + v.reset(Op386MOVBload) + v.AuxInt = off1 + off2 + v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { - sc := v.AuxInt + off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } - off := v_0.AuxInt + off2 := v_0.AuxInt sym2 := v_0.Aux - ptr := v_0.Args[0] + base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVBstoreconst) - v.AuxInt = ValAndOff(sc).add(off) + 
v.reset(Op386MOVBload) + v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) + v.AddArg(base) v.AddArg(mem) return true } - // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVBload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - x := v.AuxInt + off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } - off := v_0.AuxInt + off2 := v_0.AuxInt sym2 := v_0.Aux _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVBstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) + v.reset(Op386MOVBloadidx1) + v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) - // cond: - // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) + // match: (MOVBload [off] {sym} (ADDL ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVBloadidx1 [off] {sym} ptr idx mem) for { - x := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[1] v_0 := v.Args[0] @@ -4250,302 +5237,334 @@ func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - v.reset(Op386MOVBstoreconstidx1) - v.AuxInt = x + if !(ptr.Op != OpSB) { + break + } + v.reset(Op386MOVBloadidx1) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) - // result: (MOVWstoreconst 
[makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int64(read8(sym, off))]) for { - c := v.AuxInt - s := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[1] - p := v.Args[0] - x := v.Args[1] - if x.Op != Op386MOVBstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - _ = x.Args[1] - if p != x.Args[0] { + v_0 := v.Args[0] + if v_0.Op != OpSB { break } - mem := x.Args[1] - if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + if !(symIsRO(sym)) { break } - v.reset(Op386MOVWstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(mem) + v.reset(Op386MOVLconst) + v.AuxInt = int64(read8(sym, off)) return true } return false } -func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { - // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) +func rewriteValue386_Op386MOVBloadidx1_0(v *Value) bool { + // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // cond: - // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - c := v_0.AuxInt + d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] - v.reset(Op386MOVBstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) + v.reset(Op386MOVBloadidx1) + v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) + // match: (MOVBloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) // cond: - // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) 
for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux _ = v.Args[2] - ptr := v.Args[0] + idx := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - c := v_1.AuxInt - idx := v_1.Args[0] + d := v_1.AuxInt + ptr := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVBstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) + v.reset(Op386MOVBloadidx1) + v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) - // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) + // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt - s := v.Aux + sym := v.Aux _ = v.Args[2] - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != Op386MOVBstoreconstidx1 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - _ = x.Args[2] - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - v.reset(Op386MOVWstoreconstidx1) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(i) + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVBloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { - // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) + // match: (MOVBloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) // cond: - // result: (MOVBstoreidx1 [int64(int32(c+d))] 
{sym} ptr idx val mem) + // result: (MOVBloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux - _ = v.Args[3] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVBstoreidx1) + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVBloadidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) + return false +} +func rewriteValue386_Op386MOVBstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem) // cond: - // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // result: (MOVBstore [off] {sym} ptr x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[3] - idx := v.Args[0] + _ = v.Args[2] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + if v_1.Op != Op386MOVBLSX { break } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVBstore) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem) // cond: - // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // result: (MOVBstore [off] {sym} ptr x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[3] + _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + if v_1.Op != Op386MOVBLZX { break } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - 
v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVBstore) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // cond: - // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { - c := v.AuxInt + off1 := v.AuxInt sym := v.Aux - _ = v.Args[3] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = int64(int32(c + d)) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MOVBstore) + v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { break } - if x.AuxInt != i-1 { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { break } - if x.Aux != s { + v.reset(Op386MOVBstoreconst) + v.AuxInt = 
makeValAndOff(int64(int8(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - _ = x.Args[3] - if p != x.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if idx != x.Args[1] { + v.reset(Op386MOVBstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { break } - if w != x.Args[2] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + v.reset(Op386MOVBstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} (ADDL ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + _ = 
v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(Op386MOVBstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // result: (MOVWstore [i-1] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] + _ = v.Args[2] p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { + v_1 := v.Args[1] + if v_1.Op != Op386SHRWconst { break } - if v_2.AuxInt != 8 { + if v_1.AuxInt != 8 { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != Op386MOVBstore { break } if x.AuxInt != i-1 { @@ -4554,48 +5573,43 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { if x.Aux != s { break } - _ = x.Args[3] - if idx != x.Args[0] { - break - } - if p != x.Args[1] { + _ = x.Args[2] + if p != x.Args[0] { break } - if w != x.Args[2] { + if w != x.Args[1] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstoreidx1) + v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) - v.AddArg(idx) v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // result: (MOVWstore [i-1] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { + _ = v.Args[2] + p := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRLconst { break } - if v_2.AuxInt != 8 { + if v_1.AuxInt != 8 { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != Op386MOVBstore { break } if x.AuxInt != i-1 { @@ -4604,151 +5618,134 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { if x.Aux != s { break } - _ = x.Args[3] + _ = x.Args[2] if p != x.Args[0] { break } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { + if w != x.Args[1] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstoreidx1) + v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) - v.AddArg(idx) v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // result: (MOVWstore [i] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 8 { + _ = v.Args[2] + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != Op386MOVBstore { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + if x.AuxInt != i+1 { break } - if x.AuxInt != i-1 { + if x.Aux != s { break } - if x.Aux != s { + _ = x.Args[2] + if p != x.Args[0] { break } - _ = x.Args[3] - if idx != x.Args[0] { + x_1 := x.Args[1] + if x_1.Op != Op386SHRWconst { break } - if p != x.Args[1] { + if x_1.AuxInt != 8 { break } - if w != x.Args[2] { + if w != x_1.Args[0] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 + v.reset(Op386MOVWstore) + v.AuxInt = i v.Aux = s v.AddArg(p) - v.AddArg(idx) 
v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) + return false +} +func rewriteValue386_Op386MOVBstore_10(v *Value) bool { + // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + // result: (MOVWstore [i] {s} p w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] + _ = v.Args[2] p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + w := v.Args[1] + x := v.Args[2] + if x.Op != Op386MOVBstore { break } - if x.AuxInt != i-1 { + if x.AuxInt != i+1 { break } if x.Aux != s { break } - _ = x.Args[3] + _ = x.Args[2] if p != x.Args[0] { break } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { + x_1 := x.Args[1] + if x_1.Op != Op386SHRLconst { break } - if w0.AuxInt != j-8 { + if x_1.AuxInt != 8 { break } - if w != w0.Args[0] { + if w != x_1.Args[0] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 + v.reset(Op386MOVWstore) + v.AuxInt = i v.Aux = s v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) + // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + // result: (MOVWstore [i-1] {s} p w0 mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[3] + _ = v.Args[2] p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { + v_1 := v.Args[1] + if v_1.Op != Op386SHRLconst { break } - j := 
v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != Op386MOVBstore { break } if x.AuxInt != i-1 { @@ -4757,14 +5754,11 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { if x.Aux != s { break } - _ = x.Args[3] - if idx != x.Args[0] { - break - } - if p != x.Args[1] { + _ = x.Args[2] + if p != x.Args[0] { break } - w0 := x.Args[2] + w0 := x.Args[1] if w0.Op != Op386SHRLconst { break } @@ -4774,274 +5768,107 @@ func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { if w != w0.Args[0] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVWstoreidx1) + v.reset(Op386MOVWstore) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) - v.AddArg(idx) v.AddArg(w0) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool { - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) +func rewriteValue386_Op386MOVBstoreconst_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { - i := v.AuxInt + sc := v.AuxInt s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - _ = x.Args[3] - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses 
== 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[3] - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - _ = x.Args[3] - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVLload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[2] - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVLload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux _ = v.Args[1] v_0 := 
v.Args[0] if v_0.Op != Op386ADDLconst { break } - off2 := v_0.AuxInt + off := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - if !(is32Bit(off1 + off2)) { + if !(ValAndOff(sc).canAdd(off)) { break } - v.reset(Op386MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(Op386MOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) for { - off1 := v.AuxInt + sc := v.AuxInt sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { - break - } - v.reset(Op386MOVLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { - break - } - off2 := v_0.AuxInt + off := v_0.AuxInt sym2 := v_0.Aux - _ = v_0.Args[1] ptr := v_0.Args[0] - idx := v_0.Args[1] mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVLloadidx1) - 
v.AuxInt = off1 + off2 + v.reset(Op386MOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVBstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt + x := v.AuxInt sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if v_0.Op != Op386LEAL1 { break } - off2 := v_0.AuxInt + off := v_0.AuxInt sym2 := v_0.Aux _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLloadidx4) - v.AuxInt = off1 + off2 + v.reset(Op386MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVLloadidx1 [off] {sym} ptr idx mem) + // match: (MOVBstoreconst [x] {sym} (ADDL ptr idx) mem) + // cond: + // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) for { - off := v.AuxInt + x := v.AuxInt sym := v.Aux _ = v.Args[1] v_0 := v.Args[0] @@ -5052,195 +5879,221 @@ func rewriteValue386_Op386MOVLload_0(v *Value) bool { ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVLloadidx1) - v.AuxInt = off + v.reset(Op386MOVBstoreconstidx1) + v.AuxInt = x v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { - // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) - // cond: - // 
result: (MOVLloadidx4 [c] {sym} ptr idx mem) + // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) for { c := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != Op386MOVBstoreconst { break } - if v_1.AuxInt != 2 { + a := x.AuxInt + if x.Aux != s { break } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(Op386MOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true } - // match: (MOVLloadidx1 [c] {sym} (SHLLconst [2] idx) ptr mem) - // cond: - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) for { - c := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { + a := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != Op386MOVBstoreconst { break } - if v_0.AuxInt != 2 { + c := x.AuxInt + if x.Aux != s { break } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) + _ = x.Args[1] + if p != 
x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(Op386MOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + return false +} +func rewriteValue386_Op386MOVBstoreconstidx1_0(v *Value) bool { + // match: (MOVBstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) // cond: - // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - c := v.AuxInt + x := v.AuxInt sym := v.Aux _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - d := v_0.AuxInt + c := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) + v.reset(Op386MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) + // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) // cond: - // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - c := v.AuxInt + x := v.AuxInt sym := v.Aux _ = v.Args[2] - idx := v.Args[0] + ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - d := v_1.AuxInt - ptr := v_1.Args[0] + c := v_1.AuxInt + idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) + v.reset(Op386MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) - // cond: - // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + // match: 
(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) for { c := v.AuxInt - sym := v.Aux + s := v.Aux _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != Op386MOVBstoreconstidx1 { break } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = int64(int32(c + d)) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) + a := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(Op386MOVWstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(i) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) + return false +} +func rewriteValue386_Op386MOVBstoreidx1_0(v *Value) bool { + // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLloadidx1) + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLloadidx4_0(v *Value) 
bool { - // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) + // match: (MOVBstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) // cond: - // result: (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem) + // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + _ = v.Args[3] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) + d := v_1.AuxInt + ptr := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVBstoreidx1) v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) + // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVLloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem) + // result: (MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { @@ -5248,702 +6101,2980 @@ func rewriteValue386_Op386MOVLloadidx4_0(v *Value) bool { } d := v_1.AuxInt idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) - v.AuxInt = int64(int32(c + 4*d)) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVBstoreidx1) + v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) + // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) + // cond: + // result: 
(MOVBstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) for { - off1 := v.AuxInt + c := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(Op386MOVLstore) - v.AuxInt = off1 + off2 + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVBstoreidx1) + v.AuxInt = int64(int32(c + d)) v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { + if v_2.AuxInt != 8 { break } - v.reset(Op386MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := 
v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if x.AuxInt != i-1 { break } - v.reset(Op386MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { + if x.Aux != s { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + _ = x.Args[3] + if p != x.Args[0] { break } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if idx != x.Args[1] { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if w != x.Args[2] { break } - v.reset(Op386MOVLstoreidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) v.AddArg(idx) - v.AddArg(val) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) - // 
cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { + if v_2.AuxInt != 8 { break } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) - // cond: y.Uses==1 && clobber(y) - // result: (ADDLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ADDLload { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - if y.AuxInt != off { + if x.AuxInt != i-1 { break } - if y.Aux != sym { + if x.Aux != s { break } - _ = y.Args[2] - x := y.Args[0] - if ptr != y.Args[1] { + _ = x.Args[3] + if idx != x.Args[0] { break } - mem := y.Args[2] - if mem != v.Args[2] { + if p != x.Args[1] { break } - if !(y.Uses == 1 && clobber(y)) { + if w != x.Args[2] { break } - v.reset(Op386ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) - // cond: y.Uses==1 && clobber(y) - // result: (ANDLmodify [off] {sym} ptr x mem) + // match: (MOVBstoreidx1 [i] {s} idx 
p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ANDLload { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - if y.AuxInt != off { + if v_2.AuxInt != 8 { break } - if y.Aux != sym { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - _ = y.Args[2] - x := y.Args[0] - if ptr != y.Args[1] { + if x.AuxInt != i-1 { break } - mem := y.Args[2] - if mem != v.Args[2] { + if x.Aux != s { break } - if !(y.Uses == 1 && clobber(y)) { + _ = x.Args[3] + if p != x.Args[0] { break } - v.reset(Op386ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) - // cond: y.Uses==1 && clobber(y) - // result: (ORLmodify [off] {sym} ptr x mem) + // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ORLload { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - if y.AuxInt != off { + if v_2.AuxInt != 8 { break } - if y.Aux != sym { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - _ = y.Args[2] - x := y.Args[0] - if ptr != y.Args[1] { + if x.AuxInt != i-1 { 
break } - mem := y.Args[2] - if mem != v.Args[2] { + if x.Aux != s { break } - if !(y.Uses == 1 && clobber(y)) { + _ = x.Args[3] + if idx != x.Args[0] { break } - v.reset(Op386ORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) - // cond: y.Uses==1 && clobber(y) - // result: (XORLmodify [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386XORLload { - break - } - if y.AuxInt != off { - break - } - if y.Aux != sym { - break - } - _ = y.Args[2] - x := y.Args[0] - if ptr != y.Args[1] { + if p != x.Args[1] { break } - mem := y.Args[2] - if mem != v.Args[2] { + if w != x.Args[2] { break } - if !(y.Uses == 1 && clobber(y)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstore_10(v *Value) bool { - // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDLmodify [off] {sym} ptr x mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ADDL { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRWconst { break } - _ = y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload { + if v_2.AuxInt != 8 { break } - if l.AuxInt != off { + w := v_2.Args[0] + x := v.Args[3] + if 
x.Op != Op386MOVBstoreidx1 { break } - if l.Aux != sym { + if x.AuxInt != i-1 { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if x.Aux != s { break } - mem := l.Args[1] - x := y.Args[1] - if mem != v.Args[2] { + _ = x.Args[3] + if p != x.Args[0] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if idx != x.Args[1] { break } - v.reset(Op386ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ADDLmodify [off] {sym} ptr x mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ADDL { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRWconst { break } - _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload { + if v_2.AuxInt != 8 { break } - if l.AuxInt != off { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - if l.Aux != sym { + if x.AuxInt != i-1 { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if x.Aux != s { break } - mem := l.Args[1] - if mem != v.Args[2] { + _ = x.Args[3] + if idx != x.Args[0] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if p != x.Args[1] { break } - v.reset(Op386ADDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && 
clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (SUBLmodify [off] {sym} ptr x mem) + return false +} +func rewriteValue386_Op386MOVBstoreidx1_10(v *Value) bool { + // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386SUBL { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRWconst { break } - _ = y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload { + if v_2.AuxInt != 8 { break } - if l.AuxInt != off { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - if l.Aux != sym { + if x.AuxInt != i-1 { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if x.Aux != s { break } - mem := l.Args[1] - x := y.Args[1] - if mem != v.Args[2] { + _ = x.Args[3] + if p != x.Args[0] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if idx != x.Args[1] { break } - v.reset(Op386SUBLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDLmodify [off] {sym} ptr x mem) + // match: (MOVBstoreidx1 [i] {s} idx p (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} 
idx p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ANDL { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRWconst { break } - _ = y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload { + if v_2.AuxInt != 8 { break } - if l.AuxInt != off { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { break } - if l.Aux != sym { + if x.AuxInt != i-1 { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if x.Aux != s { break } - mem := l.Args[1] - x := y.Args[1] - if mem != v.Args[2] { + _ = x.Args[3] + if idx != x.Args[0] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if p != x.Args[1] { break } - v.reset(Op386ANDLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRLconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + 
v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRLconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRLconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := 
v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRLconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRWconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRWconst { + break + } + if 
x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} p idx (SHRWconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRWconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p w x:(MOVBstoreidx1 [i+1] {s} idx p (SHRWconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != Op386SHRWconst { + break + } + if x_2.AuxInt != 8 { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + return false +} +func 
rewriteValue386_Op386MOVBstoreidx1_20(v *Value) bool { + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p 
(SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[3] + idx := v.Args[0] + p := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[3] + if idx != x.Args[0] { + break + } + if p != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(Op386MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + 
// match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MOVLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != 
Op386LEAL1 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVLloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(Op386MOVLloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int64(int32(read32(sym, off, config.BigEndian)))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int64(int32(read32(sym, off, config.BigEndian))) + return true + } + return false +} +func rewriteValue386_Op386MOVLloadidx1_0(v *Value) bool { + // match: 
(MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) + // cond: + // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 2 { + break + } + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} (SHLLconst [2] idx) ptr mem) + // cond: + // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { + break + } + if v_0.AuxInt != 2 { + break + } + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) + // cond: + // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} 
ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) + // cond: + // result: (MOVLloadidx1 [int64(int32(c+d))] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLloadidx4_0(v *Value) bool { + // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx4 [int64(int32(c+d))] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVLloadidx4 [int64(int32(c+4*d))] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = int64(int32(c + 4*d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = 
config + // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val 
mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLstoreidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(Op386MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op 
!= Op386ADDLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ANDLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ORLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386XORLload { + break + } + 
if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstore_10(v *Value) bool { + // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ADDL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ADDL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) 
+ return true + } + // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (SUBLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386SUBL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ANDL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ANDL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != 
Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ORL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ORL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore 
{sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386XORL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386XORL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ADDLconst { + break + } + c := y.AuxInt + l := y.Args[0] + if l.Op != 
Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386ADDLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstore_20(v *Value) bool { + // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ANDLconst { + break + } + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386ANDLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386ORLconst { + break + } + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + 
break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != Op386XORLconst { + break + } + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstoreconst 
[ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { + break + } + off := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) + // cond: + // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := 
v.Args[0] + if v_0.Op != Op386ADDL { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { + // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 2 { + break + } + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) + // cond: + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreconstidx4_0(v *Value) bool { + // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) + // cond: + // result: 
(MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(4 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { + // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) + // cond: + // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 2 { + break + } + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} (SHLLconst [2] idx) ptr val mem) + // cond: + // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { + break + } + if v_0.AuxInt != 2 { + break + } + idx := v_0.Args[0] + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true 
+ } + // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) + // cond: + // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) + // cond: + // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + ptr := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) + // cond: + // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = 
int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { + // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) + // cond: + // result: (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = int64(int32(c + d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = int64(int32(c + 4*d)) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ANDLmodify [off] {sym} ptr x mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLloadidx4 x [off] {sym} ptr idx mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ANDL { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ADDLloadidx4 { break } - _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload { + if y.AuxInt != off { break } - if l.AuxInt != off { + if y.Aux != sym { break } - if 
l.Aux != sym { + _ = y.Args[3] + x := y.Args[0] + if ptr != y.Args[1] { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if idx != y.Args[2] { break } - mem := l.Args[1] - if mem != v.Args[2] { + mem := y.Args[3] + if mem != v.Args[3] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if !(y.Uses == 1 && clobber(y)) { break } - v.reset(Op386ANDLmodify) + v.reset(Op386ADDLmodifyidx4) v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORLmodify [off] {sym} ptr x mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLloadidx4 x [off] {sym} ptr idx mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ORL { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ANDLloadidx4 { break } - _ = y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload { + if y.AuxInt != off { break } - if l.AuxInt != off { + if y.Aux != sym { break } - if l.Aux != sym { + _ = y.Args[3] + x := y.Args[0] + if ptr != y.Args[1] { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if idx != y.Args[2] { break } - mem := l.Args[1] - x := y.Args[1] - if mem != v.Args[2] { + mem := y.Args[3] + if mem != v.Args[3] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if !(y.Uses == 1 && clobber(y)) { break } - v.reset(Op386ORLmodify) + v.reset(Op386ANDLmodifyidx4) v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (ORLmodify [off] {sym} ptr x mem) + // match: (MOVLstoreidx4 {sym} 
[off] ptr idx y:(ORLloadidx4 x [off] {sym} ptr idx mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386ORL { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ORLloadidx4 { break } - _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload { + if y.AuxInt != off { break } - if l.AuxInt != off { + if y.Aux != sym { break } - if l.Aux != sym { + _ = y.Args[3] + x := y.Args[0] + if ptr != y.Args[1] { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if idx != y.Args[2] { break } - mem := l.Args[1] - if mem != v.Args[2] { + mem := y.Args[3] + if mem != v.Args[3] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if !(y.Uses == 1 && clobber(y)) { break } - v.reset(Op386ORLmodify) + v.reset(Op386ORLmodifyidx4) v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) - // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORLmodify [off] {sym} ptr x mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLloadidx4 x [off] {sym} ptr idx mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386XORL { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386XORLloadidx4 { break } - _ = y.Args[1] - l := y.Args[0] - if l.Op != Op386MOVLload { + if y.AuxInt != off { break } - if l.AuxInt != off { + if y.Aux != sym { break } - if l.Aux != sym { + _ = y.Args[3] + x := y.Args[0] + if ptr != y.Args[1] { break } - _ = l.Args[1] - if ptr != l.Args[0] { + if idx != y.Args[2] { break } - mem := l.Args[1] - x := y.Args[1] - if mem != v.Args[2] { + mem := y.Args[3] + if 
mem != v.Args[3] { break } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + if !(y.Uses == 1 && clobber(y)) { break } - v.reset(Op386XORLmodify) + v.reset(Op386XORLmodifyidx4) v.AuxInt = off v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) - // result: (XORLmodify [off] {sym} ptr x mem) + // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - y := v.Args[1] - if y.Op != Op386XORL { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ADDL { break } _ = y.Args[1] - x := y.Args[0] - l := y.Args[1] - if l.Op != Op386MOVLload { + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { break } if l.AuxInt != off { @@ -5952,490 +9083,598 @@ func rewriteValue386_Op386MOVLstore_10(v *Value) bool { if l.Aux != sym { break } - _ = l.Args[1] + _ = l.Args[2] if ptr != l.Args[0] { break } - mem := l.Args[1] - if mem != v.Args[2] { - break - } - if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { - break - } - v.reset(Op386XORLmodify) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVLstoreconst_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(Op386MOVLstoreconst) - v.AuxInt = 
ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { - break - } - v.reset(Op386MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if idx != l.Args[1] { break } - off := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + mem := l.Args[2] + x := y.Args[1] + if mem != 
v.Args[3] { break } - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) - // cond: - // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = x + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstoreconstidx1_0(v *Value) bool { - // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) - // cond: - // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDLmodifyidx4 [off] {sym} ptr idx x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ADDL { break } - if v_1.AuxInt != 2 { + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLloadidx4 { break } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = c + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ADDLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + 
v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) - // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(SUBL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (SUBLmodifyidx4 [off] {sym} ptr idx x mem) for { - x := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386SUBL { break } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + x := y.Args[1] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386SUBLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) - // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem) for { - x := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ANDL { break } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx1) - 
v.AuxInt = ValAndOff(x).add(c) + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + x := y.Args[1] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLstoreconstidx4_0(v *Value) bool { - // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) - // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) +func rewriteValue386_Op386MOVLstoreidx4_10(v *Value) bool { + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodifyidx4 [off] {sym} ptr idx x mem) for { - x := v.AuxInt + off := v.AuxInt sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ANDL { break } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) - // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLloadidx4 { break } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = 
ValAndOff(x).add(4 * c) + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ANDLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstoreidx1_0(v *Value) bool { - // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ORL { break } - if v_1.AuxInt != 2 { + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { break } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + x := y.Args[1] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ORLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreidx1 [c] {sym} (SHLLconst [2] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && 
l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodifyidx4 [off] {sym} ptr idx x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ORL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { break } - if v_0.AuxInt != 2 { + _ = l.Args[2] + if ptr != l.Args[0] { break } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386ORLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386XORL { break } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) + _ = y.Args[1] + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + x := y.Args[1] + if mem != v.Args[3] { 
+ break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // cond: - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodifyidx4 [off] {sym} ptr idx x mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386XORL { break } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ADDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ADDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] ptr 
:= v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ADDLconst { break } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx1 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ANDLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ANDLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ANDLconst { break } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = int64(int32(c + d)) + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { 
+ break + } + v.reset(Op386ANDLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstoreidx4_0(v *Value) bool { - // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx4 [int64(int32(c+d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(ORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + ptr := v.Args[0] + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386ORLconst { break } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = int64(int32(c + d)) + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386ORLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [int64(int32(c+4*d))] {sym} ptr idx val mem) + // match: (MOVLstoreidx4 {sym} [off] ptr idx y:(XORLconst [c] l:(MOVLloadidx4 [off] {sym} ptr idx mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) && validValAndOff(c,off) + // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} 
ptr idx mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + idx := v.Args[1] + y := v.Args[2] + if y.Op != Op386XORLconst { break } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = int64(int32(c + 4*d)) + c := y.AuxInt + l := y.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[2] + if ptr != l.Args[0] { + break + } + if idx != l.Args[1] { + break + } + mem := l.Args[2] + if mem != v.Args[3] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l) && validValAndOff(c, off)) { + break + } + v.reset(Op386XORLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } @@ -7487,7 +10726,7 @@ func rewriteValue386_Op386MOVWLSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWLSXload, v.Type) + v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -7592,7 +10831,7 @@ func rewriteValue386_Op386MOVWLZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWload, v.Type) + v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -7826,12 +11065,30 @@ func rewriteValue386_Op386MOVWload_0(v *Value) bool { if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(Op386MOVWloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if 
!(symIsRO(sym)) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = int64(read16(sym, off, config.BigEndian)) return true } return false @@ -8505,6 +11762,37 @@ func rewriteValue386_Op386MOVWstoreconst_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + a := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != Op386MOVWstoreconst { + break + } + c := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(Op386MOVLstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } return false } func rewriteValue386_Op386MOVWstoreconstidx1_0(v *Value) bool { @@ -9486,6 +12774,114 @@ func rewriteValue386_Op386MULL_0(v *Value) bool { v.AddArg(x) return true } + // match: (MULL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULLload x [off] {sym} ptr mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLload { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386MULLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULL l:(MOVLload [off] {sym} ptr mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULLload x [off] {sym} ptr mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLload { + 
break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[1] + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386MULLload) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MULL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (MULLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValue386_Op386MULLconst_0(v *Value) bool { @@ -9974,21 +13370,203 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValue386_Op386MULLconst_30(v *Value) bool { - // match: (MULLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [int64(int32(c*d))]) + return false +} +func rewriteValue386_Op386MULLconst_30(v *Value) bool { + // match: (MULLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [int64(int32(c*d))]) + 
for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { + break + } + d := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = int64(int32(c * d)) + return true + } + return false +} +func rewriteValue386_Op386MULLload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MULLload [off1+off2] {sym} val base mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MULLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MULLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MULLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL4 { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr := v_1.Args[0] + 
idx := v_1.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MULLloadidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MULLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) + // cond: is32Bit(off1+off2) + // result: (MULLloadidx4 [off1+off2] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MULLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) + // cond: is32Bit(off1+off2*4) + // result: (MULLloadidx4 [off1+off2*4] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + base := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386ADDLconst { + break + } + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MULLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MULLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + val := v.Args[0] + 
v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - d := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = int64(int32(c * d)) + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386MULLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) return true } return false @@ -9999,7 +13577,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10013,7 +13591,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSDload) @@ -10025,7 +13603,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { return true } // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10039,7 +13617,7 @@ func rewriteValue386_Op386MULSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSDload) @@ -10118,7 +13696,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: 
canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10132,7 +13710,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSSload) @@ -10144,7 +13722,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { return true } // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10158,7 +13736,7 @@ func rewriteValue386_Op386MULSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386MULSSload) @@ -10463,7 +14041,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { return true } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10477,7 +14055,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLload) @@ -10489,7 +14067,7 @@ func rewriteValue386_Op386ORL_0(v *Value) bool { return true } // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -10503,7 +14081,7 @@ func 
rewriteValue386_Op386ORL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386ORLload) @@ -10521,6 +14099,62 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ + // match: (ORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ORLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (ORLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (ORL x x) // cond: // result: x @@ -10575,7 +14209,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10624,7 +14258,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16) + v0 := b.NewValue0(x0.Pos, 
Op386MOVWload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10700,7 +14334,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10776,7 +14410,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10852,7 +14486,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0 := b.NewValue0(x1.Pos, Op386MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10928,7 +14562,7 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32) + v0 := b.NewValue0(x0.Pos, Op386MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10991,6 +14625,11 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386ORL_20(v *Value) bool { + b := v.Block + _ = b // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) @@ -11099,11 +14738,6 @@ func rewriteValue386_Op386ORL_10(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386ORL_20(v *Value) bool { - b := v.Block - _ = b // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 
&& s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) @@ -11626,6 +15260,11 @@ func rewriteValue386_Op386ORL_20(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386ORL_30(v *Value) bool { + b := v.Block + _ = b // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -11794,11 +15433,6 @@ func rewriteValue386_Op386ORL_20(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386ORL_30(v *Value) bool { - b := v.Block - _ = b // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -12471,6 +16105,11 @@ func rewriteValue386_Op386ORL_30(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386ORL_40(v *Value) bool { + b := v.Block + _ = b // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -12639,11 +16278,6 @@ func rewriteValue386_Op386ORL_30(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386ORL_40(v *Value) bool { - b := v.Block - _ = b // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -13316,6 +16950,11 @@ func rewriteValue386_Op386ORL_40(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValue386_Op386ORL_50(v *Value) bool { + b := v.Block + _ = b // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -13484,11 +17123,6 @@ func rewriteValue386_Op386ORL_40(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386ORL_50(v *Value) bool { - b := v.Block - _ = b // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) 
// cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) @@ -14057,83 +17691,315 @@ func rewriteValue386_Op386ORL_50(v *Value) bool { if idx != x0.Args[0] { break } - if p != x0.Args[1] { + if p != x0.Args[1] { + break + } + if mem != x0.Args[2] { + break + } + if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ORLconst_0(v *Value) bool { + // match: (ORLconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORLconst [c] _) + // cond: int32(c)==-1 + // result: (MOVLconst [-1]) + for { + c := v.AuxInt + if !(int32(c) == -1) { + break + } + v.reset(Op386MOVLconst) + v.AuxInt = -1 + return true + } + // match: (ORLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c|d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { + break + } + d := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValue386_Op386ORLconstmodify_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: 
ValAndOff(valoff1).canAdd(off2) + // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ORLconstmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2)) { + break + } + v.reset(Op386ORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + 
v.AddArg(mem) + return true + } + // match: (ORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) + // cond: ValAndOff(valoff1).canAdd(off2*4) + // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2 * 4)) { break } - if mem != x0.Args[2] { + v.reset(Op386ORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2 * 4) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem) + for { + valoff1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386ORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func 
rewriteValue386_Op386ORLconst_0(v *Value) bool { - // match: (ORLconst [c] x) - // cond: int32(c)==0 - // result: x +func rewriteValue386_Op386ORLload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (ORLload [off1+off2] {sym} val base mem) for { - c := v.AuxInt - x := v.Args[0] - if !(int32(c) == 0) { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ORLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (ORLconst [c] _) - // cond: int32(c)==-1 - // result: (MOVLconst [-1]) + // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) for { - c := v.AuxInt - if !(int32(c) == -1) { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { break } - v.reset(Op386MOVLconst) - v.AuxInt = -1 + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (ORLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [c|d]) + // match: (ORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (ORLloadidx4 [off1+off2] 
{mergeSym(sym1,sym2)} val ptr idx mem) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL4 { break } - d := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = c | d + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr := v_1.Args[0] + idx := v_1.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValue386_Op386ORLload_0(v *Value) bool { +func rewriteValue386_Op386ORLloadidx4_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // match: (ORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) // cond: is32Bit(off1+off2) - // result: (ORLload [off1+off2] {sym} val base mem) + // result: (ORLloadidx4 [off1+off2] {sym} val base idx mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { @@ -14141,25 +18007,55 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { } off2 := v_1.AuxInt base := v_1.Args[0] - mem := v.Args[2] + idx := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386ORLload) + v.reset(Op386ORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(val) v.AddArg(base) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // match: (ORLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) + // cond: is32Bit(off1+off2*4) + // result: (ORLloadidx4 [off1+off2*4] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + base := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != 
Op386ADDLconst { + break + } + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (ORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // result: (ORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386LEAL { @@ -14168,58 +18064,150 @@ func rewriteValue386_Op386ORLload_0(v *Value) bool { off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386ORLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ORLmodify_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (ORLmodify [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ORLmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) 
&& (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] mem := v.Args[2] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ORLload) + v.reset(Op386ORLmodify) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386ORLmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (ORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) + // cond: is32Bit(off1+off2) + // result: (ORLmodifyidx4 [off1+off2] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386ORLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) v.AddArg(val) - v.AddArg(base) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386ORLmodify_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem) - // cond: is32Bit(off1+off2) - // result: (ORLmodify [off1+off2] {sym} base val mem) + // match: (ORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) + // cond: is32Bit(off1+off2*4) + // result: (ORLmodifyidx4 [off1+off2*4] {sym} base idx val mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + _ = v.Args[3] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
Op386ADDLconst { break } - off2 := v_0.AuxInt - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + off2 := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { break } - v.reset(Op386ORLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386ORLmodifyidx4) + v.AuxInt = off1 + off2*4 v.Aux = sym v.AddArg(base) + v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (ORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // result: (ORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != Op386LEAL { break @@ -14227,19 +18215,47 @@ func rewriteValue386_Op386ORLmodify_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386ORLmodify) + v.reset(Op386ORLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) + v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } + // match: (ORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (ORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386MOVLconst { + break + } + c := v_2.AuxInt + mem := v.Args[3] + if !(validValAndOff(c, off)) { + break + } + v.reset(Op386ORLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym 
+ v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValue386_Op386ROLBconst_0(v *Value) bool { @@ -15604,7 +19620,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { return true } // match: (SUBL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (SUBLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -15618,7 +19634,7 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386SUBLload) @@ -15629,6 +19645,34 @@ func rewriteValue386_Op386SUBL_0(v *Value) bool { v.AddArg(mem) return true } + // match: (SUBL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (SUBLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386SUBLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (SUBL x x) // cond: // result: (MOVLconst [0]) @@ -15748,6 +19792,128 @@ func rewriteValue386_Op386SUBLload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (SUBLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL4 { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr := v_1.Args[0] + idx := v_1.Args[1] + mem := 
v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386SUBLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386SUBLloadidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (SUBLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) + // cond: is32Bit(off1+off2) + // result: (SUBLloadidx4 [off1+off2] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386SUBLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (SUBLloadidx4 [off1] {sym} val base (ADDLconst [off2] idx) mem) + // cond: is32Bit(off1+off2*4) + // result: (SUBLloadidx4 [off1+off2*4] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + base := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386ADDLconst { + break + } + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386SUBLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (SUBLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux 
+ base := v_1.Args[0] + idx := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { @@ -15766,28 +19932,118 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { if v_0.Op != Op386ADDLconst { break } - off2 := v_0.AuxInt - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386SUBLmodify) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386SUBLmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (SUBLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) + // cond: is32Bit(off1+off2) + // result: (SUBLmodifyidx4 [off1+off2] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + 
_ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386SUBLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (SUBLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) + // cond: is32Bit(off1+off2*4) + // result: (SUBLmodifyidx4 [off1+off2*4] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { break } - v.reset(Op386SUBLmodify) - v.AuxInt = off1 + off2 + v.reset(Op386SUBLmodifyidx4) + v.AuxInt = off1 + off2*4 v.Aux = sym v.AddArg(base) + v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (SUBLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // result: (SUBLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != Op386LEAL { break @@ -15795,19 +20051,47 @@ func rewriteValue386_Op386SUBLmodify_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386SUBLmodify) + v.reset(Op386SUBLmodifyidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) 
v.AddArg(base) + v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } + // match: (SUBLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) + // cond: validValAndOff(-c,off) + // result: (ADDLconstmodifyidx4 [makeValAndOff(-c,off)] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386MOVLconst { + break + } + c := v_2.AuxInt + mem := v.Args[3] + if !(validValAndOff(-c, off)) { + break + } + v.reset(Op386ADDLconstmodifyidx4) + v.AuxInt = makeValAndOff(-c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValue386_Op386SUBSD_0(v *Value) bool { @@ -15816,7 +20100,7 @@ func rewriteValue386_Op386SUBSD_0(v *Value) bool { config := b.Func.Config _ = config // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -15830,7 +20114,7 @@ func rewriteValue386_Op386SUBSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } v.reset(Op386SUBSDload) @@ -15909,7 +20193,7 @@ func rewriteValue386_Op386SUBSS_0(v *Value) bool { config := b.Func.Config _ = config // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && !config.use387 && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l) // result: (SUBSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -15923,7 +20207,7 @@ func rewriteValue386_Op386SUBSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && !config.use387 && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && !config.use387 && clobber(l)) { break } 
v.reset(Op386SUBSSload) @@ -16196,7 +20480,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return true } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -16210,7 +20494,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLload) @@ -16222,7 +20506,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return true } // match: (XORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -16236,7 +20520,7 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(Op386XORLload) @@ -16250,6 +20534,62 @@ func rewriteValue386_Op386XORL_0(v *Value) bool { return false } func rewriteValue386_Op386XORL_10(v *Value) bool { + // match: (XORL x l:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: (XORLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + x := v.Args[0] + l := v.Args[1] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386XORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (XORL l:(MOVLloadidx4 [off] {sym} ptr idx mem) x) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) + // result: 
(XORLloadidx4 x [off] {sym} ptr idx mem) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != Op386MOVLloadidx4 { + break + } + off := l.AuxInt + sym := l.Aux + _ = l.Args[2] + ptr := l.Args[0] + idx := l.Args[1] + mem := l.Args[2] + x := v.Args[1] + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { + break + } + v.reset(Op386XORLloadidx4) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (XORL x x) // cond: // result: (MOVLconst [0]) @@ -16296,34 +20636,266 @@ func rewriteValue386_Op386XORLconst_0(v *Value) bool { v.AddArg(x) return true } - // match: (XORLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [c^d]) + // match: (XORLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c^d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { + break + } + d := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValue386_Op386XORLconstmodify_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem) + for { + valoff1 := v.AuxInt + 
sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLconstmodify) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386XORLconstmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (XORLconstmodifyidx4 [valoff1] {sym} (ADDLconst [off2] base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) + // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {sym} base idx mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2)) { + break + } + v.reset(Op386XORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (XORLconstmodifyidx4 [valoff1] {sym} base (ADDLconst [off2] idx) mem) + // cond: ValAndOff(valoff1).canAdd(off2*4) + // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2*4)] {sym} base idx mem) + for { + valoff1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2 * 4)) { + break + } + v.reset(Op386XORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2 * 4) + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (XORLconstmodifyidx4 [valoff1] {sym1} (LEAL [off2] {sym2} base) idx mem) + // cond: ValAndOff(valoff1).canAdd(off2) && 
canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLconstmodifyidx4 [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base idx mem) + for { + valoff1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLconstmodifyidx4) + v.AuxInt = ValAndOff(valoff1).add(off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386XORLload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (XORLload [off1+off2] {sym} val base mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386XORLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + base := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLload) + v.AuxInt = off1 + off2 + 
v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (XORLload [off1] {sym1} val (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val ptr idx mem) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + val := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386LEAL4 { break } - d := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = c ^ d + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr := v_1.Args[0] + idx := v_1.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386XORLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(val) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValue386_Op386XORLload_0(v *Value) bool { +func rewriteValue386_Op386XORLloadidx4_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem) + // match: (XORLloadidx4 [off1] {sym} val (ADDLconst [off2] base) idx mem) // cond: is32Bit(off1+off2) - // result: (XORLload [off1+off2] {sym} val base mem) + // result: (XORLloadidx4 [off1+off2] {sym} val base idx mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { @@ -16331,25 +20903,55 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { } off2 := v_1.AuxInt base := v_1.Args[0] - mem := v.Args[2] + idx := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386XORLload) + v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(val) v.AddArg(base) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + // match: (XORLloadidx4 
[off1] {sym} val base (ADDLconst [off2] idx) mem) + // cond: is32Bit(off1+off2*4) + // result: (XORLloadidx4 [off1+off2*4] {sym} val base idx mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + val := v.Args[0] + base := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386ADDLconst { + break + } + off2 := v_2.AuxInt + idx := v_2.Args[0] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386XORLloadidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(val) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (XORLloadidx4 [off1] {sym1} val (LEAL [off2] {sym2} base) idx mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem) + // result: (XORLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} val base idx mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[3] val := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386LEAL { @@ -16358,15 +20960,17 @@ func rewriteValue386_Op386XORLload_0(v *Value) bool { off2 := v_1.AuxInt sym2 := v_1.Aux base := v_1.Args[0] - mem := v.Args[2] + idx := v.Args[2] + mem := v.Args[3] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386XORLload) + v.reset(Op386XORLloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(val) v.AddArg(base) + v.AddArg(idx) v.AddArg(mem) return true } @@ -16432,6 +21036,124 @@ func rewriteValue386_Op386XORLmodify_0(v *Value) bool { } return false } +func rewriteValue386_Op386XORLmodifyidx4_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (XORLmodifyidx4 [off1] {sym} (ADDLconst [off2] base) idx val mem) + // cond: is32Bit(off1+off2) + // result: (XORLmodifyidx4 [off1+off2] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != 
Op386ADDLconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (XORLmodifyidx4 [off1] {sym} base (ADDLconst [off2] idx) val mem) + // cond: is32Bit(off1+off2*4) + // result: (XORLmodifyidx4 [off1+off2*4] {sym} base idx val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + base := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + off2 := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1 + off2*4)) { + break + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off1 + off2*4 + v.Aux = sym + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (XORLmodifyidx4 [off1] {sym1} (LEAL [off2] {sym2} base) idx val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (XORLmodifyidx4 [off1+off2] {mergeSym(sym1,sym2)} base idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(Op386XORLmodifyidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (XORLmodifyidx4 [off] {sym} ptr idx (MOVLconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (XORLconstmodifyidx4 [makeValAndOff(c,off)] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := 
v.Args[2] + if v_2.Op != Op386MOVLconst { + break + } + c := v_2.AuxInt + mem := v.Args[3] + if !(validValAndOff(c, off)) { + break + } + v.reset(Op386XORLconstmodifyidx4) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} func rewriteValue386_OpAdd16_0(v *Value) bool { // match: (Add16 x y) // cond: @@ -16834,14 +21556,16 @@ func rewriteValue386_OpCvt64Fto32F_0(v *Value) bool { } } func rewriteValue386_OpDiv16_0(v *Value) bool { - // match: (Div16 x y) + // match: (Div16 [a] x y) // cond: - // result: (DIVW x y) + // result: (DIVW [a] x y) for { + a := v.AuxInt _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(Op386DIVW) + v.AuxInt = a v.AddArg(x) v.AddArg(y) return true @@ -16862,14 +21586,16 @@ func rewriteValue386_OpDiv16u_0(v *Value) bool { } } func rewriteValue386_OpDiv32_0(v *Value) bool { - // match: (Div32 x y) + // match: (Div32 [a] x y) // cond: - // result: (DIVL x y) + // result: (DIVL [a] x y) for { + a := v.AuxInt _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(Op386DIVL) + v.AuxInt = a v.AddArg(x) v.AddArg(y) return true @@ -18237,14 +22963,16 @@ func rewriteValue386_OpLsh8x8_0(v *Value) bool { } } func rewriteValue386_OpMod16_0(v *Value) bool { - // match: (Mod16 x y) + // match: (Mod16 [a] x y) // cond: - // result: (MODW x y) + // result: (MODW [a] x y) for { + a := v.AuxInt _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(Op386MODW) + v.AuxInt = a v.AddArg(x) v.AddArg(y) return true @@ -18265,14 +22993,16 @@ func rewriteValue386_OpMod16u_0(v *Value) bool { } } func rewriteValue386_OpMod32_0(v *Value) bool { - // match: (Mod32 x y) + // match: (Mod32 [a] x y) // cond: - // result: (MODL x y) + // result: (MODL [a] x y) for { + a := v.AuxInt _ = v.Args[1] x := v.Args[0] y := v.Args[1] v.reset(Op386MODL) + v.AuxInt = a v.AddArg(x) v.AddArg(y) return true @@ -18758,7 +23488,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool { _ = typ // match: (Neg32F x) // 
cond: !config.use387 - // result: (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])) + // result: (PXOR x (MOVSSconst [auxFrom32F(float32(math.Copysign(0, -1)))])) for { x := v.Args[0] if !(!config.use387) { @@ -18767,7 +23497,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool { v.reset(Op386PXOR) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32) - v0.AuxInt = f2i(math.Copysign(0, -1)) + v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) v.AddArg(v0) return true } @@ -18794,7 +23524,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool { _ = typ // match: (Neg64F x) // cond: !config.use387 - // result: (PXOR x (MOVSDconst [f2i(math.Copysign(0, -1))])) + // result: (PXOR x (MOVSDconst [auxFrom64F(math.Copysign(0, -1))])) for { x := v.Args[0] if !(!config.use387) { @@ -18803,7 +23533,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool { v.reset(Op386PXOR) v.AddArg(x) v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64) - v0.AuxInt = f2i(math.Copysign(0, -1)) + v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) v.AddArg(v0) return true } @@ -19794,6 +24524,59 @@ func rewriteValue386_OpRsh8x8_0(v *Value) bool { return true } } +func rewriteValue386_OpSelect0_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Select0 (Mul32uover x y)) + // cond: + // result: (Select0 (MULLU x y)) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_OpSelect1_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Select1 (Mul32uover x y)) + // cond: + // result: (SETO (Select1 (MULLU x y))) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] 
+ y := v_0.Args[1] + v.reset(Op386SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} func rewriteValue386_OpSignExt16to32_0(v *Value) bool { // match: (SignExt16to32 x) // cond: @@ -20932,6 +25715,20 @@ func rewriteBlock386(b *Block) bool { b.Aux = nil return true } + // match: (If (SETO cmp) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != Op386SETO { + break + } + cmp := v.Args[0] + b.Kind = Block386OS + b.SetControl(cmp) + b.Aux = nil + return true + } // match: (If (SETGF cmp) yes no) // cond: // result: (UGT cmp yes no) @@ -21689,6 +26486,58 @@ func rewriteBlock386(b *Block) bool { b.Aux = nil return true } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != Op386TESTB { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386SETO { + break + } + cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SETO { + break + } + if cmp != v_1.Args[0] { + break + } + b.Kind = Block386OS + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != Op386TESTB { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != Op386SETO { + break + } + cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SETO { + break + } + if cmp != v_1.Args[0] { + break + } + b.Kind = Block386OS + b.SetControl(cmp) + b.Aux = nil + return true + } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) // cond: // result: (UGT cmp yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 47d3f431ab22e..b52e53f9d2340 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -15,6 +17,10 @@ var _ = types.TypeMem // in case not otherwise used func rewriteValueAMD64(v *Value) bool { switch v.Op { + case OpAMD64ADCQ: + return rewriteValueAMD64_OpAMD64ADCQ_0(v) + case OpAMD64ADCQconst: + return rewriteValueAMD64_OpAMD64ADCQconst_0(v) case OpAMD64ADDL: return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v) case OpAMD64ADDLconst: @@ -23,14 +29,20 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v) case OpAMD64ADDLload: return rewriteValueAMD64_OpAMD64ADDLload_0(v) + case OpAMD64ADDLmodify: + return rewriteValueAMD64_OpAMD64ADDLmodify_0(v) case OpAMD64ADDQ: return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v) + case OpAMD64ADDQcarry: + return rewriteValueAMD64_OpAMD64ADDQcarry_0(v) case OpAMD64ADDQconst: return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v) case OpAMD64ADDQconstmodify: return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v) case OpAMD64ADDQload: return rewriteValueAMD64_OpAMD64ADDQload_0(v) + case OpAMD64ADDQmodify: + return rewriteValueAMD64_OpAMD64ADDQmodify_0(v) case OpAMD64ADDSD: return rewriteValueAMD64_OpAMD64ADDSD_0(v) case OpAMD64ADDSDload: @@ -43,28 +55,64 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ANDL_0(v) case OpAMD64ANDLconst: return rewriteValueAMD64_OpAMD64ANDLconst_0(v) + case OpAMD64ANDLconstmodify: + return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v) case OpAMD64ANDLload: 
return rewriteValueAMD64_OpAMD64ANDLload_0(v) + case OpAMD64ANDLmodify: + return rewriteValueAMD64_OpAMD64ANDLmodify_0(v) case OpAMD64ANDQ: return rewriteValueAMD64_OpAMD64ANDQ_0(v) case OpAMD64ANDQconst: return rewriteValueAMD64_OpAMD64ANDQconst_0(v) + case OpAMD64ANDQconstmodify: + return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v) case OpAMD64ANDQload: return rewriteValueAMD64_OpAMD64ANDQload_0(v) + case OpAMD64ANDQmodify: + return rewriteValueAMD64_OpAMD64ANDQmodify_0(v) case OpAMD64BSFQ: return rewriteValueAMD64_OpAMD64BSFQ_0(v) + case OpAMD64BTCLconst: + return rewriteValueAMD64_OpAMD64BTCLconst_0(v) + case OpAMD64BTCLconstmodify: + return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v) + case OpAMD64BTCLmodify: + return rewriteValueAMD64_OpAMD64BTCLmodify_0(v) + case OpAMD64BTCQconst: + return rewriteValueAMD64_OpAMD64BTCQconst_0(v) + case OpAMD64BTCQconstmodify: + return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v) + case OpAMD64BTCQmodify: + return rewriteValueAMD64_OpAMD64BTCQmodify_0(v) case OpAMD64BTLconst: return rewriteValueAMD64_OpAMD64BTLconst_0(v) case OpAMD64BTQconst: return rewriteValueAMD64_OpAMD64BTQconst_0(v) case OpAMD64BTRLconst: return rewriteValueAMD64_OpAMD64BTRLconst_0(v) + case OpAMD64BTRLconstmodify: + return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v) + case OpAMD64BTRLmodify: + return rewriteValueAMD64_OpAMD64BTRLmodify_0(v) case OpAMD64BTRQconst: return rewriteValueAMD64_OpAMD64BTRQconst_0(v) + case OpAMD64BTRQconstmodify: + return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v) + case OpAMD64BTRQmodify: + return rewriteValueAMD64_OpAMD64BTRQmodify_0(v) case OpAMD64BTSLconst: return rewriteValueAMD64_OpAMD64BTSLconst_0(v) + case OpAMD64BTSLconstmodify: + return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v) + case OpAMD64BTSLmodify: + return rewriteValueAMD64_OpAMD64BTSLmodify_0(v) case OpAMD64BTSQconst: return rewriteValueAMD64_OpAMD64BTSQconst_0(v) + case OpAMD64BTSQconstmodify: + return 
rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v) + case OpAMD64BTSQmodify: + return rewriteValueAMD64_OpAMD64BTSQmodify_0(v) case OpAMD64CMOVLCC: return rewriteValueAMD64_OpAMD64CMOVLCC_0(v) case OpAMD64CMOVLCS: @@ -129,30 +177,54 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64CMPB_0(v) case OpAMD64CMPBconst: return rewriteValueAMD64_OpAMD64CMPBconst_0(v) + case OpAMD64CMPBconstload: + return rewriteValueAMD64_OpAMD64CMPBconstload_0(v) case OpAMD64CMPBload: return rewriteValueAMD64_OpAMD64CMPBload_0(v) case OpAMD64CMPL: return rewriteValueAMD64_OpAMD64CMPL_0(v) case OpAMD64CMPLconst: return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v) + case OpAMD64CMPLconstload: + return rewriteValueAMD64_OpAMD64CMPLconstload_0(v) case OpAMD64CMPLload: return rewriteValueAMD64_OpAMD64CMPLload_0(v) case OpAMD64CMPQ: return rewriteValueAMD64_OpAMD64CMPQ_0(v) case OpAMD64CMPQconst: return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v) + case OpAMD64CMPQconstload: + return rewriteValueAMD64_OpAMD64CMPQconstload_0(v) case OpAMD64CMPQload: return rewriteValueAMD64_OpAMD64CMPQload_0(v) case OpAMD64CMPW: return rewriteValueAMD64_OpAMD64CMPW_0(v) case OpAMD64CMPWconst: return rewriteValueAMD64_OpAMD64CMPWconst_0(v) + case OpAMD64CMPWconstload: + return rewriteValueAMD64_OpAMD64CMPWconstload_0(v) case OpAMD64CMPWload: return rewriteValueAMD64_OpAMD64CMPWload_0(v) case OpAMD64CMPXCHGLlock: return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v) case OpAMD64CMPXCHGQlock: return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v) + case OpAMD64DIVSD: + return rewriteValueAMD64_OpAMD64DIVSD_0(v) + case OpAMD64DIVSDload: + return rewriteValueAMD64_OpAMD64DIVSDload_0(v) + case OpAMD64DIVSS: + return rewriteValueAMD64_OpAMD64DIVSS_0(v) + case OpAMD64DIVSSload: + return rewriteValueAMD64_OpAMD64DIVSSload_0(v) + case OpAMD64HMULL: + return rewriteValueAMD64_OpAMD64HMULL_0(v) + case OpAMD64HMULLU: + return 
rewriteValueAMD64_OpAMD64HMULLU_0(v) + case OpAMD64HMULQ: + return rewriteValueAMD64_OpAMD64HMULQ_0(v) + case OpAMD64HMULQU: + return rewriteValueAMD64_OpAMD64HMULQU_0(v) case OpAMD64LEAL: return rewriteValueAMD64_OpAMD64LEAL_0(v) case OpAMD64LEAL1: @@ -184,7 +256,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVBloadidx1: return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v) case OpAMD64MOVBstore: - return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) + return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v) case OpAMD64MOVBstoreconst: return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v) case OpAMD64MOVBstoreconstidx1: @@ -204,7 +276,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVLi2f: return rewriteValueAMD64_OpAMD64MOVLi2f_0(v) case OpAMD64MOVLload: - return rewriteValueAMD64_OpAMD64MOVLload_0(v) + return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v) case OpAMD64MOVLloadidx1: return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v) case OpAMD64MOVLloadidx4: @@ -212,7 +284,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVLloadidx8: return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v) case OpAMD64MOVLstore: - return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) + return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v) case OpAMD64MOVLstoreconst: return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v) case OpAMD64MOVLstoreconstidx1: @@ -242,7 +314,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVQloadidx8: return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v) case OpAMD64MOVQstore: - return rewriteValueAMD64_OpAMD64MOVQstore_0(v) + return 
rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v) case OpAMD64MOVQstoreconst: return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v) case OpAMD64MOVQstoreconstidx1: @@ -329,14 +401,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v) case OpAMD64ORLconst: return rewriteValueAMD64_OpAMD64ORLconst_0(v) + case OpAMD64ORLconstmodify: + return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v) case OpAMD64ORLload: return rewriteValueAMD64_OpAMD64ORLload_0(v) + case OpAMD64ORLmodify: + return rewriteValueAMD64_OpAMD64ORLmodify_0(v) case OpAMD64ORQ: return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v) case OpAMD64ORQconst: return rewriteValueAMD64_OpAMD64ORQconst_0(v) + case OpAMD64ORQconstmodify: + return 
rewriteValueAMD64_OpAMD64ORQconstmodify_0(v) case OpAMD64ORQload: return rewriteValueAMD64_OpAMD64ORQload_0(v) + case OpAMD64ORQmodify: + return rewriteValueAMD64_OpAMD64ORQmodify_0(v) case OpAMD64ROLB: return rewriteValueAMD64_OpAMD64ROLB_0(v) case OpAMD64ROLBconst: @@ -379,8 +459,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SARWconst_0(v) case OpAMD64SBBLcarrymask: return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v) + case OpAMD64SBBQ: + return rewriteValueAMD64_OpAMD64SBBQ_0(v) case OpAMD64SBBQcarrymask: return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v) + case OpAMD64SBBQconst: + return rewriteValueAMD64_OpAMD64SBBQconst_0(v) case OpAMD64SETA: return rewriteValueAMD64_OpAMD64SETA_0(v) case OpAMD64SETAE: @@ -451,12 +535,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SUBLconst_0(v) case OpAMD64SUBLload: return rewriteValueAMD64_OpAMD64SUBLload_0(v) + case OpAMD64SUBLmodify: + return rewriteValueAMD64_OpAMD64SUBLmodify_0(v) case OpAMD64SUBQ: return rewriteValueAMD64_OpAMD64SUBQ_0(v) + case OpAMD64SUBQborrow: + return rewriteValueAMD64_OpAMD64SUBQborrow_0(v) case OpAMD64SUBQconst: return rewriteValueAMD64_OpAMD64SUBQconst_0(v) case OpAMD64SUBQload: return rewriteValueAMD64_OpAMD64SUBQload_0(v) + case OpAMD64SUBQmodify: + return rewriteValueAMD64_OpAMD64SUBQmodify_0(v) case OpAMD64SUBSD: return rewriteValueAMD64_OpAMD64SUBSD_0(v) case OpAMD64SUBSDload: @@ -493,14 +583,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v) case OpAMD64XORLconst: return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v) + case OpAMD64XORLconstmodify: + return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v) case OpAMD64XORLload: return rewriteValueAMD64_OpAMD64XORLload_0(v) + case OpAMD64XORLmodify: + return rewriteValueAMD64_OpAMD64XORLmodify_0(v) case OpAMD64XORQ: return rewriteValueAMD64_OpAMD64XORQ_0(v) || 
rewriteValueAMD64_OpAMD64XORQ_10(v) case OpAMD64XORQconst: return rewriteValueAMD64_OpAMD64XORQconst_0(v) + case OpAMD64XORQconstmodify: + return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v) case OpAMD64XORQload: return rewriteValueAMD64_OpAMD64XORQload_0(v) + case OpAMD64XORQmodify: + return rewriteValueAMD64_OpAMD64XORQmodify_0(v) case OpAdd16: return rewriteValueAMD64_OpAdd16_0(v) case OpAdd32: @@ -834,7 +932,7 @@ func rewriteValueAMD64(v *Value) bool { case OpMod8u: return rewriteValueAMD64_OpMod8u_0(v) case OpMove: - return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) + return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) || rewriteValueAMD64_OpMove_20(v) case OpMul16: return rewriteValueAMD64_OpMul16_0(v) case OpMul32: @@ -901,6 +999,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpPopCount64_0(v) case OpPopCount8: return rewriteValueAMD64_OpPopCount8_0(v) + case OpRotateLeft16: + return rewriteValueAMD64_OpRotateLeft16_0(v) + case OpRotateLeft32: + return rewriteValueAMD64_OpRotateLeft32_0(v) + case OpRotateLeft64: + return rewriteValueAMD64_OpRotateLeft64_0(v) + case OpRotateLeft8: + return rewriteValueAMD64_OpRotateLeft8_0(v) case OpRound32F: return rewriteValueAMD64_OpRound32F_0(v) case OpRound64F: @@ -1050,6 +1156,86 @@ func rewriteValueAMD64(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool { + // match: (ADCQ x (MOVQconst [c]) carry) + // cond: is32Bit(c) + // result: (ADCQconst x [c] carry) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + carry := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(carry) + return true + } + // match: (ADCQ (MOVQconst [c]) x carry) + // cond: is32Bit(c) + // result: (ADCQconst x [c] carry) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { + break + } + c := 
v_0.AuxInt + x := v.Args[1] + carry := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADCQconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(carry) + return true + } + // match: (ADCQ x y (FlagEQ)) + // cond: + // result: (ADDQcarry x y) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64ADDQcarry) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool { + // match: (ADCQconst x [c] (FlagEQ)) + // cond: + // result: (ADDQconstcarry x [c]) + for { + c := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool { // match: (ADDL x (MOVLconst [c])) // cond: @@ -1604,7 +1790,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { return true } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1618,7 +1804,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDLload) @@ -1630,7 +1816,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { return true } // match: (ADDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -1644,7 +1830,7 @@ func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, 
x) && clobber(l)) { break } v.reset(OpAMD64ADDLload) @@ -2011,13 +2197,69 @@ func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64ADDL) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32) v0.AddArg(y) v.AddArg(v0) return true } return false } +func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool { + // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (ADDLmodify [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (MOVQconst [c])) // cond: is32Bit(c) @@ -2466,7 +2708,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { return true } // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} 
ptr mem) for { _ = v.Args[1] @@ -2480,7 +2722,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDQload) @@ -2492,7 +2734,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { return true } // match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDQload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2506,7 +2748,7 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDQload) @@ -2519,6 +2761,47 @@ func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool { + // match: (ADDQcarry x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ADDQconstcarry x [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADDQcarry (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (ADDQconstcarry x [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ADDQconstcarry) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { // match: (ADDQconst [c] (ADDQ x y)) // cond: @@ -2875,16 +3158,72 @@ func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64ADDQ) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0 := 
b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64) v0.AddArg(y) v.AddArg(v0) return true } return false } +func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool { + // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (ADDQmodify [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2898,7 +3237,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSDload) @@ -2910,7 +3249,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { return true } // match: 
(ADDSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -2924,7 +3263,7 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSDload) @@ -3021,7 +3360,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64ADDSD) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) v.AddArg(v0) return true @@ -3030,7 +3369,7 @@ func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3044,7 +3383,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSSload) @@ -3056,7 +3395,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { return true } // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ADDSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3070,7 +3409,7 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ADDSSload) @@ -3167,7 +3506,7 @@ func 
rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64ADDSS) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) v.AddArg(v0) return true @@ -3326,7 +3665,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { return true } // match: (ANDL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3340,7 +3679,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDLload) @@ -3352,7 +3691,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { return true } // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ANDLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -3366,7 +3705,7 @@ func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ANDLload) @@ -3414,6 +3753,22 @@ func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (ANDLconst [c] (BTRLconst [d] x)) + // cond: + // result: (ANDLconst [c &^ (1< [off] {sym} ptr idx mem) @@ -10389,6 +12581,24 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int64(read8(sym, off))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + 
v.reset(OpAMD64MOVLconst) + v.AuxInt = int64(read8(sym, off)) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { @@ -10896,6 +13106,30 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validOff(off) + // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = makeValAndOff(int64(int8(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -11022,7 +13256,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AuxInt = i - 1 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type) v0.AuxInt = 8 v0.AddArg(w) v.AddArg(v0) @@ -11118,12 +13352,19 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AuxInt = i - 3 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore 
[i-7] {s} p (SHRQconst [56] w) mem)))))))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) // result: (MOVQstore [i-7] {s} p (BSWAPQ w) mem) @@ -11309,19 +13550,12 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AuxInt = i - 7 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w mem) @@ -11457,6 +13691,141 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstore [i] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVBstore { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpAMD64SHRWconst { + break + } + if x_1.AuxInt != 8 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstore [i] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + w := 
v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVBstore { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpAMD64SHRLconst { + break + } + if x_1.AuxInt != 8 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstore [i] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVBstore { + break + } + if x.AuxInt != i+1 { + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpAMD64SHRQconst { + break + } + if x_1.AuxInt != 8 { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w0 mem) @@ -11615,7 +13984,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { v.AuxInt = i - 1 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16) v0.AuxInt = j - 1 v0.Aux = s2 v0.AddArg(p2) @@ -11624,6 +13993,9 @@ func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool { // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, 
sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -11811,6 +14183,37 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + for { + a := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVBstoreconst { + break + } + c := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) @@ -12696,7 +15099,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -12722,7 +15125,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -12862,7 +15265,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 
off @@ -12888,7 +15291,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -13392,6 +15795,31 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVLload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVQconst [int64(read32(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64(read32(sym, off, config.BigEndian)) + return true + } + return false +} func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) // cond: @@ -13818,9 +16246,1014 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { if v_1.Op != OpAMD64MOVLQSX { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstore) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLQZX { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := 
v.Args[0] + if v_0.Op != OpAMD64ADDQconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + 
} + // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreidx8) + v.AuxInt = 
off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + if v_1.AuxInt != 32 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { + break + } + if x.AuxInt != i-4 
{ + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) + // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) + // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpAMD64MOVLload { + break + } + j := x1.AuxInt + s2 := x1.Aux + _ = x1.Args[1] + p2 := x1.Args[0] + mem := x1.Args[1] + mem2 := v.Args[2] + if mem2.Op != OpAMD64MOVLstore { + break + } + if mem2.AuxInt != i-4 { + break + } + if mem2.Aux != s { + break + } + _ = mem2.Args[2] + if p != mem2.Args[0] { + break + } + x2 := mem2.Args[1] + if x2.Op != OpAMD64MOVLload { + break + } + if x2.AuxInt != j-4 { + break + } + if x2.Aux != s2 { + break + } + _ = x2.Args[1] + if p2 != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if mem != mem2.Args[2] { + break + } + if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = j - 4 + v0.Aux = s2 + v0.AddArg(p2) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := 
v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr 
!= y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORLload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off 
{ + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool { + // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ADDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (SUBLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SUBL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64SUBLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ANDLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORL { + break + } + _ = y.Args[1] + l := 
y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORL { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64XORLmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORLmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORL { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64XORLmodify) v.AuxInt = off v.Aux = sym v.AddArg(ptr) @@ -13828,21 +17261,42 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) - // cond: - // result: (MOVLstore [off] {sym} ptr x mem) + // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (BTCLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux _ = v.Args[2] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLQZX { + y := v.Args[1] + if y.Op != OpAMD64BTCL { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstore) + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTCLmodify) v.AuxInt = off v.Aux = sym v.AddArg(ptr) @@ -13850,433 +17304,339 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: 
(MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) + // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (BTRLmodify [off] {sym} ptr x mem) for { - off1 := v.AuxInt + off := v.AuxInt sym := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64BTRL { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { break } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTRLmodify) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool { + // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (BTSLmodify [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux _ = v.Args[2] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { + y := v.Args[1] + if y.Op != OpAMD64BTSL { break } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVLload { break } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { + if l.AuxInt != off { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if l.Aux != sym { break } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { + _ = l.Args[1] + if ptr != l.Args[0] { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTSLmodify) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: 
isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ADDLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { + if l.AuxInt != off { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if l.Aux != sym { break } - v.reset(OpAMD64MOVLstoreidx8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if 
!(ptr.Op != OpSB) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off + v.reset(OpAMD64ADDLconstmodify) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstore [i-4] {s} p w mem) + // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { - i := v.AuxInt - s := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVLstore { + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ANDLconst { break } - if x.AuxInt != i-4 { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - if x.Aux != s { + if l.AuxInt != off { break } - _ = x.Args[2] - if p != x.Args[0] { + if l.Aux != sym { break } - if w != x.Args[1] { + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w) + v.reset(OpAMD64ANDLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: 
(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstore [i-4] {s} p w0 mem) + // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { - i := v.AuxInt - s := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVLstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ORLconst { break } - _ = x.Args[2] - if p != x.Args[0] { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst { + if l.AuxInt != off { break } - if w0.AuxInt != j-32 { + if l.Aux != sym { break } - if w != w0.Args[0] { + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) + v.reset(OpAMD64ORLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) - // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) - // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: 
isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { - i := v.AuxInt - s := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[2] - p := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVLload { + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64XORLconst { break } - j := x1.AuxInt - s2 := x1.Aux - _ = x1.Args[1] - p2 := x1.Args[0] - mem := x1.Args[1] - mem2 := v.Args[2] - if mem2.Op != OpAMD64MOVLstore { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - if mem2.AuxInt != i-4 { + if l.AuxInt != off { break } - if mem2.Aux != s { + if l.Aux != sym { break } - _ = mem2.Args[2] - if p != mem2.Args[0] { + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { break } - x2 := mem2.Args[1] - if x2.Op != OpAMD64MOVLload { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - if x2.AuxInt != j-4 { + v.reset(OpAMD64XORLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64BTCLconst { break } - if x2.Aux != s2 { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - _ = x2.Args[1] - if p2 != x2.Args[0] { + if l.AuxInt != off { break } - if mem != x2.Args[1] { + if l.Aux != sym { break } - if mem != mem2.Args[2] { + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { break } - if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && 
clobber(x2) && clobber(mem2)) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = j - 4 - v0.Aux = s2 - v0.AddArg(p2) - v0.AddArg(mem) - v.AddArg(v0) + v.reset(OpAMD64BTCLconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64BTRLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { break } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { + if l.AuxInt != off { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { + if l.Aux != sym { break } - 
v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64BTRLconstmodify) + v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) - // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { off := v.AuxInt sym := v.Aux _ = v.Args[2] ptr := v.Args[0] a := v.Args[1] - if a.Op != OpAMD64ADDLconst { + if a.Op != OpAMD64BTSLconst { break } c := a.AuxInt @@ -14296,10 +17656,10 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { if mem != v.Args[2] { break } - if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } - v.reset(OpAMD64ADDLconstmodify) + v.reset(OpAMD64BTSLconstmodify) v.AuxInt = makeValAndOff(c, off) v.Aux = sym v.AddArg(ptr) @@ -14491,7 +17851,41 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { v.AuxInt = ValAndOff(a).Off() v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p 
mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + a := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVLstoreconst { + break + } + c := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64) v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 v.AddArg(v0) v.AddArg(mem) @@ -15516,6 +18910,10 @@ func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: x @@ -15748,6 +19146,24 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { v.AddArg(val) return true } + // match: (MOVQload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVQconst [int64(read64(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64(read64(sym, off, config.BigEndian)) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { @@ -16253,8 +19669,682 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ADDQmodify [off] {sym} ptr x 
mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDQload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ANDQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDQload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { + // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem) + // cond: y.Uses==1 && clobber(y) + // result: (ORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORQload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem) + // cond: 
y.Uses==1 && clobber(y) + // result: (XORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORQload { + break + } + if y.AuxInt != off { + break + } + if y.Aux != sym { + break + } + _ = y.Args[2] + x := y.Args[0] + if ptr != y.Args[1] { + break + } + mem := y.Args[2] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && clobber(y)) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ADDQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ADDQ { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 
&& clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ADDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (SUBQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SUBQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64SUBQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ANDQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := 
v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ANDQ { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ANDQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (ORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64ORQ { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && 
clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64ORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool { + // match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (XORQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64XORQ { + break + } + _ = y.Args[1] + x := y.Args[0] + l := y.Args[1] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64XORQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // 
result: (BTCQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64BTCQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTCQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (BTRQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64BTRQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTRQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem) + // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) + // result: (BTSQmodify [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64BTSQ { + break + } + _ = y.Args[1] + l := y.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + if ptr != l.Args[0] { + break + } + mem := l.Args[1] + x := 
y.Args[1] + if mem != v.Args[2] { + break + } + if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) { + break + } + v.reset(OpAMD64BTSQmodify) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) - // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) // result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) for { off := v.AuxInt @@ -16282,7 +20372,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { if mem != v.Args[2] { break } - if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { break } v.reset(OpAMD64ADDQconstmodify) @@ -16292,6 +20382,243 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ANDQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64ANDQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return 
true + } + // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ORQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64ORQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64XORQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64XORQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && 
validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64BTCQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64BTCQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64BTRQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64BTRQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool { + // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) + // result: (BTSQconstmodify {sym} 
[makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64BTSQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) { + break + } + v.reset(OpAMD64BTSQconstmodify) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) // cond: // result: (MOVSDstore [off] {sym} ptr val mem) @@ -16477,7 +20804,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { v.AuxInt = ValAndOff(c2).Off() v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128) v0.AuxInt = 0 v.AddArg(v0) v.AddArg(mem) @@ -18300,7 +22627,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18326,7 +22653,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18352,7 +22679,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18479,7 +22806,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, 
v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18505,7 +22832,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18531,7 +22858,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) + v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18540,6 +22867,19 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVWQZX x) + // cond: zeroUpper48Bits(x,3) + // result: x + for { + x := v.Args[0] + if !(zeroUpper48Bits(x, 3)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) @@ -18640,6 +22980,10 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVWQZX x) @@ -18844,6 +23188,24 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = int64(read16(sym, off, config.BigEndian)) + return true + } return false } func 
rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { @@ -19225,6 +23587,30 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validOff(off) + // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -19385,51 +23771,6 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVWstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - _ = x.Args[2] - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } return false } func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { @@ -19437,6 +23778,51 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ + // match: (MOVWstore [i] {s} p 
(SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + if v_1.AuxInt != 16 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVWstore { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + _ = x.Args[2] + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVLstore [i-2] {s} p w0 mem) @@ -19595,7 +23981,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { v.AuxInt = i - 2 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32) v0.AuxInt = j - 2 v0.Aux = s2 v0.AddArg(p2) @@ -19819,6 +24205,37 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + a := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVWstoreconst { + break + } + c := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = 
makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) @@ -21746,7 +26163,7 @@ func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool { } func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -21760,7 +26177,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSDload) @@ -21772,7 +26189,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { return true } // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSDload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -21786,7 +26203,7 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSDload) @@ -21883,7 +26300,7 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64MULSD) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64) v0.AddArg(y) v.AddArg(v0) return true @@ -21892,7 +26309,7 @@ func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { // match: (MULSS x 
l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -21906,7 +26323,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSSload) @@ -21918,7 +26335,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { return true } // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (MULSSload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -21932,7 +26349,7 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64MULSSload) @@ -22029,7 +26446,7 @@ func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool { y := v_2.Args[1] v.reset(OpAMD64MULSS) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) + v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32) v0.AddArg(y) v.AddArg(v0) return true @@ -25558,7 +29975,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -25607,7 +30024,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -25656,7 +30073,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLload, typ.UInt32) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -25705,7 +30122,7 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -25763,12 +30180,12 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -25827,12 +30244,12 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -25891,12 +30308,12 @@ func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -25962,12 +30379,12 @@ func rewriteValueAMD64_OpAMD64ORL_60(v 
*Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -28006,11 +32423,11 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28058,11 +32475,11 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28124,10 +32541,10 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28189,10 +32606,10 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v1 := 
b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28249,14 +32666,14 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28316,14 +32733,14 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16) v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28383,14 +32800,14 @@ func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s 
v3.AddArg(p) @@ -28457,14 +32874,14 @@ func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) + v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16) v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -30665,7 +35082,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { return true } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -30679,7 +35096,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { _ = l.Args[1] ptr := l.Args[0] mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORLload) @@ -30691,7 +35108,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { return true } // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) + // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) for { _ = v.Args[1] @@ -30705,7 +35122,7 @@ func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool { ptr := l.Args[0] mem := l.Args[1] x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { + if !(canMergeLoadClobber(v, l, x) && clobber(l)) { break } v.reset(OpAMD64ORLload) @@ -30737,6 +35154,38 @@ func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (ORLconst [c] (ORLconst [d] x)) + // cond: + // result: (ORLconst [c | d] x) + for { + c 
:= v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64ORLconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpAMD64ORLconst) + v.AuxInt = c | d + v.AddArg(x) + return true + } + // match: (ORLconst [c] (BTSLconst [d] x)) + // cond: + // result: (ORLconst [c | 1< 8 && s < 16 + // cond: s == 11 || s >= 13 && s <= 15 // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { s := v.AuxInt @@ -57595,7 +62832,7 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { dst := v.Args[0] src := v.Args[1] mem := v.Args[2] - if !(s > 8 && s < 16) { + if !(s == 11 || s >= 13 && s <= 15) { break } v.reset(OpAMD64MOVQstore) @@ -57680,6 +62917,15 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { v.AddArg(v2) return true } + return false +} +func rewriteValueAMD64_OpMove_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ // match: (Move [s] dst src mem) // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) @@ -57890,13 +63136,13 @@ func rewriteValueAMD64_OpNeg32F_0(v *Value) bool { _ = typ // match: (Neg32F x) // cond: - // result: (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])) + // result: (PXOR x (MOVSSconst [auxFrom32F(float32(math.Copysign(0, -1)))])) for { x := v.Args[0] v.reset(OpAMD64PXOR) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = f2i(math.Copysign(0, -1)) + v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1))) v.AddArg(v0) return true } @@ -57919,13 +63165,13 @@ func rewriteValueAMD64_OpNeg64F_0(v *Value) bool { _ = typ // match: (Neg64F x) // cond: - // result: (PXOR x (MOVSDconst [f2i(math.Copysign(0, -1))])) + // result: (PXOR x (MOVSDconst [auxFrom64F(math.Copysign(0, -1))])) for { x := v.Args[0] v.reset(OpAMD64PXOR) v.AddArg(x) v0 := 
b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = f2i(math.Copysign(0, -1)) + v0.AuxInt = auxFrom64F(math.Copysign(0, -1)) v.AddArg(v0) return true } @@ -58313,6 +63559,62 @@ func rewriteValueAMD64_OpPopCount8_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpRotateLeft16_0(v *Value) bool { + // match: (RotateLeft16 a b) + // cond: + // result: (ROLW a b) + for { + _ = v.Args[1] + a := v.Args[0] + b := v.Args[1] + v.reset(OpAMD64ROLW) + v.AddArg(a) + v.AddArg(b) + return true + } +} +func rewriteValueAMD64_OpRotateLeft32_0(v *Value) bool { + // match: (RotateLeft32 a b) + // cond: + // result: (ROLL a b) + for { + _ = v.Args[1] + a := v.Args[0] + b := v.Args[1] + v.reset(OpAMD64ROLL) + v.AddArg(a) + v.AddArg(b) + return true + } +} +func rewriteValueAMD64_OpRotateLeft64_0(v *Value) bool { + // match: (RotateLeft64 a b) + // cond: + // result: (ROLQ a b) + for { + _ = v.Args[1] + a := v.Args[0] + b := v.Args[1] + v.reset(OpAMD64ROLQ) + v.AddArg(a) + v.AddArg(b) + return true + } +} +func rewriteValueAMD64_OpRotateLeft8_0(v *Value) bool { + // match: (RotateLeft8 a b) + // cond: + // result: (ROLB a b) + for { + _ = v.Args[1] + a := v.Args[0] + b := v.Args[1] + v.reset(OpAMD64ROLB) + v.AddArg(a) + v.AddArg(b) + return true + } +} func rewriteValueAMD64_OpRound32F_0(v *Value) bool { // match: (Round32F x) // cond: @@ -59808,6 +65110,96 @@ func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool { func rewriteValueAMD64_OpSelect0_0(v *Value) bool { b := v.Block _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Select0 (Mul64uover x y)) + // cond: + // result: (Select0 (MULQU x y)) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Select0 (Mul32uover x y)) + // cond: + 
// result: (Select0 (MULLU x y)) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSelect0) + v.Type = typ.UInt32 + v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Select0 (Add64carry x y c)) + // cond: + // result: (Select0 (ADCQ x y (Select1 (NEGLflags c)))) + for { + v_0 := v.Args[0] + if v_0.Op != OpAdd64carry { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select0 (Sub64borrow x y c)) + // cond: + // result: (Select0 (SBBQ x y (Select1 (NEGLflags c)))) + for { + v_0 := v.Args[0] + if v_0.Op != OpSub64borrow { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v2.AddArg(c) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } // match: (Select0 (AddTupleFirst32 val tuple)) // cond: // result: (ADDL val (Select0 tuple)) @@ -59849,6 +65241,148 @@ func rewriteValueAMD64_OpSelect0_0(v *Value) bool { return false } func rewriteValueAMD64_OpSelect1_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Select1 (Mul64uover x y)) + // cond: + // result: (SETO 
(Select1 (MULQU x y))) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul64uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Mul32uover x y)) + // cond: + // result: (SETO (Select1 (MULLU x y))) + for { + v_0 := v.Args[0] + if v_0.Op != OpMul32uover { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpAMD64SETO) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags)) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Add64carry x y c)) + // cond: + // result: (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) + for { + v_0 := v.Args[0] + if v_0.Op != OpAdd64carry { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (Sub64borrow x y c)) + // cond: + // result: (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) + for { + v_0 := v.Args[0] + if v_0.Op != OpSub64borrow { + break + } + _ = v_0.Args[2] + x := v_0.Args[0] + y := v_0.Args[1] + c := v_0.Args[2] + v.reset(OpAMD64NEGQ) + v.Type = typ.UInt64 + v0 := 
b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v2.AddArg(y) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags)) + v4.AddArg(c) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Select1 (NEGLflags (MOVQconst [0]))) + // cond: + // result: (FlagEQ) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst { + break + } + if v_0_0.AuxInt != 0 { + break + } + v.reset(OpAMD64FlagEQ) + return true + } + // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64NEGLflags { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpAMD64SBBQcarrymask { + break + } + x := v_0_0_0.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (Select1 (AddTupleFirst32 _ tuple)) // cond: // result: (Select1 tuple) @@ -62013,6 +67547,20 @@ func rewriteBlockAMD64(b *Block) bool { b.Aux = nil return true } + // match: (If (SETO cmp) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != OpAMD64SETO { + break + } + cmp := v.Args[0] + b.Kind = BlockAMD64OS + b.SetControl(cmp) + b.Aux = nil + return true + } // match: (If (SETGF cmp) yes no) // cond: // result: (UGT cmp yes no) @@ -62770,6 +68318,58 @@ func rewriteBlockAMD64(b *Block) bool { b.Aux = nil return true } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != OpAMD64TESTB { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAMD64SETO { + break + } + cmp := 
v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETO { + break + } + if cmp != v_1.Args[0] { + break + } + b.Kind = BlockAMD64OS + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no) + // cond: + // result: (OS cmp yes no) + for { + v := b.Control + if v.Op != OpAMD64TESTB { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAMD64SETO { + break + } + cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETO { + break + } + if cmp != v_1.Args[0] { + break + } + b.Kind = BlockAMD64OS + b.SetControl(cmp) + b.Aux = nil + return true + } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) // cond: !config.nacl // result: (ULT (BTL x y)) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 5e9ce5c96c11b..4fc7fdfbe1839 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -2850,6 +2852,20 @@ func rewriteValueARM_OpARMADDconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (ADDconst [c] x) + // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // result: (SUBconst [int64(int32(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + break + } + v.reset(OpARMSUBconst) + v.AuxInt = int64(int32(-c)) + v.AddArg(x) + return true + } // match: (ADDconst [c] (MOVWconst [d])) // cond: // result: (MOVWconst [int64(int32(c+d))]) @@ -3670,6 +3686,20 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { 
v.AddArg(x) return true } + // match: (ANDconst [c] x) + // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // result: (BICconst [int64(int32(^uint32(c)))] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + break + } + v.reset(OpARMBICconst) + v.AuxInt = int64(int32(^uint32(c))) + v.AddArg(x) + return true + } // match: (ANDconst [c] (MOVWconst [d])) // cond: // result: (MOVWconst [c&d]) @@ -4243,6 +4273,20 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (BICconst [c] x) + // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // result: (ANDconst [int64(int32(^uint32(c)))] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int64(int32(^uint32(c))) + v.AddArg(x) + return true + } // match: (BICconst [c] (MOVWconst [d])) // cond: // result: (MOVWconst [d&^c]) @@ -6841,6 +6885,24 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int64(read8(sym, off))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int64(read8(sym, off)) + return true + } return false } func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { @@ -7911,6 +7973,24 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVHUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int64(read16(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != 
OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int64(read16(sym, off, config.BigEndian)) + return true + } return false } func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { @@ -8755,6 +8835,24 @@ func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVWconst [int64(int32(read32(sym, off, config.BigEndian)))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(read32(sym, off, config.BigEndian))) + return true + } return false } func rewriteValueARM_OpARMMOVWloadidx_0(v *Value) bool { @@ -15243,6 +15341,20 @@ func rewriteValueARM_OpARMSUBconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (SUBconst [c] x) + // cond: objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // result: (ANDconst [int64(int32(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(objabi.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = int64(int32(-c)) + v.AddArg(x) + return true + } // match: (SUBconst [c] (MOVWconst [d])) // cond: // result: (MOVWconst [int64(int32(d-c))]) @@ -22200,8 +22312,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUB x y)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 // result: (EQ (CMP x y) yes no) for { v := b.Control @@ -22211,13 +22323,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUB { + l := v.Args[0] + if l.Op != OpARMSUB { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := 
v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -22226,8 +22341,41 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBconst [c] x)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 // result: (EQ (CMPconst [c] x) yes no) for { v := b.Control @@ -22237,12 +22385,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -22251,8 +22402,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftLL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftLL x y [c]) yes no) for { v := b.Control @@ -22262,14 +22413,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftLL { + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c 
:= v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c @@ -22279,8 +22433,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftRL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftRL x y [c]) yes no) for { v := b.Control @@ -22290,14 +22444,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRL { + l := v.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c @@ -22307,8 +22464,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftRA x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftRA x y [c]) yes no) for { v := b.Control @@ -22318,14 +22475,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRA { + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c @@ -22335,8 +22495,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftLLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftLLreg x y z) yes no) 
for { v := b.Control @@ -22346,14 +22506,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftLLreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -22363,8 +22526,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftRLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftRLreg x y z) yes no) for { v := b.Control @@ -22374,14 +22537,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRLreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -22391,8 +22557,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (SUBshiftRAreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMPshiftRAreg x y z) yes no) for { v := b.Control @@ -22402,14 +22568,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRAreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := 
b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -22419,8 +22588,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADD x y)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 // result: (EQ (CMN x y) yes no) for { v := b.Control @@ -22430,13 +22599,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADD { + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) v0.AddArg(x) @@ -22445,8 +22617,41 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDconst [c] x)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (EQ (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMEQ + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 // result: (EQ (CMNconst [c] x) yes no) for { v := b.Control @@ -22456,12 +22661,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, 
OpARMCMNconst, types.TypeFlags) v0.AuxInt = c @@ -22470,8 +22678,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftLL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftLL x y [c]) yes no) for { v := b.Control @@ -22481,14 +22689,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftLL { + l := v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c @@ -22498,8 +22709,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftRL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftRL x y [c]) yes no) for { v := b.Control @@ -22509,14 +22720,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRL { + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c @@ -22526,8 +22740,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftRA x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftRA x y [c]) yes no) for { v := b.Control @@ -22537,14 +22751,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := 
v.Args[0] - if v_0.Op != OpARMADDshiftRA { + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c @@ -22554,8 +22771,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftLLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftLLreg x y z) yes no) for { v := b.Control @@ -22565,14 +22782,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftLLreg { + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -22582,8 +22802,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftRLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftRLreg x y z) yes no) for { v := b.Control @@ -22593,14 +22813,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRLreg { + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -22610,8 +22833,8 @@ func rewriteBlockARM(b *Block) bool { 
b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ADDshiftRAreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (CMNshiftRAreg x y z) yes no) for { v := b.Control @@ -22621,14 +22844,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRAreg { + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -22638,8 +22864,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (AND x y)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 // result: (EQ (TST x y) yes no) for { v := b.Control @@ -22649,13 +22875,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMAND { + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) v0.AddArg(x) @@ -22664,8 +22893,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 // result: (EQ (TSTconst [c] x) yes no) for { v := b.Control @@ -22675,12 +22904,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := 
v_0.Args[0] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c @@ -22689,8 +22921,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftLL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftLL x y [c]) yes no) for { v := b.Control @@ -22700,14 +22932,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftLL { + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c @@ -22717,8 +22952,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftRL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftRL x y [c]) yes no) for { v := b.Control @@ -22728,14 +22963,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRL { + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c @@ -22745,8 +22983,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftRA x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftRA x y [c]) yes no) for { v := b.Control @@ -22756,14 +22994,17 @@ func 
rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRA { + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c @@ -22773,8 +23014,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftLLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftLLreg x y z) yes no) for { v := b.Control @@ -22784,14 +23025,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftLLreg { + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -22801,8 +23045,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftRLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftRLreg x y z) yes no) for { v := b.Control @@ -22812,14 +23056,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRLreg { + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) 
v0.AddArg(x) @@ -22829,8 +23076,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (ANDshiftRAreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TSTshiftRAreg x y z) yes no) for { v := b.Control @@ -22840,14 +23087,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRAreg { + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -22857,8 +23107,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XOR x y)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 // result: (EQ (TEQ x y) yes no) for { v := b.Control @@ -22868,13 +23118,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXOR { + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg(x) @@ -22883,8 +23136,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORconst [c] x)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 // result: (EQ (TEQconst [c] x) yes no) for { v := b.Control @@ -22894,12 +23147,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORconst { + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c := l.AuxInt + x := 
l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c @@ -22908,8 +23164,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftLL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftLL x y [c]) yes no) for { v := b.Control @@ -22919,14 +23175,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftLL { + l := v.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c @@ -22936,8 +23195,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftRL x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftRL x y [c]) yes no) for { v := b.Control @@ -22947,14 +23206,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRL { + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c @@ -22964,8 +23226,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftRA x y [c])) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftRA x y [c]) yes no) 
for { v := b.Control @@ -22975,14 +23237,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRA { + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c @@ -22992,8 +23257,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftLLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftLLreg x y z) yes no) for { v := b.Control @@ -23003,14 +23268,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftLLreg { + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -23020,8 +23288,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftRLreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftRLreg x y z) yes no) for { v := b.Control @@ -23031,14 +23299,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRLreg { + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := 
b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -23048,8 +23319,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] (XORshiftRAreg x y z)) yes no) - // cond: + // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (EQ (TEQshiftRAreg x y z) yes no) for { v := b.Control @@ -23059,14 +23330,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRAreg { + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMEQ v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -23158,407 +23432,4559 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - case BlockARMGT: - // match: (GT (FlagEQ) yes no) - // cond: - // result: (First nil no yes) + // match: (GE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (GE (CMP x y) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) - b.Aux = nil - b.swapSuccessors() - return true - } - // match: (GT (FlagLT_ULT) yes no) - // cond: - // result: (First nil no yes) - for { - v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.AuxInt != 0 { break } - b.Kind = BlockFirst - b.SetControl(nil) - b.Aux = nil - b.swapSuccessors() - return true - } - // match: (GT (FlagLT_UGT) yes no) - // cond: - // result: (First nil no yes) - for { - v := b.Control - if v.Op != OpARMFlagLT_UGT { + l := v.Args[0] + if l.Op != OpARMSUB { break } - b.Kind = BlockFirst - b.SetControl(nil) - b.Aux = nil - b.swapSuccessors() - return true - } - // match: (GT (FlagGT_ULT) yes no) - // cond: - // result: (First nil yes no) - for { - v := b.Control - if v.Op != 
OpARMFlagGT_ULT { + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - b.Kind = BlockFirst - b.SetControl(nil) + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (GT (FlagGT_UGT) yes no) - // cond: - // result: (First nil yes no) + // match: (GE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (GE (CMP a (MUL x y)) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) - b.Aux = nil - return true - } - // match: (GT (InvertFlags cmp) yes no) - // cond: - // result: (LT cmp yes no) - for { - v := b.Control - if v.Op != OpARMInvertFlags { + if v.AuxInt != 0 { break } - cmp := v.Args[0] - b.Kind = BlockARMLT - b.SetControl(cmp) + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - case BlockIf: - // match: (If (Equal cc) yes no) - // cond: - // result: (EQ cc yes no) + // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GE (CMPconst [c] x) yes no) for { v := b.Control - if v.Op != OpARMEqual { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMEQ - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (NotEqual cc) yes no) - // cond: - // result: (NE 
cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftLL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMNotEqual { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMNE - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessThan cc) yes no) - // cond: - // result: (LT cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftRL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMLessThan { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMLT - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessThanU cc) yes no) - // cond: - // result: (ULT cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftRA x y [c]) yes no) for { v := b.Control - if v.Op != OpARMLessThanU { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMULT - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, 
types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessEqual cc) yes no) - // cond: - // result: (LE cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMLessEqual { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMLE - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (LessEqualU cc) yes no) - // cond: - // result: (ULE cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMLessEqualU { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMULE - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (GreaterThan cc) yes no) - // cond: - // result: (GT cc yes no) + // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMPshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMGreaterThan { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMGT - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != 
OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (GreaterThanU cc) yes no) - // cond: - // result: (UGT cc yes no) + // match: (GE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (GE (CMN x y) yes no) for { v := b.Control - if v.Op != OpARMGreaterThanU { + if v.Op != OpARMCMPconst { break } - cc := v.Args[0] - b.Kind = BlockARMUGT - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (GreaterEqual cc) yes no) - // cond: - // result: (GE cc yes no) + // match: (GE (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (GE (CMN a (MUL x y)) yes no) for { v := b.Control - if v.Op != OpARMGreaterEqual { + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { break } - cc := v.Args[0] b.Kind = BlockARMGE - b.SetControl(cc) + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - // match: (If (GreaterEqualU cc) yes no) - // cond: - // result: (UGE cc yes no) + // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GE (CMNconst [c] x) yes no) for { v := b.Control - if v.Op != OpARMGreaterEqualU { + if v.Op != OpARMCMPconst { 
break } - cc := v.Args[0] - b.Kind = BlockARMUGE - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (If cond yes no) - // cond: - // result: (NE (CMPconst [0] cond) yes no) + // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftLL x y [c]) yes no) for { v := b.Control - _ = v - cond := b.Control - b.Kind = BlockARMNE - v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(cond) + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) b.SetControl(v0) b.Aux = nil return true } - case BlockARMLE: - // match: (LE (FlagEQ) yes no) - // cond: - // result: (First nil yes no) + // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftRL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagLT_ULT) yes no) - // cond: - // result: (First nil yes no) + // match: (GE (CMPconst [0] 
l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftRA x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagLT_UGT) yes no) - // cond: - // result: (First nil yes no) + // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } - // match: (LE (FlagGT_ULT) yes no) - // cond: - // result: (First nil no yes) + // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + 
b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LE (FlagGT_UGT) yes no) - // cond: - // result: (First nil no yes) + // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (CMNshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LE (InvertFlags cmp) yes no) - // cond: - // result: (GE cmp yes no) + // match: (GE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (GE (TST x y) yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - cmp := v.Args[0] b.Kind = BlockARMGE - b.SetControl(cmp) + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - case BlockARMLT: - // match: (LT (FlagEQ) yes no) - // cond: - // result: (First nil no yes) + // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GE (TSTconst [c] x) yes no) for { v := b.Control - if v.Op != OpARMFlagEQ { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, 
OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (FlagLT_ULT) yes no) - // cond: - // result: (First nil yes no) + // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TSTshiftLL x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_ULT { + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TSTshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TSTshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes 
no) + // cond: l.Uses==1 + // result: (GE (TSTshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (TSTshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (TSTshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (GE (TEQ x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { 
+ break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GE (TEQconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + 
break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GE (TEQshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, 
types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockARMGT: + // match: (GT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GT (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (GT (FlagGT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GT (FlagGT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (GT (InvertFlags cmp) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMLT + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (GT (CMP x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUB { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + 
b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (GT (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GT (CMPconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != 
OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + 
b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (CMPshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (GT (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GT (CMNconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMNshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + 
} + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMNshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (CMNshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (CMNshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT 
(CMNshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (CMNshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (GT (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (GT (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) 
+ v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GT (TSTconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + 
if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TSTshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, 
types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (GT (TEQ x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (GT (TEQconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = 
l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT 
(CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (GT (TEQshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMGT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockIf: + // match: (If (Equal cc) yes no) + // cond: + // result: (EQ cc yes no) + for { + v := b.Control + if v.Op != OpARMEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMEQ + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (NotEqual cc) yes no) + // cond: + // result: (NE cc yes no) + for { + v := b.Control + if v.Op != OpARMNotEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMNE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessThan cc) yes no) + // cond: + // result: (LT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThan { + break + } + cc := v.Args[0] + b.Kind = BlockARMLT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessThanU cc) yes no) + // cond: + // result: (ULT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThanU { + break + } + cc := v.Args[0] + b.Kind = BlockARMULT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessEqual cc) yes no) + // cond: + // result: (LE cc yes no) + for { + v := b.Control + if v.Op != OpARMLessEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMLE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (LessEqualU cc) yes no) + // cond: + // result: (ULE cc yes no) + for { + v := b.Control + if v.Op != OpARMLessEqualU { + break + } + cc := v.Args[0] + b.Kind = BlockARMULE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: 
(If (GreaterThan cc) yes no) + // cond: + // result: (GT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThan { + break + } + cc := v.Args[0] + b.Kind = BlockARMGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterThanU cc) yes no) + // cond: + // result: (UGT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThanU { + break + } + cc := v.Args[0] + b.Kind = BlockARMUGT + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterEqual cc) yes no) + // cond: + // result: (GE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqual { + break + } + cc := v.Args[0] + b.Kind = BlockARMGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If (GreaterEqualU cc) yes no) + // cond: + // result: (UGE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqualU { + break + } + cc := v.Args[0] + b.Kind = BlockARMUGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If cond yes no) + // cond: + // result: (NE (CMPconst [0] cond) yes no) + for { + v := b.Control + _ = v + cond := b.Control + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(cond) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockARMLE: + // match: (LE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagLT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagLT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagGT_ULT) yes no) + // cond: + // 
result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LE (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LE (InvertFlags cmp) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMGE + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (LE (CMP x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUB { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (LE (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LE (CMPconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + 
} + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // 
match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMPshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (LE (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + 
x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (LE (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LE (CMNconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (CMNshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: 
(LE (CMNshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (CMNshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMNshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMNshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + 
break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (CMNshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (LE (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LE (TSTconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break 
+ } + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + 
v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (TSTshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (LE (TEQ x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LE (TEQconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c 
:= l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TEQshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TEQshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LE (TEQshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // 
cond: l.Uses==1 + // result: (LE (TEQshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (TEQshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LE (TEQshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLE + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockARMLT: + // match: (LT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LT (FlagLT_ULT) yes no) + // cond: + // result: (First nil yes no) + for 
{ + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LT (FlagLT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LT (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LT (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LT (InvertFlags cmp) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARMGT + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 + // result: (LT (CMP x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUB { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (LT (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LT (CMPconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftRA x y [c]) yes no) + for { + v := 
b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMPshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := 
b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 + // result: (LT (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (LT (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LT (CMNconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := 
v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + 
b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (CMNshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 + // result: (LT (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LT (TSTconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x 
:= l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (TSTshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (TSTshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (TSTshiftRA x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + 
// result: (LT (TSTshiftLLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (TSTshiftRLreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (TSTshiftRAreg x y z) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 + // result: (LT (TEQ x y) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = 
BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 + // result: (LT (TEQconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (TEQshiftLL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 + // result: (LT (TEQshiftRL x y [c]) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - b.Kind = BlockFirst - b.SetControl(nil) + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LT (FlagLT_UGT) yes no) - // cond: - // result: (First nil yes no) + // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 + // result: 
(LT (TEQshiftRA x y [c]) yes no) for { v := b.Control - if v.Op != OpARMFlagLT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (LT (FlagGT_ULT) yes no) - // cond: - // result: (First nil no yes) + // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (TEQshiftLLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_ULT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (FlagGT_UGT) yes no) - // cond: - // result: (First nil no yes) + // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (TEQshiftRLreg x y z) yes no) for { v := b.Control - if v.Op != OpARMFlagGT_UGT { + if v.Op != OpARMCMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return 
true } - // match: (LT (InvertFlags cmp) yes no) - // cond: - // result: (GT cmp yes no) + // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 + // result: (LT (TEQshiftRAreg x y z) yes no) for { v := b.Control - if v.Op != OpARMInvertFlags { + if v.Op != OpARMCMPconst { break } - cmp := v.Args[0] - b.Kind = BlockARMGT - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMLT + v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v0.AddArg(z) + b.SetControl(v0) b.Aux = nil return true } @@ -23853,8 +28279,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUB x y)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUB x y)) yes no) + // cond: l.Uses==1 // result: (NE (CMP x y) yes no) for { v := b.Control @@ -23864,13 +28290,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUB { + l := v.Args[0] + if l.Op != OpARMSUB { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) v0.AddArg(x) @@ -23879,8 +28308,41 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBconst [c] x)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(MULS x y a)) yes no) + // cond: l.Uses==1 + // result: (NE (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULS { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMNE + v0 := 
b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no) + // cond: l.Uses==1 // result: (NE (CMPconst [c] x) yes no) for { v := b.Control @@ -23890,12 +28352,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBconst { + l := v.Args[0] + if l.Op != OpARMSUBconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags) v0.AuxInt = c @@ -23904,8 +28369,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftLL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftLL x y [c]) yes no) for { v := b.Control @@ -23915,14 +28380,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftLL { + l := v.Args[0] + if l.Op != OpARMSUBshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags) v0.AuxInt = c @@ -23932,8 +28400,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftRL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftRL x y [c]) yes no) for { v := b.Control @@ -23943,14 +28411,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRL { + l := v.Args[0] + if l.Op != 
OpARMSUBshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags) v0.AuxInt = c @@ -23960,8 +28431,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftRA x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftRA x y [c]) yes no) for { v := b.Control @@ -23971,14 +28442,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRA { + l := v.Args[0] + if l.Op != OpARMSUBshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags) v0.AuxInt = c @@ -23988,8 +28462,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftLLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftLLreg x y z) yes no) for { v := b.Control @@ -23999,14 +28473,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftLLreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -24016,8 +28493,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftRLreg x y z)) yes 
no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftRLreg x y z) yes no) for { v := b.Control @@ -24027,14 +28504,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRLreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -24044,8 +28524,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (SUBshiftRAreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMPshiftRAreg x y z) yes no) for { v := b.Control @@ -24055,14 +28535,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMSUBshiftRAreg { + l := v.Args[0] + if l.Op != OpARMSUBshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -24072,8 +28555,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADD x y)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADD x y)) yes no) + // cond: l.Uses==1 // result: (NE (CMN x y) yes no) for { v := b.Control @@ -24083,13 +28566,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADD { + l := v.Args[0] + if l.Op != OpARMADD { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := 
v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) v0.AddArg(x) @@ -24098,8 +28584,41 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDconst [c] x)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(MULA x y a)) yes no) + // cond: l.Uses==1 + // result: (NE (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + l := v.Args[0] + if l.Op != OpARMMULA { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + a := l.Args[2] + if !(l.Uses == 1) { + break + } + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Pos, OpARMCMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARMMUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no) + // cond: l.Uses==1 // result: (NE (CMNconst [c] x) yes no) for { v := b.Control @@ -24109,12 +28628,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + l := v.Args[0] + if l.Op != OpARMADDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNconst, types.TypeFlags) v0.AuxInt = c @@ -24123,8 +28645,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftLL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftLL x y [c]) yes no) for { v := b.Control @@ -24134,14 +28656,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftLL { + l := v.Args[0] + if l.Op != OpARMADDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 
1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftLL, types.TypeFlags) v0.AuxInt = c @@ -24151,8 +28676,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftRL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftRL x y [c]) yes no) for { v := b.Control @@ -24162,14 +28687,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRL { + l := v.Args[0] + if l.Op != OpARMADDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftRL, types.TypeFlags) v0.AuxInt = c @@ -24179,8 +28707,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftRA x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftRA x y [c]) yes no) for { v := b.Control @@ -24190,14 +28718,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRA { + l := v.Args[0] + if l.Op != OpARMADDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftRA, types.TypeFlags) v0.AuxInt = c @@ -24207,8 +28738,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftLLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftLLreg 
x y z) yes no) for { v := b.Control @@ -24218,14 +28749,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftLLreg { + l := v.Args[0] + if l.Op != OpARMADDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -24235,8 +28769,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftRLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftRLreg x y z) yes no) for { v := b.Control @@ -24246,14 +28780,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRLreg { + l := v.Args[0] + if l.Op != OpARMADDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -24263,8 +28800,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ADDshiftRAreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (CMNshiftRAreg x y z) yes no) for { v := b.Control @@ -24274,14 +28811,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMADDshiftRAreg { + l := v.Args[0] + if l.Op != OpARMADDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = 
BlockARMNE v0 := b.NewValue0(v.Pos, OpARMCMNshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -24291,8 +28831,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (AND x y)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(AND x y)) yes no) + // cond: l.Uses==1 // result: (NE (TST x y) yes no) for { v := b.Control @@ -24302,13 +28842,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMAND { + l := v.Args[0] + if l.Op != OpARMAND { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTST, types.TypeFlags) v0.AddArg(x) @@ -24317,8 +28860,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no) + // cond: l.Uses==1 // result: (NE (TSTconst [c] x) yes no) for { v := b.Control @@ -24328,12 +28871,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDconst { + l := v.Args[0] + if l.Op != OpARMANDconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTconst, types.TypeFlags) v0.AuxInt = c @@ -24342,8 +28888,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftLL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TSTshiftLL x y [c]) yes no) for { v := b.Control @@ -24353,14 +28899,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftLL { + l := v.Args[0] + if l.Op != OpARMANDshiftLL { + break + } + c := l.AuxInt + _ = l.Args[1] + 
x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftLL, types.TypeFlags) v0.AuxInt = c @@ -24370,8 +28919,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftRL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TSTshiftRL x y [c]) yes no) for { v := b.Control @@ -24381,14 +28930,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRL { + l := v.Args[0] + if l.Op != OpARMANDshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftRL, types.TypeFlags) v0.AuxInt = c @@ -24398,8 +28950,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftRA x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TSTshiftRA x y [c]) yes no) for { v := b.Control @@ -24409,14 +28961,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRA { + l := v.Args[0] + if l.Op != OpARMANDshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftRA, types.TypeFlags) v0.AuxInt = c @@ -24426,8 +28981,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftLLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) + 
// cond: l.Uses==1 // result: (NE (TSTshiftLLreg x y z) yes no) for { v := b.Control @@ -24437,14 +28992,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftLLreg { + l := v.Args[0] + if l.Op != OpARMANDshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -24454,8 +29012,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftRLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (TSTshiftRLreg x y z) yes no) for { v := b.Control @@ -24465,14 +29023,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRLreg { + l := v.Args[0] + if l.Op != OpARMANDshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -24482,8 +29043,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (ANDshiftRAreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (TSTshiftRAreg x y z) yes no) for { v := b.Control @@ -24493,14 +29054,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMANDshiftRAreg { + l := v.Args[0] + if l.Op != OpARMANDshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] 
- y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTSTshiftRAreg, types.TypeFlags) v0.AddArg(x) @@ -24510,8 +29074,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XOR x y)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XOR x y)) yes no) + // cond: l.Uses==1 // result: (NE (TEQ x y) yes no) for { v := b.Control @@ -24521,13 +29085,16 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXOR { + l := v.Args[0] + if l.Op != OpARMXOR { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQ, types.TypeFlags) v0.AddArg(x) @@ -24536,8 +29103,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORconst [c] x)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no) + // cond: l.Uses==1 // result: (NE (TEQconst [c] x) yes no) for { v := b.Control @@ -24547,12 +29114,15 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORconst { + l := v.Args[0] + if l.Op != OpARMXORconst { + break + } + c := l.AuxInt + x := l.Args[0] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQconst, types.TypeFlags) v0.AuxInt = c @@ -24561,8 +29131,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftLL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftLL x y [c]) yes no) for { v := b.Control @@ -24572,14 +29142,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftLL { + l := v.Args[0] + if l.Op != OpARMXORshiftLL { 
+ break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftLL, types.TypeFlags) v0.AuxInt = c @@ -24589,8 +29162,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftRL x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftRL x y [c]) yes no) for { v := b.Control @@ -24600,14 +29173,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRL { + l := v.Args[0] + if l.Op != OpARMXORshiftRL { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftRL, types.TypeFlags) v0.AuxInt = c @@ -24617,8 +29193,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftRA x y [c])) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftRA x y [c]) yes no) for { v := b.Control @@ -24628,14 +29204,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRA { + l := v.Args[0] + if l.Op != OpARMXORshiftRA { + break + } + c := l.AuxInt + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1) { break } - c := v_0.AuxInt - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftRA, types.TypeFlags) v0.AuxInt = c @@ -24645,8 +29224,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftLLreg x y z)) yes no) - // cond: + // match: (NE 
(CMPconst [0] l:(XORshiftLLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftLLreg x y z) yes no) for { v := b.Control @@ -24656,14 +29235,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftLLreg { + l := v.Args[0] + if l.Op != OpARMXORshiftLLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftLLreg, types.TypeFlags) v0.AddArg(x) @@ -24673,8 +29255,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftRLreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftRLreg x y z) yes no) for { v := b.Control @@ -24684,14 +29266,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRLreg { + l := v.Args[0] + if l.Op != OpARMXORshiftRLreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftRLreg, types.TypeFlags) v0.AddArg(x) @@ -24701,8 +29286,8 @@ func rewriteBlockARM(b *Block) bool { b.Aux = nil return true } - // match: (NE (CMPconst [0] (XORshiftRAreg x y z)) yes no) - // cond: + // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) + // cond: l.Uses==1 // result: (NE (TEQshiftRAreg x y z) yes no) for { v := b.Control @@ -24712,14 +29297,17 @@ func rewriteBlockARM(b *Block) bool { if v.AuxInt != 0 { break } - v_0 := v.Args[0] - if v_0.Op != OpARMXORshiftRAreg { + l := v.Args[0] + if l.Op != OpARMXORshiftRAreg { + break + } + _ = l.Args[2] + x := l.Args[0] + y := l.Args[1] + z := l.Args[2] + if !(l.Uses == 
1) { break } - _ = v_0.Args[2] - x := v_0.Args[0] - y := v_0.Args[1] - z := v_0.Args[2] b.Kind = BlockARMNE v0 := b.NewValue0(v.Pos, OpARMTEQshiftRAreg, types.TypeFlags) v0.AddArg(x) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 0715a5347de22..2afd0f335ea64 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -16,7 +18,7 @@ var _ = types.TypeMem // in case not otherwise used func rewriteValueARM64(v *Value) bool { switch v.Op { case OpARM64ADD: - return rewriteValueARM64_OpARM64ADD_0(v) + return rewriteValueARM64_OpARM64ADD_0(v) || rewriteValueARM64_OpARM64ADD_10(v) || rewriteValueARM64_OpARM64ADD_20(v) case OpARM64ADDconst: return rewriteValueARM64_OpARM64ADDconst_0(v) case OpARM64ADDshiftLL: @@ -45,10 +47,18 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64BICshiftRL_0(v) case OpARM64CMN: return rewriteValueARM64_OpARM64CMN_0(v) + case OpARM64CMNW: + return rewriteValueARM64_OpARM64CMNW_0(v) case OpARM64CMNWconst: return rewriteValueARM64_OpARM64CMNWconst_0(v) case OpARM64CMNconst: return rewriteValueARM64_OpARM64CMNconst_0(v) + case OpARM64CMNshiftLL: + return rewriteValueARM64_OpARM64CMNshiftLL_0(v) + case OpARM64CMNshiftRA: + return rewriteValueARM64_OpARM64CMNshiftRA_0(v) + case OpARM64CMNshiftRL: + return rewriteValueARM64_OpARM64CMNshiftRL_0(v) case OpARM64CMP: return rewriteValueARM64_OpARM64CMP_0(v) case OpARM64CMPW: @@ -85,16 +95,26 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64FADDD_0(v) case OpARM64FADDS: return 
rewriteValueARM64_OpARM64FADDS_0(v) + case OpARM64FMOVDfpgp: + return rewriteValueARM64_OpARM64FMOVDfpgp_0(v) case OpARM64FMOVDgpfp: return rewriteValueARM64_OpARM64FMOVDgpfp_0(v) case OpARM64FMOVDload: return rewriteValueARM64_OpARM64FMOVDload_0(v) + case OpARM64FMOVDloadidx: + return rewriteValueARM64_OpARM64FMOVDloadidx_0(v) case OpARM64FMOVDstore: return rewriteValueARM64_OpARM64FMOVDstore_0(v) + case OpARM64FMOVDstoreidx: + return rewriteValueARM64_OpARM64FMOVDstoreidx_0(v) case OpARM64FMOVSload: return rewriteValueARM64_OpARM64FMOVSload_0(v) + case OpARM64FMOVSloadidx: + return rewriteValueARM64_OpARM64FMOVSloadidx_0(v) case OpARM64FMOVSstore: return rewriteValueARM64_OpARM64FMOVSstore_0(v) + case OpARM64FMOVSstoreidx: + return rewriteValueARM64_OpARM64FMOVSstoreidx_0(v) case OpARM64FMULD: return rewriteValueARM64_OpARM64FMULD_0(v) case OpARM64FMULS: @@ -127,6 +147,10 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64LessThan_0(v) case OpARM64LessThanU: return rewriteValueARM64_OpARM64LessThanU_0(v) + case OpARM64MADD: + return rewriteValueARM64_OpARM64MADD_0(v) || rewriteValueARM64_OpARM64MADD_10(v) || rewriteValueARM64_OpARM64MADD_20(v) + case OpARM64MADDW: + return rewriteValueARM64_OpARM64MADDW_0(v) || rewriteValueARM64_OpARM64MADDW_10(v) || rewriteValueARM64_OpARM64MADDW_20(v) case OpARM64MNEG: return rewriteValueARM64_OpARM64MNEG_0(v) || rewriteValueARM64_OpARM64MNEG_10(v) || rewriteValueARM64_OpARM64MNEG_20(v) case OpARM64MNEGW: @@ -233,18 +257,34 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v) case OpARM64MOVWstorezeroidx4: return rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v) + case OpARM64MSUB: + return rewriteValueARM64_OpARM64MSUB_0(v) || rewriteValueARM64_OpARM64MSUB_10(v) || rewriteValueARM64_OpARM64MSUB_20(v) + case OpARM64MSUBW: + return rewriteValueARM64_OpARM64MSUBW_0(v) || rewriteValueARM64_OpARM64MSUBW_10(v) || rewriteValueARM64_OpARM64MSUBW_20(v) case OpARM64MUL: 
return rewriteValueARM64_OpARM64MUL_0(v) || rewriteValueARM64_OpARM64MUL_10(v) || rewriteValueARM64_OpARM64MUL_20(v) case OpARM64MULW: return rewriteValueARM64_OpARM64MULW_0(v) || rewriteValueARM64_OpARM64MULW_10(v) || rewriteValueARM64_OpARM64MULW_20(v) case OpARM64MVN: return rewriteValueARM64_OpARM64MVN_0(v) + case OpARM64MVNshiftLL: + return rewriteValueARM64_OpARM64MVNshiftLL_0(v) + case OpARM64MVNshiftRA: + return rewriteValueARM64_OpARM64MVNshiftRA_0(v) + case OpARM64MVNshiftRL: + return rewriteValueARM64_OpARM64MVNshiftRL_0(v) case OpARM64NEG: return rewriteValueARM64_OpARM64NEG_0(v) + case OpARM64NEGshiftLL: + return rewriteValueARM64_OpARM64NEGshiftLL_0(v) + case OpARM64NEGshiftRA: + return rewriteValueARM64_OpARM64NEGshiftRA_0(v) + case OpARM64NEGshiftRL: + return rewriteValueARM64_OpARM64NEGshiftRL_0(v) case OpARM64NotEqual: return rewriteValueARM64_OpARM64NotEqual_0(v) case OpARM64OR: - return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) || rewriteValueARM64_OpARM64OR_30(v) + return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) || rewriteValueARM64_OpARM64OR_30(v) || rewriteValueARM64_OpARM64OR_40(v) case OpARM64ORN: return rewriteValueARM64_OpARM64ORN_0(v) case OpARM64ORNshiftLL: @@ -261,6 +301,10 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64ORshiftRA_0(v) case OpARM64ORshiftRL: return rewriteValueARM64_OpARM64ORshiftRL_0(v) + case OpARM64RORWconst: + return rewriteValueARM64_OpARM64RORWconst_0(v) + case OpARM64RORconst: + return rewriteValueARM64_OpARM64RORconst_0(v) case OpARM64SLL: return rewriteValueARM64_OpARM64SLL_0(v) case OpARM64SLLconst: @@ -276,7 +320,7 @@ func rewriteValueARM64(v *Value) bool { case OpARM64STP: return rewriteValueARM64_OpARM64STP_0(v) case OpARM64SUB: - return rewriteValueARM64_OpARM64SUB_0(v) + return rewriteValueARM64_OpARM64SUB_0(v) || rewriteValueARM64_OpARM64SUB_10(v) 
case OpARM64SUBconst: return rewriteValueARM64_OpARM64SUBconst_0(v) case OpARM64SUBshiftLL: @@ -287,10 +331,18 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64SUBshiftRL_0(v) case OpARM64TST: return rewriteValueARM64_OpARM64TST_0(v) + case OpARM64TSTW: + return rewriteValueARM64_OpARM64TSTW_0(v) case OpARM64TSTWconst: return rewriteValueARM64_OpARM64TSTWconst_0(v) case OpARM64TSTconst: return rewriteValueARM64_OpARM64TSTconst_0(v) + case OpARM64TSTshiftLL: + return rewriteValueARM64_OpARM64TSTshiftLL_0(v) + case OpARM64TSTshiftRA: + return rewriteValueARM64_OpARM64TSTshiftRA_0(v) + case OpARM64TSTshiftRL: + return rewriteValueARM64_OpARM64TSTshiftRL_0(v) case OpARM64UBFIZ: return rewriteValueARM64_OpARM64UBFIZ_0(v) case OpARM64UBFX: @@ -313,6 +365,8 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpARM64XORshiftRA_0(v) case OpARM64XORshiftRL: return rewriteValueARM64_OpARM64XORshiftRL_0(v) + case OpAbs: + return rewriteValueARM64_OpAbs_0(v) case OpAdd16: return rewriteValueARM64_OpAdd16_0(v) case OpAdd32: @@ -719,12 +773,18 @@ func rewriteValueARM64(v *Value) bool { return rewriteValueARM64_OpPopCount32_0(v) case OpPopCount64: return rewriteValueARM64_OpPopCount64_0(v) + case OpRotateLeft32: + return rewriteValueARM64_OpRotateLeft32_0(v) + case OpRotateLeft64: + return rewriteValueARM64_OpRotateLeft64_0(v) case OpRound: return rewriteValueARM64_OpRound_0(v) case OpRound32F: return rewriteValueARM64_OpRound32F_0(v) case OpRound64F: return rewriteValueARM64_OpRound64F_0(v) + case OpRoundToEven: + return rewriteValueARM64_OpRoundToEven_0(v) case OpRsh16Ux16: return rewriteValueARM64_OpRsh16Ux16_0(v) case OpRsh16Ux32: @@ -897,6 +957,189 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { v.AddArg(x) return true } + // match: (ADD a l:(MUL x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MADD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MUL { + break + } + _ = l.Args[1] + x 
:= l.Args[0] + y := l.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD l:(MUL x y) a) + // cond: l.Uses==1 && clobber(l) + // result: (MADD a x y) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != OpARM64MUL { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + a := v.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD a l:(MNEG x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MSUB a x y) + for { + _ = v.Args[1] + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MNEG { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUB) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD l:(MNEG x y) a) + // cond: l.Uses==1 && clobber(l) + // result: (MSUB a x y) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != OpARM64MNEG { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + a := v.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUB) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD a l:(MULW x y)) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MADDW a x y) + for { + _ = v.Args[1] + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MULW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADDW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD l:(MULW x y) a) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MADDW a x y) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != OpARM64MULW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + a := v.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 
&& clobber(l)) { + break + } + v.reset(OpARM64MADDW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD a l:(MNEGW x y)) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MSUBW a x y) + for { + _ = v.Args[1] + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MNEGW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUBW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD l:(MNEGW x y) a) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MSUBW a x y) + for { + _ = v.Args[1] + l := v.Args[0] + if l.Op != OpARM64MNEGW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + a := v.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUBW) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64ADD_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (ADD x (NEG y)) // cond: // result: (SUB x y) @@ -1055,583 +1298,951 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool { v.AddArg(y) return true } - return false -} -func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { - // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) - // cond: - // result: (MOVDaddr [off1+off2] {sym} ptr) + // match: (ADD (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) for { - off1 := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if v_0.Op != OpARM64SLL { break } - off2 := v_0.AuxInt - sym := v_0.Aux - ptr := v_0.Args[0] - v.reset(OpARM64MOVDaddr) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - return true - } - // match: (ADDconst [0] x) - // cond: 
- // result: x - for { - if v.AuxInt != 0 { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ADDconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [c+d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + t := v_0_1.Type + if v_0_1.AuxInt != 63 { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c + d - return true - } - // match: (ADDconst [c] (ADDconst [d] x)) - // cond: - // result: (ADDconst [c+d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ADDconst) - v.AuxInt = c + d - v.AddArg(x) - return true - } - // match: (ADDconst [c] (SUBconst [d] x)) - // cond: - // result: (ADDconst [c-d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SUBconst { + if v_1.Type != typ.UInt64 { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ADDconst) - v.AuxInt = c - d - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { - b := v.Block - _ = b - // match: (ADDshiftLL (MOVDconst [c]) x [d]) - // cond: - // result: (ADDconst [c] (SLLconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ADDconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (ADDshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (ADDconst x [int64(uint64(c)< [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: 
(RORWconst [32-c] x) - for { - t := v.Type - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if x != v.Args[1] { + if v_1_0_1_0.AuxInt != 64 { break } - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { break } - v.reset(OpARM64RORWconst) - v.AuxInt = 32 - c - v.AddArg(x) - return true - } - // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2) - // cond: - // result: (EXTRconst [64-c] x2 x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + if v_1_0_1_1.Type != t { break } - if v_0.AuxInt != 64-c { + if v_1_0_1_1.AuxInt != 63 { break } - x := v_0.Args[0] - x2 := v.Args[1] - v.reset(OpARM64EXTRconst) - v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) - return true - } - // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: (EXTRWconst [32-c] x2 x) - for { - t := v.Type - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + if y != v_1_0_1_1.Args[0] { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - x2 := v.Args[1] - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { break } - v.reset(OpARM64EXTRWconst) - v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64ADDshiftRA_0(v *Value) bool { - b := v.Block - _ = b - // match: (ADDshiftRA (MOVDconst [c]) x [d]) - // cond: - // result: (ADDconst [c] (SRAconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_1_1.AuxInt != 64 { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ADDconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, 
OpARM64SRAconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (ADDshiftRA x (MOVDconst [c]) [d]) - // cond: - // result: (ADDconst x [c>>uint64(d)]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { break } - c := v_1.AuxInt - v.reset(OpARM64ADDconst) - v.AuxInt = c >> uint64(d) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { - b := v.Block - _ = b - // match: (ADDshiftRL (MOVDconst [c]) x [d]) - // cond: - // result: (ADDconst [c] (SRLconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_1_1_0.Type != t { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ADDconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (ADDshiftRL x (MOVDconst [c]) [d]) - // cond: - // result: (ADDconst x [int64(uint64(c)>>uint64(d))]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - v.reset(OpARM64ADDconst) - v.AuxInt = int64(uint64(c) >> uint64(d)) - v.AddArg(x) - return true - } - // match: (ADDshiftRL [c] (SLLconst x [64-c]) x) - // cond: - // result: (RORconst [ c] x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_1_1_0_0.AuxInt != 64 { break } - if v_0.AuxInt != 64-c { + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { break } - x := v_0.Args[0] - if x != v.Args[1] { + if v_1_1_0_1.Type != t { break } - v.reset(OpARM64RORconst) - v.AuxInt = c + if v_1_1_0_1.AuxInt != 63 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == 
OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (ADDshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) + // match: (ADD (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) for { - t := v.Type - c := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_0.Op != OpARM64CSEL0 { break } - if v_0.AuxInt != 32-c { + if v_0.Type != typ.UInt64 { + break + } + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { + break + } + if v_0_0.Type != typ.UInt64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 64 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 63 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 64 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 63 { + break + } + if y != v_0_1_0_1.Args[0] { break } - x := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + if v_1.Op != OpARM64SLL { break } + _ 
= v_1.Args[1] if x != v_1.Args[0] { break } - if !(c < 32 && t.Size() == 4) { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { break } - v.reset(OpARM64RORWconst) - v.AuxInt = c + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) return true } return false } -func rewriteValueARM64_OpARM64AND_0(v *Value) bool { - // match: (AND x (MOVDconst [c])) - // cond: - // result: (ANDconst [c] x) +func rewriteValueARM64_OpARM64ADD_20(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { break } - c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [c]) x) - // cond: - // result: (ANDconst [c] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Type != typ.UInt64 { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ANDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AND x x) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (AND x (MVN y)) - // cond: - // result: (BIC x y) - for { - _ = v.Args[1] - x := v.Args[0] + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MVN 
{ + if v_1.Op != OpARM64CSEL0 { break } - y := v_1.Args[0] - v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (AND (MVN y) x) - // cond: - // result: (BIC x y) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { + if v_1.Type != typ.UInt64 { break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64BIC) + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + break + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 64 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 63 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 64 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 63 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) v.AddArg(x) v.AddArg(y) return true } - // match: (AND x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ANDshiftLL x0 y [c]) + // match: (ADD (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) for { _ = 
v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if v_0.Type != typ.UInt64 { break } - v.reset(OpARM64ANDshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (AND x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftLL x0 y [c]) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { break } - v.reset(OpARM64ANDshiftLL) - v.AuxInt = c - v.AddArg(x0) + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 64 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 63 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 64 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 63 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { + break + } + if v_1.Type != typ.UInt64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + 
if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) + v.AddArg(x) v.AddArg(y) return true } - // match: (AND x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ANDshiftRL x0 y [c]) + // match: (ADD (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - v.reset(OpARM64ANDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt32 { + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { + break + } + if v_1_0.Type != typ.UInt32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg { + break + } + if x != v_1_0_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 32 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 31 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := 
v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 32 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 31 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (AND x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftRL x0 y [c]) + // match: (ADD (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + if v_0.Type != typ.UInt32 { break } - v.reset(OpARM64ANDshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { + break + } + if v_0_0.Type != typ.UInt32 { + break + } + _ = v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpARM64MOVWUreg { + break + } + x := v_0_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 32 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 31 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + 
if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 32 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 31 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SLL { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (AND x0 x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ANDshiftRA x0 y [c]) + // match: (ADD (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if v_0.Type != typ.UInt32 { break } - v.reset(OpARM64ANDshiftRA) - v.AuxInt = c - v.AddArg(x0) + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { + break + } + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + break + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt32 
{ + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + break + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 32 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 31 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 32 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 31 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) v.AddArg(y) return true } - return false -} -func rewriteValueARM64_OpARM64AND_10(v *Value) bool { - // match: (AND x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ANDshiftRA x0 y [c]) + // match: (ADD (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) for { _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + if v_0.Type != typ.UInt32 { break } - 
v.reset(OpARM64ANDshiftRA) - v.AuxInt = c - v.AddArg(x0) + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 32 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 31 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 32 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 31 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { + break + } + if v_1.Type != typ.UInt32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVWUreg { + break + } + if x != v_1_0.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { - // match: (ANDconst [0] _) +func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool { + // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDaddr 
[off1+off2] {sym} ptr) for { - if v.AuxInt != 0 { + off1 := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + off2 := v_0.AuxInt + sym := v_0.Aux + ptr := v_0.Args[0] + v.reset(OpARM64MOVDaddr) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) return true } - // match: (ANDconst [-1] x) + // match: (ADDconst [0] x) // cond: // result: x for { - if v.AuxInt != -1 { + if v.AuxInt != 0 { break } x := v.Args[0] @@ -1640,9 +2251,9 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { v.AddArg(x) return true } - // match: (ANDconst [c] (MOVDconst [d])) + // match: (ADDconst [c] (MOVDconst [d])) // cond: - // result: (MOVDconst [c&d]) + // result: (MOVDconst [c+d]) for { c := v.AuxInt v_0 := v.Args[0] @@ -1651,116 +2262,181 @@ func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { } d := v_0.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = c & d + v.AuxInt = c + d return true } - // match: (ANDconst [c] (ANDconst [d] x)) + // match: (ADDconst [c] (ADDconst [d] x)) // cond: - // result: (ANDconst [c&d] x) + // result: (ADDconst [c+d] x) for { c := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + if v_0.Op != OpARM64ADDconst { break } d := v_0.AuxInt x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & d + v.reset(OpARM64ADDconst) + v.AuxInt = c + d v.AddArg(x) return true } - // match: (ANDconst [c] (MOVWUreg x)) + // match: (ADDconst [c] (SUBconst [d] x)) // cond: - // result: (ANDconst [c&(1<<32-1)] x) + // result: (ADDconst [c-d] x) for { c := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64MOVWUreg { + if v_0.Op != OpARM64SUBconst { break } + d := v_0.AuxInt x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<32 - 1) + v.reset(OpARM64ADDconst) + v.AuxInt = c - d v.AddArg(x) return true } - // match: (ANDconst [c] (MOVHUreg x)) + return false +} +func rewriteValueARM64_OpARM64ADDshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (ADDshiftLL (MOVDconst 
[c]) x [d]) // cond: - // result: (ANDconst [c&(1<<16-1)] x) + // result: (ADDconst [c] (SLLconst x [d])) for { - c := v.AuxInt + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVHUreg { + if v_0.Op != OpARM64MOVDconst { break } - x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<16 - 1) - v.AddArg(x) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ADDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (ANDconst [c] (MOVBUreg x)) + // match: (ADDshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (ANDconst [c&(1<<8-1)] x) + // result: (ADDconst x [int64(uint64(c)< [c] (UBFX [bfc] x) x) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (RORWconst [32-c] x) for { - ac := v.AuxInt + t := v.Type + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFX { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if x != v.Args[1] { + break + } + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64RORWconst) + v.AuxInt = 32 - c + v.AddArg(x) + return true + } + // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2) + // cond: + // result: (EXTRconst [64-c] x2 x) + for { + c := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64SRLconst { break } - sc := v_0.AuxInt + if v_0.AuxInt != 64-c { + break + } x := v_0.Args[0] - if !(isARM64BFMask(sc, ac, 0)) { + x2 := v.Args[1] + v.reset(OpARM64EXTRconst) + v.AuxInt = 64 - c + v.AddArg(x2) + v.AddArg(x) + return true + } + // match: (ADDshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) + for { + t := v.Type + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFX { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, 0)) + bfc := v_0.AuxInt + x := v_0.Args[0] + x2 := 
v.Args[1] + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64EXTRWconst) + v.AuxInt = 32 - c + v.AddArg(x2) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { +func rewriteValueARM64_OpARM64ADDshiftRA_0(v *Value) bool { b := v.Block _ = b - // match: (ANDshiftLL (MOVDconst [c]) x [d]) + // match: (ADDshiftRA (MOVDconst [c]) x [d]) // cond: - // result: (ANDconst [c] (SLLconst x [d])) + // result: (ADDconst [c] (SRAconst x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -1770,17 +2446,17 @@ func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARM64ANDconst) + v.reset(OpARM64ADDconst) v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) return true } - // match: (ANDshiftLL x (MOVDconst [c]) [d]) + // match: (ADDshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (ANDconst x [int64(uint64(c)<>uint64(d)]) for { d := v.AuxInt _ = v.Args[1] @@ -1790,42 +2466,19 @@ func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = int64(uint64(c) << uint64(d)) + v.reset(OpARM64ADDconst) + v.AuxInt = c >> uint64(d) v.AddArg(x) return true } - // match: (ANDshiftLL x y:(SLLconst x [c]) [d]) - // cond: c==d - // result: y - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SLLconst { - break - } - c := y.AuxInt - if x != y.Args[0] { - break - } - if !(c == d) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } return false } -func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { +func rewriteValueARM64_OpARM64ADDshiftRL_0(v *Value) bool { b := v.Block _ = b - // match: (ANDshiftRA (MOVDconst [c]) x [d]) + // match: (ADDshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (ANDconst [c] (SRAconst x 
[d])) + // result: (ADDconst [c] (SRLconst x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -1835,17 +2488,17 @@ func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARM64ANDconst) + v.reset(OpARM64ADDconst) v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) return true } - // match: (ANDshiftRA x (MOVDconst [c]) [d]) + // match: (ADDshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (ANDconst x [c>>uint64(d)]) + // result: (ADDconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt _ = v.Args[1] @@ -1855,64 +2508,70 @@ func rewriteValueARM64_OpARM64ANDshiftRA_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = c >> uint64(d) + v.reset(OpARM64ADDconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) v.AddArg(x) return true } - // match: (ANDshiftRA x y:(SRAconst x [c]) [d]) - // cond: c==d - // result: y + // match: (ADDshiftRL [c] (SLLconst x [64-c]) x) + // cond: + // result: (RORconst [ c] x) for { - d := v.AuxInt + c := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SRAconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - c := y.AuxInt - if x != y.Args[0] { + if v_0.AuxInt != 64-c { break } - if !(c == d) { + x := v_0.Args[0] + if x != v.Args[1] { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.reset(OpARM64RORconst) + v.AuxInt = c + v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { - b := v.Block - _ = b - // match: (ANDshiftRL (MOVDconst [c]) x [d]) - // cond: - // result: (ANDconst [c] (SRLconst x [d])) + // match: (ADDshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) + // cond: c < 32 && t.Size() == 4 + // result: (RORWconst [c] x) for { - d := v.AuxInt + t := v.Type + c := v.AuxInt _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != 
OpARM64SLLconst { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ANDconst) + if v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWUreg { + break + } + if x != v_1.Args[0] { + break + } + if !(c < 32 && t.Size() == 4) { + break + } + v.reset(OpARM64RORWconst) v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(x) return true } - // match: (ANDshiftRL x (MOVDconst [c]) [d]) + return false +} +func rewriteValueARM64_OpARM64AND_0(v *Value) bool { + // match: (AND x (MOVDconst [c])) // cond: - // result: (ANDconst x [int64(uint64(c)>>uint64(d))]) + // result: (ANDconst [c] x) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] @@ -1921,68 +2580,75 @@ func rewriteValueARM64_OpARM64ANDshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARM64ANDconst) - v.AuxInt = int64(uint64(c) >> uint64(d)) + v.AuxInt = c v.AddArg(x) return true } - // match: (ANDshiftRL x y:(SRLconst x [c]) [d]) - // cond: c==d - // result: y + // match: (AND (MOVDconst [c]) x) + // cond: + // result: (ANDconst [c] x) for { - d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SRLconst { - break - } - c := y.AuxInt - if x != y.Args[0] { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if !(c == d) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ANDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (AND x x) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + if x != v.Args[1] { break } v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { - // match: (BIC x (MOVDconst [c])) + // match: (AND x (MVN y)) // cond: - // result: (ANDconst [^c] x) + // result: (BIC x y) for { _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != 
OpARM64MVN { break } - c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = ^c + y := v_1.Args[0] + v.reset(OpARM64BIC) v.AddArg(x) + v.AddArg(y) return true } - // match: (BIC x x) + // match: (AND (MVN y) x) // cond: - // result: (MOVDconst [0]) + // result: (BIC x y) for { _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + v_0 := v.Args[0] + if v_0.Op != OpARM64MVN { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64BIC) + v.AddArg(x) + v.AddArg(y) return true } - // match: (BIC x0 x1:(SLLconst [c] y)) + // match: (AND x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) - // result: (BICshiftLL x0 y [c]) + // result: (ANDshiftLL x0 y [c]) for { _ = v.Args[1] x0 := v.Args[0] @@ -1995,41 +2661,41 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { if !(clobberIfDead(x1)) { break } - v.reset(OpARM64BICshiftLL) + v.reset(OpARM64ANDshiftLL) v.AuxInt = c v.AddArg(x0) v.AddArg(y) return true } - // match: (BIC x0 x1:(SRLconst [c] y)) + // match: (AND x1:(SLLconst [c] y) x0) // cond: clobberIfDead(x1) - // result: (BICshiftRL x0 y [c]) + // result: (ANDshiftLL x0 y [c]) for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + x1 := v.Args[0] + if x1.Op != OpARM64SLLconst { break } c := x1.AuxInt y := x1.Args[0] + x0 := v.Args[1] if !(clobberIfDead(x1)) { break } - v.reset(OpARM64BICshiftRL) + v.reset(OpARM64ANDshiftLL) v.AuxInt = c v.AddArg(x0) v.AddArg(y) return true } - // match: (BIC x0 x1:(SRAconst [c] y)) + // match: (AND x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) - // result: (BICshiftRA x0 y [c]) + // result: (ANDshiftRL x0 y [c]) for { _ = v.Args[1] x0 := v.Args[0] x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + if x1.Op != OpARM64SRLconst { break } c := x1.AuxInt @@ -2037,331 +2703,419 @@ func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { if !(clobberIfDead(x1)) { break } - v.reset(OpARM64BICshiftRA) + v.reset(OpARM64ANDshiftRL) v.AuxInt = c v.AddArg(x0) 
v.AddArg(y) return true } - return false -} -func rewriteValueARM64_OpARM64BICshiftLL_0(v *Value) bool { - // match: (BICshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (ANDconst x [^int64(uint64(c)<>uint64(d))]) +func rewriteValueARM64_OpARM64AND_10(v *Value) bool { + // match: (AND x1:(SRAconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (ANDshiftRA x0 y [c]) for { - d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x1 := v.Args[0] + if x1.Op != OpARM64SRAconst { break } - c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = ^(c >> uint64(d)) - v.AddArg(x) + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64ANDshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (BICshiftRA x (SRAconst x [c]) [d]) - // cond: c==d + return false +} +func rewriteValueARM64_OpARM64ANDconst_0(v *Value) bool { + // match: (ANDconst [0] _) + // cond: // result: (MOVDconst [0]) for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRAconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { + if v.AuxInt != 0 { break } v.reset(OpARM64MOVDconst) v.AuxInt = 0 return true } - return false -} -func rewriteValueARM64_OpARM64BICshiftRL_0(v *Value) bool { - // match: (BICshiftRL x (MOVDconst [c]) [d]) + // match: (ANDconst [-1] x) // cond: - // result: (ANDconst x [^int64(uint64(c)>>uint64(d))]) + // result: x for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v.AuxInt != -1 { break } - c := v_1.AuxInt - v.reset(OpARM64ANDconst) - v.AuxInt = ^int64(uint64(c) >> uint64(d)) + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (BICshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [0]) + // match: (ANDconst [c] (MOVDconst [d])) + // cond: + // result: 
(MOVDconst [c&d]) for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } + d := v_0.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = c & d return true } - return false -} -func rewriteValueARM64_OpARM64CMN_0(v *Value) bool { - // match: (CMN x (MOVDconst [c])) + // match: (ANDconst [c] (ANDconst [d] x)) // cond: - // result: (CMNconst [c] x) + // result: (ANDconst [c&d] x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64ANDconst { break } - c := v_1.AuxInt - v.reset(OpARM64CMNconst) - v.AuxInt = c + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = c & d v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { - // match: (CMNWconst (MOVDconst [x]) [y]) - // cond: int32(x)==int32(-y) - // result: (FlagEQ) + // match: (ANDconst [c] (MOVWUreg x)) + // cond: + // result: (ANDconst [c&(1<<32-1)] x) for { - y := v.AuxInt + c := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - x := v_0.AuxInt - if !(int32(x) == int32(-y)) { + if v_0.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64FlagEQ) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = c & (1<<32 - 1) + v.AddArg(x) return true } - // match: (CMNWconst (MOVDconst [x]) [y]) - // cond: int32(x)uint32(-y) - // result: (FlagLT_UGT) + // match: (ANDconst [ac] (SLLconst [sc] x)) + // cond: isARM64BFMask(sc, ac, sc) + // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { - y := v.AuxInt + ac := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64SLLconst { break } - x := v_0.AuxInt - if !(int32(x) < int32(-y) && uint32(x) > uint32(-y)) { + sc := v_0.AuxInt + x 
:= v_0.Args[0] + if !(isARM64BFMask(sc, ac, sc)) { break } - v.reset(OpARM64FlagLT_UGT) + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc)) + v.AddArg(x) return true } - // match: (CMNWconst (MOVDconst [x]) [y]) - // cond: int32(x)>int32(-y) && uint32(x) int32(-y) && uint32(x) < uint32(-y)) { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, ac, 0)) { break } - v.reset(OpARM64FlagGT_ULT) + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, 0)) + v.AddArg(x) return true } - // match: (CMNWconst (MOVDconst [x]) [y]) - // cond: int32(x)>int32(-y) && uint32(x)>uint32(-y) - // result: (FlagGT_UGT) + return false +} +func rewriteValueARM64_OpARM64ANDshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (ANDshiftLL (MOVDconst [c]) x [d]) + // cond: + // result: (ANDconst [c] (SLLconst x [d])) for { - y := v.AuxInt + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - x := v_0.AuxInt - if !(int32(x) > int32(-y) && uint32(x) > uint32(-y)) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ANDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftLL x (MOVDconst [c]) [d]) + // cond: + // result: (ANDconst x [int64(uint64(c)< x [d])) for { - y := v.AuxInt + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - x := v_0.AuxInt - if !(int64(x) < int64(-y) && uint64(x) < uint64(-y)) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ANDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRA x (MOVDconst [c]) [d]) + // cond: + // result: (ANDconst x [c>>uint64(d)]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64FlagLT_ULT) + c := v_1.AuxInt + 
v.reset(OpARM64ANDconst) + v.AuxInt = c >> uint64(d) + v.AddArg(x) return true } - // match: (CMNconst (MOVDconst [x]) [y]) - // cond: int64(x)uint64(-y) - // result: (FlagLT_UGT) + // match: (ANDshiftRA x y:(SRAconst x [c]) [d]) + // cond: c==d + // result: y for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if y.Op != OpARM64SRAconst { break } - x := v_0.AuxInt - if !(int64(x) < int64(-y) && uint64(x) > uint64(-y)) { + c := y.AuxInt + if x != y.Args[0] { break } - v.reset(OpARM64FlagLT_UGT) + if !(c == d) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } - // match: (CMNconst (MOVDconst [x]) [y]) - // cond: int64(x)>int64(-y) && uint64(x) x [d])) for { - y := v.AuxInt + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - x := v_0.AuxInt - if !(int64(x) > int64(-y) && uint64(x) < uint64(-y)) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ANDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (ANDshiftRL x (MOVDconst [c]) [d]) + // cond: + // result: (ANDconst x [int64(uint64(c)>>uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64FlagGT_ULT) + c := v_1.AuxInt + v.reset(OpARM64ANDconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) + v.AddArg(x) return true } - // match: (CMNconst (MOVDconst [x]) [y]) - // cond: int64(x)>int64(-y) && uint64(x)>uint64(-y) - // result: (FlagGT_UGT) + // match: (ANDshiftRL x y:(SRLconst x [c]) [d]) + // cond: c==d + // result: y for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if y.Op != OpARM64SRLconst { break } - x := v_0.AuxInt - if !(int64(x) > int64(-y) && uint64(x) > uint64(-y)) { + c := 
y.AuxInt + if x != y.Args[0] { break } - v.reset(OpARM64FlagGT_UGT) + if !(c == d) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { - b := v.Block - _ = b - // match: (CMP x (MOVDconst [c])) +func rewriteValueARM64_OpARM64BIC_0(v *Value) bool { + // match: (BIC x (MOVDconst [c])) // cond: - // result: (CMPconst [c] x) + // result: (ANDconst [^c] x) for { _ = v.Args[1] x := v.Args[0] @@ -2370,32 +3124,27 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64CMPconst) - v.AuxInt = c + v.reset(OpARM64ANDconst) + v.AuxInt = ^c v.AddArg(x) return true } - // match: (CMP (MOVDconst [c]) x) + // match: (BIC x x) // cond: - // result: (InvertFlags (CMPconst [c] x)) + // result: (MOVDconst [0]) for { _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] + if x != v.Args[1] { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (CMP x0 x1:(SLLconst [c] y)) + // match: (BIC x0 x1:(SLLconst [c] y)) // cond: clobberIfDead(x1) - // result: (CMPshiftLL x0 y [c]) + // result: (BICshiftLL x0 y [c]) for { _ = v.Args[1] x0 := v.Args[0] @@ -2408,38 +3157,15 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { if !(clobberIfDead(x1)) { break } - v.reset(OpARM64CMPshiftLL) + v.reset(OpARM64BICshiftLL) v.AuxInt = c v.AddArg(x0) v.AddArg(y) return true } - // match: (CMP x0:(SLLconst [c] y) x1) - // cond: clobberIfDead(x0) - // result: (InvertFlags (CMPshiftLL x1 y [c])) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpARM64SLLconst { - break - } - c := x0.AuxInt - y := x0.Args[0] - x1 := v.Args[1] - if !(clobberIfDead(x0)) { - break - } - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, 
types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (CMP x0 x1:(SRLconst [c] y)) + // match: (BIC x0 x1:(SRLconst [c] y)) // cond: clobberIfDead(x1) - // result: (CMPshiftRL x0 y [c]) + // result: (BICshiftRL x0 y [c]) for { _ = v.Args[1] x0 := v.Args[0] @@ -2452,38 +3178,15 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { if !(clobberIfDead(x1)) { break } - v.reset(OpARM64CMPshiftRL) + v.reset(OpARM64BICshiftRL) v.AuxInt = c v.AddArg(x0) v.AddArg(y) return true } - // match: (CMP x0:(SRLconst [c] y) x1) - // cond: clobberIfDead(x0) - // result: (InvertFlags (CMPshiftRL x1 y [c])) - for { - _ = v.Args[1] - x0 := v.Args[0] - if x0.Op != OpARM64SRLconst { - break - } - c := x0.AuxInt - y := x0.Args[0] - x1 := v.Args[1] - if !(clobberIfDead(x0)) { - break - } - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(x1) - v0.AddArg(y) - v.AddArg(v0) - return true - } - // match: (CMP x0 x1:(SRAconst [c] y)) + // match: (BIC x0 x1:(SRAconst [c] y)) // cond: clobberIfDead(x1) - // result: (CMPshiftRA x0 y [c]) + // result: (BICshiftRA x0 y [c]) for { _ = v.Args[1] x0 := v.Args[0] @@ -2496,44 +3199,62 @@ func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { if !(clobberIfDead(x1)) { break } - v.reset(OpARM64CMPshiftRA) + v.reset(OpARM64BICshiftRA) v.AuxInt = c v.AddArg(x0) v.AddArg(y) return true } - // match: (CMP x0:(SRAconst [c] y) x1) - // cond: clobberIfDead(x0) - // result: (InvertFlags (CMPshiftRA x1 y [c])) + return false +} +func rewriteValueARM64_OpARM64BICshiftLL_0(v *Value) bool { + // match: (BICshiftLL x (MOVDconst [c]) [d]) + // cond: + // result: (ANDconst x [^int64(uint64(c)<>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] @@ -2541,147 +3262,276 @@ func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64CMPWconst) - v.AuxInt = int64(int32(c)) + 
v.reset(OpARM64ANDconst) + v.AuxInt = ^(c >> uint64(d)) v.AddArg(x) return true } - // match: (CMPW (MOVDconst [c]) x) - // cond: - // result: (InvertFlags (CMPWconst [int64(int32(c))] x)) + // match: (BICshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRAconst { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) - v0.AuxInt = int64(int32(c)) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { - // match: (CMPWconst (MOVDconst [x]) [y]) - // cond: int32(x)==int32(y) - // result: (FlagEQ) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + c := v_1.AuxInt + if x != v_1.Args[0] { break } - x := v_0.AuxInt - if !(int32(x) == int32(y)) { + if !(c == d) { break } - v.reset(OpARM64FlagEQ) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (CMPWconst (MOVDconst [x]) [y]) - // cond: int32(x)>uint64(d))]) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - x := v_0.AuxInt - if !(int32(x) < int32(y) && uint32(x) < uint32(y)) { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64FlagLT_ULT) + c := v_1.AuxInt + v.reset(OpARM64ANDconst) + v.AuxInt = ^int64(uint64(c) >> uint64(d)) + v.AddArg(x) return true } - // match: (CMPWconst (MOVDconst [x]) [y]) - // cond: int32(x)uint32(y) - // result: (FlagLT_UGT) + // match: (BICshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - x := v_0.AuxInt - 
if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + c := v_1.AuxInt + if x != v_1.Args[0] { break } - v.reset(OpARM64FlagLT_UGT) + if !(c == d) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (CMPWconst (MOVDconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x) int32(y) && uint32(x) < uint32(y)) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64CMNconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMN x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftLL x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - v.reset(OpARM64FlagGT_ULT) + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMNshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (CMPWconst (MOVDconst [x]) [y]) - // cond: int32(x)>int32(y) && uint32(x)>uint32(y) - // result: (FlagGT_UGT) + // match: (CMN x1:(SLLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (CMNshiftLL x0 y [c]) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SLLconst { break } - x := v_0.AuxInt - if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FlagGT_UGT) + v.reset(OpARM64CMNshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (CMPWconst (MOVBUreg _) [c]) - // cond: 0xff < int32(c) - // result: (FlagLT_ULT) + // match: (CMN x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftRL x0 y [c]) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVBUreg { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { break } - if !(0xff < int32(c)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FlagLT_ULT) + 
v.reset(OpARM64CMNshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (CMPWconst (MOVHUreg _) [c]) - // cond: 0xffff < int32(c) - // result: (FlagLT_ULT) + // match: (CMN x1:(SRLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (CMNshiftRL x0 y [c]) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVHUreg { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRLconst { break } - if !(0xffff < int32(c)) { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FlagLT_ULT) + v.reset(OpARM64CMNshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (CMN x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMNshiftRA x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMNshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (CMN x1:(SRAconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (CMNshiftRA x0 y [c]) + for { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMNshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { - // match: (CMPconst (MOVDconst [x]) [y]) - // cond: x==y +func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool { + // match: (CMNW x (MOVDconst [c])) + // cond: + // result: (CMNWconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64CMNWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMNW (MOVDconst [c]) x) + // cond: + // result: (CMNWconst [c] x) + for { + _ = v.Args[1] + v_0 := 
v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64CMNWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64CMNWconst_0(v *Value) bool { + // match: (CMNWconst (MOVDconst [x]) [y]) + // cond: int32(x)==int32(-y) // result: (FlagEQ) for { y := v.AuxInt @@ -2690,14 +3540,14 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(x == y) { + if !(int32(x) == int32(-y)) { break } v.reset(OpARM64FlagEQ) return true } - // match: (CMPconst (MOVDconst [x]) [y]) - // cond: xuint64(y) + // match: (CMNWconst (MOVDconst [x]) [y]) + // cond: int32(x)uint32(-y) // result: (FlagLT_UGT) for { y := v.AuxInt @@ -2722,14 +3572,14 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(x < y && uint64(x) > uint64(y)) { + if !(int32(x) < int32(-y) && uint32(x) > uint32(-y)) { break } v.reset(OpARM64FlagLT_UGT) return true } - // match: (CMPconst (MOVDconst [x]) [y]) - // cond: x>y && uint64(x)int32(-y) && uint32(x) y && uint64(x) < uint64(y)) { + if !(int32(x) > int32(-y) && uint32(x) < uint32(-y)) { break } v.reset(OpARM64FlagGT_ULT) return true } - // match: (CMPconst (MOVDconst [x]) [y]) - // cond: x>y && uint64(x)>uint64(y) + // match: (CMNWconst (MOVDconst [x]) [y]) + // cond: int32(x)>int32(-y) && uint32(x)>uint32(-y) // result: (FlagGT_UGT) for { y := v.AuxInt @@ -2754,97 +3604,103 @@ func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { break } x := v_0.AuxInt - if !(x > y && uint64(x) > uint64(y)) { + if !(int32(x) > int32(-y) && uint32(x) > uint32(-y)) { break } v.reset(OpARM64FlagGT_UGT) return true } - // match: (CMPconst (MOVBUreg _) [c]) - // cond: 0xff < c - // result: (FlagLT_ULT) + return false +} +func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool { + // match: (CMNconst (MOVDconst [x]) [y]) + // cond: int64(x)==int64(-y) + // result: (FlagEQ) for { - c := v.AuxInt + y := v.AuxInt 
v_0 := v.Args[0] - if v_0.Op != OpARM64MOVBUreg { + if v_0.Op != OpARM64MOVDconst { break } - if !(0xff < c) { + x := v_0.AuxInt + if !(int64(x) == int64(-y)) { break } - v.reset(OpARM64FlagLT_ULT) + v.reset(OpARM64FlagEQ) return true } - // match: (CMPconst (MOVHUreg _) [c]) - // cond: 0xffff < c + // match: (CMNconst (MOVDconst [x]) [y]) + // cond: int64(x)uint64(-y) + // result: (FlagLT_UGT) for { - c := v.AuxInt + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64MOVWUreg { + if v_0.Op != OpARM64MOVDconst { break } - if !(0xffffffff < c) { + x := v_0.AuxInt + if !(int64(x) < int64(-y) && uint64(x) > uint64(-y)) { break } - v.reset(OpARM64FlagLT_ULT) + v.reset(OpARM64FlagLT_UGT) return true } - // match: (CMPconst (ANDconst _ [m]) [n]) - // cond: 0 <= m && m < n - // result: (FlagLT_ULT) + // match: (CMNconst (MOVDconst [x]) [y]) + // cond: int64(x)>int64(-y) && uint64(x) int64(-y) && uint64(x) < uint64(-y)) { break } - v.reset(OpARM64FlagLT_ULT) + v.reset(OpARM64FlagGT_ULT) return true } - // match: (CMPconst (SRLconst _ [c]) [n]) - // cond: 0 <= n && 0 < c && c <= 63 && (1<int64(-y) && uint64(x)>uint64(-y) + // result: (FlagGT_UGT) for { - n := v.AuxInt + y := v.AuxInt v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - if !(0 <= n && 0 < c && c <= 63 && (1< int64(-y) && uint64(x) > uint64(-y)) { break } - v.reset(OpARM64FlagLT_ULT) + v.reset(OpARM64FlagGT_UGT) return true } return false } -func rewriteValueARM64_OpARM64CMPshiftLL_0(v *Value) bool { +func rewriteValueARM64_OpARM64CMNshiftLL_0(v *Value) bool { b := v.Block _ = b - // match: (CMPshiftLL (MOVDconst [c]) x [d]) + // match: (CMNshiftLL (MOVDconst [c]) x [d]) // cond: - // result: (InvertFlags (CMPconst [c] (SLLconst x [d]))) + // result: (CMNconst [c] (SLLconst x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -2854,19 +3710,17 @@ func rewriteValueARM64_OpARM64CMPshiftLL_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - 
v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64CMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) return true } - // match: (CMPshiftLL x (MOVDconst [c]) [d]) + // match: (CMNshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (CMPconst x [int64(uint64(c)< x [d]))) + // result: (CMNconst [c] (SRAconst x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -2898,19 +3752,17 @@ func rewriteValueARM64_OpARM64CMPshiftRA_0(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64CMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) return true } - // match: (CMPshiftRA x (MOVDconst [c]) [d]) + // match: (CMNshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (CMPconst x [c>>uint64(d)]) + // result: (CMNconst x [c>>uint64(d)]) for { d := v.AuxInt _ = v.Args[1] @@ -2920,19 +3772,19 @@ func rewriteValueARM64_OpARM64CMPshiftRA_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64CMPconst) + v.reset(OpARM64CMNconst) v.AuxInt = c >> uint64(d) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { +func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool { b := v.Block _ = b - // match: (CMPshiftRL (MOVDconst [c]) x [d]) + // match: (CMNshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) + // result: (CMNconst [c] (SRLconst x [d])) for { d := v.AuxInt _ = v.Args[1] @@ -2942,19 +3794,17 @@ func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { } c := v_0.AuxInt x 
:= v.Args[1] - v.reset(OpARM64InvertFlags) - v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v0.AuxInt = c - v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) - v1.AuxInt = d - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64CMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) return true } - // match: (CMPshiftRL x (MOVDconst [c]) [d]) + // match: (CMNshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (CMPconst x [int64(uint64(c)>>uint64(d))]) + // result: (CMNconst x [int64(uint64(c)>>uint64(d))]) for { d := v.AuxInt _ = v.Args[1] @@ -2964,1248 +3814,1221 @@ func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64CMPconst) + v.reset(OpARM64CMNconst) v.AuxInt = int64(uint64(c) >> uint64(d)) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { - // match: (CSEL {cc} x (MOVDconst [0]) flag) +func rewriteValueARM64_OpARM64CMP_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMP x (MOVDconst [c])) // cond: - // result: (CSEL0 {cc} x flag) + // result: (CMPconst [c] x) for { - cc := v.Aux - _ = v.Args[2] + _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != 0 { - break - } - flag := v.Args[2] - v.reset(OpARM64CSEL0) - v.Aux = cc + c := v_1.AuxInt + v.reset(OpARM64CMPconst) + v.AuxInt = c v.AddArg(x) - v.AddArg(flag) return true } - // match: (CSEL {cc} (MOVDconst [0]) y flag) + // match: (CMP (MOVDconst [c]) x) // cond: - // result: (CSEL0 {arm64Negate(cc.(Op))} y flag) + // result: (InvertFlags (CMPconst [c] x)) for { - cc := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - if v_0.AuxInt != 0 { - break - } - y := v.Args[1] - flag := v.Args[2] - v.reset(OpARM64CSEL0) - v.Aux = arm64Negate(cc.(Op)) - v.AddArg(y) - v.AddArg(flag) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64InvertFlags) + v0 
:= b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (CSEL {cc} x y (InvertFlags cmp)) - // cond: - // result: (CSEL {arm64Invert(cc.(Op))} x y cmp) + // match: (CMP x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftLL x0 y [c]) for { - cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64InvertFlags { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - cmp := v_2.Args[0] - v.reset(OpARM64CSEL) - v.Aux = arm64Invert(cc.(Op)) - v.AddArg(x) + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64CMPshiftLL) + v.AuxInt = c + v.AddArg(x0) v.AddArg(y) - v.AddArg(cmp) return true } - // match: (CSEL {cc} x _ flag) - // cond: ccARM64Eval(cc, flag) > 0 - // result: x + // match: (CMP x0:(SLLconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftLL x1 y [c])) for { - cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - flag := v.Args[2] - if !(ccARM64Eval(cc, flag) > 0) { + _ = v.Args[1] + x0 := v.Args[0] + if x0.Op != OpARM64SLLconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + c := x0.AuxInt + y := x0.Args[0] + x1 := v.Args[1] + if !(clobberIfDead(x0)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x1) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (CSEL {cc} _ y flag) - // cond: ccARM64Eval(cc, flag) < 0 - // result: y + // match: (CMP x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftRL x0 y [c]) for { - cc := v.Aux - _ = v.Args[2] - y := v.Args[1] - flag := v.Args[2] - if !(ccARM64Eval(cc, flag) < 0) { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { break } - v.reset(OpCopy) - v.Type = y.Type + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { 
+ break + } + v.reset(OpARM64CMPshiftRL) + v.AuxInt = c + v.AddArg(x0) v.AddArg(y) return true } - // match: (CSEL {cc} x y (CMPWconst [0] bool)) - // cond: cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil - // result: (CSEL {bool.Op} x y flagArg(bool)) + // match: (CMP x0:(SRLconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftRL x1 y [c])) for { - cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64CMPWconst { - break - } - if v_2.AuxInt != 0 { + _ = v.Args[1] + x0 := v.Args[0] + if x0.Op != OpARM64SRLconst { break } - bool := v_2.Args[0] - if !(cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil) { + c := x0.AuxInt + y := x0.Args[0] + x1 := v.Args[1] + if !(clobberIfDead(x0)) { break } - v.reset(OpARM64CSEL) - v.Aux = bool.Op - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(bool)) + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x1) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (CSEL {cc} x y (CMPWconst [0] bool)) - // cond: cc.(Op) == OpARM64Equal && flagArg(bool) != nil - // result: (CSEL {arm64Negate(bool.Op)} x y flagArg(bool)) + // match: (CMP x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (CMPshiftRA x0 y [c]) for { - cc := v.Aux - _ = v.Args[2] - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64CMPWconst { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { break } - if v_2.AuxInt != 0 { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - bool := v_2.Args[0] - if !(cc.(Op) == OpARM64Equal && flagArg(bool) != nil) { + v.reset(OpARM64CMPshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (CMP x0:(SRAconst [c] y) x1) + // cond: clobberIfDead(x0) + // result: (InvertFlags (CMPshiftRA x1 y [c])) + for { + _ = v.Args[1] + x0 := v.Args[0] + if x0.Op != OpARM64SRAconst { break } - 
v.reset(OpARM64CSEL) - v.Aux = arm64Negate(bool.Op) - v.AddArg(x) - v.AddArg(y) - v.AddArg(flagArg(bool)) + c := x0.AuxInt + y := x0.Args[0] + x1 := v.Args[1] + if !(clobberIfDead(x0)) { + break + } + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x1) + v0.AddArg(y) + v.AddArg(v0) return true } return false } -func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { - // match: (CSEL0 {cc} x (InvertFlags cmp)) +func rewriteValueARM64_OpARM64CMPW_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPW x (MOVDconst [c])) // cond: - // result: (CSEL0 {arm64Invert(cc.(Op))} x cmp) + // result: (CMPWconst [int64(int32(c))] x) for { - cc := v.Aux _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64InvertFlags { + if v_1.Op != OpARM64MOVDconst { break } - cmp := v_1.Args[0] - v.reset(OpARM64CSEL0) - v.Aux = arm64Invert(cc.(Op)) + c := v_1.AuxInt + v.reset(OpARM64CMPWconst) + v.AuxInt = int64(int32(c)) v.AddArg(x) - v.AddArg(cmp) return true } - // match: (CSEL0 {cc} x flag) - // cond: ccARM64Eval(cc, flag) > 0 - // result: x + // match: (CMPW (MOVDconst [c]) x) + // cond: + // result: (InvertFlags (CMPWconst [int64(int32(c))] x)) for { - cc := v.Aux _ = v.Args[1] - x := v.Args[0] - flag := v.Args[1] - if !(ccARM64Eval(cc, flag) > 0) { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) + v0.AuxInt = int64(int32(c)) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (CSEL0 {cc} _ flag) - // cond: ccARM64Eval(cc, flag) < 0 - // result: (MOVDconst [0]) + return false +} +func rewriteValueARM64_OpARM64CMPWconst_0(v *Value) bool { + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) for { - cc := v.Aux - _ = v.Args[1] - flag := v.Args[1] - if 
!(ccARM64Eval(cc, flag) < 0) { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + x := v_0.AuxInt + if !(int32(x) == int32(y)) { + break + } + v.reset(OpARM64FlagEQ) return true } - // match: (CSEL0 {cc} x (CMPWconst [0] bool)) - // cond: cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil - // result: (CSEL0 {bool.Op} x flagArg(bool)) + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)uint32(y) + // result: (FlagLT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64CSEL0) - v.Aux = bool.Op - v.AddArg(x) - v.AddArg(flagArg(bool)) + x := v_0.AuxInt + if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARM64FlagLT_UGT) return true } - // match: (CSEL0 {cc} x (CMPWconst [0] bool)) - // cond: cc.(Op) == OpARM64Equal && flagArg(bool) != nil - // result: (CSEL0 {arm64Negate(bool.Op)} x flagArg(bool)) + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x) int32(y) && uint32(x) < uint32(y)) { break } - bool := v_1.Args[0] - if !(cc.(Op) == OpARM64Equal && flagArg(bool) != nil) { + v.reset(OpARM64FlagGT_ULT) + return true + } + // match: (CMPWconst (MOVDconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x)>uint32(y) + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64CSEL0) - v.Aux = arm64Negate(bool.Op) - v.AddArg(x) - v.AddArg(flagArg(bool)) + x := v_0.AuxInt + if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARM64FlagGT_UGT) + return true + } + // match: (CMPWconst (MOVBUreg _) [c]) + // cond: 0xff < int32(c) + // result: (FlagLT_ULT) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVBUreg { + break + } + if !(0xff < int32(c)) { + break + } + v.reset(OpARM64FlagLT_ULT) + return true + } + // match: (CMPWconst (MOVHUreg _) [c]) + // cond: 0xffff < int32(c) + 
// result: (FlagLT_ULT) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVHUreg { + break + } + if !(0xffff < int32(c)) { + break + } + v.reset(OpARM64FlagLT_ULT) return true } return false } -func rewriteValueARM64_OpARM64DIV_0(v *Value) bool { - // match: (DIV (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [c/d]) +func rewriteValueARM64_OpARM64CMPconst_0(v *Value) bool { + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: x==y + // result: (FlagEQ) for { - _ = v.Args[1] + y := v.AuxInt v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x := v_0.AuxInt + if !(x == y) { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c / d + v.reset(OpARM64FlagEQ) return true } - return false -} -func rewriteValueARM64_OpARM64DIVW_0(v *Value) bool { - // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(int32(c)/int32(d))]) + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: xuint64(y) + // result: (FlagLT_UGT) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = ^c - v.AddArg(x) + x := v_0.AuxInt + if !(x < y && uint64(x) > uint64(y)) { + break + } + v.reset(OpARM64FlagLT_UGT) return true } - // match: (EON x x) - // cond: - // result: (MOVDconst [-1]) + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: x>y && uint64(x) y && uint64(x) < uint64(y)) { + break + } + v.reset(OpARM64FlagGT_ULT) return true } - // match: (EON x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (EONshiftLL x0 y [c]) + // match: (CMPconst (MOVDconst [x]) [y]) + // cond: x>y && uint64(x)>uint64(y) + // result: (FlagGT_UGT) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + y := v.AuxInt + v_0 := 
v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + x := v_0.AuxInt + if !(x > y && uint64(x) > uint64(y)) { break } - v.reset(OpARM64EONshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64FlagGT_UGT) return true } - // match: (EON x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (EONshiftRL x0 y [c]) + // match: (CMPconst (MOVBUreg _) [c]) + // cond: 0xff < c + // result: (FlagLT_ULT) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVBUreg { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if !(0xff < c) { break } - v.reset(OpARM64EONshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64FlagLT_ULT) return true } - // match: (EON x0 x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (EONshiftRA x0 y [c]) + // match: (CMPconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagLT_ULT) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVHUreg { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if !(0xffff < c) { break } - v.reset(OpARM64EONshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64FlagLT_ULT) return true } - return false -} -func rewriteValueARM64_OpARM64EONshiftLL_0(v *Value) bool { - // match: (EONshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (XORconst x [^int64(uint64(c)<>uint64(d))]) + // result: (InvertFlags (CMPconst [c] (SLLconst x [d]))) for { d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = ^(c >> uint64(d)) - v.AddArg(x) + c := v_0.AuxInt + x := v.Args[1] + 
v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v1.AuxInt = d + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (EONshiftRA x (SRAconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [-1]) + // match: (CMPshiftLL x (MOVDconst [c]) [d]) + // cond: + // result: (CMPconst x [int64(uint64(c)<>uint64(d))]) + // result: (InvertFlags (CMPconst [c] (SRAconst x [d]))) for { d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = ^int64(uint64(c) >> uint64(d)) - v.AddArg(x) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v1.AuxInt = d + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (EONshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [-1]) + // match: (CMPshiftRA x (MOVDconst [c]) [d]) + // cond: + // result: (CMPconst x [c>>uint64(d)]) for { d := v.AuxInt _ = v.Args[1] x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c == d) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = -1 + v.reset(OpARM64CMPconst) + v.AuxInt = c >> uint64(d) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { - // match: (Equal (FlagEQ)) +func rewriteValueARM64_OpARM64CMPshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMPshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (MOVDconst [1]) + // result: (InvertFlags (CMPconst [c] (SRLconst x [d]))) for { + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + if 
v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64InvertFlags) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v0.AuxInt = c + v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v1.AuxInt = d + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (Equal (FlagLT_ULT)) + // match: (CMPshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (MOVDconst [0]) + // result: (CMPconst x [int64(uint64(c)>>uint64(d))]) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_1.AuxInt + v.reset(OpARM64CMPconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) + v.AddArg(x) return true } - // match: (Equal (FlagLT_UGT)) + return false +} +func rewriteValueARM64_OpARM64CSEL_0(v *Value) bool { + // match: (CSEL {cc} x (MOVDconst [0]) flag) // cond: - // result: (MOVDconst [0]) + // result: (CSEL0 {cc} x flag) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + cc := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (Equal (FlagGT_ULT)) - // cond: - // result: (MOVDconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + if v_1.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + flag := v.Args[2] + v.reset(OpARM64CSEL0) + v.Aux = cc + v.AddArg(x) + v.AddArg(flag) return true } - // match: (Equal (FlagGT_UGT)) + // match: (CSEL {cc} (MOVDconst [0]) y flag) // cond: - // result: (MOVDconst [0]) + // result: (CSEL0 {arm64Negate(cc.(Op))} y flag) for { + cc := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + if v_0.AuxInt != 0 { + break + } + y 
:= v.Args[1] + flag := v.Args[2] + v.reset(OpARM64CSEL0) + v.Aux = arm64Negate(cc.(Op)) + v.AddArg(y) + v.AddArg(flag) return true } - // match: (Equal (InvertFlags x)) + // match: (CSEL {cc} x y (InvertFlags cmp)) // cond: - // result: (Equal x) + // result: (CSEL {arm64Invert(cc.(Op))} x y cmp) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + cc := v.Aux + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64InvertFlags { break } - x := v_0.Args[0] - v.reset(OpARM64Equal) + cmp := v_2.Args[0] + v.reset(OpARM64CSEL) + v.Aux = arm64Invert(cc.(Op)) v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) return true } - return false -} -func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { - // match: (FADDD a (FMULD x y)) - // cond: - // result: (FMADDD a x y) + // match: (CSEL {cc} x _ flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FMULD { + cc := v.Aux + _ = v.Args[2] + x := v.Args[0] + flag := v.Args[2] + if !(ccARM64Eval(cc, flag) > 0) { break } - _ = v_1.Args[1] - x := v_1.Args[0] - y := v_1.Args[1] - v.reset(OpARM64FMADDD) - v.AddArg(a) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) - v.AddArg(y) return true } - // match: (FADDD (FMULD x y) a) - // cond: - // result: (FMADDD a x y) + // match: (CSEL {cc} _ y flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: y for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FMULD { + cc := v.Aux + _ = v.Args[2] + y := v.Args[1] + flag := v.Args[2] + if !(ccARM64Eval(cc, flag) < 0) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - a := v.Args[1] - v.reset(OpARM64FMADDD) - v.AddArg(a) - v.AddArg(x) + v.reset(OpCopy) + v.Type = y.Type v.AddArg(y) return true } - // match: (FADDD a (FNMULD x y)) - // cond: - // result: (FMSUBD a x y) + // match: (CSEL {cc} x y (CMPWconst [0] bool)) + // cond: cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil + // result: (CSEL 
{bool.Op} x y flagArg(bool)) for { - _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNMULD { + cc := v.Aux + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64CMPWconst { break } - _ = v_1.Args[1] - x := v_1.Args[0] - y := v_1.Args[1] - v.reset(OpARM64FMSUBD) - v.AddArg(a) + if v_2.AuxInt != 0 { + break + } + bool := v_2.Args[0] + if !(cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil) { + break + } + v.reset(OpARM64CSEL) + v.Aux = bool.Op v.AddArg(x) v.AddArg(y) + v.AddArg(flagArg(bool)) return true } - // match: (FADDD (FNMULD x y) a) - // cond: - // result: (FMSUBD a x y) + // match: (CSEL {cc} x y (CMPWconst [0] bool)) + // cond: cc.(Op) == OpARM64Equal && flagArg(bool) != nil + // result: (CSEL {arm64Negate(bool.Op)} x y flagArg(bool)) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULD { + cc := v.Aux + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64CMPWconst { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - a := v.Args[1] - v.reset(OpARM64FMSUBD) - v.AddArg(a) + if v_2.AuxInt != 0 { + break + } + bool := v_2.Args[0] + if !(cc.(Op) == OpARM64Equal && flagArg(bool) != nil) { + break + } + v.reset(OpARM64CSEL) + v.Aux = arm64Negate(bool.Op) v.AddArg(x) v.AddArg(y) + v.AddArg(flagArg(bool)) return true } return false } -func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { - // match: (FADDS a (FMULS x y)) +func rewriteValueARM64_OpARM64CSEL0_0(v *Value) bool { + // match: (CSEL0 {cc} x (InvertFlags cmp)) // cond: - // result: (FMADDS a x y) + // result: (CSEL0 {arm64Invert(cc.(Op))} x cmp) for { + cc := v.Aux _ = v.Args[1] - a := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64FMULS { + if v_1.Op != OpARM64InvertFlags { break } - _ = v_1.Args[1] - x := v_1.Args[0] - y := v_1.Args[1] - v.reset(OpARM64FMADDS) - v.AddArg(a) + cmp := v_1.Args[0] + v.reset(OpARM64CSEL0) + v.Aux = arm64Invert(cc.(Op)) 
v.AddArg(x) - v.AddArg(y) + v.AddArg(cmp) return true } - // match: (FADDS (FMULS x y) a) - // cond: - // result: (FMADDS a x y) + // match: (CSEL0 {cc} x flag) + // cond: ccARM64Eval(cc, flag) > 0 + // result: x for { + cc := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FMULS { + x := v.Args[0] + flag := v.Args[1] + if !(ccARM64Eval(cc, flag) > 0) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - a := v.Args[1] - v.reset(OpARM64FMADDS) - v.AddArg(a) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) - v.AddArg(y) return true } - // match: (FADDS a (FNMULS x y)) - // cond: - // result: (FMSUBS a x y) + // match: (CSEL0 {cc} _ flag) + // cond: ccARM64Eval(cc, flag) < 0 + // result: (MOVDconst [0]) for { + cc := v.Aux _ = v.Args[1] - a := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNMULS { + flag := v.Args[1] + if !(ccARM64Eval(cc, flag) < 0) { break } - _ = v_1.Args[1] - x := v_1.Args[0] - y := v_1.Args[1] - v.reset(OpARM64FMSUBS) - v.AddArg(a) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (FADDS (FNMULS x y) a) - // cond: - // result: (FMSUBS a x y) + // match: (CSEL0 {cc} x (CMPWconst [0] bool)) + // cond: cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil + // result: (CSEL0 {bool.Op} x flagArg(bool)) for { + cc := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULS { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CMPWconst { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - a := v.Args[1] - v.reset(OpARM64FMSUBS) - v.AddArg(a) + if v_1.AuxInt != 0 { + break + } + bool := v_1.Args[0] + if !(cc.(Op) == OpARM64NotEqual && flagArg(bool) != nil) { + break + } + v.reset(OpARM64CSEL0) + v.Aux = bool.Op v.AddArg(x) - v.AddArg(y) + v.AddArg(flagArg(bool)) return true } - return false -} -func rewriteValueARM64_OpARM64FMOVDgpfp_0(v *Value) bool { - b := v.Block - _ = b - // match: (FMOVDgpfp (Arg [off] {sym})) - // cond: - // result: 
@b.Func.Entry (Arg [off] {sym}) + // match: (CSEL0 {cc} x (CMPWconst [0] bool)) + // cond: cc.(Op) == OpARM64Equal && flagArg(bool) != nil + // result: (CSEL0 {arm64Negate(bool.Op)} x flagArg(bool)) for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpArg { + cc := v.Aux + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CMPWconst { break } - off := v_0.AuxInt - sym := v_0.Aux - b = b.Func.Entry - v0 := b.NewValue0(v.Pos, OpArg, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym + if v_1.AuxInt != 0 { + break + } + bool := v_1.Args[0] + if !(cc.(Op) == OpARM64Equal && flagArg(bool) != nil) { + break + } + v.reset(OpARM64CSEL0) + v.Aux = arm64Negate(bool.Op) + v.AddArg(x) + v.AddArg(flagArg(bool)) return true } return false } -func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVDload [off1+off2] {sym} ptr mem) +func rewriteValueARM64_OpARM64DIV_0(v *Value) bool { + // match: (DIV (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [c/d]) for { - off1 := v.AuxInt - sym := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + if v_0.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64FMOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c / d return true } - // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVDload [off1+off2] 
{mergeSym(sym1,sym2)} ptr mem) + return false +} +func rewriteValueARM64_OpARM64DIVW_0(v *Value) bool { + // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c)/int32(d))]) for { - off1 := v.AuxInt - sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if v_0.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64FMOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) / int32(d)) return true } return false } -func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (FMOVDstore ptr (FMOVDgpfp val) mem) +func rewriteValueARM64_OpARM64EON_0(v *Value) bool { + // match: (EON x (MOVDconst [c])) // cond: - // result: (MOVDstore ptr val mem) + // result: (XORconst [^c] x) for { - _ = v.Args[2] - ptr := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64FMOVDgpfp { + if v_1.Op != OpARM64MOVDconst { break } - val := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = ^c + v.AddArg(x) return true } - // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVDstore [off1+off2] {sym} ptr val mem) + // match: (EON x x) + // cond: + // result: (MOVDconst [-1]) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - 
val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = v.Args[1] + x := v.Args[0] + if x != v.Args[1] { break } - v.reset(OpARM64FMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } - // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (EON x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftLL x0 y [c]) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64EONshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - return false -} -func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVSload [off1+off2] {sym} ptr mem) + // match: (EON x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftRL x0 y [c]) for { - off1 := v.AuxInt - sym := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != 
OpARM64SRLconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FMOVSload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64EONshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (EON x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (EONshiftRA x0 y [c]) for { - off1 := v.AuxInt - sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FMOVSload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64EONshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (FMOVSstore [off1+off2] {sym} ptr val mem) +func rewriteValueARM64_OpARM64EONshiftLL_0(v *Value) bool { + // match: (EONshiftLL x (MOVDconst [c]) [d]) + // cond: + // result: (XORconst x [^int64(uint64(c)<>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpARM64FNEGD { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64FNMULD) + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = ^(c >> uint64(d)) v.AddArg(x) - v.AddArg(y) return true } - // match: (FMULD y (FNEGD x)) - // cond: - // result: (FNMULD x y) + // match: (EONshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [-1]) for { + d := v.AuxInt _ = v.Args[1] - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGD { + if v_1.Op != OpARM64SRAconst { break } - x := v_1.Args[0] - v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } return false } -func rewriteValueARM64_OpARM64FMULS_0(v *Value) bool { - // match: (FMULS (FNEGS x) y) +func rewriteValueARM64_OpARM64EONshiftRL_0(v *Value) bool { + // match: (EONshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (FNMULS x y) + // result: (XORconst x [^int64(uint64(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGS { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64FNMULS) + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = ^int64(uint64(c) >> uint64(d)) v.AddArg(x) - v.AddArg(y) return true } - // match: (FMULS y (FNEGS x)) - // cond: - // result: (FNMULS x y) + // match: (EONshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [-1]) for { + d := v.AuxInt _ = v.Args[1] - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGS { + if v_1.Op != OpARM64SRLconst { break } - x := v_1.Args[0] - v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64FNEGD_0(v *Value) bool { - // match: (FNEGD (FMULD x y)) - // cond: - // 
result: (FNMULD x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FMULD { + c := v_1.AuxInt + if x != v_1.Args[0] { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64FNMULD) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (FNEGD (FNMULD x y)) - // cond: - // result: (FMULD x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULD { + if !(c == d) { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } return false } -func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { - // match: (FNEGS (FMULS x y)) +func rewriteValueARM64_OpARM64Equal_0(v *Value) bool { + // match: (Equal (FlagEQ)) // cond: - // result: (FNMULS x y) + // result: (MOVDconst [1]) for { v_0 := v.Args[0] - if v_0.Op != OpARM64FMULS { + if v_0.Op != OpARM64FlagEQ { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64FNMULS) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (FNEGS (FNMULS x y)) + // match: (Equal (FlagLT_ULT)) // cond: - // result: (FMULS x y) + // result: (MOVDconst [0]) for { v_0 := v.Args[0] - if v_0.Op != OpARM64FNMULS { + if v_0.Op != OpARM64FlagLT_ULT { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - return false -} -func rewriteValueARM64_OpARM64FNMULD_0(v *Value) bool { - // match: (FNMULD (FNEGD x) y) + // match: (Equal (FlagLT_UGT)) // cond: - // result: (FMULD x y) + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGD { + if v_0.Op != OpARM64FlagLT_UGT { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (FNMULD y (FNEGD x)) + // match: (Equal 
(FlagGT_ULT)) // cond: - // result: (FMULD x y) + // result: (MOVDconst [0]) for { - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGD { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_ULT { break } - x := v_1.Args[0] - v.reset(OpARM64FMULD) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - return false -} -func rewriteValueARM64_OpARM64FNMULS_0(v *Value) bool { - // match: (FNMULS (FNEGS x) y) + // match: (Equal (FlagGT_UGT)) // cond: - // result: (FMULS x y) + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FNEGS { + if v_0.Op != OpARM64FlagGT_UGT { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64FMULS) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (FNMULS y (FNEGS x)) + // match: (Equal (InvertFlags x)) // cond: - // result: (FMULS x y) + // result: (Equal x) for { - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FNEGS { + v_0 := v.Args[0] + if v_0.Op != OpARM64InvertFlags { break } - x := v_1.Args[0] - v.reset(OpARM64FMULS) + x := v_0.Args[0] + v.reset(OpARM64Equal) v.AddArg(x) - v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { - // match: (FSUBD a (FMULD x y)) +func rewriteValueARM64_OpARM64FADDD_0(v *Value) bool { + // match: (FADDD a (FMULD x y)) // cond: - // result: (FMSUBD a x y) + // result: (FMADDD a x y) for { _ = v.Args[1] a := v.Args[0] @@ -4216,15 +5039,15 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { _ = v_1.Args[1] x := v_1.Args[0] y := v_1.Args[1] - v.reset(OpARM64FMSUBD) + v.reset(OpARM64FMADDD) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBD (FMULD x y) a) + // match: (FADDD (FMULD x y) a) // cond: - // result: (FNMSUBD a x y) + // result: (FMADDD a x y) for { _ = v.Args[1] v_0 := v.Args[0] @@ -4235,15 +5058,15 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { x := 
v_0.Args[0] y := v_0.Args[1] a := v.Args[1] - v.reset(OpARM64FNMSUBD) + v.reset(OpARM64FMADDD) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBD a (FNMULD x y)) + // match: (FADDD a (FNMULD x y)) // cond: - // result: (FMADDD a x y) + // result: (FMSUBD a x y) for { _ = v.Args[1] a := v.Args[0] @@ -4254,15 +5077,15 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { _ = v_1.Args[1] x := v_1.Args[0] y := v_1.Args[1] - v.reset(OpARM64FMADDD) + v.reset(OpARM64FMSUBD) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBD (FNMULD x y) a) + // match: (FADDD (FNMULD x y) a) // cond: - // result: (FNMADDD a x y) + // result: (FMSUBD a x y) for { _ = v.Args[1] v_0 := v.Args[0] @@ -4273,7 +5096,7 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { x := v_0.Args[0] y := v_0.Args[1] a := v.Args[1] - v.reset(OpARM64FNMADDD) + v.reset(OpARM64FMSUBD) v.AddArg(a) v.AddArg(x) v.AddArg(y) @@ -4281,10 +5104,10 @@ func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { - // match: (FSUBS a (FMULS x y)) +func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool { + // match: (FADDS a (FMULS x y)) // cond: - // result: (FMSUBS a x y) + // result: (FMADDS a x y) for { _ = v.Args[1] a := v.Args[0] @@ -4295,15 +5118,15 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { _ = v_1.Args[1] x := v_1.Args[0] y := v_1.Args[1] - v.reset(OpARM64FMSUBS) + v.reset(OpARM64FMADDS) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBS (FMULS x y) a) + // match: (FADDS (FMULS x y) a) // cond: - // result: (FNMSUBS a x y) + // result: (FMADDS a x y) for { _ = v.Args[1] v_0 := v.Args[0] @@ -4314,15 +5137,15 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { x := v_0.Args[0] y := v_0.Args[1] a := v.Args[1] - v.reset(OpARM64FNMSUBS) + v.reset(OpARM64FMADDS) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBS a (FNMULS x y)) + // match: (FADDS a (FNMULS 
x y)) // cond: - // result: (FMADDS a x y) + // result: (FMSUBS a x y) for { _ = v.Args[1] a := v.Args[0] @@ -4333,15 +5156,15 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { _ = v_1.Args[1] x := v_1.Args[0] y := v_1.Args[1] - v.reset(OpARM64FMADDS) + v.reset(OpARM64FMSUBS) v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } - // match: (FSUBS (FNMULS x y) a) + // match: (FADDS (FNMULS x y) a) // cond: - // result: (FNMADDS a x y) + // result: (FMSUBS a x y) for { _ = v.Args[1] v_0 := v.Args[0] @@ -4352,7 +5175,7 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { x := v_0.Args[0] y := v_0.Args[1] a := v.Args[1] - v.reset(OpARM64FNMADDS) + v.reset(OpARM64FMSUBS) v.AddArg(a) v.AddArg(x) v.AddArg(y) @@ -4360,388 +5183,1018 @@ func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { - // match: (GreaterEqual (FlagEQ)) +func rewriteValueARM64_OpARM64FMOVDfpgp_0(v *Value) bool { + b := v.Block + _ = b + // match: (FMOVDfpgp (Arg [off] {sym})) // cond: - // result: (MOVDconst [1]) + // result: @b.Func.Entry (Arg [off] {sym}) for { + t := v.Type v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + if v_0.Op != OpArg { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym return true } - // match: (GreaterEqual (FlagLT_ULT)) + return false +} +func rewriteValueARM64_OpARM64FMOVDgpfp_0(v *Value) bool { + b := v.Block + _ = b + // match: (FMOVDgpfp (Arg [off] {sym})) // cond: - // result: (MOVDconst [0]) + // result: @b.Func.Entry (Arg [off] {sym}) for { + t := v.Type v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + if v_0.Op != OpArg { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + 
v0.Aux = sym return true } - // match: (GreaterEqual (FlagLT_UGT)) + return false +} +func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) // cond: - // result: (MOVDconst [0]) + // result: (FMOVDgpfp val) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDstore { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (GreaterEqual (FlagGT_ULT)) - // cond: - // result: (MOVDconst [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + if v_1.AuxInt != off { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpARM64FMOVDgpfp) + v.AddArg(val) return true } - // match: (GreaterEqual (FlagGT_UGT)) - // cond: - // result: (MOVDconst [1]) + // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVDload [off1+off2] {sym} ptr mem) for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterEqual (InvertFlags x)) - // cond: - // result: (LessEqual x) + // match: (FMOVDload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (FMOVDloadidx ptr idx mem) for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if 
v_0.Op != OpARM64InvertFlags { + if v_0.Op != OpARM64ADD { break } - x := v_0.Args[0] - v.reset(OpARM64LessEqual) - v.AddArg(x) + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { - // match: (GreaterEqualU (FlagEQ)) - // cond: - // result: (MOVDconst [1]) + // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + if v_0.Op != OpARM64MOVDaddr { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterEqualU (FlagLT_ULT)) + return false +} +func rewriteValueARM64_OpARM64FMOVDloadidx_0(v *Value) bool { + // match: (FMOVDloadidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVDconst [0]) + // result: (FMOVDload [c] ptr mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64FMOVDload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterEqualU (FlagLT_UGT)) + // match: (FMOVDloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVDconst [1]) + // result: (FMOVDload [c] ptr mem) 
for { + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64FMOVDload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterEqualU (FlagGT_ULT)) + return false +} +func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) // cond: - // result: (MOVDconst [0]) + // result: (MOVDstore [off] {sym} ptr val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FMOVDgpfp { + break + } + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVDstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVDstore [off1+off2] {sym} ptr val mem) for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + if v_0.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterEqualU (FlagGT_UGT)) - // cond: - // result: (MOVDconst [1]) + // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVDstoreidx ptr idx val mem) for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64ADD { break } - 
v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVDstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterEqualU (InvertFlags x)) - // cond: - // result: (LessEqualU x) + // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + if v_0.Op != OpARM64MOVDaddr { break } - x := v_0.Args[0] - v.reset(OpARM64LessEqualU) - v.AddArg(x) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { - // match: (GreaterThan (FlagEQ)) +func rewriteValueARM64_OpARM64FMOVDstoreidx_0(v *Value) bool { + // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVDconst [0]) + // result: (FMOVDstore [c] ptr val mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64FMOVDstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterThan (FlagLT_ULT)) + // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVDconst 
[0]) + // result: (FMOVDstore [c] idx val mem) for { + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_0.AuxInt + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64FMOVDstore) + v.AuxInt = c + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterThan (FlagLT_UGT)) + return false +} +func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) // cond: - // result: (MOVDconst [0]) + // result: (FMOVSgpfp val) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWstore { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpARM64FMOVSgpfp) + v.AddArg(val) return true } - // match: (GreaterThan (FlagGT_ULT)) - // cond: - // result: (MOVDconst [1]) + // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVSload [off1+off2] {sym} ptr mem) for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + if v_0.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterThan (FlagGT_UGT)) - // cond: - // result: (MOVDconst [1]) + // match: (FMOVSload [off] {sym} (ADD ptr idx) 
mem) + // cond: off == 0 && sym == nil + // result: (FMOVSloadidx ptr idx mem) for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64ADD { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (GreaterThan (InvertFlags x)) - // cond: - // result: (LessThan x) + // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + if v_0.Op != OpARM64MOVDaddr { break } - x := v_0.Args[0] - v.reset(OpARM64LessThan) - v.AddArg(x) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVSload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { - // match: (GreaterThanU (FlagEQ)) +func rewriteValueARM64_OpARM64FMOVSloadidx_0(v *Value) bool { + // match: (FMOVSloadidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVDconst [0]) + // result: (FMOVSload [c] ptr mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64FMOVSload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterThanU 
(FlagLT_ULT)) + // match: (FMOVSloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVDconst [0]) + // result: (FMOVSload [c] ptr mem) for { + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64FMOVSload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (GreaterThanU (FlagLT_UGT)) + return false +} +func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) // cond: - // result: (MOVDconst [1]) + // result: (MOVWstore [off] {sym} ptr val mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FMOVSgpfp { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterThanU (FlagGT_ULT)) - // cond: - // result: (MOVDconst [0]) + // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVSstore [off1+off2] {sym} ptr val mem) for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + if v_0.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterThanU (FlagGT_UGT)) - // cond: - // 
result: (MOVDconst [1]) + // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (FMOVSstoreidx ptr idx val mem) for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64ADD { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64FMOVSstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (GreaterThanU (InvertFlags x)) - // cond: - // result: (LessThanU x) + // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + if v_0.Op != OpARM64MOVDaddr { break } - x := v_0.Args[0] - v.reset(OpARM64LessThanU) - v.AddArg(x) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { - // match: (LessEqual (FlagEQ)) +func rewriteValueARM64_OpARM64FMOVSstoreidx_0(v *Value) bool { + // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVDconst [1]) + // result: (FMOVSstore [c] ptr val mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - 
v.AuxInt = 1 + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64FMOVSstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (LessEqual (FlagLT_ULT)) + // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVDconst [1]) + // result: (FMOVSstore [c] idx val mem) for { + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + c := v_0.AuxInt + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64FMOVSstore) + v.AuxInt = c + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (LessEqual (FlagLT_UGT)) + return false +} +func rewriteValueARM64_OpARM64FMULD_0(v *Value) bool { + // match: (FMULD (FNEGD x) y) // cond: - // result: (MOVDconst [1]) + // result: (FNMULD x y) for { + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + if v_0.Op != OpARM64FNEGD { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64FNMULD) + v.AddArg(x) + v.AddArg(y) return true } - // match: (LessEqual (FlagGT_ULT)) + // match: (FMULD y (FNEGD x)) // cond: - // result: (MOVDconst [0]) + // result: (FNMULD x y) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNEGD { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + x := v_1.Args[0] + v.reset(OpARM64FNMULD) + v.AddArg(x) + v.AddArg(y) return true } - // match: (LessEqual (FlagGT_UGT)) + return false +} +func rewriteValueARM64_OpARM64FMULS_0(v *Value) bool { + // match: (FMULS (FNEGS x) y) // cond: - // result: (MOVDconst [0]) + // result: (FNMULS x y) for { + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + if v_0.Op != OpARM64FNEGS { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64FNMULS) + 
v.AddArg(x) + v.AddArg(y) return true } - // match: (LessEqual (InvertFlags x)) + // match: (FMULS y (FNEGS x)) // cond: - // result: (GreaterEqual x) + // result: (FNMULS x y) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNEGS { break } - x := v_0.Args[0] - v.reset(OpARM64GreaterEqual) + x := v_1.Args[0] + v.reset(OpARM64FNMULS) v.AddArg(x) + v.AddArg(y) return true } return false } -func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { - // match: (LessEqualU (FlagEQ)) +func rewriteValueARM64_OpARM64FNEGD_0(v *Value) bool { + // match: (FNEGD (FMULD x y)) + // cond: + // result: (FNMULD x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64FNMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FNEGD (FNMULD x y)) + // cond: + // result: (FMULD x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FNMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64FMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FNEGS_0(v *Value) bool { + // match: (FNEGS (FMULS x y)) + // cond: + // result: (FNMULS x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FMULS { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64FNMULS) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FNEGS (FNMULS x y)) + // cond: + // result: (FMULS x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FNMULS { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64FMULS) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FNMULD_0(v *Value) bool { + // match: (FNMULD (FNEGD x) y) + // cond: + // result: (FMULD x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FNEGD { + break + } + x := 
v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64FMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FNMULD y (FNEGD x)) + // cond: + // result: (FMULD x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNEGD { + break + } + x := v_1.Args[0] + v.reset(OpARM64FMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FNMULS_0(v *Value) bool { + // match: (FNMULS (FNEGS x) y) + // cond: + // result: (FMULS x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FNEGS { + break + } + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64FMULS) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FNMULS y (FNEGS x)) + // cond: + // result: (FMULS x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNEGS { + break + } + x := v_1.Args[0] + v.reset(OpARM64FMULS) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FSUBD_0(v *Value) bool { + // match: (FSUBD a (FMULD x y)) + // cond: + // result: (FMSUBD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + v.reset(OpARM64FMSUBD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBD (FMULD x y) a) + // cond: + // result: (FNMSUBD a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + v.reset(OpARM64FNMSUBD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBD a (FNMULD x y)) + // cond: + // result: (FMADDD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + v.reset(OpARM64FMADDD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBD 
(FNMULD x y) a) + // cond: + // result: (FNMADDD a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FNMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + v.reset(OpARM64FNMADDD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64FSUBS_0(v *Value) bool { + // match: (FSUBS a (FMULS x y)) + // cond: + // result: (FMSUBS a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FMULS { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + v.reset(OpARM64FMSUBS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBS (FMULS x y) a) + // cond: + // result: (FNMSUBS a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FMULS { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + v.reset(OpARM64FNMSUBS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBS a (FNMULS x y)) + // cond: + // result: (FMADDS a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FNMULS { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + v.reset(OpARM64FMADDS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (FSUBS (FNMULS x y) a) + // cond: + // result: (FNMADDS a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64FNMULS { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + v.reset(OpARM64FNMADDS) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64GreaterEqual_0(v *Value) bool { + // match: (GreaterEqual (FlagEQ)) // cond: // result: (MOVDconst [1]) for { @@ -4753,19 +6206,19 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessEqualU (FlagLT_ULT)) + // match: (GreaterEqual (FlagLT_ULT)) // 
cond: - // result: (MOVDconst [1]) + // result: (MOVDconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessEqualU (FlagLT_UGT)) + // match: (GreaterEqual (FlagLT_UGT)) // cond: // result: (MOVDconst [0]) for { @@ -4777,7 +6230,7 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessEqualU (FlagGT_ULT)) + // match: (GreaterEqual (FlagGT_ULT)) // cond: // result: (MOVDconst [1]) for { @@ -4789,59 +6242,59 @@ func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessEqualU (FlagGT_UGT)) + // match: (GreaterEqual (FlagGT_UGT)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessEqualU (InvertFlags x)) + // match: (GreaterEqual (InvertFlags x)) // cond: - // result: (GreaterEqualU x) + // result: (LessEqual x) for { v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] - v.reset(OpARM64GreaterEqualU) + v.reset(OpARM64LessEqual) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { - // match: (LessThan (FlagEQ)) +func rewriteValueARM64_OpARM64GreaterEqualU_0(v *Value) bool { + // match: (GreaterEqualU (FlagEQ)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagEQ { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThan (FlagLT_ULT)) + // match: (GreaterEqualU (FlagLT_ULT)) // cond: - // result: (MOVDconst [1]) + // result: (MOVDconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessThan (FlagLT_UGT)) + // match: 
(GreaterEqualU (FlagLT_UGT)) // cond: // result: (MOVDconst [1]) for { @@ -4853,7 +6306,7 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessThan (FlagGT_ULT)) + // match: (GreaterEqualU (FlagGT_ULT)) // cond: // result: (MOVDconst [0]) for { @@ -4865,35 +6318,35 @@ func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThan (FlagGT_UGT)) + // match: (GreaterEqualU (FlagGT_UGT)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThan (InvertFlags x)) + // match: (GreaterEqualU (InvertFlags x)) // cond: - // result: (GreaterThan x) + // result: (LessEqualU x) for { v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] - v.reset(OpARM64GreaterThan) + v.reset(OpARM64LessEqualU) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { - // match: (LessThanU (FlagEQ)) +func rewriteValueARM64_OpARM64GreaterThan_0(v *Value) bool { + // match: (GreaterThan (FlagEQ)) // cond: // result: (MOVDconst [0]) for { @@ -4905,19 +6358,19 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThanU (FlagLT_ULT)) + // match: (GreaterThan (FlagLT_ULT)) // cond: - // result: (MOVDconst [1]) + // result: (MOVDconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagLT_ULT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (LessThanU (FlagLT_UGT)) + // match: (GreaterThan (FlagLT_UGT)) // cond: // result: (MOVDconst [0]) for { @@ -4929,7 +6382,7 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { v.AuxInt = 0 return true } - // match: (LessThanU (FlagGT_ULT)) + // match: (GreaterThan (FlagGT_ULT)) // cond: // result: (MOVDconst [1]) for { @@ -4941,684 
+6394,510 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { v.AuxInt = 1 return true } - // match: (LessThanU (FlagGT_UGT)) + // match: (GreaterThan (FlagGT_UGT)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpARM64FlagGT_UGT { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (LessThanU (InvertFlags x)) + // match: (GreaterThan (InvertFlags x)) // cond: - // result: (GreaterThanU x) + // result: (LessThan x) for { v_0 := v.Args[0] if v_0.Op != OpARM64InvertFlags { break } x := v_0.Args[0] - v.reset(OpARM64GreaterThanU) + v.reset(OpARM64LessThan) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { - b := v.Block - _ = b - // match: (MNEG x (MOVDconst [-1])) +func rewriteValueARM64_OpARM64GreaterThanU_0(v *Value) bool { + // match: (GreaterThanU (FlagEQ)) // cond: - // result: x + // result: (MOVDconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != -1 { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [-1]) x) + // match: (GreaterThanU (FlagLT_ULT)) // cond: - // result: x + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != -1 { + if v_0.Op != OpARM64FlagLT_ULT { break } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG _ (MOVDconst [0])) + // match: (GreaterThanU (FlagLT_UGT)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDconst [1]) for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != 0 { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } 
v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = 1 return true } - // match: (MNEG (MOVDconst [0]) _) + // match: (GreaterThanU (FlagGT_ULT)) // cond: // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 0 { + if v_0.Op != OpARM64FlagGT_ULT { break } v.reset(OpARM64MOVDconst) v.AuxInt = 0 return true } - // match: (MNEG x (MOVDconst [1])) + // match: (GreaterThanU (FlagGT_UGT)) // cond: - // result: (NEG x) + // result: (MOVDconst [1]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != 1 { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_UGT { break } - v.reset(OpARM64NEG) - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG (MOVDconst [1]) x) + // match: (GreaterThanU (InvertFlags x)) // cond: - // result: (NEG x) + // result: (LessThanU x) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 1 { + if v_0.Op != OpARM64InvertFlags { break } - x := v.Args[1] - v.reset(OpARM64NEG) + x := v_0.Args[0] + v.reset(OpARM64LessThanU) v.AddArg(x) return true } - // match: (MNEG x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) + return false +} +func rewriteValueARM64_OpARM64LessEqual_0(v *Value) bool { + // match: (LessEqual (FlagEQ)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) + // match: (LessEqual (FlagLT_ULT)) + // 
cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c)) { + if v_0.Op != OpARM64FlagLT_ULT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (LessEqual (FlagLT_UGT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (LessEqual (FlagGT_ULT)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_ULT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (LessEqual (FlagGT_UGT)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64FlagGT_UGT { break } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c-1) && c >= 3) { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (LessEqual (InvertFlags x)) + // cond: + // result: (GreaterEqual x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64InvertFlags { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + x := v_0.Args[0] 
+ v.reset(OpARM64GreaterEqual) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MNEG_10(v *Value) bool { - b := v.Block - _ = b - // match: (MNEG x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) +func rewriteValueARM64_OpARM64LessEqualU_0(v *Value) bool { + // match: (LessEqualU (FlagEQ)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && c >= 7) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) + // match: (LessEqualU (FlagLT_ULT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c+1) && c >= 7) { + if v_0.Op != OpARM64FlagLT_ULT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + // match: (LessEqualU (FlagLT_UGT)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + v_0 := v.Args[0] + if v_0.Op != 
OpARM64FlagLT_UGT { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + // match: (LessEqualU (FlagGT_ULT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + if v_0.Op != OpARM64FlagGT_ULT { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + // match: (LessEqualU (FlagGT_UGT)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_UGT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + // match: (LessEqualU (InvertFlags x)) + // cond: + // result: (GreaterEqualU x) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := 
v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + if v_0.Op != OpARM64InvertFlags { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0.Args[0] + v.reset(OpARM64GreaterEqualU) + v.AddArg(x) return true } - // match: (MNEG x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + return false +} +func rewriteValueARM64_OpARM64LessThan_0(v *Value) bool { + // match: (LessThan (FlagEQ)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + // match: (LessThan (FlagLT_ULT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7)) { + if v_0.Op != OpARM64FlagLT_ULT { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + // match: (LessThan (FlagLT_UGT)) + // cond: + // 
result: (MOVDconst [1]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEG (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + // match: (LessThan (FlagGT_ULT)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + if v_0.Op != OpARM64FlagGT_ULT { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - return false -} -func rewriteValueARM64_OpARM64MNEG_20(v *Value) bool { - // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) + // match: (LessThan (FlagGT_UGT)) // cond: - // result: (MOVDconst [-c*d]) + // result: (MOVDconst [0]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64FlagGT_UGT { break } - d := v_1.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = -c * d + v.AuxInt = 0 return true } - // match: (MNEG (MOVDconst [d]) (MOVDconst [c])) + // match: (LessThan (InvertFlags x)) // cond: - // result: (MOVDconst [-c*d]) + // result: (GreaterThan x) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { 
- break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64InvertFlags { break } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -c * d + x := v_0.Args[0] + v.reset(OpARM64GreaterThan) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { - b := v.Block - _ = b - // match: (MNEGW x (MOVDconst [c])) - // cond: int32(c)==-1 - // result: x +func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool { + // match: (LessThanU (FlagEQ)) + // cond: + // result: (MOVDconst [0]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == -1) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: int32(c)==-1 - // result: x + // match: (LessThanU (FlagLT_ULT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(int32(c) == -1) { + if v_0.Op != OpARM64FlagLT_ULT { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 return true } - // match: (MNEGW _ (MOVDconst [c])) - // cond: int32(c)==0 + // match: (LessThanU (FlagLT_UGT)) + // cond: // result: (MOVDconst [0]) for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == 0) { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } v.reset(OpARM64MOVDconst) v.AuxInt = 0 return true } - // match: (MNEGW (MOVDconst [c]) _) - // cond: int32(c)==0 - // result: (MOVDconst [0]) + // match: (LessThanU (FlagGT_ULT)) + // cond: + // result: (MOVDconst [1]) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64FlagGT_ULT { break } - 
c := v_0.AuxInt - if !(int32(c) == 0) { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (LessThanU (FlagGT_UGT)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_UGT { break } v.reset(OpARM64MOVDconst) v.AuxInt = 0 return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: int32(c)==1 - // result: (NEG x) + // match: (LessThanU (InvertFlags x)) + // cond: + // result: (GreaterThanU x) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == 1) { + v_0 := v.Args[0] + if v_0.Op != OpARM64InvertFlags { break } - v.reset(OpARM64NEG) + x := v_0.Args[0] + v.reset(OpARM64GreaterThanU) v.AddArg(x) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: int32(c)==1 - // result: (NEG x) + return false +} +func rewriteValueARM64_OpARM64MADD_0(v *Value) bool { + b := v.Block + _ = b + // match: (MADD a x (MOVDconst [-1])) + // cond: + // result: (SUB a x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(int32(c) == 1) { + if v_2.AuxInt != -1 { break } - v.reset(OpARM64NEG) + v.reset(OpARM64SUB) + v.AddArg(a) v.AddArg(x) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) + // match: (MADD a _ (MOVDconst [0])) + // cond: + // result: a for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + if v_2.AuxInt != 0 { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) return 
true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (NEG (SLLconst [log2(c)] x)) + // match: (MADD a x (MOVDconst [1])) + // cond: + // result: (ADD a x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + if v_2.AuxInt != 1 { + break + } + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MADD a x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (ADDshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { + break + } + c := v_2.AuxInt if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (MADD a x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && c>=3 + // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + c := v_2.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { break } - v.reset(OpARM64NEG) + v.reset(OpARM64ADD) + v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = log2(c - 1) v0.AddArg(x) @@ -5626,769 +6905,880 @@ func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (NEG (ADDshiftLL x x [log2(c-1)])) + // match: (MADD a x (MOVDconst 
[c])) + // cond: isPowerOfTwo(c+1) && c>=7 + // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + c := v_2.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MNEGW_10(v *Value) bool { - b := v.Block - _ = b - // match: (MNEGW x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) + // match: (MADD a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + c := v_2.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) + // match: (MADD a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // 
result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + c := v_2.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - v.reset(OpARM64NEG) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) + v0.AuxInt = 2 + v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + // match: (MADD a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + c := v_2.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = 3 v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + // match: (MADD a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a 
:= v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + c := v_2.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + return false +} +func rewriteValueARM64_OpARM64MADD_10(v *Value) bool { + b := v.Block + _ = b + // match: (MADD a (MOVDconst [-1]) x) + // cond: + // result: (SUB a x) for { - _ = v.Args[1] - x := v.Args[0] + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + if v_1.AuxInt != -1 { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + x := v.Args[2] + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + // match: (MADD a (MOVDconst [0]) _) + // cond: + // result: a for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + if v_1.AuxInt != 0 { break } - v.reset(OpARM64NEG) - v0 := 
b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) - v.AddArg(v0) + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + // match: (MADD a (MOVDconst [1]) x) + // cond: + // result: (ADD a x) for { - _ = v.Args[1] - x := v.Args[0] + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + x := v.Args[2] + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (ADDshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + x := v.Args[2] + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MADD a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && c>=3 + // result: (ADD a (ADDshiftLL x x [log2(c-1)])) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c-1) && c >= 3) { + break + } + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + // match: (MADD a (MOVDconst [c]) x) + // cond: 
isPowerOfTwo(c+1) && c>=7 + // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c+1) && c >= 7) { break } - v.reset(OpARM64SLLconst) - v.Type = x.Type - v.AuxInt = log2(c / 7) + v.reset(OpARM64SUB) + v.AddArg(a) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = log2(c + 1) v0.AddArg(x) v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + // match: (MADD a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - _ = v.Args[1] - x := v.Args[0] + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + x := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + // match: (MADD a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != 
OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + c := v_1.AuxInt + x := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - v.reset(OpARM64NEG) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) - v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 - v1.AddArg(x) - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MNEGW_20(v *Value) bool { - // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [-int64(int32(c)*int32(d))]) + // match: (MADD a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -int64(int32(c) * int32(d)) + c := v_1.AuxInt + x := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MNEGW (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [-int64(int32(c)*int32(d))]) + // match: (MADD a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt + _ = v.Args[2] + a := v.Args[0] v_1 := 
v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -int64(int32(c) * int32(d)) + x := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } return false } -func rewriteValueARM64_OpARM64MOD_0(v *Value) bool { - // match: (MOD (MOVDconst [c]) (MOVDconst [d])) +func rewriteValueARM64_OpARM64MADD_20(v *Value) bool { + b := v.Block + _ = b + // match: (MADD (MOVDconst [c]) x y) // cond: - // result: (MOVDconst [c%d]) + // result: (ADDconst [c] (MUL x y)) for { - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c % d + x := v.Args[1] + y := v.Args[2] + v.reset(OpARM64ADDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MODW_0(v *Value) bool { - // match: (MODW (MOVDconst [c]) (MOVDconst [d])) + // match: (MADD a (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (MOVDconst [int64(int32(c)%int32(d))]) + // result: (ADDconst [c*d] a) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) % int32(d)) + c := v_1.AuxInt + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { + break + } + d := v_2.AuxInt + v.reset(OpARM64ADDconst) + v.AuxInt = c * d + v.AddArg(a) return true } return false } -func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { +func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool { b 
:= v.Block _ = b - config := b.Func.Config - _ = config - // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBUload [off1+off2] {sym} ptr mem) + // match: (MADDW a x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (SUB a x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_2.AuxInt + if !(int32(c) == -1) { break } - v.reset(OpARM64MOVBUload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) return true } - // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVBUloadidx ptr idx mem) + // match: (MADDW a _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: a for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + _ = v.Args[2] + a := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + c := v_2.AuxInt + if !(int32(c) == 0) { break } - v.reset(OpARM64MOVBUloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) return true } - // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MADDW a x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (ADD a x) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - 
v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_2.AuxInt + if !(int32(c) == 1) { break } - v.reset(OpARM64MOVBUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) return true } - // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (ADDshiftLL a x [log2(c)]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVBstorezero { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + c := v_2.AuxInt + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { - // match: (MOVBUloadidx ptr (MOVDconst [c]) mem) - // cond: - // result: (MOVBUload [c] ptr mem) + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c)>=3 + // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVBUload) - v.AuxInt = c 
- v.AddArg(ptr) - v.AddArg(mem) + c := v_2.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) - // cond: - // result: (MOVBUload [c] ptr mem) + // match: (MADDW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c)>=7 + // result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBUload) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + c := v_2.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MADDW a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] + a := v.Args[0] + x := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVBstorezeroidx { + if v_2.Op != OpARM64MOVDconst { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + c := v_2.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, 
OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { - // match: (MOVBUreg x:(MOVBUload _ _)) - // cond: - // result: (MOVDreg x) + // match: (MADDW a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUload { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_2.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBUreg x:(MOVBUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) + // match: (MADDW a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUloadidx { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_2.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBUreg x:(MOVBUreg _)) - // cond: - // result: (MOVDreg x) + // match: (MADDW a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUreg { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 
:= v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_2.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBUreg (ANDconst [c] x)) - // cond: - // result: (ANDconst [c&(1<<8-1)] x) + return false +} +func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool { + b := v.Block + _ = b + // match: (MADDW a (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (SUB a x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<8 - 1) + c := v_1.AuxInt + x := v.Args[2] + if !(int32(c) == -1) { + break + } + v.reset(OpARM64SUB) + v.AddArg(a) v.AddArg(x) return true } - // match: (MOVBUreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(uint8(c))]) + // match: (MADDW a (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: a for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint8(c)) + c := v_1.AuxInt + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) return true } - // match: (MOVBUreg x) - // cond: x.Type.IsBoolean() - // result: (MOVDreg x) + // match: (MADDW a (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: (ADD a x) for { - x := v.Args[0] - if !(x.Type.IsBoolean()) { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) + c := v_1.AuxInt + x := v.Args[2] + if !(int32(c) == 1) { + break + } + v.reset(OpARM64ADD) + v.AddArg(a) 
v.AddArg(x) return true } - // match: (MOVBUreg (SLLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<8-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (ADDshiftLL a x [log2(c)]) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<8-1, sc)) { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) v.AddArg(x) return true } - // match: (MOVBUreg (SRLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<8-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 8)] x) + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && int32(c)>=3 + // result: (ADD a (ADDshiftLL x x [log2(c-1)])) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<8-1, 0)) { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 8) - v.AddArg(x) + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBload [off1+off2] {sym} ptr mem) + // match: (MADDW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && int32(c)>=7 + // 
result: (SUB a (SUBshiftLL x x [log2(c+1)])) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - v.reset(OpARM64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBload [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVBloadidx ptr idx mem) + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + c := v_1.AuxInt + x := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - v.reset(OpARM64MOVBloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MADDW a (MOVDconst [c]) x) + // cond: 
c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_1.AuxInt + x := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - v.reset(OpARM64MOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] + _ = v.Args[2] + a := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVBstorezero { + if v_1.Op != OpARM64MOVDconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + c := v_1.AuxInt + x := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { - // match: 
(MOVBloadidx ptr (MOVDconst [c]) mem) - // cond: - // result: (MOVBload [c] ptr mem) + // match: (MADDW a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) for { _ = v.Args[2] - ptr := v.Args[0] + a := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVBload) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBloadidx (MOVDconst [c]) ptr mem) + return false +} +func rewriteValueARM64_OpARM64MADDW_20(v *Value) bool { + b := v.Block + _ = b + // match: (MADDW (MOVDconst [c]) x y) // cond: - // result: (MOVBload [c] ptr mem) + // result: (ADDconst [c] (MULW x y)) for { _ = v.Args[2] v_0 := v.Args[0] @@ -6396,1593 +7786,2504 @@ func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { break } c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBload) + x := v.Args[1] + y := v.Args[2] + v.reset(OpARM64ADDconst) v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) - return true + v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true } - // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MADDW a (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (ADDconst [int64(int32(c)*int32(d))] a) for { _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVBstorezeroidx { + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = v_2.Args[2] - ptr2 
:= v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + c := v_1.AuxInt + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + d := v_2.AuxInt + v.reset(OpARM64ADDconst) + v.AuxInt = int64(int32(c) * int32(d)) + v.AddArg(a) return true } return false } -func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { - // match: (MOVBreg x:(MOVBload _ _)) +func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool { + b := v.Block + _ = b + // match: (MNEG x (MOVDconst [-1])) // cond: - // result: (MOVDreg x) + // result: x for { + _ = v.Args[1] x := v.Args[0] - if x.Op != OpARM64MOVBload { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) + if v_1.AuxInt != -1 { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (MOVBreg x:(MOVBloadidx _ _ _)) + // match: (MNEG (MOVDconst [-1]) x) // cond: - // result: (MOVDreg x) + // result: x for { - x := v.Args[0] - if x.Op != OpARM64MOVBloadidx { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) + if v_0.AuxInt != -1 { + break + } + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (MOVBreg x:(MOVBreg _)) + // match: (MNEG _ (MOVDconst [0])) // cond: - // result: (MOVDreg x) + // result: (MOVDconst [0]) for { - x := v.Args[0] - if x.Op != OpARM64MOVBreg { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + if v_1.AuxInt != 0 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVBreg (MOVDconst [c])) + // match: (MNEG (MOVDconst [0]) _) // cond: - // result: (MOVDconst [int64(int8(c))]) + // result: (MOVDconst [0]) for { + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + if 
v_0.AuxInt != 0 { + break + } v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = 0 return true } - // match: (MOVBreg (SLLconst [lc] x)) - // cond: lc < 8 - // result: (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x) + // match: (MNEG x (MOVDconst [1])) + // cond: + // result: (NEG x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - lc := v_0.AuxInt - x := v_0.Args[0] - if !(lc < 8) { + if v_1.AuxInt != 1 { break } - v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 8-lc) + v.reset(OpARM64NEG) v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstore [off1+off2] {sym} ptr val mem) + // match: (MNEG (MOVDconst [1]) x) + // cond: + // result: (NEG x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + if v_0.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if v_0.AuxInt != 1 { break } - v.reset(OpARM64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[1] + v.reset(OpARM64NEG) + v.AddArg(x) return true } - // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) - // cond: off == 0 && sym == nil - // result: (MOVBstoreidx ptr idx val mem) + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (NEG (SLLconst [log2(c)] x)) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(off == 0 && sym == nil) { + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (MNEG (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (NEG (SLLconst [log2(c)] x)) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if v_0.Op != OpARM64MOVDconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64MOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) - // cond: - // result: (MOVBstorezero [off] {sym} ptr mem) + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && c >= 3 + // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != 0 { + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { 
break } - mem := v.Args[2] - v.reset(OpARM64MOVBstorezero) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + // match: (MNEG (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && c >= 3 + // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVBreg { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVBUreg { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c-1) && c >= 3) { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + return false +} +func rewriteValueARM64_OpARM64MNEG_10(v *Value) bool { + b := v.Block + _ = b + // match: (MNEG x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != 
OpARM64MOVHreg { + if v_1.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVHUreg { + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) + // match: (MNEG (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWreg { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) - // cond: - // result: (MOVBstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c+1) && c >= 7) { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := 
b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { - // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w mem) + // match: (MNEG x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 8 { + if v_1.Op != OpARM64MOVDconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if x.AuxInt != i-1 { + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEG (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARM64MOVDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) + // match: (MNEG (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_0.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != 8 { + c := v_0.AuxInt + x := v.Args[1] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MNEG x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + c 
:= v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEG (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7)) { + break + } + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w mem) + // match: (MNEG x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - if v_1.AuxInt != arm64BFAuxInt(8, 8) { + if v_1.Op != OpARM64MOVDconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - if x.AuxInt != i-1 { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + 
v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MNEG (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MNEG_20(v *Value) bool { + // match: (MNEG (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [-c*d]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -c * d return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) + // match: (MNEG (MOVDconst [d]) (MOVDconst [c])) + // cond: + // result: (MOVDconst [-c*d]) for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] + d := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { + if v_1.Op != 
OpARM64MOVDconst { break } - if v_1.AuxInt != arm64BFAuxInt(8, 8) { + c := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -c * d + return true + } + return false +} +func rewriteValueARM64_OpARM64MNEGW_0(v *Value) bool { + b := v.Block + _ = b + // match: (MNEGW x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + c := v_1.AuxInt + if !(int32(c) == -1) { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: x + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == -1) { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w mem) + // match: (MNEGW _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: (MOVDconst [0]) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + _ = v.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - if v_1.AuxInt != arm64BFAuxInt(8, 24) { + if v_1.Op != OpARM64MOVDconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + c := v_1.AuxInt + if !(int32(c) == 0) { break } - if x.AuxInt != i-1 { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: 
(MNEGW (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.Aux != s { + c := v_0.AuxInt + if !(int32(c) == 0) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MNEGW x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (NEG x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + c := v_1.AuxInt + if !(int32(c) == 1) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64NEG) + v.AddArg(x) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) + // match: (MNEGW (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: (NEG x) for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == 1) { break } - if v_1.AuxInt != arm64BFAuxInt(8, 24) { + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (NEG (SLLconst [log2(c)] x)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { break } - _ = 
x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (NEG (SLLconst [log2(c)] x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w mem) + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (NEG (ADDshiftLL x x [log2(c-1)])) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 8 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { + if v_1.Op != OpARM64MOVDconst { break } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - if x.AuxInt != i-1 { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (NEG (ADDshiftLL x x [log2(c-1)])) + for { + _ = 
v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MNEGW_10(v *Value) bool { + b := v.Block + _ = b + // match: (MNEGW x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w mem) + // match: (MNEGW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) for { - if v.AuxInt != 1 { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] + 
v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + for { + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != 8 { + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + c := v_0.AuxInt + x := v.Args[1] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) 
&& clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w0 mem) + // match: (MNEGW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVDconst { break } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - if x.AuxInt != i-1 { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - w0 := x.Args[1] - if w0.Op != OpARM64SRLconst { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MNEGW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if w0.AuxInt != j-8 { + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - if w != w0.Args[0] { + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := 
b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) - v.AddArg(mem) + v.reset(OpARM64SLLconst) + v.Type = x.Type + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) + // match: (MNEGW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) for { - if v.AuxInt != 1 { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (MNEGW (MOVDconst [c]) x) + // 
cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + c := v_0.AuxInt + x := v.Args[1] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { + v.reset(OpARM64NEG) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MNEGW_20(v *Value) bool { + // match: (MNEGW (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [-int64(int32(c)*int32(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if w0.AuxInt != j-8 { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if w != w0.Args[0] { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -int64(int32(c) * int32(d)) + return true + } + // match: (MNEGW (MOVDconst [d]) (MOVDconst [c])) + // cond: + // result: (MOVDconst [-int64(int32(c)*int32(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + c := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -int64(int32(c) * int32(d)) return true } return false } -func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { - b := v.Block - _ = b - // 
match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w0 mem) +func rewriteValueARM64_OpARM64MOD_0(v *Value) bool { + // match: (MOD (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [c%d]) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - bfc := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.AuxInt != i-1 { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x.Aux != s { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c % d + return true + } + return false +} +func rewriteValueARM64_OpARM64MODW_0(v *Value) bool { + // match: (MODW (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c)%int32(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = x.Args[2] - ptr1 := x.Args[0] - w0 := x.Args[1] - if w0.Op != OpARM64UBFX { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - bfc2 := w0.AuxInt - if w != w0.Args[0] { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) % int32(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBUload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if 
v_0.Op != OpARM64ADDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) + v.reset(OpARM64MOVBUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) + // match: (MOVBUload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBUloadidx ptr idx mem) for { - if v.AuxInt != 1 { - break - } - s := v.Aux - _ = v.Args[2] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - bfc := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64UBFX { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { break } - bfc2 := w0.AuxInt - if w != w0.Args[0] { + v.reset(OpARM64MOVBUloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} 
ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) + v.reset(OpARM64MOVBUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstore [i-1] {s} ptr0 w0 mem) + // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - _ = x.Args[2] - ptr1 := x.Args[0] - w0 := x.Args[1] - if w0.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVBstorezero { break } - if 
w0.AuxInt != j-8 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - w0_0 := w0.Args[0] - if w0_0.Op != OpARM64MOVDreg { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MOVBUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read8(sym, off))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { break } - if w != w0_0.Args[0] { + if !(symIsRO(sym)) { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(read8(sym, off)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBUloadidx_0(v *Value) bool { + // match: (MOVBUloadidx ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVBUload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVBUload) + v.AuxInt = c + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr1 idx1 w0 mem) + // match: (MOVBUloadidx (MOVDconst [c]) ptr mem) + // cond: + // result: (MOVBUload [c] ptr mem) for { - if v.AuxInt != 1 { - break - } - s := v.Aux _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64MOVDconst { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + 
v.reset(OpARM64MOVBUload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVBstorezeroidx { break } - j := v_1.AuxInt - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - w0_0 := w0.Args[0] - if w0_0.Op != OpARM64MOVDreg { - break - } - if w != w0_0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVDstore [i-7] {s} ptr (REV w) mem) + 
return false +} +func rewriteValueARM64_OpARM64MOVBUreg_0(v *Value) bool { + // match: (MOVBUreg x:(MOVBUload _ _)) + // cond: + // result: (MOVDreg x) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore { + x := v.Args[0] + if x.Op != OpARM64MOVBUload { break } - if x0.AuxInt != i-1 { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUloadidx { break } - if x0.Aux != s { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUreg { break } - _ = x0.Args[2] - if ptr != x0.Args[0] { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c&(1<<8-1)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64ANDconst { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst { + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = c & (1<<8 - 1) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(uint8(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x0_1.AuxInt != 8 { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint8(c)) + return true + } + // match: (MOVBUreg x) + // cond: x.Type.IsBoolean() + // result: (MOVDreg x) + for { + x := v.Args[0] + if !(x.Type.IsBoolean()) { break } - if w != x0_1.Args[0] { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (SLLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<8-1, sc) + // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - x1 := 
x0.Args[2] - if x1.Op != OpARM64MOVBstore { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<8-1, sc)) { break } - if x1.AuxInt != i-2 { + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) + v.AddArg(x) + return true + } + // match: (MOVBUreg (SRLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<8-1, 0) + // result: (UBFX [arm64BFAuxInt(sc, 8)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { break } - if x1.Aux != s { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<8-1, 0)) { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, 8) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if x1_1.AuxInt != 16 { + v.reset(OpARM64MOVBload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBloadidx ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if w != x1_1.Args[0] { + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstore { + v.reset(OpARM64MOVBloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + 
return true + } + // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - if x2.AuxInt != i-3 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if x2.Aux != s { + v.reset(OpARM64MOVBload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVBstorezero { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64SRLconst { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBloadidx_0(v *Value) bool { + // match: (MOVBloadidx ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVBload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x2_1.AuxInt != 24 { + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVBload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx (MOVDconst [c]) ptr mem) + // cond: + // result: (MOVBload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if w != x2_1.Args[0] { + 
c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVBload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVBstorezeroidx { break } - x3 := x2.Args[2] - if x3.Op != OpARM64MOVBstore { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - if x3.AuxInt != i-4 { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBreg_0(v *Value) bool { + // match: (MOVBreg x:(MOVBload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBload { break } - if x3.Aux != s { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBloadidx { break } - _ = x3.Args[2] - if ptr != x3.Args[0] { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBreg { break } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64SRLconst { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int8(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x3_1.AuxInt != 32 { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int8(c)) + return true + } + // match: (MOVBreg (SLLconst [lc] x)) + // cond: lc < 8 + // result: (SBFIZ [arm64BFAuxInt(lc, 8-lc)] x) + for { + 
v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - if w != x3_1.Args[0] { + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc < 8) { break } - x4 := x3.Args[2] - if x4.Op != OpARM64MOVBstore { + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BFAuxInt(lc, 8-lc) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - if x4.AuxInt != i-5 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if x4.Aux != s { + v.reset(OpARM64MOVBstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVBstoreidx ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - _ = x4.Args[2] - if ptr != x4.Args[0] { + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { break } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64SRLconst { + v.reset(OpARM64MOVBstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + 
v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - if x4_1.AuxInt != 40 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if w != x4_1.Args[0] { + v.reset(OpARM64MOVBstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) + // cond: + // result: (MOVBstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x5 := x4.Args[2] - if x5.Op != OpARM64MOVBstore { + if v_1.AuxInt != 0 { break } - if x5.AuxInt != i-6 { + mem := v.Args[2] + v.reset(OpARM64MOVBstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVBreg { break } - if x5.Aux != s { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVBUreg { break } - _ = x5.Args[2] - if ptr != x5.Args[0] { - break - } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64SRLconst { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: 
(MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVHreg { break } - if x5_1.AuxInt != 48 { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVHUreg { break } - if w != x5_1.Args[0] { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWreg { break } - x6 := x5.Args[2] - if x6.Op != OpARM64MOVBstore { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWUreg { break } - if x6.AuxInt != i-7 { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstore_10(v *Value) bool { + // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w mem) + for { + i := v.AuxInt + s := 
v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if x6.Aux != s { + if v_1.AuxInt != 8 { break } - _ = x6.Args[2] - if ptr != x6.Args[0] { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64SRLconst { + if x.AuxInt != i-1 { break } - if x6_1.AuxInt != 56 { + if x.Aux != s { break } - if w != x6_1.Args[0] { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - mem := x6.Args[2] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = i - 7 + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(ptr0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVDstoreidx ptr0 idx0 (REV w) mem) + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && 
(isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w mem) for { - if v.AuxInt != 7 { + if v.AuxInt != 1 { break } s := v.Aux _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x0.AuxInt != 6 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if x0.Aux != s { + if v_1.AuxInt != 8 { break } - _ = x0.Args[2] - if p != x0.Args[0] { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - if x0_1.AuxInt != 8 { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - if w != x0_1.Args[0] { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore { + if v_1.AuxInt != arm64BFAuxInt(8, 8) { break } - if x1.AuxInt != 5 { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if x1.Aux != s { + if x.AuxInt != i-1 { break } - _ = x1.Args[2] - if p != x1.Args[0] { + if x.Aux != s { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - if x1_1.AuxInt != 16 { + mem := x.Args[2] + if 
!(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - if w != x1_1.Args[0] { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w mem) + for { + if v.AuxInt != 1 { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstore { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x2.AuxInt != 4 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - if x2.Aux != s { + if v_1.AuxInt != arm64BFAuxInt(8, 8) { break } - _ = x2.Args[2] - if p != x2.Args[0] { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64SRLconst { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - if x2_1.AuxInt != 24 { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - if w != x2_1.Args[0] { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - x3 := x2.Args[2] - if x3.Op != OpARM64MOVBstore { + if v_1.AuxInt != arm64BFAuxInt(8, 24) { break } - if x3.AuxInt != 3 { + w := v_1.Args[0] + x := 
v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if x3.Aux != s { + if x.AuxInt != i-1 { break } - _ = x3.Args[2] - if p != x3.Args[0] { + if x.Aux != s { break } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64SRLconst { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - if x3_1.AuxInt != 32 { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - if w != x3_1.Args[0] { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w mem) + for { + if v.AuxInt != 1 { break } - x4 := x3.Args[2] - if x4.Op != OpARM64MOVBstore { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x4.AuxInt != 2 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - if x4.Aux != s { + if v_1.AuxInt != arm64BFAuxInt(8, 24) { break } - _ = x4.Args[2] - if p != x4.Args[0] { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64SRLconst { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - if x4_1.AuxInt != 40 { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - if w != x4_1.Args[0] { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) 
+ // result: (MOVHstore [i-1] {s} ptr0 w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - x5 := x4.Args[2] - if x5.Op != OpARM64MOVBstore { + if v_1.AuxInt != 8 { break } - if x5.AuxInt != 1 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { break } - if x5.Aux != s { + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - _ = x5.Args[2] - p1 := x5.Args[0] - if p1.Op != OpARM64ADD { + if x.AuxInt != i-1 { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64SRLconst { + if x.Aux != s { break } - if x5_1.AuxInt != 48 { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - if w != x5_1.Args[0] { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - x6 := x5.Args[2] - if x6.Op != OpARM64MOVBstoreidx { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w mem) + for { + if v.AuxInt != 1 { break } - _ = x6.Args[3] - ptr0 := x6.Args[0] - idx0 := x6.Args[1] - x6_2 := x6.Args[2] - if x6_2.Op != OpARM64SRLconst { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x6_2.AuxInt != 56 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if w != x6_2.Args[0] { + if v_1.AuxInt != 8 { break } - mem := x6.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, 
idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV, w.Type) - v0.AddArg(w) - v.AddArg(v0) + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + break + } + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) + // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w0 mem) for { i := v.AuxInt s := v.Aux _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore { + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if x0.AuxInt != i-1 { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if x0.Aux != s { + if x.AuxInt != i-1 { break } - _ = x0.Args[2] - if ptr != x0.Args[0] { + if x.Aux != s { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64UBFX { + _ = x.Args[2] + ptr1 := x.Args[0] + w0 := 
x.Args[1] + if w0.Op != OpARM64SRLconst { break } - if x0_1.AuxInt != arm64BFAuxInt(8, 24) { + if w0.AuxInt != j-8 { break } - if w != x0_1.Args[0] { + if w != w0.Args[0] { break } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - if x1.AuxInt != i-2 { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w0 mem) + for { + if v.AuxInt != 1 { break } - if x1.Aux != s { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64UBFX { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - if x1_1.AuxInt != arm64BFAuxInt(16, 16) { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst { break } - if w != x1_1.Args[0] { + if w0.AuxInt != j-8 { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstore { + if w != w0.Args[0] { break } - if x2.AuxInt != i-3 { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - if x2.Aux != s { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBstore [i] 
{s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + bfc := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64UBFX { + if x.AuxInt != i-1 { break } - if x2_1.AuxInt != arm64BFAuxInt(24, 8) { + if x.Aux != s { break } - if w != x2_1.Args[0] { + _ = x.Args[2] + ptr1 := x.Args[0] + w0 := x.Args[1] + if w0.Op != OpARM64UBFX { break } - mem := x2.Args[2] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + bfc2 := w0.AuxInt + if w != w0.Args[0] { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 3 + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(ptr0) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && 
clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32 - getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32 - getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc) - 8 && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w0 mem) for { - if v.AuxInt != 3 { + if v.AuxInt != 1 { break } s := v.Aux _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x0.AuxInt != 2 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - if x0.Aux != s { + bfc := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - _ = x0.Args[2] - if p != x0.Args[0] { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64UBFX { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64UBFX { + bfc2 := w0.AuxInt + if w != w0.Args[0] { break } - if x0_1.AuxInt != arm64BFAuxInt(8, 24) { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && getARM64BFwidth(bfc) == 32-getARM64BFlsb(bfc) && getARM64BFwidth(bfc2) == 32-getARM64BFlsb(bfc2) && getARM64BFlsb(bfc2) == getARM64BFlsb(bfc)-8 && clobber(x)) { break } - if w != x0_1.Args[0] { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstore [i-1] {s} ptr0 w0 mem) 
+ for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore { + j := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { break } - if x1.AuxInt != 1 { + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if x1.Aux != s { + if x.AuxInt != i-1 { break } - _ = x1.Args[2] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if x.Aux != s { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64UBFX { + _ = x.Args[2] + ptr1 := x.Args[0] + w0 := x.Args[1] + if w0.Op != OpARM64SRLconst { break } - if x1_1.AuxInt != arm64BFAuxInt(16, 16) { + if w0.AuxInt != j-8 { break } - if w != x1_1.Args[0] { + w0_0 := w0.Args[0] + if w0_0.Op != OpARM64MOVDreg { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { + if w != w0_0.Args[0] { break } - _ = x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64UBFX { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr1 idx1 w0 mem) + for { + if v.AuxInt != 1 { break } - if w != x2_2.Args[0] { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - mem := x2.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && 
isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) + j := v_1.AuxInt + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { + break + } + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { + break + } + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + w0_0 := w0.Args[0] + if w0_0.Op != OpARM64MOVDreg { + break + } + if w != w0_0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) + // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem)))))))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && 
clobber(x6) + // result: (MOVDstore [i-7] {s} ptr (REV w) mem) for { i := v.AuxInt s := v.Aux @@ -8010,11 +10311,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x0_1.AuxInt != 8 { break } - x0_1_0 := x0_1.Args[0] - if x0_1_0.Op != OpARM64MOVDreg { - break - } - if w != x0_1_0.Args[0] { + if w != x0_1.Args[0] { break } x1 := x0.Args[2] @@ -8038,11 +10335,7 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x1_1.AuxInt != 16 { break } - x1_1_0 := x1_1.Args[0] - if x1_1_0.Op != OpARM64MOVDreg { - break - } - if w != x1_1_0.Args[0] { + if w != x1_1.Args[0] { break } x2 := x1.Args[2] @@ -8066,158 +10359,142 @@ func rewriteValueARM64_OpARM64MOVBstore_20(v *Value) bool { if x2_1.AuxInt != 24 { break } - x2_1_0 := x2_1.Args[0] - if x2_1_0.Op != OpARM64MOVDreg { + if w != x2_1.Args[0] { break } - if w != x2_1_0.Args[0] { + x3 := x2.Args[2] + if x3.Op != OpARM64MOVBstore { break } - mem := x2.Args[2] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + if x3.AuxInt != i-4 { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) - for { - if v.AuxInt != 3 { + if x3.Aux != s { break } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != OpARM64MOVBstore { + _ = x3.Args[2] + if ptr != x3.Args[0] { break } - if x0.AuxInt != 
2 { + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64SRLconst { break } - if x0.Aux != s { + if x3_1.AuxInt != 32 { break } - _ = x0.Args[2] - if p != x0.Args[0] { + if w != x3_1.Args[0] { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst { + x4 := x3.Args[2] + if x4.Op != OpARM64MOVBstore { break } - if x0_1.AuxInt != 8 { + if x4.AuxInt != i-5 { break } - x0_1_0 := x0_1.Args[0] - if x0_1_0.Op != OpARM64MOVDreg { + if x4.Aux != s { break } - if w != x0_1_0.Args[0] { + _ = x4.Args[2] + if ptr != x4.Args[0] { break } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore { + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64SRLconst { break } - if x1.AuxInt != 1 { + if x4_1.AuxInt != 40 { break } - if x1.Aux != s { + if w != x4_1.Args[0] { break } - _ = x1.Args[2] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + x5 := x4.Args[2] + if x5.Op != OpARM64MOVBstore { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst { + if x5.AuxInt != i-6 { break } - if x1_1.AuxInt != 16 { + if x5.Aux != s { break } - x1_1_0 := x1_1.Args[0] - if x1_1_0.Op != OpARM64MOVDreg { + _ = x5.Args[2] + if ptr != x5.Args[0] { break } - if w != x1_1_0.Args[0] { + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64SRLconst { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { + if x5_1.AuxInt != 48 { break } - _ = x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst { + if w != x5_1.Args[0] { break } - if x2_2.AuxInt != 24 { + x6 := x5.Args[2] + if x6.Op != OpARM64MOVBstore { break } - x2_2_0 := x2_2.Args[0] - if x2_2_0.Op != OpARM64MOVDreg { + if x6.AuxInt != i-7 { break } - if w != x2_2_0.Args[0] { + if x6.Aux != s { break } - mem := x2.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + _ = 
x6.Args[2] + if ptr != x6.Args[0] { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) + x6_1 := x6.Args[1] + if x6_1.Op != OpARM64SRLconst { + break + } + if x6_1.AuxInt != 56 { + break + } + if w != x6_1.Args[0] { + break + } + mem := x6.Args[2] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = i - 7 + v.Aux = s + v.AddArg(ptr) + v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { - b := v.Block - _ = b - // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) + // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem)))))))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) + // result: (MOVDstoreidx ptr0 idx0 (REV w) mem) for { - i := v.AuxInt + if v.AuxInt != 7 { + break + } s := 
v.Aux _ = v.Args[2] - ptr := v.Args[0] + p := v.Args[0] w := v.Args[1] x0 := v.Args[2] if x0.Op != OpARM64MOVBstore { break } - if x0.AuxInt != i-1 { + if x0.AuxInt != 6 { break } if x0.Aux != s { break } _ = x0.Args[2] - if ptr != x0.Args[0] { + if p != x0.Args[0] { break } x0_1 := x0.Args[1] @@ -8234,14 +10511,14 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x1.Op != OpARM64MOVBstore { break } - if x1.AuxInt != i-2 { + if x1.AuxInt != 5 { break } if x1.Aux != s { break } _ = x1.Args[2] - if ptr != x1.Args[0] { + if p != x1.Args[0] { break } x1_1 := x1.Args[1] @@ -8258,14 +10535,14 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if x2.Op != OpARM64MOVBstore { break } - if x2.AuxInt != i-3 { + if x2.AuxInt != 4 { break } if x2.Aux != s { break } _ = x2.Args[2] - if ptr != x2.Args[0] { + if p != x2.Args[0] { break } x2_1 := x2.Args[1] @@ -8278,599 +10555,507 @@ func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { if w != x2_1.Args[0] { break } - mem := x2.Args[2] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + x3 := x2.Args[2] + if x3.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) - for { - if v.AuxInt != 3 { + if x3.AuxInt != 3 { break } - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - w := v.Args[1] - x0 := v.Args[2] - if x0.Op != 
OpARM64MOVBstore { + if x3.Aux != s { break } - if x0.AuxInt != 2 { + _ = x3.Args[2] + if p != x3.Args[0] { break } - if x0.Aux != s { + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64SRLconst { break } - _ = x0.Args[2] - if p != x0.Args[0] { + if x3_1.AuxInt != 32 { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64SRLconst { + if w != x3_1.Args[0] { break } - if x0_1.AuxInt != 8 { + x4 := x3.Args[2] + if x4.Op != OpARM64MOVBstore { break } - if w != x0_1.Args[0] { + if x4.AuxInt != 2 { break } - x1 := x0.Args[2] - if x1.Op != OpARM64MOVBstore { + if x4.Aux != s { break } - if x1.AuxInt != 1 { + _ = x4.Args[2] + if p != x4.Args[0] { break } - if x1.Aux != s { + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64SRLconst { break } - _ = x1.Args[2] - p1 := x1.Args[0] + if x4_1.AuxInt != 40 { + break + } + if w != x4_1.Args[0] { + break + } + x5 := x4.Args[2] + if x5.Op != OpARM64MOVBstore { + break + } + if x5.AuxInt != 1 { + break + } + if x5.Aux != s { + break + } + _ = x5.Args[2] + p1 := x5.Args[0] if p1.Op != OpARM64ADD { break } _ = p1.Args[1] ptr1 := p1.Args[0] idx1 := p1.Args[1] - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64SRLconst { + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64SRLconst { break } - if x1_1.AuxInt != 16 { + if x5_1.AuxInt != 48 { break } - if w != x1_1.Args[0] { + if w != x5_1.Args[0] { break } - x2 := x1.Args[2] - if x2.Op != OpARM64MOVBstoreidx { + x6 := x5.Args[2] + if x6.Op != OpARM64MOVBstoreidx { break } - _ = x2.Args[3] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64SRLconst { + _ = x6.Args[3] + ptr0 := x6.Args[0] + idx0 := x6.Args[1] + x6_2 := x6.Args[2] + if x6_2.Op != OpARM64SRLconst { break } - if x2_2.AuxInt != 24 { + if x6_2.AuxInt != 56 { break } - if w != x2_2.Args[0] { + if w != x6_2.Args[0] { break } - mem := x2.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) 
&& clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x6.Args[3] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { break } - v.reset(OpARM64MOVWstoreidx) + v.reset(OpARM64MOVDstoreidx) v.AddArg(ptr0) v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) + v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) + // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) for { i := v.AuxInt s := v.Aux _ = v.Args[2] ptr := v.Args[0] w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - if x.AuxInt != i-1 { + if x0.AuxInt != i-1 { break } - if x.Aux != s { + if x0.Aux != s { break } - _ = x.Args[2] - if ptr != x.Args[0] { + _ = x0.Args[2] + if ptr != x0.Args[0] { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64SRLconst { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64UBFX { break } - if x_1.AuxInt != 8 { + if x0_1.AuxInt != arm64BFAuxInt(8, 24) { break } - if w != x_1.Args[0] { + if w != x0_1.Args[0] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVHstore) - 
v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { + if x1.AuxInt != i-2 { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if x1.Aux != s { break } - _ = v_0.Args[1] - ptr1 := v_0.Args[0] - idx1 := v_0.Args[1] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - _ = x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64UBFX { break } - if x_2.AuxInt != 8 { + if x1_1.AuxInt != arm64BFAuxInt(16, 16) { break } - if w != x_2.Args[0] { + if w != x1_1.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + if x2.AuxInt != i-3 { + break + } + if x2.Aux != s { + break + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64UBFX { + break + } + if x2_1.AuxInt != arm64BFAuxInt(24, 8) { + break + } + if w != x2_1.Args[0] { + break + } + mem := x2.Args[2] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 3 + v.Aux = s + v.AddArg(ptr) + v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) 
v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) + // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) for { - i := v.AuxInt + if v.AuxInt != 3 { + break + } s := v.Aux _ = v.Args[2] - ptr := v.Args[0] + p := v.Args[0] w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - if x.AuxInt != i-1 { + if x0.AuxInt != 2 { break } - if x.Aux != s { + if x0.Aux != s { break } - _ = x.Args[2] - if ptr != x.Args[0] { + _ = x0.Args[2] + if p != x0.Args[0] { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64UBFX { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64UBFX { break } - if x_1.AuxInt != arm64BFAuxInt(8, 8) { + if x0_1.AuxInt != arm64BFAuxInt(8, 24) { break } - if w != x_1.Args[0] { + if w != x0_1.Args[0] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) 
- // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { + if x1.AuxInt != 1 { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if x1.Aux != s { break } - _ = v_0.Args[1] - ptr1 := v_0.Args[0] - idx1 := v_0.Args[1] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + _ = x1.Args[2] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - _ = x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64UBFX { break } - if x_2.AuxInt != arm64BFAuxInt(8, 8) { + if x1_1.AuxInt != arm64BFAuxInt(16, 16) { break } - if w != x_2.Args[0] { + if w != x1_1.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstoreidx { break } - v.reset(OpARM64MOVHstoreidx) + _ = x2.Args[3] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64UBFX { + break + } + if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + break + } + if w != x2_2.Args[0] { + break + } + mem := x2.Args[3] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr0) v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) + // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr 
(SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) for { i := v.AuxInt s := v.Aux _ = v.Args[2] ptr := v.Args[0] w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - if x.AuxInt != i-1 { + if x0.AuxInt != i-1 { break } - if x.Aux != s { + if x0.Aux != s { break } - _ = x.Args[2] - if ptr != x.Args[0] { + _ = x0.Args[2] + if ptr != x0.Args[0] { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64SRLconst { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64SRLconst { break } - if x_1.AuxInt != 8 { + if x0_1.AuxInt != 8 { break } - x_1_0 := x_1.Args[0] - if x_1_0.Op != OpARM64MOVDreg { + x0_1_0 := x0_1.Args[0] + if x0_1_0.Op != OpARM64MOVDreg { break } - if w != x_1_0.Args[0] { + if w != x0_1_0.Args[0] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { + if x1.AuxInt != i-2 { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if x1.Aux != s { break } - _ = v_0.Args[1] - ptr1 := v_0.Args[0] - idx1 := v_0.Args[1] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - _ = 
x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst { break } - if x_2.AuxInt != 8 { + if x1_1.AuxInt != 16 { break } - x_2_0 := x_2.Args[0] - if x_2_0.Op != OpARM64MOVDreg { + x1_1_0 := x1_1.Args[0] + if x1_1_0.Op != OpARM64MOVDreg { break } - if w != x_2_0.Args[0] { + if w != x1_1_0.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstore { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + if x2.AuxInt != i-3 { break } - if x.AuxInt != i-1 { + if x2.Aux != s { break } - if x.Aux != s { + _ = x2.Args[2] + if ptr != x2.Args[0] { break } - _ = x.Args[2] - if ptr != x.Args[0] { + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64SRLconst { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64UBFX { + if x2_1.AuxInt != 24 { break } - if x_1.AuxInt != arm64BFAuxInt(8, 24) { + x2_1_0 := x2_1.Args[0] + if x2_1_0.Op != OpARM64MOVDreg { break } - if w != x_1.Args[0] { + if w != x2_1_0.Args[0] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + mem := x2.Args[2] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 3 v.Aux = s v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) 
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) + // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) for { - if v.AuxInt != 1 { + if v.AuxInt != 3 { break } s := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr1 := v_0.Args[0] - idx1 := v_0.Args[1] + p := v.Args[0] w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstoreidx { + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - _ = x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX { + if x0.AuxInt != 2 { break } - if x_2.AuxInt != arm64BFAuxInt(8, 24) { + if x0.Aux != s { break } - if w != x_2.Args[0] { + _ = x0.Args[2] + if p != x0.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr0) - v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func 
rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { - b := v.Block - _ = b - // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpARM64MOVBstore { + if x0_1.AuxInt != 8 { break } - if x.AuxInt != i-1 { + x0_1_0 := x0_1.Args[0] + if x0_1_0.Op != OpARM64MOVDreg { break } - if x.Aux != s { + if w != x0_1_0.Args[0] { break } - _ = x.Args[2] - if ptr != x.Args[0] { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64SRLconst { + if x1.AuxInt != 1 { break } - if x_1.AuxInt != 8 { + if x1.Aux != s { break } - x_1_0 := x_1.Args[0] - if x_1_0.Op != OpARM64MOVDreg { + _ = x1.Args[2] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - if w != x_1_0.Args[0] { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + if x1_1.AuxInt != 16 { break } - v.reset(OpARM64MOVHstore) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) - for { - if v.AuxInt != 1 { + x1_1_0 := x1_1.Args[0] + if x1_1_0.Op != OpARM64MOVDreg { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if w != x1_1_0.Args[0] { break } - _ = v_0.Args[1] - ptr1 := v_0.Args[0] - idx1 := v_0.Args[1] - w := v.Args[1] - x := v.Args[2] - if x.Op != 
OpARM64MOVBstoreidx { + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstoreidx { break } - _ = x.Args[3] - ptr0 := x.Args[0] - idx0 := x.Args[1] - x_2 := x.Args[2] - if x_2.Op != OpARM64SRLconst { + _ = x2.Args[3] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64SRLconst { break } - if x_2.AuxInt != 8 { + if x2_2.AuxInt != 24 { break } - x_2_0 := x_2.Args[0] - if x_2_0.Op != OpARM64MOVDreg { + x2_2_0 := x2_2.Args[0] + if x2_2_0.Op != OpARM64MOVDreg { break } - if w != x_2_0.Args[0] { + if w != x2_2_0.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + mem := x2.Args[3] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { break } - v.reset(OpARM64MOVHstoreidx) + v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr0) v.AddArg(idx0) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) @@ -8878,474 +11063,362 @@ func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { - // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) - // cond: - // result: (MOVBstore [c] ptr val mem) +func rewriteValueARM64_OpARM64MOVBstore_30(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstore [i-3] {s} ptr (REVW w) mem) for { - _ = v.Args[3] + i := v.AuxInt + s := v.Aux + _ = v.Args[2] ptr := v.Args[0] - 
v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + w := v.Args[1] + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVBstore) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) - // cond: - // result: (MOVBstore [c] idx val mem) - for { - _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if x0.AuxInt != i-1 { break } - c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVBstore) - v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVBstorezeroidx ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + if x0.Aux != s { break } - if v_2.AuxInt != 0 { + _ = x0.Args[2] + if ptr != x0.Args[0] { break } - mem := v.Args[3] - v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVBreg { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64SRLconst { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVBUreg { + if x0_1.AuxInt != 8 { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx 
(MOVHreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHreg { + if w != x0_1.Args[0] { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHUreg { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWreg { + if x1.AuxInt != i-2 { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) - // cond: - // result: (MOVBstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWUreg { + if x1.Aux != s { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVBstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx ptr idx w mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - if v_1.AuxInt != 1 { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst { break } - idx 
:= v_1.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpARM64SRLconst { + if x1_1.AuxInt != 16 { break } - if v_2.AuxInt != 8 { + if w != x1_1.Args[0] { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpARM64MOVBstoreidx { + x2 := x1.Args[2] + if x2.Op != OpARM64MOVBstore { break } - _ = x.Args[3] - if ptr != x.Args[0] { + if x2.AuxInt != i-3 { break } - if idx != x.Args[1] { + if x2.Aux != s { break } - if w != x.Args[2] { + _ = x2.Args[2] + if ptr != x2.Args[0] { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { - b := v.Block - _ = b - // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr idx (REVW w) mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { + if x2_1.AuxInt != 24 { break } - if v_1.AuxInt != 3 { + if w != x2_1.Args[0] { break } - idx := v_1.Args[0] - w := v.Args[2] - x0 := v.Args[3] - if x0.Op != OpARM64MOVBstoreidx { + mem := x2.Args[2] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { break } - _ = x0.Args[3] - if ptr != x0.Args[0] { + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 3 + v.Aux = s + v.AddArg(ptr) + v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) 
x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstoreidx ptr0 idx0 (REVW w) mem) + for { + if v.AuxInt != 3 { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + w := v.Args[1] + x0 := v.Args[2] + if x0.Op != OpARM64MOVBstore { break } - if x0_1.AuxInt != 2 { + if x0.AuxInt != 2 { break } - if idx != x0_1.Args[0] { + if x0.Aux != s { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpARM64UBFX { + _ = x0.Args[2] + if p != x0.Args[0] { break } - if x0_2.AuxInt != arm64BFAuxInt(8, 24) { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64SRLconst { break } - if w != x0_2.Args[0] { + if x0_1.AuxInt != 8 { break } - x1 := x0.Args[3] - if x1.Op != OpARM64MOVBstoreidx { + if w != x0_1.Args[0] { break } - _ = x1.Args[3] - if ptr != x1.Args[0] { + x1 := x0.Args[2] + if x1.Op != OpARM64MOVBstore { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + if x1.AuxInt != 1 { break } - if x1_1.AuxInt != 1 { + if x1.Aux != s { break } - if idx != x1_1.Args[0] { + _ = x1.Args[2] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpARM64UBFX { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64SRLconst { break } - if x1_2.AuxInt != arm64BFAuxInt(16, 16) { + if x1_1.AuxInt != 16 { break } - if w != x1_2.Args[0] { + if w != x1_1.Args[0] { break } - x2 := x1.Args[3] + x2 := x1.Args[2] if x2.Op != OpARM64MOVBstoreidx { break } _ = x2.Args[3] - if ptr != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } + ptr0 := x2.Args[0] + idx0 := x2.Args[1] x2_2 := x2.Args[2] - if x2_2.Op != OpARM64UBFX { + if x2_2.Op != OpARM64SRLconst { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { 
+ if x2_2.AuxInt != 24 { break } if w != x2_2.Args[0] { break } mem := x2.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2)) { break } v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVWstoreidx ptr idx w mem) + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { - _ = v.Args[3] + i := v.AuxInt + s := v.Aux + _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x0 := v.Args[3] - if x0.Op != OpARM64MOVBstoreidx { - break - } - _ = x0.Args[3] - if ptr != x0.Args[0] { - break - } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if x0_1.AuxInt != 1 { + if x.AuxInt != i-1 { break } - if idx != x0_1.Args[0] { + if x.Aux != s { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpARM64UBFX { + _ = x.Args[2] + if ptr != x.Args[0] { break } - if x0_2.AuxInt != arm64BFAuxInt(8, 24) { + x_1 := x.Args[1] + if x_1.Op != OpARM64SRLconst { break } - if w != x0_2.Args[0] { + if x_1.AuxInt != 8 { break } - x1 := 
x0.Args[3] - if x1.Op != OpARM64MOVBstoreidx { + if w != x_1.Args[0] { break } - _ = x1.Args[3] - if ptr != x1.Args[0] { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(ptr) + v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) + for { + if v.AuxInt != 1 { break } - if x1_1.AuxInt != 2 { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if idx != x1_1.Args[0] { + _ = v_0.Args[1] + ptr1 := v_0.Args[0] + idx1 := v_0.Args[1] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpARM64UBFX { + _ = x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64SRLconst { break } - if x1_2.AuxInt != arm64BFAuxInt(16, 16) { + if x_2.AuxInt != 8 { break } - if w != x1_2.Args[0] { + if w != x_2.Args[0] { break } - x2 := x1.Args[3] - if x2.Op != OpARM64MOVBstoreidx { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - _ = x2.Args[3] - if ptr != x2.Args[0] { + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) + for { + i := 
v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst { + if x.AuxInt != i-1 { break } - if x2_1.AuxInt != 3 { + if x.Aux != s { break } - if idx != x2_1.Args[0] { + _ = x.Args[2] + if ptr != x.Args[0] { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpARM64UBFX { + x_1 := x.Args[1] + if x_1.Op != OpARM64UBFX { break } - if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + if x_1.AuxInt != arm64BFAuxInt(8, 8) { break } - if w != x2_2.Args[0] { + if w != x_1.Args[0] { break } - mem := x2.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpARM64MOVWstoreidx) + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) + v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx ptr idx (REV16W w) mem) + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { + if v.AuxInt != 1 { break } - if v_1.AuxInt != 1 { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - idx := v_1.Args[0] - w := v.Args[2] - x := v.Args[3] + _ = v_0.Args[1] + ptr1 := v_0.Args[0] + idx1 := v_0.Args[1] + w := v.Args[1] + x := v.Args[2] if x.Op != OpARM64MOVBstoreidx { break } _ = x.Args[3] - if ptr != x.Args[0] { - 
break - } - if idx != x.Args[1] { - break - } + ptr0 := x.Args[0] + idx0 := x.Args[1] x_2 := x.Args[2] if x_2.Op != OpARM64UBFX { break @@ -9357,1106 +11430,1009 @@ func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { break } mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } v.reset(OpARM64MOVHstoreidx) - v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(ptr0) + v.AddArg(idx0) v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx ptr idx w mem) + // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { - _ = v.Args[3] + i := v.AuxInt + s := v.Aux + _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpARM64MOVBstoreidx { + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - _ = x.Args[3] - if ptr != x.Args[0] { + if x.AuxInt != i-1 { break } - x_1 := x.Args[1] - if x_1.Op != OpARM64ADDconst { + if x.Aux != s { break } - if x_1.AuxInt != 1 { + _ = x.Args[2] + if ptr != x.Args[0] { break } - if idx != x_1.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpARM64SRLconst { break } - x_2 := x.Args[2] - if x_2.Op != OpARM64UBFX { + if x_1.AuxInt != 8 { break } - if x_2.AuxInt != arm64BFAuxInt(8, 8) { - break + x_1_0 := x_1.Args[0] + if x_1_0.Op != OpARM64MOVDreg { + break } - if w != x_2.Args[0] { + if w != x_1_0.Args[0] { break } - mem := x.Args[3] + mem := x.Args[2] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpARM64MOVHstoreidx) + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(w) + v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] + if v.AuxInt != 1 { + break + } + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + if v_0.Op != OpARM64ADD { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = v_0.Args[1] + ptr1 := v_0.Args[0] + idx1 := v_0.Args[1] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - v.reset(OpARM64MOVBstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + _ = x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64SRLconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op 
!= OpSB || !config.ctxt.Flag_shared)) { + if x_2.AuxInt != 8 { break } - v.reset(OpARM64MOVBstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVBstorezeroidx ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + x_2_0 := x_2.Args[0] + if x_2_0.Op != OpARM64MOVDreg { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + if w != x_2_0.Args[0] { break } - v.reset(OpARM64MOVBstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem)) - // cond: x.Uses == 1 && areAdjacentOffsets(i,j,1) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVHstorezero [min(i,j)] {s} ptr0 mem) + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [arm64BFAuxInt(8, 24)] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVBstorezero { + _ = v.Args[2] + ptr := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { + break + } + if x.AuxInt != i-1 { break } - j := x.AuxInt if x.Aux != s { break } - _ = x.Args[1] - ptr1 := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && areAdjacentOffsets(i, j, 1) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + _ = 
x.Args[2] + if ptr != x.Args[0] { break } - v.reset(OpARM64MOVHstorezero) - v.AuxInt = min(i, j) + x_1 := x.Args[1] + if x_1.Op != OpARM64UBFX { + break + } + if x_1.AuxInt != arm64BFAuxInt(8, 24) { + break + } + if w != x_1.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 v.Aux = s - v.AddArg(ptr0) + v.AddArg(ptr) + v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem)) + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [arm64BFAuxInt(8, 24)] w) mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVHstorezeroidx ptr1 idx1 mem) + // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { if v.AuxInt != 1 { break } s := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - x := v.Args[1] - if x.Op != OpARM64MOVBstorezeroidx { + ptr1 := v_0.Args[0] + idx1 := v_0.Args[1] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - _ = x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + _ = x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64UBFX { break } - v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { - // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem) - // cond: - // result: (MOVBstorezero [c] ptr mem) - for { - _ = v.Args[2] - ptr := v.Args[0] 
- v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if x_2.AuxInt != arm64BFAuxInt(8, 24) { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVBstorezero) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) - // cond: - // result: (MOVBstorezero [c] idx mem) - for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if w != x_2.Args[0] { break } - c := v_0.AuxInt - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBstorezero) - v.AuxInt = c - v.AddArg(idx) + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem)) + return false +} +func rewriteValueARM64_OpARM64MOVBstore_40(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstorezeroidx ptr idx mem) + // result: (MOVHstore [i-1] {s} ptr (REV16W w) mem) for { + i := v.AuxInt + s := v.Aux _ = v.Args[2] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstore { break } - if v_1.AuxInt != 1 { + if x.AuxInt != i-1 { break } - idx := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVBstorezeroidx { + if x.Aux != s { break } _ = x.Args[2] if ptr != x.Args[0] { break } - if idx != x.Args[1] { + x_1 := x.Args[1] + if x_1.Op != OpARM64SRLconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + if x_1.AuxInt != 8 { break } - v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return 
true - } - return false -} -func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + x_1_0 := x_1.Args[0] + if x_1_0.Op != OpARM64MOVDreg { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if w != x_1_0.Args[0] { break } - v.reset(OpARM64MOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstore) + v.AuxInt = i - 1 + v.Aux = s v.AddArg(ptr) + v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVDload [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVDloadidx ptr idx mem) + // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstoreidx ptr0 idx0 (REV16W w) mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] + if v.AuxInt != 1 { + break + } + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + ptr1 := v_0.Args[0] + idx1 := v_0.Args[1] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpARM64MOVBstoreidx { break } - v.reset(OpARM64MOVDloadidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) - 
// cond: off == 0 && sym == nil - // result: (MOVDloadidx8 ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + _ = x.Args[3] + ptr0 := x.Args[0] + idx0 := x.Args[1] + x_2 := x.Args[2] + if x_2.Op != OpARM64SRLconst { break } - if v_0.AuxInt != 3 { + if x_2.AuxInt != 8 { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + x_2_0 := x_2.Args[0] + if x_2_0.Op != OpARM64MOVDreg { break } - v.reset(OpARM64MOVDloadidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if w != x_2_0.Args[0] { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - v.reset(OpARM64MOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr0) + v.AddArg(idx0) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDstorezero { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ 
= v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } return false } -func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { - // match: (MOVDloadidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVBstoreidx_0(v *Value) bool { + // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVDload [c] ptr mem) + // result: (MOVBstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVDload) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVBstore) v.AuxInt = c v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVDloadidx (MOVDconst [c]) ptr mem) + // match: (MOVBstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVDload [c] ptr mem) + // result: (MOVBstore [c] idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDload) + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVBstore) v.AuxInt = c - v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) + // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem) // cond: - // result: (MOVDloadidx8 ptr idx mem) + // result: (MOVBstorezeroidx ptr idx mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != 3 { + if v_2.AuxInt != 0 { break } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVDloadidx8) + mem := v.Args[3] + v.reset(OpARM64MOVBstorezeroidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: 
(MOVDloadidx (SLLconst [3] idx) ptr mem) + // match: (MOVBstoreidx ptr idx (MOVBreg x) mem) // cond: - // result: (MOVDloadidx8 ptr idx mem) + // result: (MOVBstoreidx ptr idx x mem) for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - if v_0.AuxInt != 3 { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVBreg { break } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDloadidx8) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem) + // cond: + // result: (MOVBstoreidx ptr idx x mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDstorezeroidx { - break - } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + if v_2.Op != OpARM64MOVBUreg { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVBstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVDloadidx8_0(v *Value) bool { - // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem) + // match: (MOVBstoreidx ptr idx (MOVHreg x) mem) // cond: - // result: (MOVDload [c<<3] ptr mem) + // result: (MOVBstoreidx ptr idx x mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHreg { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVDload) - 
v.AuxInt = c << 3 + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) - // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) - // result: (MOVDconst [0]) + // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem) + // cond: + // result: (MOVBstoreidx ptr idx x mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDstorezeroidx8 { - break - } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVDreg_0(v *Value) bool { - // match: (MOVDreg x) - // cond: x.Uses == 1 - // result: (MOVDnop x) - for { - x := v.Args[0] - if !(x.Uses == 1) { + if v_2.Op != OpARM64MOVHUreg { break } - v.reset(OpARM64MOVDnop) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVBstoreidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVDreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVDstore ptr (FMOVDfpgp val) mem) + // match: (MOVBstoreidx ptr idx (MOVWreg x) mem) // cond: - // result: (FMOVDstore ptr val mem) + // result: (MOVBstoreidx ptr idx x mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64FMOVDfpgp { + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWreg { break } - val := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64FMOVDstore) + x := v_2.Args[0] + mem := 
v.Args[3] + v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) - v.AddArg(val) + v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDstore [off1+off2] {sym} ptr val mem) + // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem) + // cond: + // result: (MOVBstoreidx ptr idx x mem) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVBstoreidx) v.AddArg(ptr) - v.AddArg(val) + v.AddArg(idx) + v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) - // cond: off == 0 && sym == nil - // result: (MOVDstoreidx ptr idx val mem) + // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstoreidx ptr idx w mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(off == 0 && sym == nil) { + if v_1.AuxInt != 1 { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) - // cond: off == 0 && sym == nil - // result: (MOVDstoreidx8 ptr idx val mem) - for { - off := 
v.AuxInt - sym := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + idx := v_1.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64SRLconst { break } - if v_0.AuxInt != 3 { + if v_2.AuxInt != 8 { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(off == 0 && sym == nil) { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpARM64MOVBstoreidx { break } - v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + _ = x.Args[3] + if ptr != x.Args[0] { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if idx != x.Args[1] { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) - // cond: - // result: (MOVDstorezero [off] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if w != x.Args[2] { break } - if v_1.AuxInt != 0 { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - mem := v.Args[2] - v.reset(OpARM64MOVDstorezero) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(w) v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { - // match: (MOVDstoreidx 
ptr (MOVDconst [c]) val mem) - // cond: - // result: (MOVDstore [c] ptr val mem) +func rewriteValueARM64_OpARM64MOVBstoreidx_10(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstoreidx ptr idx (REVW w) mem) for { _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64ADDconst { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVDstore) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx (MOVDconst [c]) idx val mem) - // cond: - // result: (MOVDstore [c] idx val mem) - for { - _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_1.AuxInt != 3 { break } - c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVDstore) - v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) - // cond: - // result: (MOVDstoreidx8 ptr idx val mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + idx := v_1.Args[0] + w := v.Args[2] + x0 := v.Args[3] + if x0.Op != OpARM64MOVBstoreidx { break } - if v_1.AuxInt != 3 { + _ = x0.Args[3] + if ptr != x0.Args[0] { break } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem) - // cond: - // result: (MOVDstoreidx8 ptr idx val mem) - for { - _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != 
OpARM64SLLconst { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { break } - if v_0.AuxInt != 3 { + if x0_1.AuxInt != 2 { break } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVDstoreidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVDstorezeroidx ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + if idx != x0_1.Args[0] { break } - if v_2.AuxInt != 0 { + x0_2 := x0.Args[2] + if x0_2.Op != OpARM64UBFX { break } - mem := v.Args[3] - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVDstoreidx8_0(v *Value) bool { - // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) - // cond: - // result: (MOVDstore [c<<3] ptr val mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if x0_2.AuxInt != arm64BFAuxInt(8, 24) { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVDstore) - v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVDstorezeroidx8 ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + if w != x0_2.Args[0] { break } - if v_2.AuxInt != 0 { + x1 := x0.Args[3] + if x1.Op != OpARM64MOVBstoreidx { break } - mem := v.Args[3] - v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: 
is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDstorezero [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + _ = x1.Args[3] + if ptr != x1.Args[0] { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if x1_1.AuxInt != 1 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if idx != x1_1.Args[0] { break } - v.reset(OpARM64MOVDstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVDstorezeroidx ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + x1_2 := x1.Args[2] + if x1_2.Op != OpARM64UBFX { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + if x1_2.AuxInt != arm64BFAuxInt(16, 16) { break } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) - // cond: 
off == 0 && sym == nil - // result: (MOVDstorezeroidx8 ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + if w != x1_2.Args[0] { break } - if v_0.AuxInt != 3 { + x2 := x1.Args[3] + if x2.Op != OpARM64MOVBstoreidx { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + _ = x2.Args[3] + if ptr != x2.Args[0] { break } - v.reset(OpARM64MOVDstorezeroidx8) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem)) - // cond: x.Uses == 1 && areAdjacentOffsets(i,j,8) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVQstorezero [min(i,j)] {s} ptr0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVDstorezero { + if idx != x2.Args[1] { break } - j := x.AuxInt - if x.Aux != s { + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64UBFX { break } - _ = x.Args[1] - ptr1 := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && areAdjacentOffsets(i, j, 8) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + if x2_2.AuxInt != arm64BFAuxInt(24, 8) { break } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = min(i, j) - v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) - return true + if w != x2_2.Args[0] { + break + } + mem := x2.Args[3] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true } - // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVQstorezero 
[0] {s} p0 mem) + // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [arm64BFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [arm64BFAuxInt(24, 8)] w) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstoreidx ptr idx w mem) for { - if v.AuxInt != 8 { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x0 := v.Args[3] + if x0.Op != OpARM64MOVBstoreidx { break } - s := v.Aux + _ = x0.Args[3] + if ptr != x0.Args[0] { + break + } + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 1 { + break + } + if idx != x0_1.Args[0] { + break + } + x0_2 := x0.Args[2] + if x0_2.Op != OpARM64UBFX { + break + } + if x0_2.AuxInt != arm64BFAuxInt(8, 24) { + break + } + if w != x0_2.Args[0] { + break + } + x1 := x0.Args[3] + if x1.Op != OpARM64MOVBstoreidx { + break + } + _ = x1.Args[3] + if ptr != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { + break + } + if x1_1.AuxInt != 2 { + break + } + if idx != x1_1.Args[0] { + break + } + x1_2 := x1.Args[2] + if x1_2.Op != OpARM64UBFX { + break + } + if x1_2.AuxInt != arm64BFAuxInt(16, 16) { + break + } + if w != x1_2.Args[0] { + break + } + x2 := x1.Args[3] + if x2.Op != OpARM64MOVBstoreidx { + break + } + _ = x2.Args[3] + if ptr != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { + break + } + if x2_1.AuxInt != 3 { + break + } + if idx != x2_1.Args[0] { + break + } + x2_2 := x2.Args[2] + if x2_2.Op != OpARM64UBFX { + break + } + if x2_2.AuxInt != arm64BFAuxInt(24, 8) { + break + } + if w != x2_2.Args[0] { + break + } + mem := x2.Args[3] + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(w) + 
v.AddArg(mem) + return true + } + // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstoreidx ptr idx (REV16W w) mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { + break + } + if v_1.AuxInt != 1 { + break + } + idx := v_1.Args[0] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpARM64MOVBstoreidx { + break + } + _ = x.Args[3] + if ptr != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpARM64UBFX { + break + } + if x_2.AuxInt != arm64BFAuxInt(8, 8) { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type) + v0.AddArg(w) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [arm64BFAuxInt(8, 8)] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstoreidx ptr idx w mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpARM64MOVBstoreidx { + break + } + _ = x.Args[3] + if ptr != x.Args[0] { + break + } + x_1 := x.Args[1] + if x_1.Op != OpARM64ADDconst { + break + } + if x_1.AuxInt != 1 { + break + } + if idx != x_1.Args[0] { + break + } + x_2 := x.Args[2] + if x_2.Op != OpARM64UBFX { + break + } + if x_2.AuxInt != arm64BFAuxInt(8, 8) { + break + } + if w != x_2.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVBstorezero_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVBstorezero [off1] {sym} 
(ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux _ = v.Args[1] - p0 := v.Args[0] - if p0.Op != OpARM64ADD { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - _ = p0.Args[1] - ptr0 := p0.Args[0] - idx0 := p0.Args[1] + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64MOVBstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVBstorezeroidx ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVBstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem)) + // cond: x.Uses == 1 && areAdjacentOffsets(i,j,1) && is32Bit(min(i,j)) && 
isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVHstorezero [min(i,j)] {s} ptr0 mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[1] + ptr0 := v.Args[0] x := v.Args[1] - if x.Op != OpARM64MOVDstorezeroidx { + if x.Op != OpARM64MOVBstorezero { break } - _ = x.Args[2] + j := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] ptr1 := x.Args[0] - idx1 := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + mem := x.Args[1] + if !(x.Uses == 1 && areAdjacentOffsets(i, j, 1) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = 0 + v.reset(OpARM64MOVHstorezero) + v.AuxInt = min(i, j) v.Aux = s - v.AddArg(p0) + v.AddArg(ptr0) v.AddArg(mem) return true } - // match: (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVQstorezero [0] {s} p0 mem) + // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVHstorezeroidx ptr1 idx1 mem) for { - if v.AuxInt != 8 { + if v.AuxInt != 1 { break } s := v.Aux _ = v.Args[1] - p0 := v.Args[0] - if p0.Op != OpARM64ADDshiftLL { - break - } - if p0.AuxInt != 3 { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - _ = p0.Args[1] - ptr0 := p0.Args[0] - idx0 := p0.Args[1] + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] x := v.Args[1] - if x.Op != OpARM64MOVDstorezeroidx8 { + if x.Op != OpARM64MOVBstorezeroidx { break } _ = x.Args[2] ptr1 := x.Args[0] idx1 := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + 
if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = 0 - v.Aux = s - v.AddArg(p0) + v.reset(OpARM64MOVHstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { - // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVBstorezeroidx_0(v *Value) bool { + // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVDstorezero [c] ptr mem) + // result: (MOVBstorezero [c] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -10466,15 +12442,15 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVDstorezero) + v.reset(OpARM64MOVBstorezero) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) + // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem) // cond: - // result: (MOVDstorezero [c] idx mem) + // result: (MOVBstorezero [c] idx mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -10484,49 +12460,42 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { c := v_0.AuxInt idx := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVDstorezero) + v.reset(OpARM64MOVBstorezero) v.AuxInt = c v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) - // cond: - // result: (MOVDstorezeroidx8 ptr idx mem) + // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVHstorezeroidx ptr idx mem) for { _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + if v_1.Op != OpARM64ADDconst { break } - if v_1.AuxInt != 3 { + if v_1.AuxInt != 1 { break } idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVDstorezeroidx8) - 
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) - // cond: - // result: (MOVDstorezeroidx8 ptr idx mem) - for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + x := v.Args[2] + if x.Op != OpARM64MOVBstorezeroidx { break } - if v_0.AuxInt != 3 { + _ = x.Args[2] + if ptr != x.Args[0] { break } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDstorezeroidx8) + if idx != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVHstorezeroidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) @@ -10534,35 +12503,41 @@ func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64MOVDstorezeroidx8_0(v *Value) bool { - // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) // cond: - // result: (MOVDstorezero [c<<3] ptr mem) + // result: (FMOVDfpgp val) for { - _ = v.Args[2] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64FMOVDstore { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVDstorezero) - v.AuxInt = c << 3 - v.AddArg(ptr) - v.AddArg(mem) + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpARM64FMOVDfpgp) + v.AddArg(val) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared) - // result: (MOVHUload [off1+off2] {sym} ptr mem) + // result: (MOVDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -10577,16 +12552,16 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHUload) + v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) + // match: (MOVDload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVHUloadidx ptr idx mem) + // result: (MOVDloadidx ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -10602,15 +12577,15 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHUloadidx) + v.reset(OpARM64MOVDloadidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVHUloadidx2 ptr idx mem) + // result: (MOVDloadidx8 ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -10619,7 +12594,7 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { if v_0.Op != OpARM64ADDshiftLL { break } - if v_0.AuxInt != 1 { + if v_0.AuxInt != 3 { break } _ = v_0.Args[1] @@ -10629,15 +12604,15 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHUloadidx2) + v.reset(OpARM64MOVDloadidx8) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: 
(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -10653,14 +12628,14 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHUload) + v.reset(OpARM64MOVDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) + // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) // result: (MOVDconst [0]) for { @@ -10669,7 +12644,7 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVHstorezero { + if v_1.Op != OpARM64MOVDstorezero { break } off2 := v_1.AuxInt @@ -10683,12 +12658,30 @@ func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { v.AuxInt = 0 return true } + // match: (MOVDload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read64(sym, off, config.BigEndian))]) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(read64(sym, off, config.BigEndian)) + return true + } return false } -func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { - // match: (MOVHUloadidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVDloadidx_0(v *Value) bool { + // match: (MOVDloadidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVHUload [c] ptr mem) + // result: (MOVDload [c] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -10698,15 +12691,15 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVHUload) + v.reset(OpARM64MOVDload) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } 
- // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) + // match: (MOVDloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVHUload [c] ptr mem) + // result: (MOVDload [c] ptr mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -10716,15 +12709,15 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { c := v_0.AuxInt ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVHUload) + v.reset(OpARM64MOVDload) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) + // match: (MOVDloadidx ptr (SLLconst [3] idx) mem) // cond: - // result: (MOVHUloadidx2 ptr idx mem) + // result: (MOVDloadidx8 ptr idx mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -10732,62 +12725,39 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { if v_1.Op != OpARM64SLLconst { break } - if v_1.AuxInt != 1 { + if v_1.AuxInt != 3 { break } idx := v_1.Args[0] mem := v.Args[2] - v.reset(OpARM64MOVHUloadidx2) + v.reset(OpARM64MOVDloadidx8) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHUloadidx ptr (ADD idx idx) mem) + // match: (MOVDloadidx (SLLconst [3] idx) ptr mem) // cond: - // result: (MOVHUloadidx2 ptr idx mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADD { - break - } - _ = v_1.Args[1] - idx := v_1.Args[0] - if idx != v_1.Args[1] { - break - } - mem := v.Args[2] - v.reset(OpARM64MOVHUloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHUloadidx (ADD idx idx) ptr mem) - // cond: - // result: (MOVHUloadidx2 ptr idx mem) + // result: (MOVDloadidx8 ptr idx mem) for { _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64SLLconst { break } - _ = v_0.Args[1] - idx := v_0.Args[0] - if idx != v_0.Args[1] { + if v_0.AuxInt != 3 { break } + idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVHUloadidx2) + v.reset(OpARM64MOVDloadidx8) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return 
true } - // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) // result: (MOVDconst [0]) for { @@ -10795,7 +12765,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHstorezeroidx { + if v_2.Op != OpARM64MOVDstorezeroidx { break } _ = v_2.Args[2] @@ -10810,10 +12780,10 @@ func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { - // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVDloadidx8_0(v *Value) bool { + // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem) // cond: - // result: (MOVHUload [c<<1] ptr mem) + // result: (MOVDload [c<<3] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -10823,13 +12793,13 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVHUload) - v.AuxInt = c << 1 + v.reset(OpARM64MOVDload) + v.AuxInt = c << 3 v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) + // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) // result: (MOVDconst [0]) for { @@ -10837,7 +12807,7 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHstorezeroidx2 { + if v_2.Op != OpARM64MOVDstorezeroidx8 { break } _ = v_2.Args[2] @@ -10852,114 +12822,22 @@ func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { - // match: (MOVHUreg x:(MOVBUload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != 
OpARM64MOVBUload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVHUload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVBUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUloadidx { - break - } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVHUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx { - break - } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx2 { - break - } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVBUreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUreg { - break - } - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg x:(MOVHUreg _)) - // cond: - // result: (MOVDreg x) +func rewriteValueARM64_OpARM64MOVDreg_0(v *Value) bool { + // match: (MOVDreg x) + // cond: x.Uses == 1 + // result: (MOVDnop x) for { x := v.Args[0] - if x.Op != OpARM64MOVHUreg { - break - } - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHUreg (ANDconst [c] x)) - // cond: - // result: (ANDconst [c&(1<<16-1)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + if !(x.Uses == 1) { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<16 - 1) + v.reset(OpARM64MOVDnop) v.AddArg(x) return true } - // match: (MOVHUreg (MOVDconst [c])) + // match: (MOVDreg (MOVDconst [c])) // cond: - // result: (MOVDconst [int64(uint16(c))]) + // 
result: (MOVDconst [c]) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { @@ -10967,86 +12845,71 @@ func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint16(c)) - return true - } - // match: (MOVHUreg (SLLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<16-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<16-1, sc)) { - break - } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) - v.AddArg(x) + v.AuxInt = c return true } return false } -func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { - // match: (MOVHUreg (SRLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<16-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 16)] x) +func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) + // cond: + // result: (FMOVDstore [off] {sym} ptr val mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { - break - } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<16-1, 0)) { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64FMOVDfpgp { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 16) - v.AddArg(x) + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64FMOVDstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - 
// result: (MOVHload [off1+off2] {sym} ptr mem) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHload) + v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHload [off] {sym} (ADD ptr idx) mem) + // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) // cond: off == 0 && sym == nil - // result: (MOVHloadidx ptr idx mem) + // result: (MOVDstoreidx ptr idx val mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break @@ -11054,50 +12917,54 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHloadidx) + v.reset(OpARM64MOVDstoreidx) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) // cond: off == 0 && sym == nil - // result: (MOVHloadidx2 ptr idx mem) + // result: (MOVDstoreidx8 ptr idx val mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL { break } - if v_0.AuxInt != 1 { + if v_0.AuxInt != 3 { break } _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHloadidx2) + v.reset(OpARM64MOVDstoreidx8) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHload 
[off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break @@ -11105,411 +12972,543 @@ func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHload) + v.reset(OpARM64MOVDstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) + // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) + // cond: + // result: (MOVDstorezero [off] {sym} ptr mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[1] + _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVHstorezero { + if v_1.Op != OpARM64MOVDconst { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if v_1.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + mem := v.Args[2] + v.reset(OpARM64MOVDstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { - // match: (MOVHloadidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVDstoreidx_0(v *Value) bool { + // match: (MOVDstoreidx ptr (MOVDconst 
[c]) val mem) // cond: - // result: (MOVHload [c] ptr mem) + // result: (MOVDstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVHload) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVDstore) v.AuxInt = c v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHloadidx (MOVDconst [c]) ptr mem) + // match: (MOVDstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVHload [c] ptr mem) + // result: (MOVDstore [c] idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHload) + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVDstore) v.AuxInt = c - v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) + // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem) // cond: - // result: (MOVHloadidx2 ptr idx mem) + // result: (MOVDstoreidx8 ptr idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64SLLconst { break } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVHloadidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHloadidx ptr (ADD idx idx) mem) - // cond: - // result: (MOVHloadidx2 ptr idx mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADD { + if v_1.AuxInt != 3 { break } - _ = v_1.Args[1] idx := v_1.Args[0] - if idx != v_1.Args[1] { - break - } - mem := v.Args[2] - v.reset(OpARM64MOVHloadidx2) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVDstoreidx8) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHloadidx (ADD idx idx) ptr mem) + // match: 
(MOVDstoreidx (SLLconst [3] idx) ptr val mem) // cond: - // result: (MOVHloadidx2 ptr idx mem) + // result: (MOVDstoreidx8 ptr idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64SLLconst { break } - _ = v_0.Args[1] - idx := v_0.Args[0] - if idx != v_0.Args[1] { + if v_0.AuxInt != 3 { break } + idx := v_0.Args[0] ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHloadidx2) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVDstoreidx8) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem) + // cond: + // result: (MOVDstorezeroidx ptr idx mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHstorezeroidx { + if v_2.Op != OpARM64MOVDconst { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + if v_2.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + mem := v.Args[3] + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVHloadidx2_0(v *Value) bool { - // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVDstoreidx8_0(v *Value) bool { + // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVHload [c<<1] ptr mem) + // result: (MOVDstore [c<<3] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVHload) - v.AuxInt = c << 1 + val := v.Args[2] + mem := v.Args[3] 
+ v.reset(OpARM64MOVDstore) + v.AuxInt = c << 3 v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) - // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) - // result: (MOVDconst [0]) + // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) + // cond: + // result: (MOVDstorezeroidx8 ptr idx mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHstorezeroidx2 { + if v_2.Op != OpARM64MOVDconst { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + if v_2.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + mem := v.Args[3] + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { - // match: (MOVHreg x:(MOVBload _ _)) - // cond: - // result: (MOVDreg x) +func rewriteValueARM64_OpARM64MOVDstorezero_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstorezero [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBload { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBUload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUload { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + v.reset(OpARM64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVHload _ _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHload { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBloadidx { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + v.reset(OpARM64MOVDstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVBUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDstorezeroidx ptr idx mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUloadidx { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVHloadidx _ _ _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVDstorezeroidx8 ptr idx mem) for 
{ - x := v.Args[0] - if x.Op != OpARM64MOVHloadidx { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDshiftLL { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + if v_0.AuxInt != 3 { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVHloadidx2 _ _ _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem)) + // cond: x.Uses == 1 && areAdjacentOffsets(i,j,8) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVQstorezero [min(i,j)] {s} ptr0 mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHloadidx2 { + i := v.AuxInt + s := v.Aux + _ = v.Args[1] + ptr0 := v.Args[0] + x := v.Args[1] + if x.Op != OpARM64MOVDstorezero { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + j := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + ptr1 := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && areAdjacentOffsets(i, j, 8) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = min(i, j) + v.Aux = s + v.AddArg(ptr0) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVBreg _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVQstorezero [0] {s} p0 mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBreg { + if v.AuxInt != 8 { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + s := v.Aux + _ = v.Args[1] + p0 := v.Args[0] + if p0.Op != OpARM64ADD { + break + } + _ = p0.Args[1] + ptr0 := p0.Args[0] + idx0 := 
p0.Args[1] + x := v.Args[1] + if x.Op != OpARM64MOVDstorezeroidx { + break + } + _ = x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = 0 + v.Aux = s + v.AddArg(p0) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVBUreg _)) - // cond: - // result: (MOVDreg x) + // match: (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVQstorezero [0] {s} p0 mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUreg { + if v.AuxInt != 8 { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + s := v.Aux + _ = v.Args[1] + p0 := v.Args[0] + if p0.Op != OpARM64ADDshiftLL { + break + } + if p0.AuxInt != 3 { + break + } + _ = p0.Args[1] + ptr0 := p0.Args[0] + idx0 := p0.Args[1] + x := v.Args[1] + if x.Op != OpARM64MOVDstorezeroidx8 { + break + } + _ = x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + break + } + v.reset(OpARM64MOVQstorezero) + v.AuxInt = 0 + v.Aux = s + v.AddArg(p0) + v.AddArg(mem) return true } - // match: (MOVHreg x:(MOVHreg _)) + return false +} +func rewriteValueARM64_OpARM64MOVDstorezeroidx_0(v *Value) bool { + // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVDreg x) + // result: (MOVDstorezero [c] ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHreg { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVDstorezero) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - return false -} -func 
rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { - // match: (MOVHreg (MOVDconst [c])) + // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem) // cond: - // result: (MOVDconst [int64(int16(c))]) + // result: (MOVDstorezero [c] idx mem) for { + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int16(c)) + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVDstorezero) + v.AuxInt = c + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVHreg (SLLconst [lc] x)) - // cond: lc < 16 - // result: (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x) + // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem) + // cond: + // result: (MOVDstorezeroidx8 ptr idx mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SLLconst { + break + } + if v_1.AuxInt != 3 { + break + } + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem) + // cond: + // result: (MOVDstorezeroidx8 ptr idx mem) for { + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } - lc := v_0.AuxInt - x := v_0.Args[0] - if !(lc < 16) { + if v_0.AuxInt != 3 { break } - v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 16-lc) - v.AddArg(x) + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVDstorezeroidx8) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { +func rewriteValueARM64_OpARM64MOVDstorezeroidx8_0(v *Value) bool { + // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVDstorezero [c<<3] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVDstorezero) + v.AuxInt = c << 3 + 
v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHstore [off1+off2] {sym} ptr val mem) + // result: (MOVHUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHstore) + v.reset(OpARM64MOVHUload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) + // match: (MOVHUload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVHstoreidx ptr idx val mem) + // result: (MOVHUloadidx ptr idx mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break @@ -11517,25 +13516,23 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHstoreidx) + v.reset(OpARM64MOVHUloadidx) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) + // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVHstoreidx2 ptr idx val mem) + // result: (MOVHUloadidx2 ptr idx mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if 
v_0.Op != OpARM64ADDshiftLL { break @@ -11546,25 +13543,23 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVHstoreidx2) + v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break @@ -11572,371 +13567,446 @@ func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVHstore) + v.reset(OpARM64MOVHUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) - // cond: - // result: (MOVHstorezero [off] {sym} ptr mem) + // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64MOVHstorezero { break } - if v_1.AuxInt != 0 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && 
isSamePtr(ptr, ptr2)) { break } - mem := v.Args[2] - v.reset(OpARM64MOVHstorezero) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // match: (MOVHUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read16(sym, off, config.BigEndian))]) for { off := v.AuxInt sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(read16(sym, off, config.BigEndian)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUloadidx_0(v *Value) bool { + // match: (MOVHUloadidx ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVHUload [c] ptr mem) + for { _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVHreg { + if v_1.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] + c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVHUload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // match: (MOVHUloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // result: (MOVHUload [c] ptr mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVHUreg { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] + c := v_0.AuxInt + ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVHUload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem) // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + 
// result: (MOVHUloadidx2 ptr idx mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWreg { + if v_1.Op != OpARM64SLLconst { break } - x := v_1.Args[0] + if v_1.AuxInt != 1 { + break + } + idx := v_1.Args[0] mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) - v.AddArg(x) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) + // match: (MOVHUloadidx ptr (ADD idx idx) mem) // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // result: (MOVHUloadidx2 ptr idx mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + if v_1.Op != OpARM64ADD { + break + } + _ = v_1.Args[1] + idx := v_1.Args[0] + if idx != v_1.Args[1] { break } - x := v_1.Args[0] mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVHUloadidx2) v.AddArg(ptr) - v.AddArg(x) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVWstore [i-2] {s} ptr0 w mem) + // match: (MOVHUloadidx (ADD idx idx) ptr mem) + // cond: + // result: (MOVHUloadidx2 ptr idx mem) for { - i := v.AuxInt - s := v.Aux _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstore { - break - } - if x.AuxInt != i-2 { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if x.Aux != s { + _ = v_0.Args[1] + idx := v_0.Args[0] + if idx != v_0.Args[1] { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHUloadidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } 
+ // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHstorezeroidx { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } return false } -func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { - b := v.Block - _ = b - // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) +func rewriteValueARM64_OpARM64MOVHUloadidx2_0(v *Value) bool { + // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVHUload [c<<1] ptr mem) for { - if v.AuxInt != 2 { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVHUload) + v.AuxInt = c << 1 + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: 
(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) + return false +} +func rewriteValueARM64_OpARM64MOVHUreg_0(v *Value) bool { + // match: (MOVHUreg x:(MOVBUload _ _)) + // cond: + // result: (MOVDreg x) for { - if v.AuxInt != 2 { + x := v.Args[0] + if x.Op != OpARM64MOVBUload { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUload { break } - if v_0.AuxInt != 1 { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUloadidx { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUloadidx _ 
_ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUloadidx { break } - if v_1.AuxInt != 16 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUloadidx2 { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx2 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUreg { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUreg { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c&(1<<16-1)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64ANDconst { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 1 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = c & (1<<16 - 1) + v.AddArg(x) return true } - // match: (MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVWstore [i-2] {s} ptr0 w mem) + // match: (MOVHUreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(uint16(c))]) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { + v_0 := v.Args[0] 
+ if v_0.Op != OpARM64MOVDconst { break } - if v_1.AuxInt != arm64BFAuxInt(16, 16) { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint16(c)) + return true + } + // match: (MOVHUreg (SLLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<16-1, sc) + // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstore { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<16-1, sc)) { break } - if x.AuxInt != i-2 { + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHUreg_10(v *Value) bool { + // match: (MOVHUreg (SRLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<16-1, 0) + // result: (UBFX [arm64BFAuxInt(sc, 16)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { break } - if x.Aux != s { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<16-1, 0)) { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, 16) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) + 
v.reset(OpARM64MOVHload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) + // match: (MOVHload [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHloadidx ptr idx mem) for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - if v_1.AuxInt != arm64BFAuxInt(16, 16) { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) + v.reset(OpARM64MOVHloadidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) + // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVHloadidx2 ptr idx mem) for { - if v.AuxInt != 2 { - break - } - s := v.Aux - _ = v.Args[2] + off := 
v.AuxInt + sym := v.Aux + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL { break @@ -11945,480 +14015,478 @@ func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { break } _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64UBFX { - break - } - if v_1.AuxInt != arm64BFAuxInt(16, 16) { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx2 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + v.reset(OpARM64MOVHloadidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 1 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) + v.reset(OpARM64MOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVWstore [i-2] {s} ptr0 w mem) + // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 
&& isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) for { - i := v.AuxInt - s := v.Aux - _ = v.Args[2] - ptr0 := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVHstorezero { break } - if v_1.AuxInt != 16 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVHloadidx_0(v *Value) bool { + // match: (MOVHloadidx ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVHload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstore { + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVHload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx (MOVDconst [c]) ptr mem) + // cond: + // result: (MOVHload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x.AuxInt != i-2 { + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx ptr (SLLconst [1] idx) mem) + // cond: + // result: (MOVHloadidx2 ptr idx mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SLLconst { break } - if x.Aux != s { + if v_1.AuxInt != 1 { break } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVHloadidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx ptr (ADD idx idx) mem) + // cond: + // result: (MOVHloadidx2 ptr idx mem) + for { + _ = v.Args[2] + ptr := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADD { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + _ = v_1.Args[1] + idx := v_1.Args[0] + if idx != v_1.Args[1] { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) + mem := v.Args[2] + v.reset(OpARM64MOVHloadidx2) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w mem) + // match: (MOVHloadidx (ADD idx idx) ptr mem) + // cond: + // result: (MOVHloadidx2 ptr idx mem) for { - if v.AuxInt != 2 { - break - } - s := v.Aux _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { + idx := v_0.Args[0] + if idx != v_0.Args[1] { break } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { - break - } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHloadidx2) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // 
result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) + // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) for { - if v.AuxInt != 2 { - break - } - s := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { - break - } - if v_0.AuxInt != 1 { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64MOVDreg { - break - } - w := v_1_0.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx2 { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHstorezeroidx { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 1 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVWstore [i-2] {s} ptr0 w0 mem) + return false +} +func rewriteValueARM64_OpARM64MOVHloadidx2_0(v *Value) bool { + // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVHload [c<<1] ptr mem) for { - i := v.AuxInt - s := v.Aux _ = v.Args[2] - ptr0 := v.Args[0] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - 
} - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - _ = x.Args[2] - ptr1 := x.Args[0] - w0 := x.Args[1] - if w0.Op != OpARM64SRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVHload) + v.AuxInt = c << 1 + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVWstoreidx ptr1 idx1 w0 mem) + // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) for { - if v.AuxInt != 2 { - break - } - s := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHstorezeroidx2 { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := 
v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } return false } -func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { - b := v.Block - _ = b - // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w0 mem) +func rewriteValueARM64_OpARM64MOVHreg_0(v *Value) bool { + // match: (MOVHreg x:(MOVBload _ _)) + // cond: + // result: (MOVDreg x) for { - if v.AuxInt != 2 { + x := v.Args[0] + if x.Op != OpARM64MOVBload { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUload { break } - if v_0.AuxInt != 1 { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHload { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBloadidx { break } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstoreidx2 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != 
OpARM64MOVBUloadidx { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHloadidx { break } - if w0.AuxInt != j-16 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHloadidx2 _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHloadidx2 { break } - if w != w0.Args[0] { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBreg { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUreg { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 1 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { - // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) + // match: (MOVHreg x:(MOVHreg _)) // cond: - // result: (MOVHstore [c] ptr val mem) + // result: (MOVDreg x) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x := v.Args[0] + if x.Op != OpARM64MOVHreg { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstore) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: 
(MOVHstoreidx (MOVDconst [c]) idx val mem) + return false +} +func rewriteValueARM64_OpARM64MOVHreg_10(v *Value) bool { + // match: (MOVHreg (MOVDconst [c])) // cond: - // result: (MOVHstore [c] idx val mem) + // result: (MOVDconst [int64(int16(c))]) for { - _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstore) - v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int16(c)) return true } - // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) - // cond: - // result: (MOVHstoreidx2 ptr idx val mem) + // match: (MOVHreg (SLLconst [lc] x)) + // cond: lc < 16 + // result: (SBFIZ [arm64BFAuxInt(lc, 16-lc)] x) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - if v_1.AuxInt != 1 { + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc < 16) { break } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BFAuxInt(lc, 16-lc) + v.AddArg(x) return true } - // match: (MOVHstoreidx ptr (ADD idx idx) val mem) - // cond: - // result: (MOVHstoreidx2 ptr idx val mem) + return false +} +func rewriteValueARM64_OpARM64MOVHstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADD { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { break } - _ = v_1.Args[1] - idx := v_1.Args[0] - if idx != v_1.Args[1] { + off2 := 
v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) + v.reset(OpARM64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) - // cond: - // result: (MOVHstoreidx2 ptr idx val mem) + // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVHstoreidx ptr idx val mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_0.Op != OpARM64ADD { break } - if v_0.AuxInt != 1 { + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { break } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) + v.reset(OpARM64MOVHstoreidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstoreidx (ADD idx idx) ptr val mem) - // cond: + // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) + // cond: off == 0 && sym == nil // result: (MOVHstoreidx2 ptr idx val mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + if v_0.Op != OpARM64ADDshiftLL { + break + } + if v_0.AuxInt != 1 { break } _ = v_0.Args[1] - idx := v_0.Args[0] - if idx != v_0.Args[1] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { break } - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) @@ -12426,426 +14494,340 @@ func rewriteValueARM64_OpARM64MOVHstoreidx_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstoreidx ptr idx 
(MOVDconst [0]) mem) - // cond: - // result: (MOVHstorezeroidx ptr idx mem) + // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - if v_2.AuxInt != 0 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - mem := v.Args[3] - v.reset(OpARM64MOVHstorezeroidx) + v.reset(OpARM64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) // cond: - // result: (MOVHstoreidx ptr idx x mem) + // result: (MOVHstorezero [off] {sym} ptr mem) for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHreg { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx) + if v_1.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpARM64MOVHstorezero) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) // cond: - // result: (MOVHstoreidx ptr idx x mem) + // result: (MOVHstore [off] {sym} ptr x mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] ptr := v.Args[0] - 
idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHUreg { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVHreg { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) // cond: - // result: (MOVHstoreidx ptr idx x mem) + // result: (MOVHstore [off] {sym} ptr x mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWreg { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVHUreg { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHstoreidx_10(v *Value) bool { - // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) // cond: - // result: (MOVHstoreidx ptr idx x mem) + // result: (MOVHstore [off] {sym} ptr x mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWUreg { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWreg { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AuxInt = off + v.Aux = sym v.AddArg(ptr) - v.AddArg(idx) v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx ptr idx w mem) + // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) 
mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) for { - _ = v.Args[3] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { - break - } - if v_1.AuxInt != 2 { + if v_1.Op != OpARM64MOVWUreg { break } - idx := v_1.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpARM64SRLconst { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVWstore [i-2] {s} ptr0 w mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if v_2.AuxInt != 16 { + if v_1.AuxInt != 16 { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpARM64MOVHstoreidx { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstore { break } - _ = x.Args[3] - if ptr != x.Args[0] { + if x.AuxInt != i-2 { break } - if idx != x.Args[1] { + if x.Aux != s { break } - if w != x.Args[2] { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(ptr0) v.AddArg(w) v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { - // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) - // cond: - // result: (MOVHstore [c<<1] ptr val mem) +func rewriteValueARM64_OpARM64MOVHstore_10(v *Value) bool { + b := v.Block + _ = b + // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && 
(isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVWstoreidx ptr1 idx1 w mem) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v.AuxInt != 2 { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVHstore) - v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVHstorezeroidx2 ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - if v_2.AuxInt != 0 { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - mem := v.Args[3] - v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) - // cond: - // result: (MOVHstoreidx2 ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHreg { + if v_1.AuxInt != 16 { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) - // cond: - // result: (MOVHstoreidx2 ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVHUreg { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) - // cond: - // result: (MOVHstoreidx2 ptr idx x mem) - for { - _ = 
v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWreg { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) - // cond: - // result: (MOVHstoreidx2 ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWUreg { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVHstoreidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHstorezero [off1+off2] {sym} ptr mem) + // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if v.AuxInt != 2 { break } - v.reset(OpARM64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: 
(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if v_0.Op != OpARM64ADDshiftLL { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if v_0.AuxInt != 1 { break } - v.reset(OpARM64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVHstorezeroidx ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + if v_1.AuxInt != 16 { break } - v.reset(OpARM64MOVHstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVHstorezeroidx2 ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx2 { break } - if v_0.AuxInt != 1 { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && 
isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { break } - v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 1 + v0.AddArg(idx1) + v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem)) - // cond: x.Uses == 1 && areAdjacentOffsets(i,j,2) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVWstorezero [min(i,j)] {s} ptr0 mem) + // match: (MOVHstore [i] {s} ptr0 (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVWstore [i-2] {s} ptr0 w mem) for { i := v.AuxInt s := v.Aux - _ = v.Args[1] + _ = v.Args[2] ptr0 := v.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVHstorezero { + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { + break + } + if v_1.AuxInt != arm64BFAuxInt(16, 16) { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstore { + break + } + if x.AuxInt != i-2 { break } - j := x.AuxInt if x.Aux != s { break } - _ = x.Args[1] + _ = x.Args[2] ptr1 := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && areAdjacentOffsets(i, j, 2) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + if w != x.Args[1] { break } - v.reset(OpARM64MOVWstorezero) - v.AuxInt = min(i, j) + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 2 v.Aux = s v.AddArg(ptr0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem)) + // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) 
&& clobber(x) - // result: (MOVWstorezeroidx ptr1 idx1 mem) + // result: (MOVWstoreidx ptr1 idx1 w mem) for { if v.AuxInt != 2 { break } s := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break @@ -12853,32 +14835,44 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { _ = v_0.Args[1] ptr0 := v_0.Args[0] idx0 := v_0.Args[1] - x := v.Args[1] - if x.Op != OpARM64MOVHstorezeroidx { + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - _ = x.Args[2] + if v_1.AuxInt != arm64BFAuxInt(16, 16) { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + break + } + _ = x.Args[3] ptr1 := x.Args[0] idx1 := x.Args[1] - mem := x.Args[2] + if w != x.Args[2] { + break + } + mem := x.Args[3] if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - v.reset(OpARM64MOVWstorezeroidx) + v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr1) v.AddArg(idx1) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem)) + // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [arm64BFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem)) // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVWstorezeroidx ptr1 (SLLconst [1] idx1) mem) + // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) for { if v.AuxInt != 2 { break } s := v.Aux - _ = v.Args[1] + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL { break @@ -12889,757 +14883,770 @@ func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { _ = v_0.Args[1] ptr0 := v_0.Args[0] idx0 := v_0.Args[1] - x := v.Args[1] - if x.Op != OpARM64MOVHstorezeroidx2 { + v_1 := v.Args[1] + if v_1.Op != OpARM64UBFX { break } - _ = x.Args[2] + if v_1.AuxInt != arm64BFAuxInt(16, 16) { + break + } + w := v_1.Args[0] + x := v.Args[2] 
+ if x.Op != OpARM64MOVHstoreidx2 { + break + } + _ = x.Args[3] ptr1 := x.Args[0] idx1 := x.Args[1] - mem := x.Args[2] + if w != x.Args[2] { + break + } + mem := x.Args[3] if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { break } - v.reset(OpARM64MOVWstorezeroidx) + v.reset(OpARM64MOVWstoreidx) v.AddArg(ptr1) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) v0.AuxInt = 1 v0.AddArg(idx1) v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { - // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem) - // cond: - // result: (MOVHstorezero [c] ptr mem) + // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVWstore [i-2] {s} ptr0 w mem) for { + i := v.AuxInt + s := v.Aux _ = v.Args[2] - ptr := v.Args[0] + ptr0 := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVHstorezero) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) - // cond: - // result: (MOVHstorezero [c] idx mem) - for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_1.AuxInt != 16 { break } - c := v_0.AuxInt - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHstorezero) - v.AuxInt = c - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) - // cond: - // result: (MOVHstorezeroidx2 ptr idx mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { break } - if v_1.AuxInt != 1 { + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstore { break } - idx := v_1.Args[0] - mem := v.Args[2] - 
v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) - // cond: - // result: (MOVHstorezeroidx2 ptr idx mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADD { + if x.AuxInt != i-2 { break } - _ = v_1.Args[1] - idx := v_1.Args[0] - if idx != v_1.Args[1] { + if x.Aux != s { break } - mem := v.Args[2] - v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) - // cond: - // result: (MOVHstorezeroidx2 ptr idx mem) - for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - if v_0.AuxInt != 1 { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) - // cond: - // result: (MOVHstorezeroidx2 ptr idx mem) + // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVWstoreidx ptr1 idx1 w mem) for { + if v.AuxInt != 2 { + break + } + s := v.Aux _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - idx := v_0.Args[0] - if idx != v_0.Args[1] { + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHstorezeroidx2) - v.AddArg(ptr) - v.AddArg(idx) + if v_1.AuxInt != 16 { + break + } + 
v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { + break + } + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + break + } + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstorezeroidx ptr idx mem) + // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w mem) for { + if v.AuxInt != 2 { + break + } + s := v.Aux _ = v.Args[2] - ptr := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDshiftLL { + break + } + if v_0.AuxInt != 1 { + break + } + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { + if v_1.Op != OpARM64SRLconst { break } - if v_1.AuxInt != 2 { + if v_1.AuxInt != 16 { break } - idx := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVHstorezeroidx { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVDreg { break } - _ = x.Args[2] - if ptr != x.Args[0] { + w := v_1_0.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx2 { break } - if idx != x.Args[1] { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { break } - v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) + 
v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 1 + v0.AddArg(idx1) + v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVHstorezeroidx2_0(v *Value) bool { - // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) - // cond: - // result: (MOVHstorezero [c<<1] ptr mem) + // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVWstore [i-2] {s} ptr0 w0 mem) for { + i := v.AuxInt + s := v.Aux _ = v.Args[2] - ptr := v.Args[0] + ptr0 := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVHstorezero) - v.AuxInt = c << 1 - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVQstorezero [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstore { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if x.AuxInt != i-2 { break } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVQstorezero [off1+off2] 
{mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if x.Aux != s { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = x.Args[2] + ptr1 := x.Args[0] + w0 := x.Args[1] + if w0.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVQstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWUload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + if w0.AuxInt != j-16 { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + if w != w0.Args[0] { break } - v.reset(OpARM64MOVWUload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVWUload [off] {sym} (ADD ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVWUloadidx ptr idx mem) + // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVWstoreidx ptr1 idx1 w0 mem) for { - off := v.AuxInt - sym := 
v.Aux - _ = v.Args[1] + if v.AuxInt != 2 { + break + } + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break } _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVWUloadidx) - v.AddArg(ptr) - v.AddArg(idx) + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx { + break + } + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst { + break + } + if w0.AuxInt != j-16 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) - // cond: off == 0 && sym == nil - // result: (MOVWUloadidx4 ptr idx mem) + return false +} +func rewriteValueARM64_OpARM64MOVHstore_20(v *Value) bool { + b := v.Block + _ = b + // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVWstoreidx ptr1 (SLLconst [1] idx1) w0 mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] + if v.AuxInt != 2 { + break + } + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL { break } - if v_0.AuxInt != 2 { + if v_0.AuxInt != 1 { break } _ = v_0.Args[1] - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(off == 0 && sym == nil) { + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVWUloadidx4) 
- v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstoreidx2 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVWUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) - for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWstorezero { + if w0.AuxInt != j-16 { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if w != w0.Args[0] { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 1 + v0.AddArg(idx1) + v.AddArg(v0) + v.AddArg(w0) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { - // match: (MOVWUloadidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVHstoreidx_0(v 
*Value) bool { + // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVWUload [c] ptr mem) + // result: (MOVHstore [c] ptr val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVWUload) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstore) v.AuxInt = c v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) + // match: (MOVHstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVWUload [c] ptr mem) + // result: (MOVHstore [c] idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVWUload) + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstore) v.AuxInt = c - v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) + // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem) // cond: - // result: (MOVWUloadidx4 ptr idx mem) + // result: (MOVHstoreidx2 ptr idx val mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64SLLconst { break } - if v_1.AuxInt != 2 { + if v_1.AuxInt != 1 { break } idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpARM64MOVWUloadidx4) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) + // match: (MOVHstoreidx ptr (ADD idx idx) val mem) // cond: - // result: (MOVWUloadidx4 ptr idx mem) + // result: (MOVHstoreidx2 ptr idx val mem) for { - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - if v_0.AuxInt != 2 { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADD { break } - 
idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVWUloadidx4) + _ = v_1.Args[1] + idx := v_1.Args[0] + if idx != v_1.Args[1] { + break + } + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem) + // cond: + // result: (MOVHstoreidx2 ptr idx val mem) for { - _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWstorezeroidx { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + if v_0.AuxInt != 1 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + idx := v_0.Args[0] + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVWUloadidx4_0(v *Value) bool { - // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem) + // match: (MOVHstoreidx (ADD idx idx) ptr val mem) // cond: - // result: (MOVWUload [c<<2] ptr mem) + // result: (MOVHstoreidx2 ptr idx val mem) for { - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - c := v_1.AuxInt - mem := v.Args[2] - v.reset(OpARM64MOVWUload) - v.AuxInt = c << 2 + _ = v_0.Args[1] + idx := v_0.Args[0] + if idx != v_0.Args[1] { + break + } + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) 
v.AddArg(mem) return true } - // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) - // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) - // result: (MOVDconst [0]) + // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem) + // cond: + // result: (MOVHstorezeroidx ptr idx mem) for { - _ = v.Args[2] + _ = v.Args[3] ptr := v.Args[0] idx := v.Args[1] v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWstorezeroidx4 { + if v_2.Op != OpARM64MOVDconst { break } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + if v_2.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + mem := v.Args[3] + v.reset(OpARM64MOVHstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { - // match: (MOVWUreg x:(MOVBUload _ _)) + // match: (MOVHstoreidx ptr idx (MOVHreg x) mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstoreidx ptr idx x mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUload { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHreg { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg x:(MOVHUload _ _)) + // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstoreidx ptr idx x mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHUload { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHUreg { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg x:(MOVWUload _ _)) + // match: (MOVHstoreidx ptr idx (MOVWreg x) mem) // cond: - // 
result: (MOVDreg x) + // result: (MOVHstoreidx ptr idx x mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVWUload { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWreg { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg x:(MOVBUloadidx _ _ _)) + return false +} +func rewriteValueARM64_OpARM64MOVHstoreidx_10(v *Value) bool { + // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstoreidx ptr idx x mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBUloadidx { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWUreg { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg x:(MOVHUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) + // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx ptr idx w mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWUreg x:(MOVWUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVWUloadidx { + if v_1.AuxInt != 2 { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx2 { + idx := v_1.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64SRLconst { break } - _ = 
x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVWUloadidx4 { + if v_2.AuxInt != 16 { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWUreg x:(MOVBUreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUreg { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpARM64MOVHstoreidx { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + _ = x.Args[3] + if ptr != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (MOVWUreg x:(MOVHUreg _)) + return false +} +func rewriteValueARM64_OpARM64MOVHstoreidx2_0(v *Value) bool { + // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstore [c<<1] ptr val mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHUreg { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVHstore) + v.AuxInt = c << 1 + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { - // match: (MOVWUreg x:(MOVWUreg _)) + // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstorezeroidx2 ptr idx mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVWUreg { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + if v_2.AuxInt != 0 { + break + } + mem := v.Args[3] + 
v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVWUreg (ANDconst [c] x)) + // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem) // cond: - // result: (ANDconst [c&(1<<32-1)] x) + // result: (MOVHstoreidx2 ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHreg { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<32 - 1) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg (MOVDconst [c])) + // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) // cond: - // result: (MOVDconst [int64(uint32(c))]) + // result: (MOVHstoreidx2 ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVHUreg { break } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c)) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWUreg (SLLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<32-1, sc) - // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem) + // cond: + // result: (MOVHstoreidx2 ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<32-1, sc)) { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWreg { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + 
v.AddArg(mem) return true } - // match: (MOVWUreg (SRLconst [sc] x)) - // cond: isARM64BFMask(sc, 1<<32-1, 0) - // result: (UBFX [arm64BFAuxInt(sc, 32)] x) + // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) + // cond: + // result: (MOVHstoreidx2 ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { - break - } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<32-1, 0)) { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, 32) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVHstoreidx2) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { +func rewriteValueARM64_OpARM64MOVHstorezero_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWload [off1+off2] {sym} ptr mem) + // result: (MOVHstorezero [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -13654,16 +15661,41 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVWload) + v.reset(OpARM64MOVHstorezero) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWload [off] {sym} (ADD ptr idx) mem) + // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { + break + } + 
off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWloadidx ptr idx mem) + // result: (MOVHstorezeroidx ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -13679,15 +15711,15 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWloadidx) + v.reset(OpARM64MOVHstorezeroidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWloadidx4 ptr idx mem) + // result: (MOVHstorezeroidx2 ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -13696,7 +15728,7 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { if v_0.Op != OpARM64ADDshiftLL { break } - if v_0.AuxInt != 2 { + if v_0.AuxInt != 1 { break } _ = v_0.Args[1] @@ -13706,66 +15738,119 @@ func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWloadidx4) + v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem)) + // cond: x.Uses == 1 && areAdjacentOffsets(i,j,2) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVWstorezero [min(i,j)] {s} ptr0 mem) for { - 
off1 := v.AuxInt - sym1 := v.Aux + i := v.AuxInt + s := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + ptr0 := v.Args[0] + x := v.Args[1] + if x.Op != OpARM64MOVHstorezero { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + j := x.AuxInt + if x.Aux != s { break } - v.reset(OpARM64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) + _ = x.Args[1] + ptr1 := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && areAdjacentOffsets(i, j, 2) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstorezero) + v.AuxInt = min(i, j) + v.Aux = s + v.AddArg(ptr0) v.AddArg(mem) return true } - // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDconst [0]) + // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVWstorezeroidx ptr1 idx1 mem) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[1] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWstorezero { + if v.AuxInt != 2 { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux - _ = v_1.Args[1] - ptr2 := v_1.Args[0] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + x := v.Args[1] + if x.Op != OpARM64MOVHstorezeroidx { + break + } + _ = x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || 
isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(mem) + return true + } + // match: (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVWstorezeroidx ptr1 (SLLconst [1] idx1) mem) + for { + if v.AuxInt != 2 { + break + } + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDshiftLL { + break + } + if v_0.AuxInt != 1 { + break + } + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + x := v.Args[1] + if x.Op != OpARM64MOVHstorezeroidx2 { + break + } + _ = x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + break + } + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 1 + v0.AddArg(idx1) + v.AddArg(v0) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { - // match: (MOVWloadidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVHstorezeroidx_0(v *Value) bool { + // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVWload [c] ptr mem) + // result: (MOVHstorezero [c] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -13775,15 +15860,15 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVWload) + v.reset(OpARM64MOVHstorezero) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWloadidx (MOVDconst [c]) ptr mem) + // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem) // cond: - // result: (MOVWload [c] ptr mem) + // result: (MOVHstorezero [c] idx mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -13791,17 +15876,17 @@ func 
rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { break } c := v_0.AuxInt - ptr := v.Args[1] + idx := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWload) + v.reset(OpARM64MOVHstorezero) v.AuxInt = c - v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) + // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem) // cond: - // result: (MOVWloadidx4 ptr idx mem) + // result: (MOVHstorezeroidx2 ptr idx mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -13809,407 +15894,259 @@ func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { if v_1.Op != OpARM64SLLconst { break } - if v_1.AuxInt != 2 { + if v_1.AuxInt != 1 { break } idx := v_1.Args[0] mem := v.Args[2] - v.reset(OpARM64MOVWloadidx4) + v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) + // match: (MOVHstorezeroidx ptr (ADD idx idx) mem) // cond: - // result: (MOVWloadidx4 ptr idx mem) + // result: (MOVHstorezeroidx2 ptr idx mem) for { _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADD { break } - if v_0.AuxInt != 2 { + _ = v_1.Args[1] + idx := v_1.Args[0] + if idx != v_1.Args[1] { break } - idx := v_0.Args[0] - ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWloadidx4) + v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) - // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) - // result: (MOVDconst [0]) + // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem) + // cond: + // result: (MOVHstorezeroidx2 ptr idx mem) for { _ = v.Args[2] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWstorezeroidx { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - _ = v_2.Args[2] - ptr2 := 
v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { + if v_0.AuxInt != 1 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHstorezeroidx2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MOVWloadidx4_0(v *Value) bool { - // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem) + // match: (MOVHstorezeroidx (ADD idx idx) ptr mem) // cond: - // result: (MOVWload [c<<2] ptr mem) + // result: (MOVHstorezeroidx2 ptr idx mem) for { _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - c := v_1.AuxInt + _ = v_0.Args[1] + idx := v_0.Args[0] + if idx != v_0.Args[1] { + break + } + ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWload) - v.AuxInt = c << 2 + v.reset(OpARM64MOVHstorezeroidx2) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) - // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) - // result: (MOVDconst [0]) + // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstorezeroidx ptr idx mem) for { _ = v.Args[2] ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWstorezeroidx4 { - break - } - _ = v_2.Args[2] - ptr2 := v_2.Args[0] - idx2 := v_2.Args[1] - if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVWreg_0(v *Value) bool { - // match: (MOVWreg x:(MOVBload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - 
// match: (MOVWreg x:(MOVBUload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHUload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUload { - break - } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVWload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVWload { + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { break } - _ = x.Args[1] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBloadidx { + if v_1.AuxInt != 2 { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUloadidx { + idx := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVHstorezeroidx { break } _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHloadidx { + if ptr != x.Args[0] { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHUloadidx _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx { + if idx != x.Args[1] { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVWloadidx _ _ _)) - 
// cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVWloadidx { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { - // match: (MOVWreg x:(MOVHloadidx2 _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHloadidx2 { - break - } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHUloadidx2 _ _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHUloadidx2 { - break - } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVWloadidx4 _ _ _)) +func rewriteValueARM64_OpARM64MOVHstorezeroidx2_0(v *Value) bool { + // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) // cond: - // result: (MOVDreg x) + // result: (MOVHstorezero [c<<1] ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVWloadidx4 { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x.Args[2] - v.reset(OpARM64MOVDreg) - v.AddArg(x) + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVHstorezero) + v.AuxInt = c << 1 + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVWreg x:(MOVBreg _)) - // cond: - // result: (MOVDreg x) + return false +} +func rewriteValueARM64_OpARM64MOVQstorezero_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVQstorezero [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVBreg { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst 
{ break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBUreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVBUreg { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + v.reset(OpARM64MOVQstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVWreg x:(MOVHreg _)) - // cond: - // result: (MOVDreg x) + // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - if x.Op != OpARM64MOVHreg { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpARM64MOVHreg { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) + v.reset(OpARM64MOVQstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVWreg x:(MOVWreg _)) + return false +} +func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) // cond: - // result: (MOVDreg x) + // result: (FMOVSfpgp val) for { - x := v.Args[0] - if x.Op != OpARM64MOVWreg { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if 
v_1.Op != OpARM64FMOVSstore { break } - v.reset(OpARM64MOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(int32(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_1.AuxInt != off { break } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c)) - return true - } - // match: (MOVWreg (SLLconst [lc] x)) - // cond: lc < 32 - // result: (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_1.Aux != sym { break } - lc := v_0.AuxInt - x := v_0.Args[0] - if !(lc < 32) { + _ = v_1.Args[2] + if ptr != v_1.Args[0] { break } - v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc, 32-lc) - v.AddArg(x) + val := v_1.Args[1] + v.reset(OpARM64FMOVSfpgp) + v.AddArg(val) return true } - return false -} -func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) + // result: (MOVWUload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADDconst { break } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVWstore) + v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) + // match: (MOVWUload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWstoreidx ptr idx val mem) + // result: (MOVWUloadidx ptr idx mem) for { off := v.AuxInt sym := 
v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADD { break @@ -14217,25 +16154,23 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWstoreidx) + v.reset(OpARM64MOVWUloadidx) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) + // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWstoreidx4 ptr idx val mem) + // result: (MOVWUloadidx4 ptr idx mem) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64ADDshiftLL { break @@ -14246,25 +16181,23 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { _ = v_0.Args[1] ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWstoreidx4) + v.reset(OpARM64MOVWUloadidx4) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDaddr { break @@ -14272,675 +16205,422 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVWstore) + v.reset(OpARM64MOVWUload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) - // cond: - // result: (MOVWstorezero [off] {sym} ptr mem) + // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) for { off := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64MOVWstorezero { break } - if v_1.AuxInt != 0 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - mem := v.Args[2] - v.reset(OpARM64MOVWstorezero) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) + // match: (MOVWUload [off] {sym} (SB) _) + // cond: symIsRO(sym) + // result: (MOVDconst [int64(read32(sym, off, config.BigEndian))]) for { off := v.AuxInt sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpSB { + break + } + if !(symIsRO(sym)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(read32(sym, off, config.BigEndian)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUloadidx_0(v *Value) bool { + // match: (MOVWUloadidx ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVWUload [c] ptr mem) + for { _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWreg { + if v_1.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] + c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVWUload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(x) 
v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // match: (MOVWUloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVWstore [off] {sym} ptr x mem) + // result: (MOVWUload [c] ptr mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] + c := v_0.AuxInt + ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWstore) - v.AuxInt = off - v.Aux = sym + v.reset(OpARM64MOVWUload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVDstore [i-4] {s} ptr0 w mem) + // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem) + // cond: + // result: (MOVWUloadidx4 ptr idx mem) for { - i := v.AuxInt - s := v.Aux _ = v.Args[2] - ptr0 := v.Args[0] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - _ = x.Args[2] - ptr1 := x.Args[0] - if w != x.Args[1] { + if v_1.Op != OpARM64SLLconst { break } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + if v_1.AuxInt != 2 { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w) + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVWUloadidx4) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstoreidx ptr1 idx1 
w mem) + // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem) + // cond: + // result: (MOVWUloadidx4 ptr idx mem) for { - if v.AuxInt != 4 { - break - } - s := v.Aux _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + if v_0.Op != OpARM64SLLconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + if v_0.AuxInt != 2 { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w) + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVWUloadidx4) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVDstoreidx ptr1 (SLLconst [2] idx1) w mem) + // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) for { - if v.AuxInt != 4 { - break - } - s := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { - break - } - if v_0.AuxInt != 2 { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx4 { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - if w != x.Args[2] { + ptr := 
v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWstorezeroidx { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 2 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } return false } -func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem)) - // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVDstore [i-4] {s} ptr0 w0 mem) +func rewriteValueARM64_OpARM64MOVWUloadidx4_0(v *Value) bool { + // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVWUload [c<<2] ptr mem) for { - i := v.AuxInt - s := v.Aux _ = v.Args[2] - ptr0 := v.Args[0] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - _ = x.Args[2] - ptr1 := x.Args[0] - w0 := x.Args[1] - if w0.Op != OpARM64SRLconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(ptr0) - v.AddArg(w0) + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVWUload) + v.AuxInt = c << 2 + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) 
x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstoreidx ptr1 idx1 w0 mem) + // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) for { - if v.AuxInt != 4 { - break - } - s := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx { - break - } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { - break - } - if w0.AuxInt != j-32 { + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWstorezeroidx4 { break } - if w != w0.Args[0] { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWUreg_0(v *Value) bool { + // match: (MOVWUreg x:(MOVBUload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUload { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v.AddArg(idx1) - v.AddArg(w0) - v.AddArg(mem) + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) 
- // result: (MOVDstoreidx ptr1 (SLLconst [2] idx1) w0 mem) + // match: (MOVWUreg x:(MOVHUload _ _)) + // cond: + // result: (MOVDreg x) for { - if v.AuxInt != 4 { + x := v.Args[0] + if x.Op != OpARM64MOVHUload { break } - s := v.Aux - _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVWUload { break } - if v_0.AuxInt != 2 { + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUloadidx { break } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUloadidx { break } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstoreidx4 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVWUloadidx { break } - _ = x.Args[3] - ptr1 := x.Args[0] - idx1 := x.Args[1] - w0 := x.Args[2] - if w0.Op != OpARM64SRLconst { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUloadidx2 { break } - if w0.AuxInt != j-32 { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVWUloadidx4 { break 
} - if w != w0.Args[0] { + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVBUreg { break } - mem := x.Args[3] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + v.reset(OpARM64MOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpARM64MOVHUreg { break } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 2 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { - // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) +func rewriteValueARM64_OpARM64MOVWUreg_10(v *Value) bool { + // match: (MOVWUreg x:(MOVWUreg _)) // cond: - // result: (MOVWstore [c] ptr val mem) + // result: (MOVDreg x) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x := v.Args[0] + if x.Op != OpARM64MOVWUreg { break } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVWstore) - v.AuxInt = c - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) + // match: (MOVWUreg (ANDconst [c] x)) // cond: - // result: (MOVWstore [c] idx val mem) + // result: (ANDconst [c&(1<<32-1)] x) for { - _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64ANDconst { break } c := v_0.AuxInt - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVWstore) - v.AuxInt = c - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + x := v_0.Args[0] + v.reset(OpARM64ANDconst) + v.AuxInt = c & (1<<32 - 1) + v.AddArg(x) 
return true } - // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem) + // match: (MOVWUreg (MOVDconst [c])) // cond: - // result: (MOVWstoreidx4 ptr idx val mem) + // result: (MOVDconst [int64(uint32(c))]) for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { - break - } - if v_1.AuxInt != 2 { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint32(c)) return true } - // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx4 ptr idx val mem) + // match: (MOVWUreg (SLLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<32-1, sc) + // result: (UBFIZ [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) for { - _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64SLLconst { break } - if v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVWstorezeroidx ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { - break - } - if v_2.AuxInt != 0 { - break - } - mem := v.Args[3] - v.reset(OpARM64MOVWstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) - // cond: - // result: (MOVWstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWreg { - break - } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: 
(MOVWstoreidx ptr idx (MOVWUreg x) mem) - // cond: - // result: (MOVWstoreidx ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWUreg { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<32-1, sc)) { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx) - v.AddArg(ptr) - v.AddArg(idx) + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx ptr idx w mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { - break - } - if v_1.AuxInt != 4 { - break - } - idx := v_1.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpARM64SRLconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpARM64MOVWstoreidx { - break - } - _ = x.Args[3] - if ptr != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpARM64MOVDstoreidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { - // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem) - // cond: - // result: (MOVWstore [c<<2] ptr val mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - val := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64MOVWstore) - v.AuxInt = c << 2 - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) - // cond: - // result: (MOVWstorezeroidx4 ptr idx mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 
:= v.Args[2] - if v_2.Op != OpARM64MOVDconst { - break - } - if v_2.AuxInt != 0 { - break - } - mem := v.Args[3] - v.reset(OpARM64MOVWstorezeroidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) return true } - // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem) - // cond: - // result: (MOVWstoreidx4 ptr idx x mem) + // match: (MOVWUreg (SRLconst [sc] x)) + // cond: isARM64BFMask(sc, 1<<32-1, 0) + // result: (UBFX [arm64BFAuxInt(sc, 32)] x) for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWreg { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) - // cond: - // result: (MOVWstoreidx4 ptr idx x mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVWUreg { + sc := v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<32-1, 0)) { break } - x := v_2.Args[0] - mem := v.Args[3] - v.reset(OpARM64MOVWstoreidx4) - v.AddArg(ptr) - v.AddArg(idx) + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, 32) v.AddArg(x) - v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { +func rewriteValueARM64_OpARM64MOVWload_0(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstorezero [off1+off2] {sym} ptr mem) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -14955,41 +16635,16 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - 
v.reset(OpARM64MOVWstorezero) + v.reset(OpARM64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { - break - } - v.reset(OpARM64MOVWstorezero) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem) + // match: (MOVWload [off] {sym} (ADD ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWstorezeroidx ptr idx mem) + // result: (MOVWloadidx ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -15005,15 +16660,15 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWstorezeroidx) + v.reset(OpARM64MOVWloadidx) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) // cond: off == 0 && sym == nil - // result: (MOVWstorezeroidx4 ptr idx mem) + // result: (MOVWloadidx4 ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -15032,119 +16687,66 @@ func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { if !(off == 0 && sym == nil) { break } - v.reset(OpARM64MOVWstorezeroidx4) + v.reset(OpARM64MOVWloadidx4) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem)) - // cond: x.Uses == 1 && 
areAdjacentOffsets(i,j,4) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) - // result: (MOVDstorezero [min(i,j)] {s} ptr0 mem) - for { - i := v.AuxInt - s := v.Aux - _ = v.Args[1] - ptr0 := v.Args[0] - x := v.Args[1] - if x.Op != OpARM64MOVWstorezero { - break - } - j := x.AuxInt - if x.Aux != s { - break - } - _ = x.Args[1] - ptr1 := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && areAdjacentOffsets(i, j, 4) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { - break - } - v.reset(OpARM64MOVDstorezero) - v.AuxInt = min(i, j) - v.Aux = s - v.AddArg(ptr0) - v.AddArg(mem) - return true - } - // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) - // result: (MOVDstorezeroidx ptr1 idx1 mem) + // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - if v.AuxInt != 4 { - break - } - s := v.Aux + off1 := v.AuxInt + sym1 := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64ADD { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - x := v.Args[1] - if x.Op != OpARM64MOVWstorezeroidx { + if v_0.Op != OpARM64MOVDaddr { break } - _ = x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v.AddArg(idx1) + v.reset(OpARM64MOVWload) + v.AuxInt = off1 + off2 
+ v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem)) - // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) - // result: (MOVDstorezeroidx ptr1 (SLLconst [2] idx1) mem) + // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDconst [0]) for { - if v.AuxInt != 4 { - break - } - s := v.Aux + off := v.AuxInt + sym := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDshiftLL { - break - } - if v_0.AuxInt != 2 { - break - } - _ = v_0.Args[1] - ptr0 := v_0.Args[0] - idx0 := v_0.Args[1] - x := v.Args[1] - if x.Op != OpARM64MOVWstorezeroidx4 { + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWstorezero { break } - _ = x.Args[2] - ptr1 := x.Args[0] - idx1 := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + _ = v_1.Args[1] + ptr2 := v_1.Args[0] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr1) - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) - v0.AuxInt = 2 - v0.AddArg(idx1) - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } return false } -func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { - // match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVWloadidx_0(v *Value) bool { + // match: (MOVWloadidx ptr (MOVDconst [c]) mem) // cond: - // result: (MOVWstorezero [c] ptr mem) + // result: (MOVWload [c] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -15154,15 +16756,15 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVWstorezero) + 
v.reset(OpARM64MOVWload) v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem) + // match: (MOVWloadidx (MOVDconst [c]) ptr mem) // cond: - // result: (MOVWstorezero [c] idx mem) + // result: (MOVWload [c] ptr mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -15170,17 +16772,17 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { break } c := v_0.AuxInt - idx := v.Args[1] + ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWstorezero) + v.reset(OpARM64MOVWload) v.AuxInt = c - v.AddArg(idx) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem) + // match: (MOVWloadidx ptr (SLLconst [2] idx) mem) // cond: - // result: (MOVWstorezeroidx4 ptr idx mem) + // result: (MOVWloadidx4 ptr idx mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -15193,15 +16795,15 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { } idx := v_1.Args[0] mem := v.Args[2] - v.reset(OpARM64MOVWstorezeroidx4) + v.reset(OpARM64MOVWloadidx4) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem) + // match: (MOVWloadidx (SLLconst [2] idx) ptr mem) // cond: - // result: (MOVWstorezeroidx4 ptr idx mem) + // result: (MOVWloadidx4 ptr idx mem) for { _ = v.Args[2] v_0 := v.Args[0] @@ -15214,53 +16816,39 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] - v.reset(OpARM64MOVWstorezeroidx4) + v.reset(OpARM64MOVWloadidx4) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstorezeroidx ptr idx mem) + // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) + // result: (MOVDconst [0]) for { _ = 
v.Args[2] ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ADDconst { - break - } - if v_1.AuxInt != 4 { - break - } - idx := v_1.Args[0] - x := v.Args[2] - if x.Op != OpARM64MOVWstorezeroidx { - break - } - _ = x.Args[2] - if ptr != x.Args[0] { - break - } - if idx != x.Args[1] { + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWstorezeroidx { break } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) { break } - v.reset(OpARM64MOVDstorezeroidx) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } return false } -func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool { - // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) +func rewriteValueARM64_OpARM64MOVWloadidx4_0(v *Value) bool { + // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem) // cond: - // result: (MOVWstorezero [c<<2] ptr mem) + // result: (MOVWload [c<<2] ptr mem) for { _ = v.Args[2] ptr := v.Args[0] @@ -15270,3702 +16858,5115 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool { } c := v_1.AuxInt mem := v.Args[2] - v.reset(OpARM64MOVWstorezero) + v.reset(OpARM64MOVWload) v.AuxInt = c << 2 v.AddArg(ptr) v.AddArg(mem) return true } + // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) + // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) + // result: (MOVDconst [0]) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWstorezeroidx4 { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + idx2 := v_2.Args[1] + if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } return false } -func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { - // match: (MUL (NEG x) y) +func rewriteValueARM64_OpARM64MOVWreg_0(v 
*Value) bool { + // match: (MOVWreg x:(MOVBload _ _)) // cond: - // result: (MNEG x y) + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64NEG { + x := v.Args[0] + if x.Op != OpARM64MOVBload { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64MNEG) + _ = x.Args[1] + v.reset(OpARM64MOVDreg) v.AddArg(x) - v.AddArg(y) return true } - // match: (MUL y (NEG x)) + // match: (MOVWreg x:(MOVBUload _ _)) // cond: - // result: (MNEG x y) + // result: (MOVDreg x) for { - _ = v.Args[1] - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64NEG { + x := v.Args[0] + if x.Op != OpARM64MOVBUload { break } - x := v_1.Args[0] - v.reset(OpARM64MNEG) + _ = x.Args[1] + v.reset(OpARM64MOVDreg) v.AddArg(x) - v.AddArg(y) return true } - // match: (MUL x (MOVDconst [-1])) + // match: (MOVWreg x:(MOVHload _ _)) // cond: - // result: (NEG x) + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != -1 { + if x.Op != OpARM64MOVHload { break } - v.reset(OpARM64NEG) + _ = x.Args[1] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL (MOVDconst [-1]) x) + // match: (MOVWreg x:(MOVHUload _ _)) // cond: - // result: (NEG x) + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != -1 { + x := v.Args[0] + if x.Op != OpARM64MOVHUload { break } - x := v.Args[1] - v.reset(OpARM64NEG) + _ = x.Args[1] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL _ (MOVDconst [0])) + // match: (MOVWreg x:(MOVWload _ _)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDreg x) for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != 0 { + x := v.Args[0] + if x.Op != OpARM64MOVWload { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + _ = x.Args[1] + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL 
(MOVDconst [0]) _) + // match: (MOVWreg x:(MOVBloadidx _ _ _)) // cond: - // result: (MOVDconst [0]) + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 0 { + x := v.Args[0] + if x.Op != OpARM64MOVBloadidx { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + _ = x.Args[2] + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL x (MOVDconst [1])) + // match: (MOVWreg x:(MOVBUloadidx _ _ _)) // cond: - // result: x + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - if v_1.AuxInt != 1 { + if x.Op != OpARM64MOVBUloadidx { break } - v.reset(OpCopy) - v.Type = x.Type + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL (MOVDconst [1]) x) + // match: (MOVWreg x:(MOVHloadidx _ _ _)) // cond: - // result: x + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 1 { + x := v.Args[0] + if x.Op != OpARM64MOVHloadidx { break } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) + // match: (MOVWreg x:(MOVHUloadidx _ _ _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + if x.Op != OpARM64MOVHUloadidx { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) + // match: (MOVWreg x:(MOVWloadidx _ _ _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - 
x := v.Args[1] - if !(isPowerOfTwo(c)) { + x := v.Args[0] + if x.Op != OpARM64MOVWloadidx { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { - b := v.Block - _ = b - // match: (MUL x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) +func rewriteValueARM64_OpARM64MOVWreg_10(v *Value) bool { + // match: (MOVWreg x:(MOVHloadidx2 _ _ _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && c >= 3) { + if x.Op != OpARM64MOVHloadidx2 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) + // match: (MOVWreg x:(MOVHUloadidx2 _ _ _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c-1) && c >= 3) { + x := v.Args[0] + if x.Op != OpARM64MOVHUloadidx2 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) + _ = x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + // match: (MOVWreg x:(MOVWloadidx4 _ _ _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && c >= 7) { + if x.Op != OpARM64MOVWloadidx4 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) + _ 
= x.Args[2] + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + // match: (MOVWreg x:(MOVBreg _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c+1) && c >= 7) { + x := v.Args[0] + if x.Op != OpARM64MOVBreg { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDreg) v.AddArg(x) return true } - // match: (MUL x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + // match: (MOVWreg x:(MOVBUreg _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + if x.Op != OpARM64MOVBUreg { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + // match: (MOVWreg x:(MOVHreg _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3)) { + x := v.Args[0] + if x.Op != OpARM64MOVHreg { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL x (MOVDconst [c])) - // cond: c%5 == 0 && 
isPowerOfTwo(c/5) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + // match: (MOVWreg x:(MOVHreg _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + if x.Op != OpARM64MOVHreg { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + // match: (MOVWreg x:(MOVWreg _)) + // cond: + // result: (MOVDreg x) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5)) { + x := v.Args[0] + if x.Op != OpARM64MOVWreg { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDreg) + v.AddArg(x) return true } - // match: (MUL x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + // match: (MOVWreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(c))]) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7)) { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c)) return true } - // match: 
(MUL (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + // match: (MOVWreg (SLLconst [lc] x)) + // cond: lc < 32 + // result: (SBFIZ [arm64BFAuxInt(lc, 32-lc)] x) for { - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64SLLconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7)) { + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc < 32) { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BFAuxInt(lc, 32-lc) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64MUL_20(v *Value) bool { +func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool { b := v.Block _ = b - // match: (MUL x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + config := b.Func.Config + _ = config + // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) + // cond: + // result: (FMOVSstore [off] {sym} ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + if v_1.Op != OpARM64FMOVSfpgp { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64FMOVSstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + // match: 
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { - _ = v.Args[1] + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64ADDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (MUL (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [c*d]) + // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVWstoreidx ptr idx val mem) for { - _ = v.Args[1] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64ADD { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c * d + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (MUL (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c*d]) + // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) + // cond: off == 0 && sym == nil + // result: (MOVWstoreidx4 ptr idx val mem) for { - _ = v.Args[1] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op 
!= OpARM64MOVDconst { + if v_0.Op != OpARM64ADDshiftLL { break } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_0.AuxInt != 2 { break } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c * d + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWstoreidx4) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { - // match: (MULW (NEG x) y) - // cond: - // result: (MNEGW x y) + // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - _ = v.Args[1] + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64NEG { + if v_0.Op != OpARM64MOVDaddr { break } - x := v_0.Args[0] - y := v.Args[1] - v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (MULW y (NEG x)) + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) // cond: - // result: (MNEGW x y) + // result: (MOVWstorezero [off] {sym} ptr mem) for { - _ = v.Args[1] - y := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64NEG { + if v_1.Op != OpARM64MOVDconst { break } - x := v_1.Args[0] - v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + if v_1.AuxInt != 0 { + break + } + mem := v.Args[2] + 
v.reset(OpARM64MOVWstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MULW x (MOVDconst [c])) - // cond: int32(c)==-1 - // result: (NEG x) + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) for { - _ = v.Args[1] - x := v.Args[0] + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == -1) { + if v_1.Op != OpARM64MOVWreg { break } - v.reset(OpARM64NEG) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==-1 - // result: (NEG x) + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(int32(c) == -1) { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64NEG) + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARM64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MULW _ (MOVDconst [c])) - // cond: int32(c)==0 - // result: (MOVDconst [0]) + // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVDstore [i-4] {s} ptr0 w mem) for { - _ = v.Args[1] + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(int32(c) == 0) { + if v_1.AuxInt != 32 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: 
(MULW (MOVDconst [c]) _) - // cond: int32(c)==0 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstore { break } - c := v_0.AuxInt - if !(int32(c) == 0) { + if x.AuxInt != i-4 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: int32(c)==1 - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if x.Aux != s { break } - c := v_1.AuxInt - if !(int32(c) == 1) { + _ = x.Args[2] + ptr1 := x.Args[0] + if w != x.Args[1] { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==1 - // result: x + // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVDstoreidx ptr1 idx1 w mem) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v.AuxInt != 4 { break } - c := v_0.AuxInt - x := v.Args[1] - if !(int32(c) == 1) { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - _ = v.Args[1] - x := v.Args[0] + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + if v_1.AuxInt != 32 { break } - 
v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx { break } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c)) { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { + break + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { +func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool { b := v.Block _ = b - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) + // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVDstoreidx ptr1 (SLLconst [2] idx1) w mem) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + if v.AuxInt != 4 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - _ = v.Args[1] + s := v.Aux + _ = v.Args[2] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64ADDshiftLL { break } - c := v_0.AuxInt - x := v.Args[1] - if 
!(isPowerOfTwo(c-1) && int32(c) >= 3) { + if v_0.AuxInt != 2 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - _ = v.Args[1] - x := v.Args[0] + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + if v_1.AuxInt != 32 { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx4 { break } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + if w != x.Args[2] { break } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + break + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 2 + v0.AddArg(idx1) v.AddArg(v0) - v.AddArg(x) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (MULW x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem)) + // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x) + // result: 
(MOVDstore [i-4] {s} ptr0 w0 mem) for { - _ = v.Args[1] - x := v.Args[0] + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + ptr0 := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstore { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if x.AuxInt != i-4 { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + if x.Aux != s { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = x.Args[2] + ptr1 := x.Args[0] + w0 := x.Args[1] + if w0.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + if w0.AuxInt != j-32 { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if w 
!= w0.Args[0] { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + mem := x.Args[2] + if !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(ptr0) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (MULW x (MOVDconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVDstoreidx ptr1 idx1 w0 mem) for { - _ = v.Args[1] - x := v.Args[0] + if v.AuxInt != 4 { + break + } + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { + break + } + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + 
if w0.Op != OpARM64SRLconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + if w0.AuxInt != j-32 { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - return false -} -func rewriteValueARM64_OpARM64MULW_20(v *Value) bool { - b := v.Block - _ = b - // match: (MULW x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if w != w0.Args[0] { break } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (MULW (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem)) + // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x) + // result: (MOVDstoreidx ptr1 (SLLconst [2] idx1) w0 mem) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v.AuxInt != 4 { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + s := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != 
OpARM64ADDshiftLL { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(int32(c)*int32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.AuxInt != 2 { break } - c := v_0.AuxInt + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_1.Op != OpARM64SRLconst { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstoreidx4 { + break + } + _ = x.Args[3] + ptr1 := x.Args[0] + idx1 := x.Args[1] + w0 := x.Args[2] + if w0.Op != OpARM64SRLconst { + break + } + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) { + break + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr1) + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type) + v0.AuxInt = 2 + v0.AddArg(idx1) + v.AddArg(v0) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (MULW (MOVDconst [d]) (MOVDconst [c])) + return false +} +func rewriteValueARM64_OpARM64MOVWstoreidx_0(v *Value) bool { + // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) // cond: - // result: (MOVDconst [int64(int32(c)*int32(d))]) + // result: (MOVWstore [c] ptr val mem) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt + _ = v.Args[3] + ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVWstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + 
v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { - // match: (MVN (MOVDconst [c])) + // match: (MOVWstoreidx (MOVDconst [c]) idx val mem) // cond: - // result: (MOVDconst [^c]) + // result: (MOVWstore [c] idx val mem) for { + _ = v.Args[3] v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = ^c + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVWstore) + v.AuxInt = c + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { - // match: (NEG (MUL x y)) + // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem) // cond: - // result: (MNEG x y) + // result: (MOVWstoreidx4 ptr idx val mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MUL { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SLLconst { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64MNEG) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (NEG (MULW x y)) - // cond: - // result: (MNEGW x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MULW { + if v_1.AuxInt != 2 { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpARM64MNEGW) - v.AddArg(x) - v.AddArg(y) + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx4) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (NEG (MOVDconst [c])) + // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem) // cond: - // result: (MOVDconst [-c]) + // result: (MOVWstoreidx4 ptr idx val mem) for { + _ = v.Args[3] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64SLLconst { break } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -c - return true - } - return false -} -func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { - // match: (NotEqual (FlagEQ)) - // cond: - 
// result: (MOVDconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + if v_0.AuxInt != 2 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + idx := v_0.Args[0] + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx4) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (NotEqual (FlagLT_ULT)) + // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem) // cond: - // result: (MOVDconst [1]) + // result: (MOVWstorezeroidx ptr idx mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 - return true - } - // match: (NotEqual (FlagLT_UGT)) - // cond: - // result: (MOVDconst [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + if v_2.AuxInt != 0 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + mem := v.Args[3] + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (NotEqual (FlagGT_ULT)) + // match: (MOVWstoreidx ptr idx (MOVWreg x) mem) // cond: - // result: (MOVDconst [1]) + // result: (MOVWstoreidx ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWreg { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) return true } - // match: (NotEqual (FlagGT_UGT)) + // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem) // cond: - // result: (MOVDconst [1]) + // result: (MOVWstoreidx ptr idx x mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + 
x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) return true } - // match: (NotEqual (InvertFlags x)) - // cond: - // result: (NotEqual x) + // match: (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDstoreidx ptr idx w mem) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { break } - x := v_0.Args[0] - v.reset(OpARM64NotEqual) - v.AddArg(x) + if v_1.AuxInt != 4 { + break + } + idx := v_1.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64SRLconst { + break + } + if v_2.AuxInt != 32 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpARM64MOVWstoreidx { + break + } + _ = x.Args[3] + if ptr != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpARM64MOVDstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } return false } -func rewriteValueARM64_OpARM64OR_0(v *Value) bool { - // match: (OR x (MOVDconst [c])) +func rewriteValueARM64_OpARM64MOVWstoreidx4_0(v *Value) bool { + // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem) // cond: - // result: (ORconst [c] x) + // result: (MOVWstore [c<<2] ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] + _ = v.Args[3] + ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = c - v.AddArg(x) + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64MOVWstore) + v.AuxInt = c << 2 + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (OR (MOVDconst [c]) x) + // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) // cond: - // result: (ORconst [c] x) + // result: (MOVWstorezeroidx4 ptr idx mem) for { - _ = v.Args[1] 
- v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ORconst) - v.AuxInt = c - v.AddArg(x) + if v_2.AuxInt != 0 { + break + } + mem := v.Args[3] + v.reset(OpARM64MOVWstorezeroidx4) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (OR x x) + // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem) // cond: - // result: x + // result: (MOVWstoreidx4 ptr idx x mem) for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWreg { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx4) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) + v.AddArg(mem) return true } - // match: (OR x (MVN y)) + // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) // cond: - // result: (ORN x y) + // result: (MOVWstoreidx4 ptr idx x mem) for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MVN { + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVWUreg { break } - y := v_1.Args[0] - v.reset(OpARM64ORN) + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpARM64MOVWstoreidx4) + v.AddArg(ptr) + v.AddArg(idx) v.AddArg(x) - v.AddArg(y) + v.AddArg(mem) return true } - // match: (OR (MVN y) x) - // cond: - // result: (ORN x y) + return false +} +func rewriteValueARM64_OpARM64MOVWstorezero_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+off2] {sym} ptr mem) for { + off1 := v.AuxInt + sym := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { + if v_0.Op != OpARM64ADDconst { 
break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64ORN) - v.AddArg(x) - v.AddArg(y) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (OR x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORshiftLL x0 y [c]) + // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { + off1 := v.AuxInt + sym1 := v.Aux _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(OpARM64ORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64MOVWstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (OR x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftLL x0 y [c]) + // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWstorezeroidx ptr idx mem) for { + off := v.AuxInt + sym := v.Aux _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { - break + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { + break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { break } - v.reset(OpARM64ORshiftLL) - 
v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64MOVWstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (OR x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORshiftRL x0 y [c]) + // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) + // cond: off == 0 && sym == nil + // result: (MOVWstorezeroidx4 ptr idx mem) for { + off := v.AuxInt + sym := v.Aux _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDshiftLL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if v_0.AuxInt != 2 { break } - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(off == 0 && sym == nil) { + break + } + v.reset(OpARM64MOVWstorezeroidx4) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (OR x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftRL x0 y [c]) + // match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem)) + // cond: x.Uses == 1 && areAdjacentOffsets(i,j,4) && is32Bit(min(i,j)) && isSamePtr(ptr0, ptr1) && clobber(x) + // result: (MOVDstorezero [min(i,j)] {s} ptr0 mem) for { + i := v.AuxInt + s := v.Aux _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { + ptr0 := v.Args[0] + x := v.Args[1] + if x.Op != OpARM64MOVWstorezero { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + j := x.AuxInt + if x.Aux != s { break } - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + _ = x.Args[1] + ptr1 := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && areAdjacentOffsets(i, j, 4) && is32Bit(min(i, j)) && isSamePtr(ptr0, ptr1) && clobber(x)) { + break + } + v.reset(OpARM64MOVDstorezero) + v.AuxInt = min(i, j) + v.Aux = s + v.AddArg(ptr0) + v.AddArg(mem) return true } - // match: (OR x0 
x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORshiftRA x0 y [c]) + // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem)) + // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x) + // result: (MOVDstorezeroidx ptr1 idx1 mem) for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + if v.AuxInt != 4 { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + s := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADD { break } - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64OR_10(v *Value) bool { - b := v.Block - _ = b - // match: (OR x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (ORshiftRA x0 y [c]) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { + _ = v_0.Args[1] + ptr0 := v_0.Args[0] + idx0 := v_0.Args[1] + x := v.Args[1] + if x.Op != OpARM64MOVWstorezeroidx { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + _ = x.Args[2] + ptr1 := x.Args[0] + idx1 := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) { break } - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg(ptr1) + v.AddArg(idx1) + v.AddArg(mem) return true } - // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) - // cond: ac == ^((1< [2] idx1) mem) for { + if v.AuxInt != 4 { + break + } + s := v.Aux _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64UBFIZ { + if v_0.Op != OpARM64ADDshiftLL { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64ANDconst { + if v_0.AuxInt != 2 { break } - ac := v_1.AuxInt - y := 
v_1.Args[0] - if !(ac == ^((1< o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDstorezeroidx ptr idx mem) for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64ADDconst { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + if v_1.AuxInt != 4 { break } - if o1.AuxInt != 16 { + idx := v_1.Args[0] + x := v.Args[2] + if x.Op != OpARM64MOVWstorezeroidx { break } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + _ = x.Args[2] + if ptr != x.Args[0] { break } - if s0.AuxInt != 24 { + if idx != x.Args[1] { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + v.reset(OpARM64MOVDstorezeroidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool { + // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) + // cond: + // result: (MOVWstorezero [c<<2] ptr mem) + for { 
+ _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - i3 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARM64MOVWstorezero) + v.AuxInt = c << 2 + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool { + b := v.Block + _ = b + // match: (MSUB a x (MOVDconst [-1])) + // cond: + // result: (ADD a x) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if v_2.AuxInt != -1 { break } - i2 := x1.AuxInt - if x1.Aux != s { + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUB a _ (MOVDconst [0])) + // cond: + // result: a + for { + _ = v.Args[2] + a := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x1.Args[1] - if p != x1.Args[0] { + if v_2.AuxInt != 0 { break } - if mem != x1.Args[1] { + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MSUB a x (MOVDconst [1])) + // cond: + // result: (SUB a x) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + if v_2.AuxInt != 1 { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SUBshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - i1 := x2.AuxInt - if x2.Aux != s { + c := v_2.AuxInt + if !(isPowerOfTwo(c)) { break } - _ = x2.Args[1] - if p != x2.Args[0] { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + 
v.AddArg(x) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && c>=3 + // result: (SUB a (ADDshiftLL x x [log2(c-1)])) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if mem != x2.Args[1] { + c := v_2.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { break } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && c>=7 + // result: (ADD a (SUBshiftLL x x [log2(c+1)])) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + c := v_2.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { break } - i0 := x3.AuxInt - if x3.Aux != s { + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x3.Args[1] - if p != x3.Args[0] { + c := v_2.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if mem != x3.Args[1] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != 
OpARM64MOVDconst { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_2.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (MSUB a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i0 
:= x3.AuxInt - s := x3.Aux - _ = x3.Args[1] - p := x3.Args[0] - mem := x3.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if s0.AuxInt != 24 { + c := v_2.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + c := v_2.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - i3 := x0.AuxInt - if x0.Aux != s { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool { + b := v.Block + _ = b + // match: (MSUB a (MOVDconst [-1]) x) + // cond: + // result: (ADD a x) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x0.Args[1] - if p != x0.Args[0] { + if v_1.AuxInt != -1 { break } - if mem != x0.Args[1] { + x := v.Args[2] + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUB a (MOVDconst [0]) _) + // cond: + // result: a + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := 
v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + if v_1.AuxInt != 0 { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MSUB a (MOVDconst [1]) x) + // cond: + // result: (SUB a x) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - i2 := x1.AuxInt - if x1.Aux != s { + if v_1.AuxInt != 1 { break } - _ = x1.Args[1] - if p != x1.Args[0] { + x := v.Args[2] + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SUBshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x1.Args[1] { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c)) { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && c>=3 + // result: (SUB a (ADDshiftLL x x [log2(c-1)])) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c-1) && c >= 3) { break } - i1 := x2.AuxInt - if x2.Aux != s { + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && c>=7 + // result: (ADD a (SUBshiftLL x x [log2(c+1)])) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x2.Args[1] - if p != x2.Args[0] { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c+1) && c >= 7) { break } - if 
mem != x2.Args[1] { + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_1.AuxInt + x := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if o0.AuxInt != 8 { + c := v_1.AuxInt + x := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_1.AuxInt + x := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if s0.AuxInt != 24 { + c := v_1.AuxInt + x := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 9) 
+ v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUB_20(v *Value) bool { + b := v.Block + _ = b + // match: (MSUB (MOVDconst [c]) x y) + // cond: + // result: (ADDconst [c] (MNEG x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARM64ADDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (MSUB a (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (SUBconst [c*d] a) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x0.AuxInt != 3 { + c := v_1.AuxInt + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + d := v_2.AuxInt + v.reset(OpARM64SUBconst) + v.AuxInt = c * d + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool { + b := v.Block + _ = b + // match: (MSUBW a x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (ADD a x) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + c := v_2.AuxInt + if !(int32(c) == -1) { break } - if x1.AuxInt != 2 { + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: a + for { + _ = v.Args[2] + a := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if x1.Aux != s { + c := v_2.AuxInt + if !(int32(c) == 0) { break } - _ = x1.Args[1] - if p != x1.Args[0] { + v.reset(OpCopy) + 
v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: int32(c)==1 + // result: (SUB a x) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if mem != x1.Args[1] { + c := v_2.AuxInt + if !(int32(c) == 1) { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SUBshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + c := v_2.AuxInt + if !(isPowerOfTwo(c)) { break } - if x2.AuxInt != 1 { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c)>=3 + // result: (SUB a (ADDshiftLL x x [log2(c-1)])) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if x2.Aux != s { + c := v_2.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c)>=7 + // result: (ADD a (SUBshiftLL x x [log2(c+1)])) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x2.Args[1] { + c := v_2.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, 
OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + c := v_2.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - _ = x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - if mem != x3.Args[2] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_2.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v0.AddArg(ptr0) - 
v0.AddArg(idx0) - v0.AddArg(mem) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) for { - t := v.Type - _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x3.Args[2] - ptr0 := x3.Args[0] - idx0 := x3.Args[1] - mem := x3.Args[2] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { + c := v_2.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - if o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { - break - } - if s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - if x0.AuxInt != 3 { - break - } - s := x0.Aux - _ = x0.Args[1] - p := 
x0.Args[0] - if mem != x0.Args[1] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + for { + _ = v.Args[2] + a := v.Args[0] + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + c := v_2.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool { + b := v.Block + _ = b + // match: (MSUBW a (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (ADD a x) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x1.AuxInt != 2 { + c := v_1.AuxInt + x := v.Args[2] + if !(int32(c) == -1) { break } - if x1.Aux != s { + v.reset(OpARM64ADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: a + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x1.Args[1] - if p != x1.Args[0] { + c := v_1.AuxInt + if !(int32(c) == 0) { break } - if mem != x1.Args[1] { + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: (SUB a x) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + c := v_1.AuxInt + x := v.Args[2] + if 
!(int32(c) == 1) { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + v.reset(OpARM64SUB) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SUBshiftLL a x [log2(c)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x2.AuxInt != 1 { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c)) { break } - if x2.Aux != s { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && int32(c)>=3 + // result: (SUB a (ADDshiftLL x x [log2(c-1)])) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x2.Args[1] - p1 := x2.Args[0] - if p1.Op != OpARM64ADD { + c := v_1.AuxInt + x := v.Args[2] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x2.Args[1] { + v.reset(OpARM64SUB) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && int32(c)>=7 + // result: (ADD a (SUBshiftLL x x [log2(c+1)])) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_1.AuxInt + x := 
v.Args[2] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) + v.reset(OpARM64ADD) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + c := v_1.AuxInt + x := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - if o1.AuxInt != 16 { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 3) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // 
result: (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + c := v_1.AuxInt + x := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - if s0.AuxInt != 24 { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 5) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + c := v_1.AuxInt + x := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c / 7) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + c := v_1.AuxInt + x := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - if x0_1.AuxInt != 3 { + v.reset(OpARM64SUBshiftLL) + v.AuxInt = log2(c / 9) + v.AddArg(a) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool { + b := v.Block + _ = b + // match: (MSUBW 
(MOVDconst [c]) x y) + // cond: + // result: (ADDconst [c] (MNEGW x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - idx := x0_1.Args[0] - mem := x0.Args[2] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARM64ADDconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (SUBconst [int64(int32(c)*int32(d))] a) + for { + _ = v.Args[2] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + c := v_1.AuxInt + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + d := v_2.AuxInt + v.reset(OpARM64SUBconst) + v.AuxInt = int64(int32(c) * int32(d)) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM64_OpARM64MUL_0(v *Value) bool { + // match: (MUL (NEG x) y) + // cond: + // result: (MNEG x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64NEG { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64MNEG) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MUL y (NEG x)) + // cond: + // result: (MNEG x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64NEG { break } - if x1_1.AuxInt != 2 { + x := v_1.Args[0] + v.reset(OpARM64MNEG) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MUL x (MOVDconst [-1])) + // cond: + // result: (NEG x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if idx != x1_1.Args[0] { + if v_1.AuxInt != -1 { break } - if mem != x1.Args[2] { + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MUL (MOVDconst [-1]) x) + // cond: + // result: (NEG x) + for { 
+ _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + if v_0.AuxInt != -1 { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { + x := v.Args[1] + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MUL _ (MOVDconst [0])) + // cond: + // result: (MOVDconst [0]) + for { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + if v_1.AuxInt != 0 { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MUL (MOVDconst [0]) _) + // cond: + // result: (MOVDconst [0]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if x2_1.AuxInt != 1 { + if v_0.AuxInt != 0 { break } - if idx != x2_1.Args[0] { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MUL x (MOVDconst [1])) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x2.Args[2] { + if v_1.AuxInt != 1 { break } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MUL (MOVDconst [1]) x) + // cond: + // result: x + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + if v_0.AuxInt != 1 { break } - _ = x3.Args[2] - if ptr != x3.Args[0] { + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if idx != x3.Args[1] { + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { break } - if mem != x3.Args[2] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + 
v.AddArg(x) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpARM64OR_20(v *Value) bool { +func rewriteValueARM64_OpARM64MUL_10(v *Value) bool { b := v.Block _ = b - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && c >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) for { - t := v.Type _ = v.Args[1] - y3 := v.Args[0] - if y3.Op != 
OpARM64MOVDnop { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && c >= 3) { break } - _ = x3.Args[2] - ptr := x3.Args[0] - idx := x3.Args[1] - mem := x3.Args[2] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && c >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c-1) && c >= 3) { break } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MUL x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if s0.AuxInt != 24 { + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && c >= 7) { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && c >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c+1) && c >= 7) { break } - _ = x0.Args[2] - if ptr != x0.Args[0] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + 
v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MUL x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if x0_1.AuxInt != 3 { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if idx != x0_1.Args[0] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if mem != x0.Args[2] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != 
OpARM64MOVDconst { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if x1_1.AuxInt != 2 { + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - if idx != x1_1.Args[0] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if mem != x1.Args[2] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MUL_20(v *Value) bool { + b := v.Block + _ = b + // match: (MUL x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x2 := y2.Args[0] - if x2.Op != 
OpARM64MOVBUloadidx { + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst { + c := v_0.AuxInt + x := v.Args[1] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - if x2_1.AuxInt != 1 { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [c*d]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if idx != x2_1.Args[0] { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x2.Args[2] { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c * d + return true + } + // match: (MUL (MOVDconst [d]) (MOVDconst [c])) + // cond: + // result: (MOVDconst [c*d]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, 
t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + c := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c * d return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) + return false +} +func rewriteValueARM64_OpARM64MULW_0(v *Value) bool { + // match: (MULW (NEG x) y) + // cond: + // result: (MNEGW x y) for { - t := v.Type _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + v_0 := v.Args[0] + if v_0.Op != OpARM64NEG { break } - if o0.AuxInt != 8 { + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARM64MNEGW) + v.AddArg(x) + 
v.AddArg(y) + return true + } + // match: (MULW y (NEG x)) + // cond: + // result: (MNEGW x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64NEG { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + x := v_1.Args[0] + v.reset(OpARM64MNEGW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (NEG x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_1.AuxInt + if !(int32(c) == -1) { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (NEG x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o2.AuxInt != 24 { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == -1) { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MULW _ (MOVDconst [c])) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + _ = v.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if o3.AuxInt != 32 { + c := v_1.AuxInt + if !(int32(c) == 0) { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULW (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o4.AuxInt != 40 { + c := v_0.AuxInt + if !(int32(c) == 0) { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==1 + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] 
+ if v_1.Op != OpARM64MOVDconst { break } - if o5.AuxInt != 48 { + c := v_1.AuxInt + if !(int32(c) == 1) { break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: x + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if s0.AuxInt != 56 { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == 1) { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { break } - i7 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - i6 := x1.AuxInt - if x1.Aux != s { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64MULW_10(v *Value) bool { + b := v.Block + _ = b + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x1.Args[1] - if p != x1.Args[0] { + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } 
- if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - i5 := x2.AuxInt - if x2.Aux != s { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = x2.Args[1] - if p != x2.Args[0] { + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - if mem != x2.Args[1] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARM64MOVDconst { break } - i4 := x3.AuxInt - if x3.Aux != s { + c := v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - _ = x3.Args[1] - if p != x3.Args[0] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if mem != x3.Args[1] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - i3 := x4.AuxInt - if x4.Aux != s { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = x4.Args[1] - if p != x4.Args[0] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - if mem != x4.Args[1] { + v.reset(OpARM64SLLconst) + v.AuxInt 
= log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - i2 := x5.AuxInt - if x5.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - _ = x5.Args[1] - if p != x5.Args[0] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM64_OpARM64MULW_20(v *Value) bool { + b := v.Block + _ = b + // match: (MULW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x5.Args[1] { + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { 
+ v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - i1 := x6.AuxInt - if x6.Aux != s { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c)*int32(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = x6.Args[1] - if p != x6.Args[0] { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x6.Args[1] { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) * int32(d)) + return true + } + // match: (MULW (MOVDconst [d]) (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(c)*int32(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { + c := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) * int32(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVN_0(v *Value) bool { + // match: (MVN (MOVDconst [c])) + // cond: + // result: (MOVDconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - i0 := x7.AuxInt - if x7.Aux != s { + c := v_0.AuxInt + 
v.reset(OpARM64MOVDconst) + v.AuxInt = ^c + return true + } + // match: (MVN x:(SLLconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftLL [c] y) + for { + x := v.Args[0] + if x.Op != OpARM64SLLconst { break } - _ = x7.Args[1] - if p != x7.Args[0] { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - if mem != x7.Args[1] { + v.reset(OpARM64MVNshiftLL) + v.AuxInt = c + v.AddArg(y) + return true + } + // match: (MVN x:(SRLconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftRL [c] y) + for { + x := v.Args[0] + if x.Op != OpARM64SRLconst { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) + v.reset(OpARM64MVNshiftRL) + v.AuxInt = c + v.AddArg(y) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] 
o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) + // match: (MVN x:(SRAconst [c] y)) + // cond: clobberIfDead(x) + // result: (MVNshiftRA [c] y) for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { + x := v.Args[0] + if x.Op != OpARM64SRAconst { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - i0 := x7.AuxInt - s := x7.Aux - _ = x7.Args[1] - p := x7.Args[0] - mem := x7.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { + v.reset(OpARM64MVNshiftRA) + v.AuxInt = c + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVNshiftLL_0(v *Value) bool { + // match: (MVNshiftLL 
(MOVDconst [c]) [d]) + // cond: + // result: (MOVDconst [^int64(uint64(c)<>uint64(d))]) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = ^(c >> uint64(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVNshiftRL_0(v *Value) bool { + // match: (MVNshiftRL (MOVDconst [c]) [d]) + // cond: + // result: (MOVDconst [^int64(uint64(c)>>uint64(d))]) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = ^int64(uint64(c) >> uint64(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEG_0(v *Value) bool { + // match: (NEG (MUL x y)) + // cond: + // result: (MNEG x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MUL { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64MNEG) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (NEG (MULW x y)) + // cond: + // result: (MNEGW x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MULW { break } - if o2.AuxInt != 24 { + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpARM64MNEGW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (NEG (MOVDconst [c])) + // cond: + // result: (MOVDconst [-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -c + return true + } + // match: (NEG x:(SLLconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftLL [c] y) + for { + x := v.Args[0] + if x.Op != OpARM64SLLconst { break } - if o3.AuxInt != 32 { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if 
o4.Op != OpARM64ORshiftLL { + v.reset(OpARM64NEGshiftLL) + v.AuxInt = c + v.AddArg(y) + return true + } + // match: (NEG x:(SRLconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftRL [c] y) + for { + x := v.Args[0] + if x.Op != OpARM64SRLconst { break } - if o4.AuxInt != 40 { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + v.reset(OpARM64NEGshiftRL) + v.AuxInt = c + v.AddArg(y) + return true + } + // match: (NEG x:(SRAconst [c] y)) + // cond: clobberIfDead(x) + // result: (NEGshiftRA [c] y) + for { + x := v.Args[0] + if x.Op != OpARM64SRAconst { break } - if o5.AuxInt != 48 { + c := x.AuxInt + y := x.Args[0] + if !(clobberIfDead(x)) { break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpARM64NEGshiftRA) + v.AuxInt = c + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEGshiftLL_0(v *Value) bool { + // match: (NEGshiftLL (MOVDconst [c]) [d]) + // cond: + // result: (MOVDconst [-int64(uint64(c)<>uint64(d))]) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -(c >> uint64(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64NEGshiftRL_0(v *Value) bool { + // match: (NEGshiftRL (MOVDconst [c]) [d]) + // cond: + // result: (MOVDconst [-int64(uint64(c)>>uint64(d))]) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i7 := x0.AuxInt - if x0.Aux != s { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -int64(uint64(c) >> uint64(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool { + // match: (NotEqual (FlagEQ)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != 
OpARM64FlagEQ { break } - _ = x0.Args[1] - if p != x0.Args[0] { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (NotEqual (FlagLT_ULT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_ULT { break } - if mem != x0.Args[1] { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagLT_UGT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagGT_ULT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_ULT { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagGT_UGT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_UGT { break } - i6 := x1.AuxInt - if x1.Aux != s { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (InvertFlags x)) + // cond: + // result: (NotEqual x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64InvertFlags { break } - _ = x1.Args[1] - if p != x1.Args[0] { + x := v_0.Args[0] + v.reset(OpARM64NotEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64OR_0(v *Value) bool { + // match: (OR x (MOVDconst [c])) + // cond: + // result: (ORconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if mem != x1.Args[1] { + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR (MOVDconst [c]) x) + // cond: + // result: (ORconst [c] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { + c := v_0.AuxInt + x := v.Args[1] + 
v.reset(OpARM64ORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x x) + // cond: + // result: x + for { + _ = v.Args[1] + x := v.Args[0] + if x != v.Args[1] { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (OR x (MVN y)) + // cond: + // result: (ORN x y) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MVN { break } - i5 := x2.AuxInt - if x2.Aux != s { + y := v_1.Args[0] + v.reset(OpARM64ORN) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (MVN y) x) + // cond: + // result: (ORN x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MVN { break } - _ = x2.Args[1] - if p != x2.Args[0] { + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64ORN) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftLL x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - if mem != x2.Args[1] { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (OR x1:(SLLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (ORshiftLL x0 y [c]) + for { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SLLconst { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - i4 := x3.AuxInt - if x3.Aux != s { + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (OR x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftRL x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { break } - _ = x3.Args[1] - if p != 
x3.Args[0] { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - if mem != x3.Args[1] { + v.reset(OpARM64ORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (OR x1:(SRLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (ORshiftRL x0 y [c]) + for { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRLconst { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + v.reset(OpARM64ORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (OR x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORshiftRA x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { break } - i3 := x4.AuxInt - if x4.Aux != s { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - _ = x4.Args[1] - if p != x4.Args[0] { + v.reset(OpARM64ORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM64_OpARM64OR_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (OR x1:(SRAconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (ORshiftRA x0 y [c]) + for { + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRAconst { break } - if mem != x4.Args[1] { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { + v.reset(OpARM64ORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (OR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLL { break } - x5 := 
y5.Args[0] - if x5.Op != OpARM64MOVBUload { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - i2 := x5.AuxInt - if x5.Aux != s { + t := v_0_1.Type + if v_0_1.AuxInt != 63 { break } - _ = x5.Args[1] - if p != x5.Args[0] { + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { break } - if mem != x5.Args[1] { + if v_1.Type != typ.UInt64 { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { + if v_1_0.Type != typ.UInt64 { break } - i1 := x6.AuxInt - if x6.Aux != s { + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { break } - _ = x6.Args[1] - if p != x6.Args[0] { + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { break } - if mem != x6.Args[1] { + if v_1_0_1.Type != t { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { break } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - 
v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + if v_1_0_1_0.AuxInt != 64 { break } - if o0.AuxInt != 8 { + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + if v_1_0_1_1.Type != t { break } - 
if o1.AuxInt != 16 { + if v_1_0_1_1.AuxInt != 63 { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + if y != v_1_0_1_1.Args[0] { break } - if o2.AuxInt != 24 { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + if v_1_1.AuxInt != 64 { break } - if o3.AuxInt != 32 { + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { + if v_1_1_0.Type != t { break } - if o4.AuxInt != 40 { + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + if v_1_1_0_0.AuxInt != 64 { break } - if o5.AuxInt != 48 { + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + if v_1_1_0_1.Type != t { break } - if s0.AuxInt != 56 { + if v_1_1_0_1.AuxInt != 63 { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + if y != v_1_1_0_1.Args[0] { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + if !(cc.(Op) == OpARM64LessThanU) { break } - if x0.AuxInt != 7 { + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (OR (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + if v_0.Type != typ.UInt64 { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { break } - if 
x1.AuxInt != 6 { + if v_0_0.Type != typ.UInt64 { break } - if x1.Aux != s { + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { break } - _ = x1.Args[1] - if p != x1.Args[0] { + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { break } - if mem != x1.Args[1] { + if v_0_0_1_0.AuxInt != 64 { break } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + if v_0_0_1_1.Type != t { break } - if x2.AuxInt != 5 { + if v_0_0_1_1.AuxInt != 63 { break } - if x2.Aux != s { + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { break } - _ = x2.Args[1] - if p != x2.Args[0] { + if v_0_1.AuxInt != 64 { break } - if mem != x2.Args[1] { + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { break } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { + if v_0_1_0.Type != t { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { break } - if x3.AuxInt != 4 { + if v_0_1_0_0.AuxInt != 64 { break } - if x3.Aux != s { + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { break } - _ = x3.Args[1] - if p != x3.Args[0] { + if v_0_1_0_1.Type != t { break } - if mem != x3.Args[1] { + if v_0_1_0_1.AuxInt != 63 { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + if y != v_0_1_0_1.Args[0] { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + v_1 := v.Args[1] + if v_1.Op != OpARM64SLL { break } - if x4.AuxInt != 3 { + _ = v_1.Args[1] + if x != v_1.Args[0] { break } - if x4.Aux != s { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { break } - _ = x4.Args[1] - if p != x4.Args[0] { + if v_1_1.Type != t { break } - if mem != x4.Args[1] { + if v_1_1.AuxInt != 63 { break } - y5 := o1.Args[1] - if 
y5.Op != OpARM64MOVDnop { + if y != v_1_1.Args[0] { break } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { + if !(cc.(Op) == OpARM64LessThanU) { break } - if x5.AuxInt != 2 { + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (OR (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { break } - if x5.Aux != s { + if v_0.Type != typ.UInt64 { break } - _ = x5.Args[1] - if p != x5.Args[0] { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - if mem != x5.Args[1] { + t := v_0_1.Type + if v_0_1.AuxInt != 63 { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { + if v_1.Type != typ.UInt64 { break } - if x6.AuxInt != 1 { + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { break } - if x6.Aux != s { + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { break } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x6.Args[1] { + if v_1_0_1.Type != t { break } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { + if v_1_0_1_0.AuxInt != 64 { break } - _ = x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - if mem != x7.Args[2] { + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && 
x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + if v_1_0_1_1.Type != t { break } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 
&& mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { + if v_1_0_1_1.AuxInt != 63 { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { + if y != v_1_0_1_1.Args[0] { break } - _ = x7.Args[2] - ptr0 := x7.Args[0] - idx0 := x7.Args[1] - mem := x7.Args[2] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { break } - if o0.AuxInt != 8 { + if v_1_1.AuxInt != 64 { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { break } - if o1.AuxInt != 16 { + if v_1_1_0.Type != t { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { break } - if o2.AuxInt != 24 { + if v_1_1_0_0.AuxInt != 64 { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { break } - if o3.AuxInt != 32 { + if v_1_1_0_1.Type != t { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { + if v_1_1_0_1.AuxInt != 63 { break } - if o4.AuxInt != 40 { + if y != v_1_1_0_1.Args[0] { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + if !(cc.(Op) == OpARM64LessThanU) { break } - if o5.AuxInt != 48 { 
- break + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { + break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + if v_0.Type != typ.UInt64 { break } - if s0.AuxInt != 56 { + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { break } - if x0.AuxInt != 7 { + if v_0_0_1_0.AuxInt != 64 { break } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - if mem != x0.Args[1] { + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { break } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + if v_0_0_1_1.Type != t { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if v_0_0_1_1.AuxInt != 63 { break } - if x1.AuxInt != 6 { + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { break } - if x1.Aux != s { + if v_0_1.AuxInt != 64 { break } - _ = x1.Args[1] - if p != x1.Args[0] { + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { break } - if mem != x1.Args[1] { + if v_0_1_0.Type != t { break } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + if v_0_1_0_0.AuxInt != 64 { break } - if x2.AuxInt != 5 { + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { break } - 
if x2.Aux != s { + if v_0_1_0_1.Type != t { break } - _ = x2.Args[1] - if p != x2.Args[0] { + if v_0_1_0_1.AuxInt != 63 { break } - if mem != x2.Args[1] { + if y != v_0_1_0_1.Args[0] { break } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + if v_1.Type != typ.UInt64 { break } - if x3.AuxInt != 4 { + _ = v_1.Args[1] + if x != v_1.Args[0] { break } - if x3.Aux != s { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { break } - _ = x3.Args[1] - if p != x3.Args[0] { + if v_1_1.Type != t { break } - if mem != x3.Args[1] { + if v_1_1.AuxInt != 63 { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + if y != v_1_1.Args[0] { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + if !(cc.(Op) == OpARM64LessThanU) { break } - if x4.AuxInt != 3 { + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLL { break } - if x4.Aux != s { + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - _ = x4.Args[1] - if p != x4.Args[0] { + t := v_0_1.Type + if v_0_1.AuxInt != 31 { break } - if mem != x4.Args[1] { + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { break } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { + if v_1.Type != typ.UInt32 { break } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { break } - if x5.AuxInt != 2 { + if v_1_0.Type != typ.UInt32 { break } - if x5.Aux != s { + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg { break } - _ = 
x5.Args[1] - if p != x5.Args[0] { + if x != v_1_0_0.Args[0] { break } - if mem != x5.Args[1] { + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { + if v_1_0_1.Type != t { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { break } - if x6.AuxInt != 1 { + if v_1_0_1_0.AuxInt != 32 { break } - if x6.Aux != s { + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { break } - _ = x6.Args[1] - p1 := x6.Args[0] - if p1.Op != OpARM64ADD { + if v_1_0_1_1.Type != t { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x6.Args[1] { + if v_1_0_1_1.AuxInt != 31 { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + if y != v_1_0_1_1.Args[0] { break } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL 
[24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { break } - if o0.AuxInt != 8 { + if v_1_1.AuxInt != 64 { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { break } - if o1.AuxInt != 16 { + if v_1_1_0.Type != t { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { break } - if o2.AuxInt != 24 { + if 
v_1_1_0_0.AuxInt != 32 { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { break } - if o3.AuxInt != 32 { + if v_1_1_0_1.Type != t { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { + if v_1_1_0_1.AuxInt != 31 { break } - if o4.AuxInt != 40 { + if y != v_1_1_0_1.Args[0] { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + if !(cc.(Op) == OpARM64LessThanU) { break } - if o5.AuxInt != 48 { + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (OR (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + if v_0.Type != typ.UInt32 { break } - if s0.AuxInt != 56 { + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + if v_0_0.Type != typ.UInt32 { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + _ = v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpARM64MOVWUreg { break } - _ = x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + x := v_0_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { break } - if x0_1.AuxInt != 7 { + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { break } - idx := x0_1.Args[0] - mem := x0.Args[2] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + if v_0_0_1_0.AuxInt != 32 { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + v_0_0_1_1 := v_0_0_1.Args[1] + 
if v_0_0_1_1.Op != OpARM64ANDconst { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + if v_0_0_1_1.Type != t { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + if v_0_0_1_1.AuxInt != 31 { break } - if x1_1.AuxInt != 6 { + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { break } - if idx != x1_1.Args[0] { + if v_0_1.AuxInt != 64 { break } - if mem != x1.Args[2] { + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { break } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { + if v_0_1_0.Type != t { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + if v_0_1_0_0.AuxInt != 32 { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst { + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { break } - if x2_1.AuxInt != 5 { + if v_0_1_0_1.Type != t { break } - if idx != x2_1.Args[0] { + if v_0_1_0_1.AuxInt != 31 { break } - if mem != x2.Args[2] { + if y != v_0_1_0_1.Args[0] { break } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { + v_1 := v.Args[1] + if v_1.Op != OpARM64SLL { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + _ = v_1.Args[1] + if x != v_1.Args[0] { break } - _ = x3.Args[2] - if ptr != x3.Args[0] { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { break } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst { + if v_1_1.Type != t { break } - if x3_1.AuxInt != 4 { + if v_1_1.AuxInt != 31 { break } - if idx != x3_1.Args[0] { + if y != v_1_1.Args[0] { break } - if mem != x3.Args[2] { + if !(cc.(Op) == OpARM64LessThanU) { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (OR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst 
[31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { + if v_0.Type != typ.UInt32 { break } - _ = x4.Args[2] - if ptr != x4.Args[0] { + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { break } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst { + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { break } - if x4_1.AuxInt != 3 { + t := v_0_1.Type + if v_0_1.AuxInt != 31 { break } - if idx != x4_1.Args[0] { + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { break } - if mem != x4.Args[2] { + if v_1.Type != typ.UInt32 { break } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { break } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { break } - _ = x5.Args[2] - if ptr != x5.Args[0] { + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { break } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst { + if v_1_0_1.Type != t { break } - if x5_1.AuxInt != 2 { + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { break } - if idx != x5_1.Args[0] { + if v_1_0_1_0.AuxInt != 32 { break } - if mem != x5.Args[2] { + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { + if v_1_0_1_1.Type != t { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { + if v_1_0_1_1.AuxInt != 31 { break } - _ = x6.Args[2] - if ptr != x6.Args[0] { + if y != v_1_0_1_1.Args[0] { break } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst { + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { break } - if x6_1.AuxInt != 1 { + if v_1_1.AuxInt != 64 { break } - 
if idx != x6_1.Args[0] { + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { break } - if mem != x6.Args[2] { + if v_1_1_0.Type != t { break } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { + if v_1_1_0_0.AuxInt != 32 { break } - _ = x7.Args[2] - if ptr != x7.Args[0] { + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { break } - if idx != x7.Args[1] { + if v_1_1_0_1.Type != t { break } - if mem != x7.Args[2] { + if v_1_1_0_1.AuxInt != 31 { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + if y != v_1_1_0_1.Args[0] { break } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) 
y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) + // match: (OR (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) for { - t := v.Type _ = v.Args[1] - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUloadidx { + if v_0.Type != typ.UInt32 { break } - _ = x7.Args[2] - ptr := x7.Args[0] - idx := x7.Args[1] - mem := x7.Args[2] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { break } - if o0.AuxInt != 8 { + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op 
!= OpARM64SUB { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + if v_0_0_1_0.AuxInt != 32 { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { break } - if o2.AuxInt != 24 { + if v_0_0_1_1.Type != t { break } - _ = o2.Args[1] - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { + if v_0_0_1_1.AuxInt != 31 { break } - if o3.AuxInt != 32 { + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { break } - _ = o3.Args[1] - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { + if v_0_1.AuxInt != 64 { break } - if o4.AuxInt != 40 { + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { break } - _ = o4.Args[1] - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { + if v_0_1_0.Type != t { break } - if o5.AuxInt != 48 { + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { break } - _ = o5.Args[1] - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { + if v_0_1_0_0.AuxInt != 32 { break } - if s0.AuxInt != 56 { + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + if v_0_1_0_1.Type != t { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if v_0_1_0_1.AuxInt != 31 { break } - _ = x0.Args[2] - if ptr != x0.Args[0] { + if y != v_0_1_0_1.Args[0] { break } - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { break } - if x0_1.AuxInt != 7 { + if v_1.Type != typ.UInt32 { break } - if idx != x0_1.Args[0] { + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVWUreg { break } - if mem != x0.Args[2] { + if x != v_1_0.Args[0] { break } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { + v_1_1 := v_1.Args[1] 
+ if v_1_1.Op != OpARM64ANDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + if v_1_1.Type != t { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + if v_1_1.AuxInt != 31 { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + if y != v_1_1.Args[0] { break } - if x1_1.AuxInt != 6 { + if !(cc.(Op) == OpARM64LessThanU) { break } - if idx != x1_1.Args[0] { + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) + // cond: ac == ^((1< o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + for { + t := v.Type + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - if x3_1.AuxInt != 4 { + if o0.AuxInt != 8 { break } - if idx != x3_1.Args[0] { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - if mem != x3.Args[2] { + if o1.AuxInt != 16 { break } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { + if s0.AuxInt != 24 { break } - _ = x4.Args[2] - if ptr != x4.Args[0] { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { 
break } - if x4_1.AuxInt != 3 { + i3 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { break } - if idx != x4_1.Args[0] { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - if mem != x4.Args[2] { + i2 := x1.AuxInt + if x1.Aux != s { break } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { + _ = x1.Args[1] + if p != x1.Args[0] { break } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUloadidx { + if mem != x1.Args[1] { break } - _ = x5.Args[2] - if ptr != x5.Args[0] { + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x5_1 := x5.Args[1] - if x5_1.Op != OpARM64ADDconst { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - if x5_1.AuxInt != 2 { + i1 := x2.AuxInt + if x2.Aux != s { break } - if idx != x5_1.Args[0] { + _ = x2.Args[1] + if p != x2.Args[0] { break } - if mem != x5.Args[2] { + if mem != x2.Args[1] { break } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { break } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUloadidx { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - _ = x6.Args[2] - if ptr != x6.Args[0] { - break - } - x6_1 := x6.Args[1] - if x6_1.Op != OpARM64ADDconst { - break - } - if x6_1.AuxInt != 1 { - break - } - if idx != x6_1.Args[0] { - break - } - if mem != x6.Args[2] { - break - } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) 
&& clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - _ = o1.Args[1] - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { - break - } - if s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y2 := 
o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - _ = x2.Args[1] - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt + i0 := x3.AuxInt if x3.Aux != s { break } @@ -18980,22 +21981,20 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v0.Aux = s + v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))) + // match: (OR y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + // 
result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) for { t := v.Type _ = v.Args[1] @@ -19007,7 +22006,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i3 := x3.AuxInt + i0 := x3.AuxInt s := x3.Aux _ = x3.Args[1] p := x3.Args[0] @@ -19043,7 +22042,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt + i3 := x0.AuxInt if x0.Aux != s { break } @@ -19062,7 +22061,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + i2 := x1.AuxInt if x1.Aux != s { break } @@ -19081,7 +22080,7 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + i1 := x2.AuxInt if x2.Aux != s { break } @@ -19096,22 +22095,20 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v0.Aux = s + v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 
1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) for { t := v.Type _ = v.Args[1] @@ -19143,13 +22140,16 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x0.Op != OpARM64MOVBUload { break } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - mem := x0.Args[2] + if x0.AuxInt != 3 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break @@ -19158,18 +22158,16 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 1 { + if x1.AuxInt != 2 { + break + } + if x1.Aux != s { break } - s := x1.Aux _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if p != x1.Args[0] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] if mem != x1.Args[1] { break } @@ -19181,14 +22179,20 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 2 { + if x2.AuxInt != 1 { break } if x2.Aux != s { break } _ = x2.Args[1] - p := x2.Args[0] + p1 := x2.Args[0] + if p1.Op != OpARM64ADD { + break + } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] if mem != x2.Args[1] { break } @@ -19197,44 +22201,30 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool { break } x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - if x3.AuxInt != 3 { - break - } - if x3.Aux != s { - break - } 
- _ = x3.Args[1] - if p != x3.Args[0] { + if x3.Op != OpARM64MOVBUloadidx { break } - if mem != x3.Args[1] { + _ = x3.Args[2] + ptr0 := x3.Args[0] + idx0 := x3.Args[1] + if mem != x3.Args[2] { break } if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64OR_30(v *Value) bool { - b := v.Block - _ = b - // match: (OR y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))) + // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, 
idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr0 idx0 mem) for { t := v.Type _ = v.Args[1] @@ -19243,16 +22233,13 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - if x3.AuxInt != 3 { + if x3.Op != OpARM64MOVBUloadidx { break } - s := x3.Aux - _ = x3.Args[1] - p := x3.Args[0] - mem := x3.Args[1] + _ = x3.Args[2] + ptr0 := x3.Args[0] + idx0 := x3.Args[1] + mem := x3.Args[2] o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { break @@ -19281,13 +22268,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x0.Op != OpARM64MOVBUload { break } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { + if x0.AuxInt != 3 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + if mem != x0.Args[1] { break } y1 := o1.Args[1] @@ -19298,20 +22288,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 1 { + if x1.AuxInt != 2 { break } if x1.Aux != s { break } _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if p != x1.Args[0] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] if mem != x1.Args[1] { break } @@ -19323,16 +22309,20 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 2 { + if x2.AuxInt != 1 { break } if x2.Aux != s { break } _ = x2.Args[1] - if p != x2.Args[0] { + p1 := x2.Args[0] + if p1.Op != OpARM64ADD { break } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] if mem != x2.Args[1] { break } @@ -19340,19 +22330,17 @@ func 
rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) for { t := v.Type _ = v.Args[1] @@ -19389,7 +22377,14 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { } _ = x0.Args[2] ptr := x0.Args[0] - idx := x0.Args[1] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 3 { + break + } + idx := x0_1.Args[0] mem := x0.Args[2] y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { @@ -19407,7 +22402,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 
1 { + if x1_1.AuxInt != 2 { break } if idx != x1_1.Args[0] { @@ -19432,7 +22427,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2_1.Op != OpARM64ADDconst { break } - if x2_1.AuxInt != 2 { + if x2_1.AuxInt != 1 { break } if idx != x2_1.Args[0] { @@ -19453,14 +22448,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if ptr != x3.Args[0] { break } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst { - break - } - if x3_1.AuxInt != 3 { - break - } - if idx != x3_1.Args[0] { + if idx != x3.Args[1] { break } if mem != x3.Args[2] { @@ -19470,19 +22458,17 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))) + // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW 
(MOVWUloadidx ptr idx mem)) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx ptr idx mem) for { t := v.Type _ = v.Args[1] @@ -19496,14 +22482,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { } _ = x3.Args[2] ptr := x3.Args[0] - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst { - break - } - if x3_1.AuxInt != 3 { - break - } - idx := x3_1.Args[0] + idx := x3.Args[1] mem := x3.Args[2] o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { @@ -19540,7 +22519,14 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if ptr != x0.Args[0] { break } - if idx != x0.Args[1] { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 3 { + break + } + if idx != x0_1.Args[0] { break } if mem != x0.Args[2] { @@ -19562,7 +22548,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 1 { + if x1_1.AuxInt != 2 { break } if idx != x1_1.Args[0] { @@ -19587,7 +22573,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2_1.Op != OpARM64ADDconst { break } - if x2_1.AuxInt != 2 { + if x2_1.AuxInt != 1 { break } if idx != x2_1.Args[0] { @@ -19600,19 +22586,17 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop 
x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem))) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) for { t := v.Type _ = v.Args[1] @@ -19679,7 +22663,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt + i7 := x0.AuxInt s := x0.Aux _ = x0.Args[1] p := x0.Args[0] @@ -19692,7 +22676,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + i6 := x1.AuxInt 
if x1.Aux != s { break } @@ -19711,7 +22695,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + i5 := x2.AuxInt if x2.Aux != s { break } @@ -19730,7 +22714,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i3 := x3.AuxInt + i4 := x3.AuxInt if x3.Aux != s { break } @@ -19749,7 +22733,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i4 := x4.AuxInt + i3 := x4.AuxInt if x4.Aux != s { break } @@ -19768,7 +22752,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - i5 := x5.AuxInt + i2 := x5.AuxInt if x5.Aux != s { break } @@ -19787,7 +22771,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - i6 := x6.AuxInt + i1 := x6.AuxInt if x6.Aux != s { break } @@ -19806,7 +22790,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x7.Op != OpARM64MOVBUload { break } - i7 := x7.AuxInt + i0 := x7.AuxInt if x7.Aux != s { break } @@ -19821,22 +22805,25 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v0.Aux = s + v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload 
[i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))) + return false +} +func rewriteValueARM64_OpARM64OR_30(v *Value) bool { + b := v.Block + _ = b + // match: (OR y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))) // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} (OffPtr [i0] p) mem) for { t := v.Type _ = v.Args[1] @@ -19848,7 +22835,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x7.Op != OpARM64MOVBUload { break } - i7 := x7.AuxInt + i0 := x7.AuxInt s := x7.Aux _ = 
x7.Args[1] p := x7.Args[0] @@ -19916,7 +22903,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt + i7 := x0.AuxInt if x0.Aux != s { break } @@ -19935,7 +22922,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + i6 := x1.AuxInt if x1.Aux != s { break } @@ -19954,7 +22941,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + i5 := x2.AuxInt if x2.Aux != s { break } @@ -19973,7 +22960,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i3 := x3.AuxInt + i4 := x3.AuxInt if x3.Aux != s { break } @@ -19992,7 +22979,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i4 := x4.AuxInt + i3 := x4.AuxInt if x4.Aux != s { break } @@ -20011,7 +22998,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - i5 := x5.AuxInt + i2 := x5.AuxInt if x5.Aux != s { break } @@ -20030,7 +23017,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - i6 := x6.AuxInt + i1 := x6.AuxInt if x6.Aux != s { break } @@ -20045,22 +23032,20 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(x6.Pos, OpARM64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v0.Aux = s + v1 := b.NewValue0(x6.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop 
x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem))) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) for { t := v.Type _ = v.Args[1] @@ -20124,13 +23109,16 @@ func 
rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x0.Op != OpARM64MOVBUload { break } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - mem := x0.Args[2] + if x0.AuxInt != 7 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break @@ -20139,18 +23127,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 1 { + if x1.AuxInt != 6 { + break + } + if x1.Aux != s { break } - s := x1.Aux _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if p != x1.Args[0] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] if mem != x1.Args[1] { break } @@ -20162,14 +23148,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 2 { + if x2.AuxInt != 5 { break } if x2.Aux != s { break } _ = x2.Args[1] - p := x2.Args[0] + if p != x2.Args[0] { + break + } if mem != x2.Args[1] { break } @@ -20181,7 +23169,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - if x3.AuxInt != 3 { + if x3.AuxInt != 4 { break } if x3.Aux != s { @@ -20202,7 +23190,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - if x4.AuxInt != 4 { + if x4.AuxInt != 3 { break } if x4.Aux != s { @@ -20223,7 +23211,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - if x5.AuxInt != 5 { + if x5.AuxInt != 2 { break } if x5.Aux != s { @@ -20244,16 +23232,20 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - if x6.AuxInt != 6 { + if x6.AuxInt != 1 { break } if x6.Aux != s { break } _ = x6.Args[1] - if p != x6.Args[0] { + p1 := x6.Args[0] + if p1.Op != OpARM64ADD { break } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] if mem != x6.Args[1] { 
break } @@ -20262,39 +23254,30 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - if x7.AuxInt != 7 { - break - } - if x7.Aux != s { - break - } - _ = x7.Args[1] - if p != x7.Args[0] { + if x7.Op != OpARM64MOVBUloadidx { break } - if mem != x7.Args[1] { + _ = x7.Args[2] + ptr0 := x7.Args[0] + idx0 := x7.Args[1] + if mem != x7.Args[2] { break } if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) 
y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))) + // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))) // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr0 idx0 mem) for { t := v.Type _ = v.Args[1] @@ -20303,16 +23286,13 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - if x7.AuxInt != 7 { + 
if x7.Op != OpARM64MOVBUloadidx { break } - s := x7.Aux - _ = x7.Args[1] - p := x7.Args[0] - mem := x7.Args[1] + _ = x7.Args[2] + ptr0 := x7.Args[0] + idx0 := x7.Args[1] + mem := x7.Args[2] o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { break @@ -20373,13 +23353,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x0.Op != OpARM64MOVBUload { break } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - if mem != x0.Args[2] { + if x0.AuxInt != 7 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + if mem != x0.Args[1] { break } y1 := o5.Args[1] @@ -20390,20 +23373,16 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 1 { + if x1.AuxInt != 6 { break } if x1.Aux != s { break } _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if p != x1.Args[0] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] if mem != x1.Args[1] { break } @@ -20415,7 +23394,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 2 { + if x2.AuxInt != 5 { break } if x2.Aux != s { @@ -20436,7 +23415,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - if x3.AuxInt != 3 { + if x3.AuxInt != 4 { break } if x3.Aux != s { @@ -20457,7 +23436,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - if x4.AuxInt != 4 { + if x4.AuxInt != 3 { break } if x4.Aux != s { @@ -20478,7 +23457,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - if x5.AuxInt != 5 { + if x5.AuxInt != 2 { break } if x5.Aux != s { @@ -20499,16 +23478,20 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - if x6.AuxInt != 6 { + if x6.AuxInt != 1 { break } if x6.Aux != s { break } _ = x6.Args[1] - if p != x6.Args[0] { + p1 := 
x6.Args[0] + if p1.Op != OpARM64ADD { break } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] if mem != x6.Args[1] { break } @@ -20516,19 +23499,17 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses 
== 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) for { t := v.Type _ = v.Args[1] @@ -20597,7 +23578,14 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { } _ = x0.Args[2] ptr := x0.Args[0] - idx := x0.Args[1] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 7 { + break + } + idx := x0_1.Args[0] mem := x0.Args[2] y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { @@ -20615,7 +23603,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 1 { + if x1_1.AuxInt != 6 { break } if idx != x1_1.Args[0] { @@ -20640,7 +23628,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2_1.Op != OpARM64ADDconst { break } - if x2_1.AuxInt != 2 { + if x2_1.AuxInt != 5 { break } if idx != x2_1.Args[0] { @@ -20665,7 +23653,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3_1.Op != OpARM64ADDconst { break } - if x3_1.AuxInt != 3 { + if x3_1.AuxInt != 4 { break } if idx != x3_1.Args[0] { @@ -20690,7 +23678,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4_1.Op != OpARM64ADDconst { break } - if x4_1.AuxInt != 4 { + if x4_1.AuxInt != 3 { break } if idx != x4_1.Args[0] { @@ -20715,7 +23703,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5_1.Op != OpARM64ADDconst { break } - if x5_1.AuxInt 
!= 5 { + if x5_1.AuxInt != 2 { break } if idx != x5_1.Args[0] { @@ -20740,7 +23728,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6_1.Op != OpARM64ADDconst { break } - if x6_1.AuxInt != 6 { + if x6_1.AuxInt != 1 { break } if idx != x6_1.Args[0] { @@ -20761,14 +23749,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if ptr != x7.Args[0] { break } - x7_1 := x7.Args[1] - if x7_1.Op != OpARM64ADDconst { - break - } - if x7_1.AuxInt != 7 { - break - } - if idx != x7_1.Args[0] { + if idx != x7.Args[1] { break } if mem != x7.Args[2] { @@ -20778,19 +23759,17 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))) + // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx 
ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx ptr idx mem) for { t := v.Type _ = v.Args[1] @@ -20804,14 +23783,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { } _ = x7.Args[2] ptr := x7.Args[0] - x7_1 := x7.Args[1] - if x7_1.Op != OpARM64ADDconst { - break - } - if x7_1.AuxInt != 7 { - break - } - idx := x7_1.Args[0] + idx := x7.Args[1] mem := x7.Args[2] o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { @@ -20880,7 +23852,14 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if ptr != x0.Args[0] { break } - if idx != x0.Args[1] { + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 7 { + break + } + if idx != x0_1.Args[0] { break } if mem != x0.Args[2] { @@ -20902,7 +23881,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 1 { + if x1_1.AuxInt != 6 { break } if idx != x1_1.Args[0] { @@ -20927,7 
+23906,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x2_1.Op != OpARM64ADDconst { break } - if x2_1.AuxInt != 2 { + if x2_1.AuxInt != 5 { break } if idx != x2_1.Args[0] { @@ -20952,7 +23931,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x3_1.Op != OpARM64ADDconst { break } - if x3_1.AuxInt != 3 { + if x3_1.AuxInt != 4 { break } if idx != x3_1.Args[0] { @@ -20977,7 +23956,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x4_1.Op != OpARM64ADDconst { break } - if x4_1.AuxInt != 4 { + if x4_1.AuxInt != 3 { break } if idx != x4_1.Args[0] { @@ -21002,7 +23981,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x5_1.Op != OpARM64ADDconst { break } - if x5_1.AuxInt != 5 { + if x5_1.AuxInt != 2 { break } if idx != x5_1.Args[0] { @@ -21027,7 +24006,7 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { if x6_1.Op != OpARM64ADDconst { break } - if x6_1.AuxInt != 6 { + if x6_1.AuxInt != 1 { break } if idx != x6_1.Args[0] { @@ -21040,544 +24019,276 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { - // match: (ORN x (MOVDconst [c])) - // cond: - // result: (ORconst [^c] x) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && 
o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) for { + t := v.Type _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = ^c - v.AddArg(x) - return true - } - // match: (ORN x x) - // cond: - // result: (MOVDconst [-1]) - for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + if o0.AuxInt != 8 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = -1 - return true - } - // match: (ORN x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORNshiftLL x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if o1.AuxInt != 16 { break } - v.reset(OpARM64ORNshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (ORN x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORNshiftRL x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if s0.AuxInt != 24 { break } - v.reset(OpARM64ORNshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (ORN x0 x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (ORNshiftRA x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - c := x1.AuxInt - y := 
x1.Args[0] - if !(clobberIfDead(x1)) { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { break } - v.reset(OpARM64ORNshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORNshiftLL_0(v *Value) bool { - // match: (ORNshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (ORconst x [^int64(uint64(c)<>uint64(d))]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if mem != x1.Args[1] { break } - c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = ^(c >> uint64(d)) - v.AddArg(x) - return true - } - // match: (ORNshiftRA x (SRAconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [-1]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRAconst { + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - if !(c == d) { + i2 := x2.AuxInt + if x2.Aux != s { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = -1 - return true - } - return false -} -func rewriteValueARM64_OpARM64ORNshiftRL_0(v *Value) bool { - // match: (ORNshiftRL x (MOVDconst [c]) [d]) - // cond: - // result: (ORconst x [^int64(uint64(c)>>uint64(d))]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = x2.Args[1] + if p != x2.Args[0] { break } - c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = ^int64(uint64(c) >> uint64(d)) - v.AddArg(x) - return true - } - // match: (ORNshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [-1]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if mem != x2.Args[1] { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { break } - if !(c == d) { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { 
break } - v.reset(OpARM64MOVDconst) - v.AuxInt = -1 - return true - } - return false -} -func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool { - // match: (ORconst [0] x) - // cond: - // result: x - for { - if v.AuxInt != 0 { + i3 := x3.AuxInt + if x3.Aux != s { break } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORconst [-1] _) - // cond: - // result: (MOVDconst [-1]) - for { - if v.AuxInt != -1 { + _ = x3.Args[1] + if p != x3.Args[0] { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = -1 - return true - } - // match: (ORconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if mem != x3.Args[1] { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c | d - return true - } - // match: (ORconst [c] (ORconst [d] x)) - // cond: - // result: (ORconst [c|d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64ORconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ORconst) - v.AuxInt = c | d - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { - b := v.Block - _ = b - // match: (ORshiftLL (MOVDconst [c]) x [d]) - // cond: - // result: (ORconst [c] (SLLconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ORconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - 
v0.AuxInt = d - v0.AddArg(x) + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) + v.reset(OpCopy) v.AddArg(v0) + v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t) + v1.Aux = s + v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) return true } - // match: (ORshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (ORconst x [int64(uint64(c)< y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) for { - d := v.AuxInt + t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SLLconst { - break - } - c := y.AuxInt - if x != y.Args[0] { - break - } - if !(c == d) { + y3 := v.Args[0] + if y3.Op != OpARM64MOVDnop { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (ORshiftLL [c] (SRLconst x [64-c]) x) - // cond: - // result: (RORconst [64-c] x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - if v_0.AuxInt != 64-c { + i3 := x3.AuxInt + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o0 := v.Args[1] + if o0.Op != OpARM64ORshiftLL { break } - x := v_0.Args[0] - if x != v.Args[1] { + if o0.AuxInt != 8 { break } - 
v.reset(OpARM64RORconst) - v.AuxInt = 64 - c - v.AddArg(x) - return true - } - // match: (ORshiftLL [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: (RORWconst [32-c] x) - for { - t := v.Type - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if x != v.Args[1] { + if o1.AuxInt != 16 { break } - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { break } - v.reset(OpARM64RORWconst) - v.AuxInt = 32 - c - v.AddArg(x) - return true - } - // match: (ORshiftLL [c] (SRLconst x [64-c]) x2) - // cond: - // result: (EXTRconst [64-c] x2 x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + if s0.AuxInt != 24 { break } - if v_0.AuxInt != 64-c { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - x := v_0.Args[0] - x2 := v.Args[1] - v.reset(OpARM64EXTRconst) - v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) - return true - } - // match: (ORshiftLL [c] (UBFX [bfc] x) x2) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: (EXTRWconst [32-c] x2 x) - for { - t := v.Type - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - x2 := v.Args[1] - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + i0 := x0.AuxInt + if x0.Aux != s { break } - v.reset(OpARM64EXTRWconst) - v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) - return true - } - // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) - // cond: sc == getARM64BFwidth(bfc) - // result: (BFXIL [bfc] y x) - for { - sc := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + _ = x0.Args[1] + if p != x0.Args[0] { 
break } - bfc := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if mem != x0.Args[1] { break } - if v_1.AuxInt != sc { + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { break } - y := v_1.Args[0] - if !(sc == getARM64BFwidth(bfc)) { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - v.reset(OpARM64BFXIL) - v.AuxInt = bfc - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i0] p) mem) - for { - t := v.Type - if v.AuxInt != 8 { + i1 := x1.AuxInt + if x1.Aux != s { break } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { + _ = x1.Args[1] + if p != x1.Args[0] { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + if mem != x1.Args[1] { break } - i0 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt - if x1.Aux != s { + i2 := x2.AuxInt + if x2.Aux != s { break } - _ = x1.Args[1] - if p != x1.Args[0] { + _ = x2.Args[1] + if p != x2.Args[0] { break } - if mem != x1.Args[1] { + if mem != x2.Args[1] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && 
mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, t) + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) v.reset(OpCopy) v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) + v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) + v1.Aux = s + v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (MOVHUloadidx ptr0 idx0 mem) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) 
(REVW (MOVWUloadidx ptr0 idx0 mem)) for { t := v.Type - if v.AuxInt != 8 { + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - _ = v.Args[1] - y0 := v.Args[0] + if o0.AuxInt != 8 { + break + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 16 { + break + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 24 { + break + } + y0 := s0.Args[0] if y0.Op != OpARM64MOVDnop { break } @@ -21589,7 +24300,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { ptr0 := x0.Args[0] idx0 := x0.Args[1] mem := x0.Args[2] - y1 := v.Args[1] + y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -21612,186 +24323,119 @@ func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool { if mem != x1.Args[1] { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { - b := v.Block - _ = b - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (MOVHUloadidx ptr idx mem) - for { - t := v.Type - if v.AuxInt != 8 { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { + if x2.AuxInt != 2 { 
break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x2.Aux != s { break } - _ = x0.Args[2] - ptr := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + if x3.AuxInt != 3 { break } - if x1_1.AuxInt != 1 { + if x3.Aux != s { break } - if idx != x1_1.Args[0] { + _ = x3.Args[1] + if p != x3.Args[0] { break } - if mem != x1.Args[2] { + if mem != x3.Args[1] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x3.Pos, OpARM64REVW, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) return true } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) 
- // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (OR y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr0 idx0 mem)) for { t := v.Type - if v.AuxInt != 24 { - break - } _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 16 { - break - } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpARM64MOVHUload { - break - } - i0 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o0.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - _ = x1.Args[1] - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { + y3 := v.Args[0] + if y3.Op != OpARM64MOVDnop { break } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + if 
x3.AuxInt != 3 { break } - i3 := x2.AuxInt - if x2.Aux != s { + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o0 := v.Args[1] + if o0.Op != OpARM64ORshiftLL { break } - _ = x2.Args[1] - if p != x2.Args[0] { + if o0.AuxInt != 8 { break } - if mem != x2.Args[1] { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + if o1.AuxInt != 16 { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr0 idx0 mem) - for { - t := v.Type - if v.AuxInt != 24 { + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { break } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + if s0.AuxInt != 24 { break } - if o0.AuxInt != 16 { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpARM64MOVHUloadidx { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { break } _ = x0.Args[2] ptr0 := x0.Args[0] idx0 := x0.Args[1] 
- mem := x0.Args[2] - y1 := o0.Args[1] + if mem != x0.Args[2] { + break + } + y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -21799,10 +24443,12 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 2 { + if x1.AuxInt != 1 { + break + } + if x1.Aux != s { break } - s := x1.Aux _ = x1.Args[1] p1 := x1.Args[0] if p1.Op != OpARM64ADD { @@ -21814,7 +24460,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x1.Args[1] { break } - y2 := v.Args[1] + y2 := o0.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -21822,55 +24468,75 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 3 { + if x2.AuxInt != 2 { break } if x2.Aux != s { break } _ = x2.Args[1] - p := x2.Args[0] + if p != x2.Args[0] { + break + } if mem != x2.Args[1] { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) v.reset(OpCopy) v.AddArg(v0) - 
v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) + v1 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) return true } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr idx mem) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) for { t := v.Type - if v.AuxInt != 24 { - break - } _ = v.Args[1] o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 16 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpARM64MOVHUloadidx { + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 16 { + break + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 24 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { 
break } _ = x0.Args[2] ptr := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - y1 := o0.Args[1] + y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -21886,7 +24552,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 2 { + if x1_1.AuxInt != 1 { break } if idx != x1_1.Args[0] { @@ -21895,7 +24561,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x1.Args[2] { break } - y2 := v.Args[1] + y2 := o0.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -21911,7 +24577,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x2_1.Op != OpARM64ADDconst { break } - if x2_1.AuxInt != 3 { + if x2_1.AuxInt != 2 { break } if idx != x2_1.Args[0] { @@ -21920,117 +24586,191 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x2.Args[2] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + break + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + break + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst { + break + } + if x3_1.AuxInt != 3 { + break + } + if idx != x3_1.Args[0] { + break + } + if mem != x3.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := 
b.NewValue0(v.Pos, OpARM64REVW, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) + v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) return true } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr0 (SLLconst [1] idx0) mem) + return false +} +func rewriteValueARM64_OpARM64OR_40(v *Value) bool { + b := v.Block + _ = b + // match: (OR y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUloadidx ptr idx mem)) for { t := v.Type - if v.AuxInt != 24 { + _ = v.Args[1] + y3 := v.Args[0] + if y3.Op != OpARM64MOVDnop { break } - _ = v.Args[1] - o0 := v.Args[0] + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + break + } + _ = x3.Args[2] + ptr := x3.Args[0] + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst { + break + } + if 
x3_1.AuxInt != 3 { + break + } + idx := x3_1.Args[0] + mem := x3.Args[2] + o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 16 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] - x0 := o0.Args[0] - if x0.Op != OpARM64MOVHUloadidx2 { + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 16 { + break + } + _ = o1.Args[1] + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 24 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { break } _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - mem := x0.Args[2] - y1 := o0.Args[1] + if ptr != x0.Args[0] { + break + } + if idx != x0.Args[1] { + break + } + if mem != x0.Args[2] { + break + } + y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break } x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if x1.Op != OpARM64MOVBUloadidx { break } - if x1.AuxInt != 2 { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADDshiftLL { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { break } - if p1.AuxInt != 1 { + if x1_1.AuxInt != 1 { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x1.Args[1] { + if idx != x1_1.Args[0] { break } - y2 := v.Args[1] + if mem != x1.Args[2] { + break + } + y2 := o0.Args[1] if y2.Op != OpARM64MOVDnop { break } x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + if x2.Op != OpARM64MOVBUloadidx { break } - if x2.AuxInt != 3 { + _ = x2.Args[2] + if ptr != x2.Args[0] { break } - if x2.Aux != s { + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { break } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { + if x2_1.AuxInt != 2 { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && 
isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + if idx != x2_1.Args[0] { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + if mem != x2.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Pos, OpARM64REVW, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v1 := b.NewValue0(v.Pos, OpARM64SLLconst, idx0.Type) - v1.AuxInt = 1 - v1.AddArg(idx0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) - // cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i0] p) mem) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] 
s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) for { t := v.Type - if v.AuxInt != 56 { - break - } _ = v.Args[1] o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 48 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] @@ -22038,7 +24778,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if o1.Op != OpARM64ORshiftLL { break } - if o1.AuxInt != 40 { + if o1.AuxInt != 16 { break } _ = o1.Args[1] @@ -22046,12 +24786,47 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if o2.Op != OpARM64ORshiftLL { break } - if o2.AuxInt != 32 { + if o2.AuxInt != 24 { break } _ = o2.Args[1] - x0 := o2.Args[0] - if x0.Op != OpARM64MOVWUload { + o3 := o2.Args[0] + if o3.Op != 
OpARM64ORshiftLL { + break + } + if o3.AuxInt != 32 { + break + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { + break + } + if o4.AuxInt != 40 { + break + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { + break + } + if o5.AuxInt != 48 { + break + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 56 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { break } i0 := x0.AuxInt @@ -22059,7 +24834,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { _ = x0.Args[1] p := x0.Args[0] mem := x0.Args[1] - y1 := o2.Args[1] + y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -22067,7 +24842,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i4 := x1.AuxInt + i1 := x1.AuxInt if x1.Aux != s { break } @@ -22078,7 +24853,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x1.Args[1] { break } - y2 := o1.Args[1] + y2 := o4.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -22086,7 +24861,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i5 := x2.AuxInt + i2 := x2.AuxInt if x2.Aux != s { break } @@ -22097,7 +24872,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x2.Args[1] { break } - y3 := o0.Args[1] + y3 := o3.Args[1] if y3.Op != OpARM64MOVDnop { break } @@ -22105,7 +24880,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i6 := x3.AuxInt + i3 := x3.AuxInt if x3.Aux != s { break } @@ -22116,7 +24891,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x3.Args[1] { break } - y4 := v.Args[1] + y4 := o2.Args[1] if y4.Op != OpARM64MOVDnop { break } @@ -22124,7 +24899,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x4.Op != OpARM64MOVBUload { 
break } - i7 := x4.AuxInt + i4 := x4.AuxInt if x4.Aux != s { break } @@ -22135,200 +24910,174 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x4.Args[1] { break } - if !(i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr0 idx0 mem) - for { - t := v.Type - if v.AuxInt != 56 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - 
break - } - if o0.AuxInt != 48 { + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { break } - if o1.AuxInt != 40 { + i5 := x5.AuxInt + if x5.Aux != s { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + _ = x5.Args[1] + if p != x5.Args[0] { break } - if o2.AuxInt != 32 { + if mem != x5.Args[1] { break } - _ = o2.Args[1] - x0 := o2.Args[0] - if x0.Op != OpARM64MOVWUloadidx { + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { break } - _ = x0.Args[2] - ptr0 := x0.Args[0] - idx0 := x0.Args[1] - mem := x0.Args[2] - y1 := o2.Args[1] - if y1.Op != OpARM64MOVDnop { + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + i6 := x6.AuxInt + if x6.Aux != s { break } - if x1.AuxInt != 4 { + _ = x6.Args[1] + if p != x6.Args[0] { break } - s := x1.Aux - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if mem != x6.Args[1] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x1.Args[1] { + y7 := v.Args[1] + if y7.Op != OpARM64MOVDnop { break } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + i7 := x7.AuxInt + if x7.Aux != s { break } - if x2.AuxInt != 5 { + _ = x7.Args[1] + if p != x7.Args[0] { break } - if x2.Aux != s { + if mem != x7.Args[1] { break } - _ = x2.Args[1] - p := x2.Args[0] - if mem != x2.Args[1] { + if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && 
o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { break } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x7.Pos, OpARM64REV, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t) + v1.Aux = s + v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + // match: (OR y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) 
&& clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + for { + t := v.Type + _ = v.Args[1] + y7 := v.Args[0] + if y7.Op != OpARM64MOVDnop { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { break } - if x3.AuxInt != 6 { + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o0 := v.Args[1] + if o0.Op != OpARM64ORshiftLL { break } - if x3.Aux != s { + if o0.AuxInt != 8 { break } - _ = x3.Args[1] - if p != x3.Args[0] { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - if mem != x3.Args[1] { + if o1.AuxInt != 16 { break } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + if o2.AuxInt != 24 { break } - if x4.AuxInt != 7 { + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL { break } - if x4.Aux != s { + if o3.AuxInt != 32 { break } - _ = x4.Args[1] - if p != x4.Args[0] { + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { break } - if mem != x4.Args[1] { + if o4.AuxInt != 40 { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && 
clobber(o2)) { + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { break } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr0) - v0.AddArg(idx0) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx4 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr0 (SLLconst [2] idx0) mem) - for { - t := v.Type - if v.AuxInt != 56 { + if o5.AuxInt != 48 { break } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { break } - if o0.AuxInt != 48 { + if s0.AuxInt != 56 { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - if o1.AuxInt != 40 { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + i0 := x0.AuxInt + if x0.Aux != s { break } - if o2.AuxInt != 32 { + _ = x0.Args[1] + if p != x0.Args[0] { break } - _ = o2.Args[1] - x0 := o2.Args[0] - if x0.Op != OpARM64MOVWUloadidx4 { + if mem != x0.Args[1] { break } - _ = x0.Args[2] - ptr0 := 
x0.Args[0] - idx0 := x0.Args[1] - mem := x0.Args[2] - y1 := o2.Args[1] + y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -22336,25 +25085,18 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - if x1.AuxInt != 4 { + i1 := x1.AuxInt + if x1.Aux != s { break } - s := x1.Aux _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADDshiftLL { - break - } - if p1.AuxInt != 2 { + if p != x1.Args[0] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] if mem != x1.Args[1] { break } - y2 := o1.Args[1] + y2 := o4.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -22362,18 +25104,18 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - if x2.AuxInt != 5 { - break - } + i2 := x2.AuxInt if x2.Aux != s { break } _ = x2.Args[1] - p := x2.Args[0] + if p != x2.Args[0] { + break + } if mem != x2.Args[1] { break } - y3 := o0.Args[1] + y3 := o3.Args[1] if y3.Op != OpARM64MOVDnop { break } @@ -22381,9 +25123,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - if x3.AuxInt != 6 { - break - } + i3 := x3.AuxInt if x3.Aux != s { break } @@ -22394,7 +25134,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x3.Args[1] { break } - y4 := v.Args[1] + y4 := o2.Args[1] if y4.Op != OpARM64MOVDnop { break } @@ -22402,9 +25142,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - if x4.AuxInt != 7 { - break - } + i4 := x4.AuxInt if x4.Aux != s { break } @@ -22415,35 +25153,72 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if mem != x4.Args[1] { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && isSamePtr(ptr0, ptr1) && 
isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { - break + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { + break } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + break + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x6.Pos, OpARM64REV, t) v.reset(OpCopy) v.AddArg(v0) - v0.AddArg(ptr0) - v1 := b.NewValue0(v.Pos, OpARM64SLLconst, idx0.Type) - v1.AuxInt = 2 - v1.AddArg(idx0) + v1 := b.NewValue0(x6.Pos, 
OpARM64MOVDload, t) + v1.Aux = s + v2 := b.NewValue0(x6.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr idx mem) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, 
ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) for { t := v.Type - if v.AuxInt != 56 { - break - } _ = v.Args[1] o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 48 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] @@ -22451,7 +25226,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if o1.Op != OpARM64ORshiftLL { break } - if o1.AuxInt != 40 { + if o1.AuxInt != 16 { break } _ = o1.Args[1] @@ -22459,340 +25234,305 @@ func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { if o2.Op != OpARM64ORshiftLL { break } - if o2.AuxInt != 32 { + if o2.AuxInt != 24 { break } _ = o2.Args[1] - x0 := o2.Args[0] - if x0.Op != OpARM64MOVWUloadidx { + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL { break } - _ = x0.Args[2] - ptr := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y1 := o2.Args[1] - if y1.Op != OpARM64MOVDnop { + if o3.AuxInt != 32 { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + if o4.AuxInt != 40 { break } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { break } - if x1_1.AuxInt != 4 { + if o5.AuxInt != 48 { break } - if idx != x1_1.Args[0] { + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { break } - if mem != x1.Args[2] { + if s0.AuxInt != 56 { break } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { + y0 := 
s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + mem := x0.Args[2] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { break } - x2_1 := x2.Args[1] - if x2_1.Op != OpARM64ADDconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - if x2_1.AuxInt != 5 { + if x1.AuxInt != 1 { break } - if idx != x2_1.Args[0] { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - if mem != x2.Args[2] { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUloadidx { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - _ = x3.Args[2] - if ptr != x3.Args[0] { + if x2.AuxInt != 2 { break } - x3_1 := x3.Args[1] - if x3_1.Op != OpARM64ADDconst { + if x2.Aux != s { break } - if x3_1.AuxInt != 6 { + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { break } - if idx != x3_1.Args[0] { + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { break } - if mem != x3.Args[2] { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { + if x3.AuxInt != 3 { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { + if x3.Aux != s { break } - _ = x4.Args[2] - if ptr != x4.Args[0] { + _ = x3.Args[1] + if p != x3.Args[0] { break } - x4_1 := x4.Args[1] - if x4_1.Op != OpARM64ADDconst { + if mem != x3.Args[1] { break } - if x4_1.AuxInt != 7 { + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { break } - if idx != x4_1.Args[0] { + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { break } - if mem != x4.Args[2] { + if x4.AuxInt != 4 { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 
&& x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + if x4.Aux != s { break } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (REV16W (MOVHUload [i0] {s} p mem)) - for { - t := v.Type - if v.AuxInt != 8 { + _ = x4.Args[1] + if p != x4.Args[0] { break } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { + if mem != x4.Args[1] { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { break } - i1 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if x5.AuxInt != 5 { break } - i0 := x1.AuxInt - if x1.Aux != s { + if x5.Aux != s { break } - _ = x1.Args[1] - if p != x1.Args[0] { + _ = x5.Args[1] + if p != x5.Args[0] { break } - if mem != x1.Args[1] { + if mem != x5.Args[1] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { break } - b = mergePoint(b, x0, x1) 
- v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHUload, t) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - return false -} -func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { - b := v.Block - _ = b - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (REV16W (MOVHUloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 8 { + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { break } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { + if x6.AuxInt != 6 { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + if x6.Aux != s { break } - if x0.AuxInt != 1 { + _ = x6.Args[1] + if p != x6.Args[0] { break } - s := x0.Aux - _ = x0.Args[1] - p1 := x0.Args[0] - if p1.Op != OpARM64ADD { + if mem != x6.Args[1] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - mem := x0.Args[1] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { + y7 := v.Args[1] + if y7.Op != OpARM64MOVDnop { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { break } - _ = x1.Args[2] - ptr0 := x1.Args[0] - idx0 := x1.Args[1] - if mem != x1.Args[2] { + if x7.AuxInt != 7 { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if x7.Aux != s { break } - b = mergePoint(b, 
x0, x1) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x7.Pos, OpARM64REV, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) + v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t) v1.AddArg(ptr0) v1.AddArg(idx0) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (REV16W (MOVHUloadidx ptr idx mem)) + // match: (OR y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} 
p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr0 idx0 mem)) for { t := v.Type - if v.AuxInt != 8 { + _ = v.Args[1] + y7 := v.Args[0] + if y7.Op != OpARM64MOVDnop { break } - _ = v.Args[1] - y0 := v.Args[0] - if y0.Op != OpARM64MOVDnop { + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUloadidx { + if x7.AuxInt != 7 { break } - _ = x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o0 := v.Args[1] + if o0.Op != OpARM64ORshiftLL { break } - if x0_1.AuxInt != 1 { + if o0.AuxInt != 8 { break } - idx := x0_1.Args[0] - mem := x0.Args[2] - y1 := v.Args[1] - if y1.Op != OpARM64MOVDnop { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - x1 
:= y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { + if o1.AuxInt != 16 { break } - _ = x1.Args[2] - if ptr != x1.Args[0] { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - if idx != x1.Args[1] { + if o2.AuxInt != 24 { break } - if mem != x1.Args[2] { + _ = o2.Args[1] + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL { break } - if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if o3.AuxInt != 32 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - if v.AuxInt != 24 { + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { break } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + if o4.AuxInt != 40 { break } - if o0.AuxInt != 16 { + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { break } - _ = o0.Args[1] - y0 := o0.Args[0] - if y0.Op != OpARM64REV16W { + if o5.AuxInt != 48 { + break + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 56 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVHUload { + if x0.Op != 
OpARM64MOVBUloadidx { break } - i2 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o0.Args[1] + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + if mem != x0.Args[2] { + break + } + y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -22800,18 +25540,24 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != 1 { + break + } if x1.Aux != s { break } _ = x1.Args[1] - if p != x1.Args[0] { + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] if mem != x1.Args[1] { break } - y2 := v.Args[1] + y2 := o4.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -22819,7 +25565,9 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i0 := x2.AuxInt + if x2.AuxInt != 2 { + break + } if x2.Aux != s { break } @@ -22830,215 +25578,115 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if mem != x2.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 
1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 24 { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + if x3.AuxInt != 3 { break } - if o0.AuxInt != 16 { + if x3.Aux != s { break } - _ = o0.Args[1] - y0 := o0.Args[0] - if y0.Op != OpARM64REV16W { + _ = x3.Args[1] + if p != x3.Args[0] { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVHUload { + if mem != x3.Args[1] { break } - if x0.AuxInt != 2 { + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { break } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o0.Args[1] - if y1.Op != OpARM64MOVDnop { + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if x4.AuxInt != 4 { break } - if x1.AuxInt != 1 { + if x4.Aux != s { break } - if x1.Aux != s { + _ = x4.Args[1] + if p != x4.Args[0] { break } - _ = x1.Args[1] - p1 := x1.Args[0] - if p1.Op != OpARM64ADD { + if mem != x4.Args[1] { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x1.Args[1] { + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { break } - y2 := v.Args[1] - if y2.Op != OpARM64MOVDnop { + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUload { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { + if x5.AuxInt != 5 { break } - _ = x2.Args[2] - ptr0 := x2.Args[0] - idx0 := x2.Args[1] - if mem != x2.Args[2] { + if x5.Aux != s { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && 
(isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + _ = x5.Args[1] + if p != x5.Args[0] { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr idx mem)) - for { - t := v.Type - if v.AuxInt != 24 { - break - } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 16 { - break - } - _ = o0.Args[1] - y0 := o0.Args[0] - if y0.Op != OpARM64REV16W { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVHUloadidx { - break - } - _ = x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { - break - } - if x0_1.AuxInt != 2 { - break - } - idx := x0_1.Args[0] - mem := x0.Args[2] - y1 := o0.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUloadidx { - break - } - _ = x1.Args[2] - if ptr != x1.Args[0] { - break - } - x1_1 := x1.Args[1] - if x1_1.Op != OpARM64ADDconst { - break - } - if x1_1.AuxInt != 1 { - break - } - if idx != x1_1.Args[0] { + if mem != x5.Args[1] { break } - if mem != x1.Args[2] { + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { break } - y2 := 
v.Args[1] - if y2.Op != OpARM64MOVDnop { + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUload { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUloadidx { + if x6.AuxInt != 6 { break } - _ = x2.Args[2] - if ptr != x2.Args[0] { + if x6.Aux != s { break } - if idx != x2.Args[1] { + _ = x6.Args[1] + if p != x6.Args[0] { break } - if mem != x2.Args[2] { + if mem != x6.Args[1] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(x6.Pos, OpARM64REV, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) - v1.AddArg(ptr) - v1.AddArg(idx) + v1 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: 
(ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && 
clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) for { t := v.Type - if v.AuxInt != 56 { - break - } _ = v.Args[1] o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 48 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] @@ -23046,7 +25694,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if o1.Op != OpARM64ORshiftLL { break } - if o1.AuxInt != 40 { + if o1.AuxInt != 16 { break } _ = o1.Args[1] @@ -23054,273 +25702,272 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if o2.Op != OpARM64ORshiftLL { break } - if o2.AuxInt != 32 { + if o2.AuxInt != 24 { break } _ = o2.Args[1] - y0 := o2.Args[0] - if y0.Op != OpARM64REVW { + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVWUload { + if o3.AuxInt != 32 { break } - i4 := x0.AuxInt - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := o2.Args[1] - if y1.Op != OpARM64MOVDnop { + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + if o4.AuxInt != 40 { break } - i3 := x1.AuxInt - if x1.Aux != s { + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { break } - _ = x1.Args[1] - if p != x1.Args[0] { + if o5.AuxInt != 48 { break } - if mem != x1.Args[1] { + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { break } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { + if s0.AuxInt != 56 { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } - i2 := x2.AuxInt - if x2.Aux != s { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { break } - _ = x2.Args[1] - if 
p != x2.Args[0] { + _ = x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + y1 := o5.Args[1] + if y1.Op != OpARM64MOVDnop { break } - if mem != x2.Args[1] { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { break } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { break } - i1 := x3.AuxInt - if x3.Aux != s { + if x1_1.AuxInt != 1 { break } - _ = x3.Args[1] - if p != x3.Args[0] { + if idx != x1_1.Args[0] { break } - if mem != x3.Args[1] { + if mem != x1.Args[2] { break } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { + y2 := o4.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { break } - i0 := x4.AuxInt - if x4.Aux != s { + _ = x2.Args[2] + if ptr != x2.Args[0] { break } - _ = x4.Args[1] - if p != x4.Args[0] { + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { break } - if mem != x4.Args[1] { + if x2_1.AuxInt != 2 { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + if idx != x2_1.Args[0] { break } - b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // 
match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem))) - // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDloadidx ptr0 idx0 mem)) - for { - t := v.Type - if v.AuxInt != 56 { + if mem != x2.Args[2] { break } - _ = v.Args[1] - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + y3 := o3.Args[1] + if y3.Op != OpARM64MOVDnop { break } - if o0.AuxInt != 48 { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { break } - _ = o0.Args[1] - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + _ = x3.Args[2] + if ptr != x3.Args[0] { break } - if o1.AuxInt != 40 { + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst { break } - _ = o1.Args[1] - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { + if x3_1.AuxInt != 3 { break } - if o2.AuxInt != 32 { + if idx != x3_1.Args[0] { break } - _ = o2.Args[1] - y0 := o2.Args[0] - if y0.Op != OpARM64REVW { + if mem != x3.Args[2] { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVWUload { + y4 := o2.Args[1] + if y4.Op != OpARM64MOVDnop { break } - if x0.AuxInt != 4 { + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { break } - s := x0.Aux - _ = x0.Args[1] - p := x0.Args[0] - mem := x0.Args[1] - y1 := 
o2.Args[1] - if y1.Op != OpARM64MOVDnop { + _ = x4.Args[2] + if ptr != x4.Args[0] { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64ADDconst { break } - if x1.AuxInt != 3 { + if x4_1.AuxInt != 4 { break } - if x1.Aux != s { + if idx != x4_1.Args[0] { break } - _ = x1.Args[1] - if p != x1.Args[0] { + if mem != x4.Args[2] { break } - if mem != x1.Args[1] { + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { break } - y2 := o1.Args[1] - if y2.Op != OpARM64MOVDnop { + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUloadidx { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + _ = x5.Args[2] + if ptr != x5.Args[0] { break } - if x2.AuxInt != 2 { + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64ADDconst { break } - if x2.Aux != s { + if x5_1.AuxInt != 5 { break } - _ = x2.Args[1] - if p != x2.Args[0] { + if idx != x5_1.Args[0] { break } - if mem != x2.Args[1] { + if mem != x5.Args[2] { break } - y3 := o0.Args[1] - if y3.Op != OpARM64MOVDnop { + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { break } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUloadidx { break } - if x3.AuxInt != 1 { + _ = x6.Args[2] + if ptr != x6.Args[0] { break } - if x3.Aux != s { + x6_1 := x6.Args[1] + if x6_1.Op != OpARM64ADDconst { break } - _ = x3.Args[1] - p1 := x3.Args[0] - if p1.Op != OpARM64ADD { + if x6_1.AuxInt != 6 { break } - _ = p1.Args[1] - ptr1 := p1.Args[0] - idx1 := p1.Args[1] - if mem != x3.Args[1] { + if idx != x6_1.Args[0] { break } - y4 := v.Args[1] - if y4.Op != OpARM64MOVDnop { + if mem != x6.Args[2] { break } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUloadidx { + y7 := v.Args[1] + if y7.Op != OpARM64MOVDnop { break } - _ = x4.Args[2] - ptr0 := x4.Args[0] - idx0 := x4.Args[1] - if mem != x4.Args[2] { + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUloadidx { break } - if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 
&& y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + _ = x7.Args[2] + if ptr != x7.Args[0] { break } - b = mergePoint(b, x0, x1, x2, x3, x4) + x7_1 := x7.Args[1] + if x7_1.Op != OpARM64ADDconst { + break + } + if x7_1.AuxInt != 7 { + break + } + if idx != x7_1.Args[0] { + break + } + if mem != x7.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(v.Pos, OpARM64REV, t) v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) - v1.AddArg(ptr0) - v1.AddArg(idx0) + v1.AddArg(ptr) + v1.AddArg(idx) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst 
[3] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDloadidx ptr idx mem)) + // match: (OR y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDloadidx ptr idx mem)) for { t := v.Type - if v.AuxInt != 56 { + _ = v.Args[1] + y7 := v.Args[0] + if y7.Op != OpARM64MOVDnop { break } - _ = v.Args[1] - o0 := v.Args[0] + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUloadidx { + break + } + _ = x7.Args[2] + ptr := x7.Args[0] + x7_1 := x7.Args[1] + if x7_1.Op != OpARM64ADDconst { + break + } + if x7_1.AuxInt != 7 { + break + } + idx := x7_1.Args[0] + mem := x7.Args[2] + o0 := v.Args[1] if o0.Op != OpARM64ORshiftLL { break } - if o0.AuxInt != 48 { + if o0.AuxInt != 8 { break } _ = o0.Args[1] @@ -23328,7 +25975,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if o1.Op != OpARM64ORshiftLL { break } - if o1.AuxInt != 40 { + if o1.AuxInt != 16 { break } _ = o1.Args[1] @@ -23336,30 +25983,60 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if o2.Op != OpARM64ORshiftLL { break } - if o2.AuxInt != 32 { + if o2.AuxInt != 24 { break } _ = o2.Args[1] - y0 := o2.Args[0] - if y0.Op != OpARM64REVW { + o3 := o2.Args[0] + if o3.Op != OpARM64ORshiftLL { + break + } + if o3.AuxInt != 32 { + break + } + _ = o3.Args[1] + o4 := o3.Args[0] + if o4.Op != OpARM64ORshiftLL { + break + } + if o4.AuxInt != 40 { + break + } + _ = o4.Args[1] + o5 := o4.Args[0] + if o5.Op != OpARM64ORshiftLL { + break + } + if o5.AuxInt != 48 { + break + } + _ = o5.Args[1] + s0 := o5.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 56 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { break } x0 := y0.Args[0] - if x0.Op != OpARM64MOVWUloadidx { + if x0.Op != OpARM64MOVBUloadidx { break } _ = x0.Args[2] - ptr := x0.Args[0] - x0_1 := x0.Args[1] - if x0_1.Op != OpARM64ADDconst { + if ptr != x0.Args[0] { break } - if x0_1.AuxInt != 4 { + if idx != x0.Args[1] { break } - idx := x0_1.Args[0] - mem := x0.Args[2] - y1 := o2.Args[1] + if mem != x0.Args[2] { + break 
+ } + y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break } @@ -23375,7 +26052,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x1_1.Op != OpARM64ADDconst { break } - if x1_1.AuxInt != 3 { + if x1_1.AuxInt != 1 { break } if idx != x1_1.Args[0] { @@ -23384,7 +26061,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if mem != x1.Args[2] { break } - y2 := o1.Args[1] + y2 := o4.Args[1] if y2.Op != OpARM64MOVDnop { break } @@ -23409,7 +26086,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if mem != x2.Args[2] { break } - y3 := o0.Args[1] + y3 := o3.Args[1] if y3.Op != OpARM64MOVDnop { break } @@ -23425,7 +26102,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if x3_1.Op != OpARM64ADDconst { break } - if x3_1.AuxInt != 1 { + if x3_1.AuxInt != 3 { break } if idx != x3_1.Args[0] { @@ -23434,7 +26111,7 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if mem != x3.Args[2] { break } - y4 := v.Args[1] + y4 := o2.Args[1] if y4.Op != OpARM64MOVDnop { break } @@ -23446,16 +26123,73 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { if ptr != x4.Args[0] { break } - if idx != x4.Args[1] { + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64ADDconst { + break + } + if x4_1.AuxInt != 4 { + break + } + if idx != x4_1.Args[0] { break } if mem != x4.Args[2] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + y5 := o1.Args[1] + if y5.Op != OpARM64MOVDnop { break } - b = mergePoint(b, x0, x1, x2, x3, x4) + x5 := y5.Args[0] + if x5.Op != OpARM64MOVBUloadidx { + break + } + _ = x5.Args[2] + if ptr != 
x5.Args[0] { + break + } + x5_1 := x5.Args[1] + if x5_1.Op != OpARM64ADDconst { + break + } + if x5_1.AuxInt != 5 { + break + } + if idx != x5_1.Args[0] { + break + } + if mem != x5.Args[2] { + break + } + y6 := o0.Args[1] + if y6.Op != OpARM64MOVDnop { + break + } + x6 := y6.Args[0] + if x6.Op != OpARM64MOVBUloadidx { + break + } + _ = x6.Args[2] + if ptr != x6.Args[0] { + break + } + x6_1 := x6.Args[1] + if x6_1.Op != OpARM64ADDconst { + break + } + if x6_1.AuxInt != 6 { + break + } + if idx != x6_1.Args[0] { + break + } + if mem != x6.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) v0 := b.NewValue0(v.Pos, OpARM64REV, t) v.reset(OpCopy) v.AddArg(v0) @@ -23468,97 +26202,105 @@ func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { } return false } -func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { - b := v.Block - _ = b - // match: (ORshiftRA (MOVDconst [c]) x [d]) - // cond: - // result: (ORconst [c] (SRAconst x [d])) +func rewriteValueARM64_OpARM64ORN_0(v *Value) bool { + // match: (ORN x (MOVDconst [c])) + // cond: + // result: (ORconst [^c] x) for { - d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] 
+ v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARM64ORconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) + v.AuxInt = ^c + v.AddArg(x) return true } - // match: (ORshiftRA x (MOVDconst [c]) [d]) + // match: (ORN x x) // cond: - // result: (ORconst x [c>>uint64(d)]) + // result: (MOVDconst [-1]) for { - d := v.AuxInt _ = v.Args[1] x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if x != v.Args[1] { break } - c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = c >> uint64(d) - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } - // match: (ORshiftRA x y:(SRAconst x [c]) [d]) - // cond: c==d - // result: y + // match: (ORN x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORNshiftLL x0 y [c]) for { - d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SRAconst { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - c := y.AuxInt - if x != y.Args[0] { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - if !(c == d) { + v.reset(OpARM64ORNshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true + } + // match: (ORN x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORNshiftRL x0 y [c]) + for { + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { break } - v.reset(OpCopy) - v.Type = y.Type + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64ORNshiftRL) + v.AuxInt = c + v.AddArg(x0) v.AddArg(y) return true } - return false -} -func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { - b := v.Block - _ = b - // match: (ORshiftRL (MOVDconst [c]) x [d]) - // cond: - // result: (ORconst [c] (SRLconst x [d])) + // match: (ORN x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (ORNshiftRA x0 
y [c]) for { - d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ORconst) + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64ORNshiftRA) v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(x0) + v.AddArg(y) return true } - // match: (ORshiftRL x (MOVDconst [c]) [d]) + return false +} +func rewriteValueARM64_OpARM64ORNshiftLL_0(v *Value) bool { + // match: (ORNshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (ORconst x [int64(uint64(c)>>uint64(d))]) + // result: (ORconst x [^int64(uint64(c)<> uint64(d)) + v.AuxInt = ^int64(uint64(c) << uint64(d)) v.AddArg(x) return true } - // match: (ORshiftRL x y:(SRLconst x [c]) [d]) + // match: (ORNshiftLL x (SLLconst x [c]) [d]) // cond: c==d - // result: y + // result: (MOVDconst [-1]) for { d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - if y.Op != OpARM64SRLconst { + v_1 := v.Args[1] + if v_1.Op != OpARM64SLLconst { break } - c := y.AuxInt - if x != y.Args[0] { + c := v_1.AuxInt + if x != v_1.Args[0] { break } if !(c == d) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } - // match: (ORshiftRL [c] (SLLconst x [64-c]) x) + return false +} +func rewriteValueARM64_OpARM64ORNshiftRA_0(v *Value) bool { + // match: (ORNshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (RORconst [ c] x) + // result: (ORconst x [^(c>>uint64(d))]) for { - c := v.AuxInt + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - if v_0.AuxInt != 64-c { - break - } - x := v_0.Args[0] - if x != v.Args[1] { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64RORconst) - v.AuxInt = c + c := v_1.AuxInt + 
v.reset(OpARM64ORconst) + v.AuxInt = ^(c >> uint64(d)) v.AddArg(x) return true } - // match: (ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) + // match: (ORNshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [-1]) for { - t := v.Type - c := v.AuxInt + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { - break - } - if v_0.AuxInt != 32-c { - break - } - x := v_0.Args[0] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + if v_1.Op != OpARM64SRAconst { break } + c := v_1.AuxInt if x != v_1.Args[0] { break } - if !(c < 32 && t.Size() == 4) { + if !(c == d) { break } - v.reset(OpARM64RORWconst) - v.AuxInt = c - v.AddArg(x) + v.reset(OpARM64MOVDconst) + v.AuxInt = -1 return true } - // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) - // cond: lc > rc && ac == ^((1<>uint64(d))]) for { - rc := v.AuxInt + d := v.AuxInt _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - ac := v_0.AuxInt - x := v_0.Args[0] + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = ^int64(uint64(c) >> uint64(d)) + v.AddArg(x) + return true + } + // match: (ORNshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [-1]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + if v_1.Op != OpARM64SRLconst { break } - lc := v_1.AuxInt - y := v_1.Args[0] - if !(lc > rc && ac == ^((1< x [d])) for { - sc := v.AuxInt + d := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVWUreg { + if v_0.Op != OpARM64MOVDconst { break } - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<32-1, 0)) { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: 
(ORshiftLL x (MOVDconst [c]) [d]) + // cond: + // result: (ORconst x [int64(uint64(c)< [c] (UBFX [bfc] x) x) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (RORWconst [32-c] x) for { - sc := v.AuxInt + t := v.Type + c := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64UBFIZ { + if v_0.Op != OpARM64UBFX { break } bfc := v_0.AuxInt x := v_0.Args[0] - if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) { + if x != v.Args[1] { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64SRA_0(v *Value) bool { - // match: (SRA x (MOVDconst [c])) - // cond: - // result: (SRAconst x [c&63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { break } - c := v_1.AuxInt - v.reset(OpARM64SRAconst) - v.AuxInt = c & 63 + v.reset(OpARM64RORWconst) + v.AuxInt = 32 - c v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64SRAconst_0(v *Value) bool { - // match: (SRAconst [c] (MOVDconst [d])) + // match: (ORshiftLL [c] (SRLconst x [64-c]) x2) // cond: - // result: (MOVDconst [d>>uint64(c)]) + // result: (EXTRconst [64-c] x2 x) for { c := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.Op != OpARM64SRLconst { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = d >> uint64(c) - return true - } - // match: (SRAconst [rc] (SLLconst [lc] x)) - // cond: lc > rc - // result: (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) - for { - rc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_0.AuxInt != 64-c { break } - lc := v_0.AuxInt x := v_0.Args[0] - if !(lc > rc) { - break - } - v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) + x2 := v.Args[1] + v.reset(OpARM64EXTRconst) + v.AuxInt = 64 - c + v.AddArg(x2) 
v.AddArg(x) return true } - // match: (SRAconst [rc] (SLLconst [lc] x)) - // cond: lc <= rc - // result: (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) + // match: (ORshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) for { - rc := v.AuxInt + t := v.Type + c := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if v_0.Op != OpARM64UBFX { break } - lc := v_0.AuxInt + bfc := v_0.AuxInt x := v_0.Args[0] - if !(lc <= rc) { + x2 := v.Args[1] + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { break } - v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc) + v.reset(OpARM64EXTRWconst) + v.AuxInt = 32 - c + v.AddArg(x2) v.AddArg(x) return true } - // match: (SRAconst [rc] (MOVWreg x)) - // cond: rc < 32 - // result: (SBFX [arm64BFAuxInt(rc, 32-rc)] x) + // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) + // cond: sc == getARM64BFwidth(bfc) + // result: (BFXIL [bfc] y x) for { - rc := v.AuxInt + sc := v.AuxInt + _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpARM64MOVWreg { + if v_0.Op != OpARM64UBFX { break } + bfc := v_0.AuxInt x := v_0.Args[0] - if !(rc < 32) { + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 32-rc) + if v_1.AuxInt != sc { + break + } + y := v_1.Args[0] + if !(sc == getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64BFXIL) + v.AuxInt = bfc + v.AddArg(y) v.AddArg(x) return true } - // match: (SRAconst [rc] (MOVHreg x)) - // cond: rc < 16 - // result: (SBFX [arm64BFAuxInt(rc, 16-rc)] x) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) + // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i0] p) mem) for { - rc := 
v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVHreg { + t := v.Type + if v.AuxInt != 8 { break } - x := v_0.Args[0] - if !(rc < 16) { + _ = v.Args[1] + y0 := v.Args[0] + if y0.Op != OpARM64MOVDnop { break } - v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 16-rc) - v.AddArg(x) - return true - } - // match: (SRAconst [rc] (MOVBreg x)) - // cond: rc < 8 - // result: (SBFX [arm64BFAuxInt(rc, 8-rc)] x) - for { - rc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVBreg { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { break } - x := v_0.Args[0] - if !(rc < 8) { + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { break } - v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(rc, 8-rc) - v.AddArg(x) - return true - } - // match: (SRAconst [sc] (SBFIZ [bfc] x)) - // cond: sc < getARM64BFlsb(bfc) - // result: (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SBFIZ { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if !(sc < getARM64BFlsb(bfc)) { + i1 := x1.AuxInt + if x1.Aux != s { break } - v.reset(OpARM64SBFIZ) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc)) - v.AddArg(x) - return true - } - // match: (SRAconst [sc] (SBFIZ [bfc] x)) - // cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - // result: (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SBFIZ { + _ = x1.Args[1] + if p != x1.Args[0] { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if !(sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + if mem != x1.Args[1] { break } - v.reset(OpARM64SBFX) - v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) - 
v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64SRL_0(v *Value) bool { - // match: (SRL x (MOVDconst [c])) - // cond: - // result: (SRLconst x [c&63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { break } - c := v_1.AuxInt - v.reset(OpARM64SRLconst) - v.AuxInt = c & 63 - v.AddArg(x) + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.Aux = s + v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { - // match: (SRLconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(uint64(d)>>uint64(c))]) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (MOVHUloadidx ptr0 idx0 mem) for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + t := v.Type + if v.AuxInt != 8 { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(d) >> uint64(c)) - return true - } - // match: (SRLconst [c] (SLLconst [c] x)) - // cond: 0 < c && c < 64 - // result: (ANDconst [1< rc - // result: (UBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) - for { - rc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - lc := v_0.AuxInt - x := v_0.Args[0] 
- if !(lc > rc) { + if x1.AuxInt != 1 { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) - v.AddArg(x) - return true - } - // match: (SRLconst [sc] (ANDconst [ac] x)) - // cond: isARM64BFMask(sc, ac, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64ANDconst { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - ac := v_0.AuxInt - x := v_0.Args[0] - if !(isARM64BFMask(sc, ac, sc)) { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc)) - v.AddArg(x) + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - // match: (SRLconst [sc] (MOVWUreg x)) - // cond: isARM64BFMask(sc, 1<<32-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) + return false +} +func rewriteValueARM64_OpARM64ORshiftLL_10(v *Value) bool { + b := v.Block + _ = b + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (MOVHUloadidx ptr idx mem) for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVWUreg { + t := v.Type + if v.AuxInt != 8 { break } - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<32-1, sc)) { + _ = v.Args[1] + y0 := v.Args[0] + 
if y0.Op != OpARM64MOVDnop { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) - v.AddArg(x) - return true - } - // match: (SRLconst [sc] (MOVHUreg x)) - // cond: isARM64BFMask(sc, 1<<16-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVHUreg { + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { break } - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<16-1, sc)) { + _ = x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) - v.AddArg(x) - return true - } - // match: (SRLconst [sc] (MOVBUreg x)) - // cond: isARM64BFMask(sc, 1<<8-1, sc) - // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVBUreg { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { break } - x := v_0.Args[0] - if !(isARM64BFMask(sc, 1<<8-1, sc)) { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) - v.AddArg(x) - return true - } - // match: (SRLconst [rc] (SLLconst [lc] x)) - // cond: lc < rc - // result: (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) - for { - rc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { break } - lc := v_0.AuxInt - x := v_0.Args[0] - if !(lc < rc) { + if x1_1.AuxInt != 1 { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc) - v.AddArg(x) - return true - } - // match: (SRLconst [sc] (UBFX [bfc] x)) - // cond: sc < getARM64BFwidth(bfc) - // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + if idx != x1_1.Args[0] { break } - bfc := v_0.AuxInt - x 
:= v_0.Args[0] - if !(sc < getARM64BFwidth(bfc)) { + if mem != x1.Args[2] { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc) - v.AddArg(x) + if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (SRLconst [sc] (UBFIZ [bfc] x)) - // cond: sc == getARM64BFlsb(bfc) - // result: (ANDconst [1< [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) + // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i0] p) mem) for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFIZ { + t := v.Type + if v.AuxInt != 24 { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if !(sc == getARM64BFlsb(bfc)) { + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - v.reset(OpARM64ANDconst) - v.AuxInt = 1< getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - // result: (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) - for { - sc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFIZ { + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { 
break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64STP_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) - // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (STP [off1+off2] {sym} ptr val1 val2 mem) - for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + i2 := x1.AuxInt + if x1.Aux != s { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val1 := v.Args[1] - val2 := v.Args[2] - mem := v.Args[3] - if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + _ = x1.Args[1] + if p != x1.Args[0] { break } - v.reset(OpARM64STP) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) - return true - } - // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[3] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDaddr { + if mem != x1.Args[1] { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val1 := v.Args[1] - val2 := v.Args[2] - mem := v.Args[3] - if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { break } - v.reset(OpARM64STP) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val1) - v.AddArg(val2) - v.AddArg(mem) - return true - } - // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) - // cond: - // result: (MOVQstorezero [off] {sym} ptr mem) - for { - off := 
v.AuxInt - sym := v.Aux - _ = v.Args[3] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - if v_1.AuxInt != 0 { + i3 := x2.AuxInt + if x2.Aux != s { break } - v_2 := v.Args[2] - if v_2.Op != OpARM64MOVDconst { + _ = x2.Args[1] + if p != x2.Args[0] { break } - if v_2.AuxInt != 0 { + if mem != x2.Args[1] { break } - mem := v.Args[3] - v.reset(OpARM64MOVQstorezero) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { - b := v.Block - _ = b - // match: (SUB x (MOVDconst [c])) - // cond: - // result: (SUBconst [c] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { break } - c := v_1.AuxInt - v.reset(OpARM64SUBconst) - v.AuxInt = c - v.AddArg(x) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.Aux = s + v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (SUB x x) - // cond: - // result: (MOVDconst [0]) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) + // 
result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr0 idx0 mem) for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + t := v.Type + if v.AuxInt != 24 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (SUB x (SUB y z)) - // cond: - // result: (SUB (ADD x z) y) - for { _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SUB { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - _ = v_1.Args[1] - y := v_1.Args[0] - z := v_1.Args[1] - v.reset(OpARM64SUB) - v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(y) - return true - } - // match: (SUB (SUB x y) z) - // cond: - // result: (SUB x (ADD y z)) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SUB { + if o0.AuxInt != 16 { break } - _ = v_0.Args[1] - x := v_0.Args[0] - y := v_0.Args[1] - z := v.Args[1] - v.reset(OpARM64SUB) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) - v0.AddArg(y) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (SUB x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (SUBshiftLL x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpARM64MOVHUloadidx { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + mem := x0.Args[2] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { break } - v.reset(OpARM64SUBshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (SUB x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (SUBshiftRL x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if x1.AuxInt != 2 { break } - v.reset(OpARM64SUBshiftRL) - v.AuxInt = 
c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (SUB x0 x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (SUBshiftRA x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - v.reset(OpARM64SUBshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { - // match: (SUBconst [0] x) - // cond: - // result: x - for { - if v.AuxInt != 0 { + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (SUBconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [d-c]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = d - c - return true - } - // match: (SUBconst [c] (SUBconst [d] x)) - // cond: - // result: (ADDconst [-c-d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SUBconst { + if x2.AuxInt != 3 { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ADDconst) - v.AuxInt = -c - d - v.AddArg(x) - return true - } - // match: (SUBconst [c] (ADDconst [d] x)) - // cond: - // result: (ADDconst [-c+d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64ADDconst { + if x2.Aux != s { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64ADDconst) - v.AuxInt = -c + d - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64SUBshiftLL_0(v *Value) bool { - // match: (SUBshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (SUBconst x [int64(uint64(c)< [24] 
o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr idx mem) for { - d := v.AuxInt + t := v.Type + if v.AuxInt != 24 { + break + } _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if o0.AuxInt != 16 { break } - if !(c == d) { + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpARM64MOVHUloadidx { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64SUBshiftRA_0(v *Value) bool { - // match: (SUBshiftRA x (MOVDconst [c]) [d]) - // cond: - // result: (SUBconst x [c>>uint64(d)]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { break } - c := v_1.AuxInt - v.reset(OpARM64SUBconst) - v.AuxInt = c >> uint64(d) - v.AddArg(x) - return true - } - // match: (SUBshiftRA x (SRAconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRAconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + _ = x1.Args[2] + if ptr != x1.Args[0] { break } - if !(c == d) { + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64SUBshiftRL_0(v *Value) 
bool { - // match: (SUBshiftRL x (MOVDconst [c]) [d]) - // cond: - // result: (SUBconst x [int64(uint64(c)>>uint64(d))]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if x1_1.AuxInt != 2 { break } - c := v_1.AuxInt - v.reset(OpARM64SUBconst) - v.AuxInt = int64(uint64(c) >> uint64(d)) - v.AddArg(x) - return true - } - // match: (SUBshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if idx != x1_1.Args[0] { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if mem != x1.Args[2] { break } - if !(c == d) { + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64TST_0(v *Value) bool { - // match: (TST x (MOVDconst [c])) - // cond: - // result: (TSTconst [c] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { break } - c := v_1.AuxInt - v.reset(OpARM64TSTconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { - // match: (TSTWconst (MOVDconst [x]) [y]) - // cond: int32(x&y)==0 - // result: (FlagEQ) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x2.Args[2] + if ptr != x2.Args[0] { break } - x := v_0.AuxInt - if !(int32(x&y) == 0) { + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { break } - v.reset(OpARM64FlagEQ) - return true - } - // match: (TSTWconst (MOVDconst [x]) [y]) - // cond: int32(x&y)<0 - // result: (FlagLT_UGT) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if x2_1.AuxInt != 3 { break } - x := v_0.AuxInt - if !(int32(x&y) < 0) { + if idx != x2_1.Args[0] { break } - v.reset(OpARM64FlagLT_UGT) - return true - 
} - // match: (TSTWconst (MOVDconst [x]) [y]) - // cond: int32(x&y)>0 - // result: (FlagGT_UGT) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if mem != x2.Args[2] { break } - x := v_0.AuxInt - if !(int32(x&y) > 0) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { break } - v.reset(OpARM64FlagGT_UGT) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { - // match: (TSTconst (MOVDconst [x]) [y]) - // cond: int64(x&y)==0 - // result: (FlagEQ) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx ptr0 (SLLconst [1] idx0) mem) for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + t := v.Type + if v.AuxInt != 24 { break } - x := v_0.AuxInt - if !(int64(x&y) == 0) { + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - v.reset(OpARM64FlagEQ) - return true - } - // match: (TSTconst (MOVDconst [x]) [y]) - // cond: int64(x&y)<0 - // result: (FlagLT_UGT) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if o0.AuxInt != 16 { break } - x := v_0.AuxInt - if !(int64(x&y) < 0) { + _ = o0.Args[1] + x0 := 
o0.Args[0] + if x0.Op != OpARM64MOVHUloadidx2 { break } - v.reset(OpARM64FlagLT_UGT) - return true - } - // match: (TSTconst (MOVDconst [x]) [y]) - // cond: int64(x&y)>0 - // result: (FlagGT_UGT) - for { - y := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + mem := x0.Args[2] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { break } - x := v_0.AuxInt - if !(int64(x&y) > 0) { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - v.reset(OpARM64FlagGT_UGT) - return true - } - return false -} -func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool { - // match: (UBFIZ [bfc] (SLLconst [sc] x)) - // cond: sc < getARM64BFwidth(bfc) - // result: (UBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) - for { - bfc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if x1.AuxInt != 2 { break } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(sc < getARM64BFwidth(bfc)) { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADDshiftLL { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool { - // match: (UBFX [bfc] (SRLconst [sc] x)) - // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64 - // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) - for { - bfc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + if p1.AuxInt != 1 { break } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - v.reset(OpARM64UBFX) - v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)) - v.AddArg(x) - return true - } - // match: (UBFX [bfc] (SLLconst [sc] x)) - // cond: sc == getARM64BFlsb(bfc) - // result: (ANDconst [1< 
getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) - // result: (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) - for { - bfc := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { break } - sc := v_0.AuxInt - x := v_0.Args[0] - if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { break } - v.reset(OpARM64UBFIZ) - v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) - v.AddArg(x) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type) + v1.AuxInt = 1 + v1.AddArg(idx0) + v0.AddArg(v1) + v0.AddArg(mem) return true } - return false -} -func rewriteValueARM64_OpARM64UDIV_0(v *Value) bool { - // match: (UDIV x (MOVDconst [1])) - // cond: - // result: x + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) + // cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && 
clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i0] p) mem) for { + t := v.Type + if v.AuxInt != 56 { + break + } _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - if v_1.AuxInt != 1 { + if o0.AuxInt != 48 { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (UDIV x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (SRLconst [log2(c)] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + if o1.AuxInt != 40 { break } - v.reset(OpARM64SRLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (UDIV (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(uint64(c)/uint64(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if o2.AuxInt != 32 { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(c) / uint64(d)) - return true - } - return false -} -func rewriteValueARM64_OpARM64UDIVW_0(v *Value) bool { - // match: (UDIVW x (MOVDconst [c])) - // cond: uint32(c)==1 - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = o2.Args[1] + x0 := o2.Args[0] + if x0.Op != OpARM64MOVWUload { break } - c := v_1.AuxInt - if !(uint32(c) == 1) { + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (UDIVW x (MOVDconst [c])) - // cond: 
isPowerOfTwo(c) && is32Bit(c) - // result: (SRLconst [log2(c)] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c) && is32Bit(c)) { + i4 := x1.AuxInt + if x1.Aux != s { break } - v.reset(OpARM64SRLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (UDIVW (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(uint32(c)/uint32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x1.Args[1] + if p != x1.Args[0] { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if mem != x1.Args[1] { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c) / uint32(d)) - return true - } - return false -} -func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { - // match: (UMOD _ (MOVDconst [1])) - // cond: - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { break } - if v_1.AuxInt != 1 { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (UMOD x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (ANDconst [c-1] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + i5 := x2.AuxInt + if x2.Aux != s { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { + _ = x2.Args[1] + if p != x2.Args[0] { break } - v.reset(OpARM64ANDconst) - v.AuxInt = c - 1 - v.AddArg(x) - return true - } - // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(uint64(c)%uint64(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if mem != x2.Args[1] { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + y3 := 
o0.Args[1] + if y3.Op != OpARM64MOVDnop { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(c) % uint64(d)) - return true - } - return false -} -func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { - // match: (UMODW _ (MOVDconst [c])) - // cond: uint32(c)==1 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { break } - c := v_1.AuxInt - if !(uint32(c) == 1) { + i6 := x3.AuxInt + if x3.Aux != s { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (UMODW x (MOVDconst [c])) - // cond: isPowerOfTwo(c) && is32Bit(c) - // result: (ANDconst [c-1] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + _ = x3.Args[1] + if p != x3.Args[0] { break } - c := v_1.AuxInt - if !(isPowerOfTwo(c) && is32Bit(c)) { + if mem != x3.Args[1] { break } - v.reset(OpARM64ANDconst) - v.AuxInt = c - 1 - v.AddArg(x) - return true - } - // match: (UMODW (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(uint32(c)%uint32(d))]) - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { break } - d := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c) % uint32(d)) - return true - } - return false -} -func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { - // match: (XOR x (MOVDconst [c])) - // cond: - // result: (XORconst [c] x) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + i7 := x4.AuxInt + if x4.Aux != s { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR (MOVDconst [c]) x) - // cond: - // result: (XORconst [c] x) - for { - _ = v.Args[1] - 
v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x4.Args[1] + if p != x4.Args[0] { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XOR x x) - // cond: - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - x := v.Args[0] - if x != v.Args[1] { + if mem != x4.Args[1] { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (XOR x (MVN y)) - // cond: - // result: (EON x y) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MVN { + if !(i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { break } - y := v_1.Args[0] - v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.Aux = s + v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) + v1.AuxInt = i0 + v1.AddArg(p) + v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (XOR (MVN y) x) - // cond: - // result: (EON x y) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && 
mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr0 idx0 mem) for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { + t := v.Type + if v.AuxInt != 56 { break } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64EON) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (XOR x0 x1:(SLLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (XORshiftLL x0 y [c]) - for { _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SLLconst { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if o0.AuxInt != 48 { break } - v.reset(OpARM64XORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR x1:(SLLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftLL x0 y [c]) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SLLconst { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + if o1.AuxInt != 40 { break } - v.reset(OpARM64XORshiftLL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR x0 x1:(SRLconst [c] y)) - // cond: clobberIfDead(x1) - // result: (XORshiftRL x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRLconst { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if o2.AuxInt != 32 { break } - v.reset(OpARM64XORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR 
x1:(SRLconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftRL x0 y [c]) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRLconst { + _ = o2.Args[1] + x0 := o2.Args[0] + if x0.Op != OpARM64MOVWUloadidx { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + mem := x0.Args[2] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { break } - v.reset(OpARM64XORshiftRL) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - // match: (XOR x0 x1:(SRAconst [c] y)) - // cond: clobberIfDead(x1) - // result: (XORshiftRA x0 y [c]) - for { - _ = v.Args[1] - x0 := v.Args[0] - x1 := v.Args[1] - if x1.Op != OpARM64SRAconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - c := x1.AuxInt - y := x1.Args[0] - if !(clobberIfDead(x1)) { + if x1.AuxInt != 4 { break } - v.reset(OpARM64XORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64XOR_10(v *Value) bool { - // match: (XOR x1:(SRAconst [c] y) x0) - // cond: clobberIfDead(x1) - // result: (XORshiftRA x0 y [c]) - for { - _ = v.Args[1] - x1 := v.Args[0] - if x1.Op != OpARM64SRAconst { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { break } - c := x1.AuxInt - y := x1.Args[0] - x0 := v.Args[1] - if !(clobberIfDead(x1)) { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - v.reset(OpARM64XORshiftRA) - v.AuxInt = c - v.AddArg(x0) - v.AddArg(y) - return true - } - return false -} -func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { - // match: (XORconst [0] x) - // cond: - // result: x - for { - if v.AuxInt != 0 { + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (XORconst [-1] x) - // cond: - // result: (MVN x) - for { - if v.AuxInt != -1 { - break + 
x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + break } - x := v.Args[0] - v.reset(OpARM64MVN) - v.AddArg(x) - return true - } - // match: (XORconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [c^d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if x2.AuxInt != 5 { break } - d := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c ^ d - return true - } - // match: (XORconst [c] (XORconst [d] x)) - // cond: - // result: (XORconst [c^d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpARM64XORconst { + if x2.Aux != s { break } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpARM64XORconst) - v.AuxInt = c ^ d - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { - b := v.Block - _ = b - // match: (XORshiftLL (MOVDconst [c]) x [d]) - // cond: - // result: (XORconst [c] (SLLconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64XORconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (XORshiftLL x (MOVDconst [c]) [d]) - // cond: - // result: (XORconst x [int64(uint64(c)< [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: (RORWconst [32-c] x) - for { - t := v.Type - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - if x != v.Args[1] { + if x4.AuxInt != 7 { break } - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if x4.Aux != s { break } - v.reset(OpARM64RORWconst) - v.AuxInt = 32 - c - v.AddArg(x) - return true - } - // match: (XORshiftLL [c] (SRLconst x [64-c]) x2) - // cond: - // result: 
(EXTRconst [64-c] x2 x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + _ = x4.Args[1] + if p != x4.Args[0] { break } - if v_0.AuxInt != 64-c { + if mem != x4.Args[1] { break } - x := v_0.Args[0] - x2 := v.Args[1] - v.reset(OpARM64EXTRconst) - v.AuxInt = 64 - c - v.AddArg(x2) - v.AddArg(x) + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v0.AddArg(idx0) + v0.AddArg(mem) return true } - // match: (XORshiftLL [c] (UBFX [bfc] x) x2) - // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) - // result: (EXTRWconst [32-c] x2 x) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx4 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && 
clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr0 (SLLconst [2] idx0) mem) for { t := v.Type - c := v.AuxInt + if v.AuxInt != 56 { + break + } _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64UBFX { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - bfc := v_0.AuxInt - x := v_0.Args[0] - x2 := v.Args[1] - if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + if o0.AuxInt != 48 { break } - v.reset(OpARM64EXTRWconst) - v.AuxInt = 32 - c - v.AddArg(x2) - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { - b := v.Block - _ = b - // match: (XORshiftRA (MOVDconst [c]) x [d]) - // cond: - // result: (XORconst [c] (SRAconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64XORconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (XORshiftRA x (MOVDconst [c]) [d]) - // cond: - // result: (XORconst x [c>>uint64(d)]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if o1.AuxInt != 40 { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = c >> uint64(d) - v.AddArg(x) - return true - } - // match: (XORshiftRA x (SRAconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRAconst { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if o2.AuxInt != 32 { break } - if !(c == d) { + _ = o2.Args[1] + x0 := o2.Args[0] + if x0.Op != OpARM64MOVWUloadidx4 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt 
= 0 - return true - } - return false -} -func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { - b := v.Block - _ = b - // match: (XORshiftRL (MOVDconst [c]) x [d]) - // cond: - // result: (XORconst [c] (SRLconst x [d])) - for { - d := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + _ = x0.Args[2] + ptr0 := x0.Args[0] + idx0 := x0.Args[1] + mem := x0.Args[2] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64XORconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) - v0.AuxInt = d - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (XORshiftRL x (MOVDconst [c]) [d]) - // cond: - // result: (XORconst x [int64(uint64(c)>>uint64(d))]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { break } - c := v_1.AuxInt - v.reset(OpARM64XORconst) - v.AuxInt = int64(uint64(c) >> uint64(d)) - v.AddArg(x) - return true - } - // match: (XORshiftRL x (SRLconst x [c]) [d]) - // cond: c==d - // result: (MOVDconst [0]) - for { - d := v.AuxInt - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if x1.AuxInt != 4 { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + s := x1.Aux + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADDshiftLL { break } - if !(c == d) { + if p1.AuxInt != 2 { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (XORshiftRL [c] (SLLconst x [64-c]) x) - // cond: - // result: (RORconst [ c] x) - for { - c := v.AuxInt - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { break } - if v_0.AuxInt != 64-c { + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { break } - x := v_0.Args[0] - if x != v.Args[1] { + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { break } - 
v.reset(OpARM64RORconst) - v.AuxInt = c - v.AddArg(x) + if x2.AuxInt != 5 { + break + } + if x2.Aux != s { + break + } + _ = x2.Args[1] + p := x2.Args[0] + if mem != x2.Args[1] { + break + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + break + } + if x3.AuxInt != 6 { + break + } + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + break + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { + break + } + if x4.AuxInt != 7 { + break + } + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr0) + v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type) + v1.AuxInt = 2 + v1.AddArg(idx0) + v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (XORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr 
(ADDconst [7] idx) mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx ptr idx mem) for { t := v.Type - c := v.AuxInt + if v.AuxInt != 56 { + break + } _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - if v_0.AuxInt != 32-c { + if o0.AuxInt != 48 { break } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVWUreg { + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { break } - if x != v_1.Args[0] { + if o1.AuxInt != 40 { break } - if !(c < 32 && t.Size() == 4) { + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { break } - v.reset(OpARM64RORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueARM64_OpAdd16_0(v *Value) bool { - // match: (Add16 x y) - // cond: - // result: (ADD x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAdd32_0(v *Value) bool { - // match: (Add32 x y) - // cond: - // result: (ADD x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAdd32F_0(v *Value) bool { - // match: (Add32F x y) - // cond: - // result: (FADDS x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64FADDS) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAdd64_0(v *Value) bool { - // match: (Add64 x y) - // cond: - // result: (ADD x y) 
- for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAdd64F_0(v *Value) bool { - // match: (Add64F x y) - // cond: - // result: (FADDD x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64FADDD) - v.AddArg(x) - v.AddArg(y) + if o2.AuxInt != 32 { + break + } + _ = o2.Args[1] + x0 := o2.Args[0] + if x0.Op != OpARM64MOVWUloadidx { + break + } + _ = x0.Args[2] + ptr := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + break + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { + break + } + if x1_1.AuxInt != 4 { + break + } + if idx != x1_1.Args[0] { + break + } + if mem != x1.Args[2] { + break + } + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + break + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { + break + } + if x2_1.AuxInt != 5 { + break + } + if idx != x2_1.Args[0] { + break + } + if mem != x2.Args[2] { + break + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + break + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + break + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst { + break + } + if x3_1.AuxInt != 6 { + break + } + if idx != x3_1.Args[0] { + break + } + if mem != x3.Args[2] { + break + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + break + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + break + } + _ = x4.Args[2] + if ptr != x4.Args[0] { + break + } + x4_1 := x4.Args[1] + if x4_1.Op != OpARM64ADDconst { + break + } + if x4_1.AuxInt != 7 { + break + } + if idx != x4_1.Args[0] { + break + } + if mem != x4.Args[2] { + break + } + if 
!(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } -} -func rewriteValueARM64_OpAdd8_0(v *Value) bool { - // match: (Add8 x y) - // cond: - // result: (ADD x y) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) + // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (REV16W (MOVHUload [i0] {s} p mem)) for { + t := v.Type + if v.AuxInt != 8 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v.AddArg(x) - v.AddArg(y) + y0 := v.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + break + } + i1 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + i0 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t) + v.reset(OpCopy) + 
v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t) + v1.AuxInt = i0 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) + v0.AddArg(v1) return true } + return false } -func rewriteValueARM64_OpAddPtr_0(v *Value) bool { - // match: (AddPtr x y) - // cond: - // result: (ADD x y) +func rewriteValueARM64_OpARM64ORshiftLL_20(v *Value) bool { + b := v.Block + _ = b + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (REV16W (MOVHUloadidx ptr0 idx0 mem)) for { + t := v.Type + if v.AuxInt != 8 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v.AddArg(x) - v.AddArg(y) + y0 := v.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + break + } + if x0.AuxInt != 1 { + break + } + s := x0.Aux + _ = x0.Args[1] + p1 := x0.Args[0] + if p1.Op != OpARM64ADD { + break + } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + mem := x0.Args[1] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + break + } + _ = x1.Args[2] + ptr0 := x1.Args[0] + idx0 := x1.Args[1] + if mem != x1.Args[2] { + break + } + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t) + 
v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpAddr_0(v *Value) bool { - // match: (Addr {sym} base) - // cond: - // result: (MOVDaddr {sym} base) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (REV16W (MOVHUloadidx ptr idx mem)) for { - sym := v.Aux - base := v.Args[0] - v.reset(OpARM64MOVDaddr) - v.Aux = sym - v.AddArg(base) + t := v.Type + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + y0 := v.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUloadidx { + break + } + _ = x0.Args[2] + ptr := x0.Args[0] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 1 { + break + } + idx := x0_1.Args[0] + mem := x0.Args[2] + y1 := v.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + break + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpARM64REV16W, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpAnd16_0(v *Value) bool { - // match: (And16 x y) - // cond: - // result: (AND x y) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p 
mem))) + // cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) for { + t := v.Type + if v.AuxInt != 24 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64AND) - v.AddArg(x) - v.AddArg(y) + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 16 { + break + } + _ = o0.Args[1] + y0 := o0.Args[0] + if y0.Op != OpARM64REV16W { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVHUload { + break + } + i2 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + break + } + i0 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if !(i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x2.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t) + v1.Aux = s + v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func 
rewriteValueARM64_OpAnd32_0(v *Value) bool { - // match: (And32 x y) - // cond: - // result: (AND x y) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr0 idx0 mem)) for { + t := v.Type + if v.AuxInt != 24 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64AND) - v.AddArg(x) - v.AddArg(y) + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 16 { + break + } + _ = o0.Args[1] + y0 := o0.Args[0] + if y0.Op != OpARM64REV16W { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVHUload { + break + } + if x0.AuxInt != 2 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + if x1.AuxInt != 1 { + break + } + if x1.Aux != s { + break + } + _ = x1.Args[1] + p1 := x1.Args[0] + if p1.Op != OpARM64ADD { + break + } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x1.Args[1] { + break + } + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + break + } + _ = x2.Args[2] + ptr0 := x2.Args[0] + idx0 := x2.Args[1] + if mem != x2.Args[2] { + break + } + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) 
!= nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(x1.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpAnd64_0(v *Value) bool { - // match: (And64 x y) - // cond: - // result: (AND x y) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUloadidx ptr idx mem)) for { + t := v.Type + if v.AuxInt != 24 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64AND) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAnd8_0(v *Value) bool { - // match: (And8 x y) - // cond: - // result: (AND x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64AND) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAndB_0(v *Value) bool { - // match: (AndB x y) - // cond: - // result: (AND x y) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64AND) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM64_OpAtomicAdd32_0(v *Value) bool { - // match: (AtomicAdd32 ptr val mem) - // cond: - // result: (LoweredAtomicAdd32 ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] 
- mem := v.Args[2] - v.reset(OpARM64LoweredAtomicAdd32) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicAdd32Variant_0(v *Value) bool { - // match: (AtomicAdd32Variant ptr val mem) - // cond: - // result: (LoweredAtomicAdd32Variant ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredAtomicAdd32Variant) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicAdd64_0(v *Value) bool { - // match: (AtomicAdd64 ptr val mem) - // cond: - // result: (LoweredAtomicAdd64 ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredAtomicAdd64) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicAdd64Variant_0(v *Value) bool { - // match: (AtomicAdd64Variant ptr val mem) - // cond: - // result: (LoweredAtomicAdd64Variant ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredAtomicAdd64Variant) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicAnd8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (AtomicAnd8 ptr val mem) - // cond: - // result: (Select1 (LoweredAtomicAnd8 ptr val mem)) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 16 { + break + } + _ = o0.Args[1] + y0 := o0.Args[0] + if y0.Op != OpARM64REV16W { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVHUloadidx { + break + } + _ = x0.Args[2] + ptr := x0.Args[0] + x0_1 := x0.Args[1] + if x0_1.Op != 
OpARM64ADDconst { + break + } + if x0_1.AuxInt != 2 { + break + } + idx := x0_1.Args[0] + mem := x0.Args[2] + y1 := o0.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + break + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { + break + } + if x1_1.AuxInt != 1 { + break + } + if idx != x1_1.Args[0] { + break + } + if mem != x1.Args[2] { + break + } + y2 := v.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + break + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + break + } + if idx != x2.Args[1] { + break + } + if mem != x2.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v.reset(OpCopy) v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpAtomicCompareAndSwap32_0(v *Value) bool { - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // cond: - // result: (LoweredAtomicCas32 ptr old new_ mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64LoweredAtomicCas32) - v.AddArg(ptr) - v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicCompareAndSwap64_0(v *Value) bool { - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // cond: - // result: (LoweredAtomicCas64 ptr old new_ mem) - for { - _ = v.Args[3] - ptr := v.Args[0] - old := v.Args[1] - new_ := v.Args[2] - mem := v.Args[3] - v.reset(OpARM64LoweredAtomicCas64) - v.AddArg(ptr) - 
v.AddArg(old) - v.AddArg(new_) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicExchange32_0(v *Value) bool { - // match: (AtomicExchange32 ptr val mem) - // cond: - // result: (LoweredAtomicExchange32 ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredAtomicExchange32) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicExchange64_0(v *Value) bool { - // match: (AtomicExchange64 ptr val mem) - // cond: - // result: (LoweredAtomicExchange64 ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredAtomicExchange64) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicLoad32_0(v *Value) bool { - // match: (AtomicLoad32 ptr mem) - // cond: - // result: (LDARW ptr mem) - for { - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64LDARW) - v.AddArg(ptr) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicLoad64_0(v *Value) bool { - // match: (AtomicLoad64 ptr mem) - // cond: - // result: (LDAR ptr mem) - for { - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64LDAR) - v.AddArg(ptr) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicLoadPtr_0(v *Value) bool { - // match: (AtomicLoadPtr ptr mem) - // cond: - // result: (LDAR ptr mem) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem))) + // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses 
== 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i0] p) mem)) for { + t := v.Type + if v.AuxInt != 56 { + break + } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64LDAR) - v.AddArg(ptr) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicOr8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (AtomicOr8 ptr val mem) - // cond: - // result: (Select1 (LoweredAtomicOr8 ptr val mem)) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg(ptr) - v0.AddArg(val) - v0.AddArg(mem) + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 48 { + break + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 40 { + break + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { + break + } + if o2.AuxInt != 32 { + break + } + _ = o2.Args[1] + y0 := o2.Args[0] + if y0.Op != OpARM64REVW { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVWUload { + break + } + i4 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + i3 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } 
+ _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + break + } + i1 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + break + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUload { + break + } + i0 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x4.Pos, OpARM64REV, t) + v.reset(OpCopy) v.AddArg(v0) + v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t) + v1.Aux = s + v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type) + v2.AuxInt = i0 + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpAtomicStore32_0(v *Value) bool { - // match: (AtomicStore32 ptr val mem) - // cond: - // result: (STLRW ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64STLRW) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicStore64_0(v *Value) bool { - // match: (AtomicStore64 ptr val mem) - // cond: - // result: (STLR ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - 
v.reset(OpARM64STLR) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAtomicStorePtrNoWB_0(v *Value) bool { - // match: (AtomicStorePtrNoWB ptr val mem) - // cond: - // result: (STLR ptr val mem) - for { - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64STLR) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpAvg64u_0(v *Value) bool { - b := v.Block - _ = b - // match: (Avg64u x y) - // cond: - // result: (ADD (SRLconst (SUB x y) [1]) y) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem))) + // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDloadidx ptr0 idx0 mem)) for { t := v.Type + if v.AuxInt != 56 { + break + } _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64ADD) - v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) - v0.AuxInt = 1 - v1 := b.NewValue0(v.Pos, OpARM64SUB, t) - v1.AddArg(x) - v1.AddArg(y) - v0.AddArg(v1) + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 48 { + break + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + 
} + if o1.AuxInt != 40 { + break + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { + break + } + if o2.AuxInt != 32 { + break + } + _ = o2.Args[1] + y0 := o2.Args[0] + if y0.Op != OpARM64REVW { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVWUload { + break + } + if x0.AuxInt != 4 { + break + } + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + if x1.AuxInt != 3 { + break + } + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + break + } + if x2.AuxInt != 2 { + break + } + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + break + } + if x3.AuxInt != 1 { + break + } + if x3.Aux != s { + break + } + _ = x3.Args[1] + p1 := x3.Args[0] + if p1.Op != OpARM64ADD { + break + } + _ = p1.Args[1] + ptr1 := p1.Args[0] + idx1 := p1.Args[1] + if mem != x3.Args[1] { + break + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + break + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + break + } + _ = x4.Args[2] + ptr0 := x4.Args[0] + idx0 := x4.Args[1] + if mem != x4.Args[2] { + break + } + if !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(x3.Pos, OpARM64REV, t) + v.reset(OpCopy) v.AddArg(v0) - v.AddArg(y) + v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr0) + v1.AddArg(idx0) + v1.AddArg(mem) + v0.AddArg(v1) return true } -} -func rewriteValueARM64_OpBitLen64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (BitLen64 x) - // cond: - // result: (SUB (MOVDconst [64]) (CLZ x)) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDloadidx ptr idx mem)) for { - x := v.Args[0] - v.reset(OpARM64SUB) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 64 + t := v.Type + if v.AuxInt != 56 { + break + } + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { + break + } + if o0.AuxInt != 48 { + break + } + _ = o0.Args[1] + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 40 { + break + } + _ = o1.Args[1] + o2 := o1.Args[0] + if o2.Op != OpARM64ORshiftLL { + break + } + if o2.AuxInt != 32 { + 
break + } + _ = o2.Args[1] + y0 := o2.Args[0] + if y0.Op != OpARM64REVW { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVWUloadidx { + break + } + _ = x0.Args[2] + ptr := x0.Args[0] + x0_1 := x0.Args[1] + if x0_1.Op != OpARM64ADDconst { + break + } + if x0_1.AuxInt != 4 { + break + } + idx := x0_1.Args[0] + mem := x0.Args[2] + y1 := o2.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUloadidx { + break + } + _ = x1.Args[2] + if ptr != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpARM64ADDconst { + break + } + if x1_1.AuxInt != 3 { + break + } + if idx != x1_1.Args[0] { + break + } + if mem != x1.Args[2] { + break + } + y2 := o1.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUloadidx { + break + } + _ = x2.Args[2] + if ptr != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpARM64ADDconst { + break + } + if x2_1.AuxInt != 2 { + break + } + if idx != x2_1.Args[0] { + break + } + if mem != x2.Args[2] { + break + } + y3 := o0.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUloadidx { + break + } + _ = x3.Args[2] + if ptr != x3.Args[0] { + break + } + x3_1 := x3.Args[1] + if x3_1.Op != OpARM64ADDconst { + break + } + if x3_1.AuxInt != 1 { + break + } + if idx != x3_1.Args[0] { + break + } + if mem != x3.Args[2] { + break + } + y4 := v.Args[1] + if y4.Op != OpARM64MOVDnop { + break + } + x4 := y4.Args[0] + if x4.Op != OpARM64MOVBUloadidx { + break + } + _ = x4.Args[2] + if ptr != x4.Args[0] { + break + } + if idx != x4.Args[1] { + break + } + if mem != x4.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4) + v0 := b.NewValue0(v.Pos, OpARM64REV, t) + v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int) - v1.AddArg(x) - v.AddArg(v1) + v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t) + v1.AddArg(ptr) + v1.AddArg(idx) + v1.AddArg(mem) + v0.AddArg(v1) return true } + return false } -func rewriteValueARM64_OpBitRev16_0(v *Value) bool { +func rewriteValueARM64_OpARM64ORshiftRA_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (BitRev16 x) + // match: (ORshiftRA (MOVDconst [c]) x [d]) // cond: - // result: (SRLconst [48] (RBIT x)) + // result: (ORconst [c] (SRAconst x [d])) for { - x := v.Args[0] - v.reset(OpARM64SRLconst) - v.AuxInt = 48 - v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) return true } -} -func rewriteValueARM64_OpBitRev32_0(v *Value) bool { - // match: (BitRev32 x) + // match: (ORshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (RBITW x) + // result: (ORconst x [c>>uint64(d)]) for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64RBITW) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = c >> uint64(d) v.AddArg(x) return true } -} -func rewriteValueARM64_OpBitRev64_0(v *Value) bool { - // match: (BitRev64 x) - // cond: - // result: (RBIT x) + // match: (ORshiftRA x y:(SRAconst x [c]) [d]) + // cond: c==d + // result: y for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64RBIT) - v.AddArg(x) + y := v.Args[1] + if y.Op != OpARM64SRAconst { + 
break + } + c := y.AuxInt + if x != y.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpBitRev8_0(v *Value) bool { +func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (BitRev8 x) + // match: (ORshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (SRLconst [56] (RBIT x)) + // result: (ORconst [c] (SRLconst x [d])) for { - x := v.Args[0] - v.reset(OpARM64SRLconst) - v.AuxInt = 56 - v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) return true } -} -func rewriteValueARM64_OpBswap32_0(v *Value) bool { - // match: (Bswap32 x) + // match: (ORshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (REVW x) + // result: (ORconst x [int64(uint64(c)>>uint64(d))]) for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64REVW) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpBswap64_0(v *Value) bool { - // match: (Bswap64 x) - // cond: - // result: (REV x) + // match: (ORshiftRL x y:(SRLconst x [c]) [d]) + // cond: c==d + // result: y for { + d := v.AuxInt + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64REV) - v.AddArg(x) + y := v.Args[1] + if y.Op != OpARM64SRLconst { + break + } + c := y.AuxInt + if x != y.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } -} -func rewriteValueARM64_OpCeil_0(v *Value) bool { - // match: (Ceil x) + // match: (ORshiftRL [c] (SLLconst x [64-c]) x) // cond: - // result: 
(FRINTPD x) + // result: (RORconst [ c] x) for { - x := v.Args[0] - v.reset(OpARM64FRINTPD) + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + if v_0.AuxInt != 64-c { + break + } + x := v_0.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARM64RORconst) + v.AuxInt = c v.AddArg(x) return true } -} -func rewriteValueARM64_OpClosureCall_0(v *Value) bool { - // match: (ClosureCall [argwid] entry closure mem) - // cond: - // result: (CALLclosure [argwid] entry closure mem) + // match: (ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) + // cond: c < 32 && t.Size() == 4 + // result: (RORWconst [c] x) for { - argwid := v.AuxInt - _ = v.Args[2] - entry := v.Args[0] - closure := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64CALLclosure) - v.AuxInt = argwid - v.AddArg(entry) - v.AddArg(closure) - v.AddArg(mem) + t := v.Type + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + if v_0.AuxInt != 32-c { + break + } + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWUreg { + break + } + if x != v_1.Args[0] { + break + } + if !(c < 32 && t.Size() == 4) { + break + } + v.reset(OpARM64RORWconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpCom16_0(v *Value) bool { - // match: (Com16 x) - // cond: - // result: (MVN x) + // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) + // cond: lc > rc && ac == ^((1< rc && ac == ^((1<>uint64(c)]) for { - b := v.AuxInt + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + d := v_0.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = b + v.AuxInt = d >> uint64(c) return true } -} -func rewriteValueARM64_OpConstNil_0(v *Value) bool { - // match: (ConstNil) - // cond: - // result: (MOVDconst [0]) + // match: (SRAconst [rc] (SLLconst [lc] x)) + // cond: lc > rc + // result: (SBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) for { - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + rc := v.AuxInt + v_0 := 
v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc > rc) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpCtz32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Ctz32 x) - // cond: - // result: (CLZW (RBITW x)) + // match: (SRAconst [rc] (SLLconst [lc] x)) + // cond: lc <= rc + // result: (SBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) for { - t := v.Type - x := v.Args[0] - v.reset(OpARM64CLZW) - v0 := b.NewValue0(v.Pos, OpARM64RBITW, t) - v0.AddArg(x) - v.AddArg(v0) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc <= rc) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpCtz32NonZero_0(v *Value) bool { - // match: (Ctz32NonZero x) - // cond: - // result: (Ctz32 x) + // match: (SRAconst [rc] (MOVWreg x)) + // cond: rc < 32 + // result: (SBFX [arm64BFAuxInt(rc, 32-rc)] x) for { - x := v.Args[0] - v.reset(OpCtz32) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVWreg { + break + } + x := v_0.Args[0] + if !(rc < 32) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BFAuxInt(rc, 32-rc) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCtz64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Ctz64 x) - // cond: - // result: (CLZ (RBIT x)) + // match: (SRAconst [rc] (MOVHreg x)) + // cond: rc < 16 + // result: (SBFX [arm64BFAuxInt(rc, 16-rc)] x) for { - t := v.Type - x := v.Args[0] - v.reset(OpARM64CLZ) - v0 := b.NewValue0(v.Pos, OpARM64RBIT, t) - v0.AddArg(x) - v.AddArg(v0) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVHreg { + break + } + x := v_0.Args[0] + if !(rc < 16) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BFAuxInt(rc, 16-rc) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpCtz64NonZero_0(v *Value) bool { - 
// match: (Ctz64NonZero x) - // cond: - // result: (Ctz64 x) + // match: (SRAconst [rc] (MOVBreg x)) + // cond: rc < 8 + // result: (SBFX [arm64BFAuxInt(rc, 8-rc)] x) for { - x := v.Args[0] - v.reset(OpCtz64) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVBreg { + break + } + x := v_0.Args[0] + if !(rc < 8) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BFAuxInt(rc, 8-rc) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt32Fto32_0(v *Value) bool { - // match: (Cvt32Fto32 x) - // cond: - // result: (FCVTZSSW x) + // match: (SRAconst [sc] (SBFIZ [bfc] x)) + // cond: sc < getARM64BFlsb(bfc) + // result: (SBFIZ [arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc))] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTZSSW) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SBFIZ { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if !(sc < getARM64BFlsb(bfc)) { + break + } + v.reset(OpARM64SBFIZ) + v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)-sc, getARM64BFwidth(bfc)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt32Fto32U_0(v *Value) bool { - // match: (Cvt32Fto32U x) - // cond: - // result: (FCVTZUSW x) + // match: (SRAconst [sc] (SBFIZ [bfc] x)) + // cond: sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) + // result: (SBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTZUSW) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SBFIZ { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if !(sc >= getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64SBFX) + v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpCvt32Fto64_0(v *Value) bool { - // match: (Cvt32Fto64 x) +func rewriteValueARM64_OpARM64SRL_0(v *Value) bool { + // match: (SRL x (MOVDconst [c])) // 
cond: - // result: (FCVTZSS x) + // result: (SRLconst x [c&63]) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FCVTZSS) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64SRLconst) + v.AuxInt = c & 63 v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpCvt32Fto64F_0(v *Value) bool { - // match: (Cvt32Fto64F x) +func rewriteValueARM64_OpARM64SRLconst_0(v *Value) bool { + // match: (SRLconst [c] (MOVDconst [d])) // cond: - // result: (FCVTSD x) + // result: (MOVDconst [int64(uint64(d)>>uint64(c))]) for { - x := v.Args[0] - v.reset(OpARM64FCVTSD) - v.AddArg(x) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint64(d) >> uint64(c)) return true } -} -func rewriteValueARM64_OpCvt32Fto64U_0(v *Value) bool { - // match: (Cvt32Fto64U x) - // cond: - // result: (FCVTZUS x) + // match: (SRLconst [c] (SLLconst [c] x)) + // cond: 0 < c && c < 64 + // result: (ANDconst [1< rc + // result: (UBFIZ [arm64BFAuxInt(lc-rc, 64-lc)] x) for { - x := v.Args[0] - v.reset(OpARM64UCVTFWS) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpCvt32Uto64F_0(v *Value) bool { - // match: (Cvt32Uto64F x) - // cond: - // result: (UCVTFWD x) - for { - x := v.Args[0] - v.reset(OpARM64UCVTFWD) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc > rc) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(lc-rc, 64-lc) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt32to32F_0(v *Value) bool { - // match: (Cvt32to32F x) - // cond: - // result: (SCVTFWS x) + // match: (SRLconst [sc] (ANDconst [ac] x)) + // cond: isARM64BFMask(sc, ac, sc) + // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(ac, sc))] x) for { - x := v.Args[0] - v.reset(OpARM64SCVTFWS) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64ANDconst { + break + } + ac 
:= v_0.AuxInt + x := v_0.Args[0] + if !(isARM64BFMask(sc, ac, sc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(ac, sc)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt32to64F_0(v *Value) bool { - // match: (Cvt32to64F x) - // cond: - // result: (SCVTFWD x) + // match: (SRLconst [sc] (MOVWUreg x)) + // cond: isARM64BFMask(sc, 1<<32-1, sc) + // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc))] x) for { - x := v.Args[0] - v.reset(OpARM64SCVTFWD) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVWUreg { + break + } + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<32-1, sc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<32-1, sc)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt64Fto32_0(v *Value) bool { - // match: (Cvt64Fto32 x) - // cond: - // result: (FCVTZSDW x) + // match: (SRLconst [sc] (MOVHUreg x)) + // cond: isARM64BFMask(sc, 1<<16-1, sc) + // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc))] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTZSDW) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVHUreg { + break + } + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<16-1, sc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<16-1, sc)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt64Fto32F_0(v *Value) bool { - // match: (Cvt64Fto32F x) - // cond: - // result: (FCVTDS x) + // match: (SRLconst [sc] (MOVBUreg x)) + // cond: isARM64BFMask(sc, 1<<8-1, sc) + // result: (UBFX [arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc))] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTDS) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVBUreg { + break + } + x := v_0.Args[0] + if !(isARM64BFMask(sc, 1<<8-1, sc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc, arm64BFWidth(1<<8-1, sc)) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt64Fto32U_0(v *Value) bool { - // 
match: (Cvt64Fto32U x) - // cond: - // result: (FCVTZUDW x) + // match: (SRLconst [rc] (SLLconst [lc] x)) + // cond: lc < rc + // result: (UBFX [arm64BFAuxInt(rc-lc, 64-rc)] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTZUDW) + rc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + lc := v_0.AuxInt + x := v_0.Args[0] + if !(lc < rc) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(rc-lc, 64-rc) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt64Fto64_0(v *Value) bool { - // match: (Cvt64Fto64 x) - // cond: - // result: (FCVTZSD x) + // match: (SRLconst [sc] (UBFX [bfc] x)) + // cond: sc < getARM64BFwidth(bfc) + // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) for { - x := v.Args[0] - v.reset(OpARM64FCVTZSD) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFX { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if !(sc < getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc) v.AddArg(x) return true } -} -func rewriteValueARM64_OpCvt64Fto64U_0(v *Value) bool { - // match: (Cvt64Fto64U x) - // cond: - // result: (FCVTZUD x) + // match: (SRLconst [sc] (UBFIZ [bfc] x)) + // cond: sc == getARM64BFlsb(bfc) + // result: (ANDconst [1< getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) + // result: (UBFX [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) for { - x := v.Args[0] - v.reset(OpARM64UCVTFD) + sc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFIZ { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpCvt64to32F_0(v *Value) bool { - // match: (Cvt64to32F x) - 
// cond: - // result: (SCVTFS x) +func rewriteValueARM64_OpARM64STP_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) + // cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (STP [off1+off2] {sym} ptr val1 val2 mem) for { - x := v.Args[0] - v.reset(OpARM64SCVTFS) - v.AddArg(x) + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val1 := v.Args[1] + val2 := v.Args[2] + mem := v.Args[3] + if !(is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64STP) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val1) + v.AddArg(val2) + v.AddArg(mem) return true } -} -func rewriteValueARM64_OpCvt64to64F_0(v *Value) bool { - // match: (Cvt64to64F x) - // cond: - // result: (SCVTFD x) + // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem) for { - x := v.Args[0] - v.reset(OpARM64SCVTFD) - v.AddArg(x) + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val1 := v.Args[1] + val2 := v.Args[2] + mem := v.Args[3] + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + break + } + v.reset(OpARM64STP) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val1) + v.AddArg(val2) + v.AddArg(mem) return true } -} -func rewriteValueARM64_OpDiv16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div16 x y) + // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst 
[0]) mem) // cond: - // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) + // result: (MOVQstorezero [off] {sym} ptr mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64DIVW) - v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + if v_1.AuxInt != 0 { + break + } + v_2 := v.Args[2] + if v_2.Op != OpARM64MOVDconst { + break + } + if v_2.AuxInt != 0 { + break + } + mem := v.Args[3] + v.reset(OpARM64MOVQstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } + return false } -func rewriteValueARM64_OpDiv16u_0(v *Value) bool { +func rewriteValueARM64_OpARM64SUB_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div16u x y) - // cond: - // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y)) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UDIVW) - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValueARM64_OpDiv32_0(v *Value) bool { - // match: (Div32 x y) + // match: (SUB x (MOVDconst [c])) // cond: - // result: (DIVW x y) + // result: (SUBconst [c] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64DIVW) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64SUBconst) + v.AuxInt = c v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv32F_0(v *Value) bool { - // match: (Div32F x y) - // cond: - // result: (FDIVS x y) + // match: (SUB a l:(MUL x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MSUB a x y) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - 
v.reset(OpARM64FDIVS) + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MUL { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUB) + v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv32u_0(v *Value) bool { - // match: (Div32u x y) - // cond: - // result: (UDIVW x y) + // match: (SUB a l:(MNEG x y)) + // cond: l.Uses==1 && clobber(l) + // result: (MADD a x y) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UDIVW) + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MNEG { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADD) + v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv64_0(v *Value) bool { - // match: (Div64 x y) - // cond: - // result: (DIV x y) + // match: (SUB a l:(MULW x y)) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MSUBW a x y) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64DIV) + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MULW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MSUBW) + v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv64F_0(v *Value) bool { - // match: (Div64F x y) - // cond: - // result: (FDIVD x y) + // match: (SUB a l:(MNEGW x y)) + // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l) + // result: (MADDW a x y) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64FDIVD) + a := v.Args[0] + l := v.Args[1] + if l.Op != OpARM64MNEGW { + break + } + _ = l.Args[1] + x := l.Args[0] + y := l.Args[1] + if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) { + break + } + v.reset(OpARM64MADDW) + v.AddArg(a) v.AddArg(x) v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv64u_0(v 
*Value) bool { - // match: (Div64u x y) + // match: (SUB x x) // cond: - // result: (UDIV x y) + // result: (MOVDconst [0]) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UDIV) - v.AddArg(x) - v.AddArg(y) + if x != v.Args[1] { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM64_OpDiv8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div8 x y) + // match: (SUB x (SUB y z)) // cond: - // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) + // result: (SUB (ADD x z) y) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64DIVW) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v_1 := v.Args[1] + if v_1.Op != OpARM64SUB { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpDiv8u_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Div8u x y) + // match: (SUB (SUB x y) z) // cond: - // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (SUB x (ADD y z)) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UDIVW) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) + v_0 := v.Args[0] + if v_0.Op != OpARM64SUB { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + z := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type) + v0.AddArg(y) + v0.AddArg(z) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) return true } -} -func rewriteValueARM64_OpEq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Eq16 x y) - // cond: - // result: (Equal (CMPW 
(ZeroExt16to32 x) (ZeroExt16to32 y))) + // match: (SUB x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftLL x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpEq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Eq32 x y) - // cond: - // result: (Equal (CMPW x y)) + // match: (SUB x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftRL x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpEq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Eq32F x y) - // cond: - // result: (Equal (FCMPS x y)) +func rewriteValueARM64_OpARM64SUB_10(v *Value) bool { + // match: (SUB x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (SUBshiftRA x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if 
!(clobberIfDead(x1)) { + break + } + v.reset(OpARM64SUBshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpEq64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Eq64 x y) +func rewriteValueARM64_OpARM64SUBconst_0(v *Value) bool { + // match: (SUBconst [0] x) // cond: - // result: (Equal (CMP x y)) + // result: x for { - _ = v.Args[1] + if v.AuxInt != 0 { + break + } x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } -} -func rewriteValueARM64_OpEq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Eq64F x y) + // match: (SUBconst [c] (MOVDconst [d])) // cond: - // result: (Equal (FCMPD x y)) + // result: (MOVDconst [d-c]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = d - c return true } -} -func rewriteValueARM64_OpEq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Eq8 x y) + // match: (SUBconst [c] (SUBconst [d] x)) // cond: - // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (ADDconst [-c-d] x) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64Equal) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = -c - d + 
v.AddArg(x) return true } -} -func rewriteValueARM64_OpEqB_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (EqB x y) + // match: (SUBconst [c] (ADDconst [d] x)) // cond: - // result: (XOR (MOVDconst [1]) (XOR x y)) + // result: (ADDconst [-c+d] x) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64XOR) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 1 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool) - v1.AddArg(x) - v1.AddArg(y) - v.AddArg(v1) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64ADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = -c + d + v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpEqPtr_0(v *Value) bool { - b := v.Block - _ = b - // match: (EqPtr x y) +func rewriteValueARM64_OpARM64SUBshiftLL_0(v *Value) bool { + // match: (SUBshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (Equal (CMP x y)) + // result: (SUBconst x [int64(uint64(c)<>uint64(d)]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64SUBconst) + v.AuxInt = c >> uint64(d) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpGeq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32 x y) - // cond: - // result: (GreaterEqual (CMPW x y)) + // match: (SUBshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - 
v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64SRAconst { + break + } + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM64_OpGeq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32F x y) +func rewriteValueARM64_OpARM64SUBshiftRL_0(v *Value) bool { + // match: (SUBshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (GreaterEqual (FCMPS x y)) + // result: (SUBconst x [int64(uint64(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64SUBconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpGeq32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq32U x y) - // cond: - // result: (GreaterEqualU (CMPW x y)) + // match: (SUBshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { + break + } + c := v_1.AuxInt + if x != v_1.Args[0] { + break + } + if !(c == d) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM64_OpGeq64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq64 x y) +func rewriteValueARM64_OpARM64TST_0(v *Value) bool { + // match: (TST x (MOVDconst [c])) // cond: - // result: (GreaterEqual (CMP x y)) + // result: (TSTconst [c] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, 
OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64TSTconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpGeq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq64F x y) + // match: (TST (MOVDconst [c]) x) // cond: - // result: (GreaterEqual (FCMPD x y)) + // result: (TSTconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64TSTconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpGeq64U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Geq64U x y) - // cond: - // result: (GreaterEqualU (CMP x y)) + // match: (TST x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftLL x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpGeq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq8 x y) - // cond: - // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + // match: (TST x1:(SLLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (TSTshiftLL x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, 
typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + x1 := v.Args[0] + if x1.Op != OpARM64SLLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpGeq8U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Geq8U x y) - // cond: - // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + // match: (TST x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftRL x0 y [c]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) + return true } -} -func rewriteValueARM64_OpGetCallerPC_0(v *Value) bool { - // match: (GetCallerPC) - // cond: - // result: (LoweredGetCallerPC) + // match: (TST x1:(SRLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (TSTshiftRL x0 y [c]) for { - v.reset(OpARM64LoweredGetCallerPC) + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRLconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpGetCallerSP_0(v *Value) bool { - // match: (GetCallerSP) - // cond: - // result: (LoweredGetCallerSP) + // match: (TST x0 x1:(SRAconst 
[c] y)) + // cond: clobberIfDead(x1) + // result: (TSTshiftRA x0 y [c]) for { - v.reset(OpARM64LoweredGetCallerSP) + _ = v.Args[1] + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpGetClosurePtr_0(v *Value) bool { - // match: (GetClosurePtr) - // cond: - // result: (LoweredGetClosurePtr) + // match: (TST x1:(SRAconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (TSTshiftRA x0 y [c]) for { - v.reset(OpARM64LoweredGetClosurePtr) + _ = v.Args[1] + x1 := v.Args[0] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64TSTshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpGreater16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater16 x y) +func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool { + // match: (TSTW x (MOVDconst [c])) // cond: - // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) + // result: (TSTWconst [c] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64TSTWconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpGreater16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater16U x y) + // match: (TSTW (MOVDconst [c]) x) // cond: - // result: (GreaterThanU (CMPW 
(ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (TSTWconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64TSTWconst) + v.AuxInt = c + v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpGreater32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32 x y) - // cond: - // result: (GreaterThan (CMPW x y)) +func rewriteValueARM64_OpARM64TSTWconst_0(v *Value) bool { + // match: (TSTWconst (MOVDconst [x]) [y]) + // cond: int32(x&y)==0 + // result: (FlagEQ) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) == 0) { + break + } + v.reset(OpARM64FlagEQ) return true } -} -func rewriteValueARM64_OpGreater32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32F x y) - // cond: - // result: (GreaterThan (FCMPS x y)) + // match: (TSTWconst (MOVDconst [x]) [y]) + // cond: int32(x&y)<0 + // result: (FlagLT_UGT) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) < 0) { + break + } + v.reset(OpARM64FlagLT_UGT) return true } -} -func rewriteValueARM64_OpGreater32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater32U x y) - // 
cond: - // result: (GreaterThanU (CMPW x y)) + // match: (TSTWconst (MOVDconst [x]) [y]) + // cond: int32(x&y)>0 + // result: (FlagGT_UGT) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) > 0) { + break + } + v.reset(OpARM64FlagGT_UGT) return true } + return false } -func rewriteValueARM64_OpGreater64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater64 x y) - // cond: - // result: (GreaterThan (CMP x y)) +func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool { + // match: (TSTconst (MOVDconst [x]) [y]) + // cond: int64(x&y)==0 + // result: (FlagEQ) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + x := v_0.AuxInt + if !(int64(x&y) == 0) { + break + } + v.reset(OpARM64FlagEQ) return true } -} -func rewriteValueARM64_OpGreater64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Greater64F x y) - // cond: - // result: (GreaterThan (FCMPD x y)) + // match: (TSTconst (MOVDconst [x]) [y]) + // cond: int64(x&y)<0 + // result: (FlagLT_UGT) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + x := v_0.AuxInt + if !(int64(x&y) < 0) { + break + } + v.reset(OpARM64FlagLT_UGT) + return true + } + // match: (TSTconst (MOVDconst [x]) [y]) + // cond: int64(x&y)>0 + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + 
x := v_0.AuxInt + if !(int64(x&y) > 0) { + break + } + v.reset(OpARM64FlagGT_UGT) return true } + return false } -func rewriteValueARM64_OpGreater64U_0(v *Value) bool { +func rewriteValueARM64_OpARM64TSTshiftLL_0(v *Value) bool { b := v.Block _ = b - // match: (Greater64U x y) + // match: (TSTshiftLL (MOVDconst [c]) x [d]) // cond: - // result: (GreaterThanU (CMP x y)) + // result: (TSTconst [c] (SLLconst x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64TSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM64_OpGreater8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Greater8 x y) + // match: (TSTshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) + // result: (TSTconst x [int64(uint64(c)< x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64TSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) return true } -} -func rewriteValueARM64_OpHmul32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Hmul32 x y) + // match: (TSTshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (SRAconst (MULL x y) [32]) + // result: 
(TSTconst x [c>>uint64(d)]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRAconst) - v.AuxInt = 32 - v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64TSTconst) + v.AuxInt = c >> uint64(d) + v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpHmul32u_0(v *Value) bool { +func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Hmul32u x y) + // match: (TSTshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (SRAconst (UMULL x y) [32]) + // result: (TSTconst [c] (SRLconst x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRAconst) - v.AuxInt = 32 - v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64TSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) return true } -} -func rewriteValueARM64_OpHmul64_0(v *Value) bool { - // match: (Hmul64 x y) + // match: (TSTshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (MULH x y) + // result: (TSTconst x [int64(uint64(c)>>uint64(d))]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64MULH) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64TSTconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) v.AddArg(x) - v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpHmul64u_0(v *Value) bool { - // match: (Hmul64u x y) - // cond: - // result: (UMULH x y) +func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool { + // match: (UBFIZ [bfc] (SLLconst [sc] x)) + // cond: sc < getARM64BFwidth(bfc) + // result: (UBFIZ 
[arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc)] x) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UMULH) + bfc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + sc := v_0.AuxInt + x := v_0.Args[0] + if !(sc < getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)-sc) v.AddArg(x) - v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpInterCall_0(v *Value) bool { - // match: (InterCall [argwid] entry mem) - // cond: - // result: (CALLinter [argwid] entry mem) +func rewriteValueARM64_OpARM64UBFX_0(v *Value) bool { + // match: (UBFX [bfc] (SRLconst [sc] x)) + // cond: sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64 + // result: (UBFX [arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc))] x) for { - argwid := v.AuxInt - _ = v.Args[1] - entry := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64CALLinter) - v.AuxInt = argwid - v.AddArg(entry) - v.AddArg(mem) + bfc := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { + break + } + sc := v_0.AuxInt + x := v_0.Args[0] + if !(sc+getARM64BFwidth(bfc)+getARM64BFlsb(bfc) < 64) { + break + } + v.reset(OpARM64UBFX) + v.AuxInt = arm64BFAuxInt(getARM64BFlsb(bfc)+sc, getARM64BFwidth(bfc)) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpIsInBounds_0(v *Value) bool { - b := v.Block - _ = b - // match: (IsInBounds idx len) - // cond: - // result: (LessThanU (CMP idx len)) + // match: (UBFX [bfc] (SLLconst [sc] x)) + // cond: sc == getARM64BFlsb(bfc) + // result: (ANDconst [1< getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc) + // result: (UBFIZ [arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc)] x) for { - _ = v.Args[1] - idx := v.Args[0] - len := v.Args[1] - v.reset(OpARM64LessEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) - v.AddArg(v0) + bfc := v.AuxInt + 
v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { + break + } + sc := v_0.AuxInt + x := v_0.Args[0] + if !(sc > getARM64BFlsb(bfc) && sc < getARM64BFlsb(bfc)+getARM64BFwidth(bfc)) { + break + } + v.reset(OpARM64UBFIZ) + v.AuxInt = arm64BFAuxInt(sc-getARM64BFlsb(bfc), getARM64BFlsb(bfc)+getARM64BFwidth(bfc)-sc) + v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpLeq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq16 x y) +func rewriteValueARM64_OpARM64UDIV_0(v *Value) bool { + // match: (UDIV x (MOVDconst [1])) // cond: - // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) + // result: x for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLeq16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq16U x y) - // cond: - // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + // match: (UDIV x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SRLconst [log2(c)] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARM64SRLconst) + v.AuxInt = log2(c) + v.AddArg(x) 
return true } -} -func rewriteValueARM64_OpLeq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32 x y) + // match: (UDIV (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (LessEqual (CMPW x y)) + // result: (MOVDconst [int64(uint64(c)/uint64(d))]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint64(c) / uint64(d)) return true } + return false } -func rewriteValueARM64_OpLeq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32F x y) - // cond: - // result: (GreaterEqual (FCMPS y x)) +func rewriteValueARM64_OpARM64UDIVW_0(v *Value) bool { + // match: (UDIVW x (MOVDconst [c])) + // cond: uint32(c)==1 + // result: x for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(uint32(c) == 1) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLeq32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq32U x y) - // cond: - // result: (LessEqualU (CMPW x y)) + // match: (UDIVW x (MOVDconst [c])) + // cond: isPowerOfTwo(c) && is32Bit(c) + // result: (SRLconst [log2(c)] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c) && is32Bit(c)) { + break + } + v.reset(OpARM64SRLconst) + v.AuxInt = 
log2(c) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLeq64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq64 x y) + // match: (UDIVW (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (LessEqual (CMP x y)) + // result: (MOVDconst [int64(uint32(c)/uint32(d))]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint32(c) / uint32(d)) return true } + return false } -func rewriteValueARM64_OpLeq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq64F x y) +func rewriteValueARM64_OpARM64UMOD_0(v *Value) bool { + // match: (UMOD _ (MOVDconst [1])) // cond: - // result: (GreaterEqual (FCMPD y x)) + // result: (MOVDconst [0]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + if v_1.AuxInt != 1 { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM64_OpLeq64U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Leq64U x y) - // cond: - // result: (LessEqualU (CMP x y)) + // match: (UMOD x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (ANDconst [c-1] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARM64ANDconst) + v.AuxInt = c - 1 + v.AddArg(x) return true } -} -func 
rewriteValueARM64_OpLeq8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq8 x y) + // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + // result: (MOVDconst [int64(uint64(c)%uint64(d))]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint64(c) % uint64(d)) return true } + return false } -func rewriteValueARM64_OpLeq8U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Leq8U x y) - // cond: - // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) +func rewriteValueARM64_OpARM64UMODW_0(v *Value) bool { + // match: (UMODW _ (MOVDconst [c])) + // cond: uint32(c)==1 + // result: (MOVDconst [0]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessEqualU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(uint32(c) == 1) { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM64_OpLess16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less16 x y) - // cond: - // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) + // 
match: (UMODW x (MOVDconst [c])) + // cond: isPowerOfTwo(c) && is32Bit(c) + // result: (ANDconst [c-1] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThan) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c) && is32Bit(c)) { + break + } + v.reset(OpARM64ANDconst) + v.AuxInt = c - 1 + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLess16U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less16U x y) + // match: (UMODW (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (MOVDconst [int64(uint32(c)%uint32(d))]) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(uint32(c) % uint32(d)) return true } + return false } -func rewriteValueARM64_OpLess32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32 x y) +func rewriteValueARM64_OpARM64XOR_0(v *Value) bool { + // match: (XOR x (MOVDconst [c])) // cond: - // result: (LessThan (CMPW x y)) + // result: (XORconst [c] x) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThan) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - 
v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLess32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32F x y) + // match: (XOR (MOVDconst [c]) x) // cond: - // result: (GreaterThan (FCMPS y x)) + // result: (XORconst [c] x) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64XORconst) + v.AuxInt = c + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLess32U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less32U x y) + // match: (XOR x x) // cond: - // result: (LessThanU (CMPW x y)) + // result: (MOVDconst [0]) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + if x != v.Args[1] { + break + } + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM64_OpLess64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less64 x y) + // match: (XOR x (MVN y)) // cond: - // result: (LessThan (CMP x y)) + // result: (EON x y) for { _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThan) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v_1 := v.Args[1] + if v_1.Op != OpARM64MVN { + break + } + y := v_1.Args[0] + v.reset(OpARM64EON) + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpLess64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less64F x y) + // match: (XOR (MVN y) x) // cond: - // result: (GreaterThan (FCMPD y x)) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - 
v.reset(OpARM64GreaterThan) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) - return true - } -} -func rewriteValueARM64_OpLess64U_0(v *Value) bool { - b := v.Block - _ = b - // match: (Less64U x y) - // cond: - // result: (LessThanU (CMP x y)) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM64_OpLess8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less8 x y) - // cond: - // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThan) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } -} -func rewriteValueARM64_OpLess8U_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Less8U x y) - // cond: - // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) - for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64LessThanU) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } -} -func rewriteValueARM64_OpLoad_0(v *Value) bool { - // match: (Load ptr mem) - // cond: t.IsBoolean() - // result: (MOVBUload ptr mem) + // result: (EON x y) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsBoolean()) { + v_0 := v.Args[0] + if v_0.Op != OpARM64MVN { break } - v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - 
v.AddArg(mem) + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64EON) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Load ptr mem) - // cond: (is8BitInt(t) && isSigned(t)) - // result: (MOVBload ptr mem) + // match: (XOR x0 x1:(SLLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftLL x0 y [c]) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && isSigned(t)) { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SLLconst { break } - v.reset(OpARM64MOVBload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load ptr mem) - // cond: (is8BitInt(t) && !isSigned(t)) - // result: (MOVBUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && !isSigned(t)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64MOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64XORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (Load ptr mem) - // cond: (is16BitInt(t) && isSigned(t)) - // result: (MOVHload ptr mem) + // match: (XOR x1:(SLLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (XORshiftLL x0 y [c]) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is16BitInt(t) && isSigned(t)) { + x1 := v.Args[0] + if x1.Op != OpARM64SLLconst { break } - v.reset(OpARM64MOVHload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load ptr mem) - // cond: (is16BitInt(t) && !isSigned(t)) - // result: (MOVHUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is16BitInt(t) && !isSigned(t)) { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64MOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64XORshiftLL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (Load ptr mem) - // cond: (is32BitInt(t) && isSigned(t)) - // 
result: (MOVWload ptr mem) + // match: (XOR x0 x1:(SRLconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftRL x0 y [c]) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitInt(t) && isSigned(t)) { + x0 := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVWload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load ptr mem) - // cond: (is32BitInt(t) && !isSigned(t)) - // result: (MOVWUload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitInt(t) && !isSigned(t)) { + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64MOVWUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64XORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (Load ptr mem) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (MOVDload ptr mem) + // match: (XOR x1:(SRLconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (XORshiftRL x0 y [c]) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is64BitInt(t) || isPtr(t)) { + x1 := v.Args[0] + if x1.Op != OpARM64SRLconst { break } - v.reset(OpARM64MOVDload) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (FMOVSload ptr mem) - for { - t := v.Type - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitFloat(t)) { + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { break } - v.reset(OpARM64FMOVSload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARM64XORshiftRL) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (FMOVDload ptr mem) + // match: (XOR x0 x1:(SRAconst [c] y)) + // cond: clobberIfDead(x1) + // result: (XORshiftRA x0 y [c]) for { - t := v.Type _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(is64BitFloat(t)) { + x0 
:= v.Args[0] + x1 := v.Args[1] + if x1.Op != OpARM64SRAconst { break } - v.reset(OpARM64FMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + c := x1.AuxInt + y := x1.Args[0] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64XORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } return false } -func rewriteValueARM64_OpLocalAddr_0(v *Value) bool { - // match: (LocalAddr {sym} base _) - // cond: - // result: (MOVDaddr {sym} base) - for { - sym := v.Aux - _ = v.Args[1] - base := v.Args[0] - v.reset(OpARM64MOVDaddr) - v.Aux = sym - v.AddArg(base) - return true - } -} -func rewriteValueARM64_OpLsh16x16_0(v *Value) bool { +func rewriteValueARM64_OpARM64XOR_10(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Lsh16x16 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // match: (XOR x1:(SRAconst [c] y) x0) + // cond: clobberIfDead(x1) + // result: (XORshiftRA x0 y [c]) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + x1 := v.Args[0] + if x1.Op != OpARM64SRAconst { + break + } + c := x1.AuxInt + y := x1.Args[0] + x0 := v.Args[1] + if !(clobberIfDead(x1)) { + break + } + v.reset(OpARM64XORshiftRA) + v.AuxInt = c + v.AddArg(x0) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh16x32 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) 
(Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // match: (XOR (SLL x (ANDconst [63] y)) (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARM64SLL { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + break + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt64 { + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { + break + } + if v_1_0.Type != typ.UInt64 { + break + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 64 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 63 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 64 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if 
v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 63 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) return true } -} -func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh16x64 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // match: (XOR (CSEL0 {cc} (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SLL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x (NEG y)) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { + break + } + if v_0.Type != typ.UInt64 { + break + } + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { + break + } + if v_0_0.Type != typ.UInt64 { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 64 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 63 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if 
v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 64 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 63 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SLL { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v.AddArg(v2) return true } -} -func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh16x8 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // match: (XOR (SRL x (ANDconst [63] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := 
b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) - return true - } -} -func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh32x16 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { + break + } + if v_0.Type != typ.UInt64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + break + } + t := v_0_1.Type + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt64 { + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + break + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 64 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 63 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if 
v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 64 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 63 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh32x32 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // match: (XOR (CSEL0 {cc} (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y)))) (SRL x (ANDconst [63] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (ROR x y) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { + break + } + if v_0.Type != typ.UInt64 { + break + } + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 
:= v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 64 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 63 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 64 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 63 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { + break + } + if v_1.Type != typ.UInt64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64ROR) + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh32x64 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // match: (XOR (SLL x (ANDconst [31] y)) (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) + v_0 := v.Args[0] + if v_0.Op != OpARM64SLL { + break + } + _ 
= v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + break + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt32 { + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SRL { + break + } + if v_1_0.Type != typ.UInt32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpARM64MOVWUreg { + break + } + if x != v_1_0_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 32 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 31 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 32 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 31 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v.AddArg(v2) - return true - } -} -func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { 
- b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh32x8 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) - for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) return true } -} -func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh64x16 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // match: (XOR (CSEL0 {cc} (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SLL x (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x (NEG y)) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { + break + } + if v_0.Type != typ.UInt32 { + break + } + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SRL { + break + } + if v_0_0.Type != typ.UInt32 { + break + } + _ = v_0_0.Args[1] + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpARM64MOVWUreg { + break + } + x := v_0_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = 
v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 32 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 31 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 32 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 31 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SLL { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) return true } -} -func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh64x32 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // match: (XOR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) 
(CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v_0 := v.Args[0] + if v_0.Op != OpARM64SRL { + break + } + if v_0.Type != typ.UInt32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64MOVWUreg { + break + } + x := v_0_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64ANDconst { + break + } + t := v_0_1.Type + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64CSEL0 { + break + } + if v_1.Type != typ.UInt32 { + break + } + cc := v_1.Aux + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64SLL { + break + } + _ = v_1_0.Args[1] + if x != v_1_0.Args[0] { + break + } + v_1_0_1 := v_1_0.Args[1] + if v_1_0_1.Op != OpARM64SUB { + break + } + if v_1_0_1.Type != t { + break + } + _ = v_1_0_1.Args[1] + v_1_0_1_0 := v_1_0_1.Args[0] + if v_1_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_1_0_1_0.AuxInt != 32 { + break + } + v_1_0_1_1 := v_1_0_1.Args[1] + if v_1_0_1_1.Op != OpARM64ANDconst { + break + } + if v_1_0_1_1.Type != t { + break + } + if v_1_0_1_1.AuxInt != 31 { + break + } + if y != v_1_0_1_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64CMPconst { + break + } + if v_1_1.AuxInt != 64 { + break + } + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpARM64SUB { + break + } + if v_1_1_0.Type != t { + break + } + _ = v_1_1_0.Args[1] + v_1_1_0_0 := v_1_1_0.Args[0] + 
if v_1_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_1_1_0_0.AuxInt != 32 { + break + } + v_1_1_0_1 := v_1_1_0.Args[1] + if v_1_1_0_1.Op != OpARM64ANDconst { + break + } + if v_1_1_0_1.Type != t { + break + } + if v_1_1_0_1.AuxInt != 31 { + break + } + if y != v_1_1_0_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) return true } -} -func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh64x64 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // match: (XOR (CSEL0 {cc} (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y)))) (SRL (MOVWUreg x) (ANDconst [31] y))) + // cond: cc.(Op) == OpARM64LessThanU + // result: (RORW x y) for { - t := v.Type _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v.AddArg(v2) + v_0 := v.Args[0] + if v_0.Op != OpARM64CSEL0 { + break + } + if v_0.Type != typ.UInt32 { + break + } + cc := v_0.Aux + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpARM64SLL { + break + } + _ = v_0_0.Args[1] + x := v_0_0.Args[0] + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpARM64SUB { + break + } + t := v_0_0_1.Type + _ = v_0_0_1.Args[1] + v_0_0_1_0 := v_0_0_1.Args[0] + if v_0_0_1_0.Op != OpARM64MOVDconst { + break + } + if v_0_0_1_0.AuxInt != 32 { + break + } + v_0_0_1_1 := v_0_0_1.Args[1] + if v_0_0_1_1.Op != OpARM64ANDconst { + break + } + if v_0_0_1_1.Type != t { + break + } + if v_0_0_1_1.AuxInt != 31 { + break + } + y := v_0_0_1_1.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpARM64CMPconst { + break + } + if v_0_1.AuxInt != 64 { + 
break + } + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpARM64SUB { + break + } + if v_0_1_0.Type != t { + break + } + _ = v_0_1_0.Args[1] + v_0_1_0_0 := v_0_1_0.Args[0] + if v_0_1_0_0.Op != OpARM64MOVDconst { + break + } + if v_0_1_0_0.AuxInt != 32 { + break + } + v_0_1_0_1 := v_0_1_0.Args[1] + if v_0_1_0_1.Op != OpARM64ANDconst { + break + } + if v_0_1_0_1.Type != t { + break + } + if v_0_1_0_1.AuxInt != 31 { + break + } + if y != v_0_1_0_1.Args[0] { + break + } + v_1 := v.Args[1] + if v_1.Op != OpARM64SRL { + break + } + if v_1.Type != typ.UInt32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpARM64MOVWUreg { + break + } + if x != v_1_0.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpARM64ANDconst { + break + } + if v_1_1.Type != t { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + if !(cc.(Op) == OpARM64LessThanU) { + break + } + v.reset(OpARM64RORW) + v.AddArg(x) + v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh64x8 x y) +func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool { + // match: (XORconst [0] x) // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: x for { - t := v.Type - _ = v.Args[1] + if v.AuxInt != 0 { + break + } x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } 
-} -func rewriteValueARM64_OpLsh8x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh8x16 x y) + // match: (XORconst [-1] x) // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (MVN x) for { - t := v.Type - _ = v.Args[1] + if v.AuxInt != -1 { + break + } x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v.reset(OpARM64MVN) + v.AddArg(x) return true } -} -func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh8x32 x y) + // match: (XORconst [c] (MOVDconst [d])) // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (MOVDconst [c^d]) for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + d := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = c ^ d return true } -} -func 
rewriteValueARM64_OpLsh8x64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Lsh8x64 x y) + // match: (XORconst [c] (XORconst [d] x)) // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (XORconst [c^d] x) for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v.AddArg(v2) + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARM64XORconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARM64XORconst) + v.AuxInt = c ^ d + v.AddArg(x) return true } + return false } -func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { +func rewriteValueARM64_OpARM64XORshiftLL_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Lsh8x8 x y) + // match: (XORshiftLL (MOVDconst [c]) x [d]) // cond: - // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (XORconst [c] (SLLconst x [d])) for { - t := v.Type + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64XORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) + v0.AuxInt = d v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) 
- v.AddArg(v3) return true } -} -func rewriteValueARM64_OpMod16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod16 x y) + // match: (XORshiftLL x (MOVDconst [c]) [d]) // cond: - // result: (MODW (SignExt16to32 x) (SignExt16to32 y)) + // result: (XORconst x [int64(uint64(c)< [c] (UBFX [bfc] x) x) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (RORWconst [32-c] x) for { + t := v.Type + c := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UMODW) + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFX { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + if x != v.Args[1] { + break + } + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64RORWconst) + v.AuxInt = 32 - c v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM64_OpMod64_0(v *Value) bool { - // match: (Mod64 x y) + // match: (XORshiftLL [c] (SRLconst x [64-c]) x2) // cond: - // result: (MOD x y) + // result: (EXTRconst [64-c] x2 x) for { + c := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64MOD) + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { + break + } + if v_0.AuxInt != 64-c { + break + } + x := v_0.Args[0] + x2 := v.Args[1] + v.reset(OpARM64EXTRconst) + v.AuxInt = 64 - c + v.AddArg(x2) v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM64_OpMod64u_0(v *Value) bool { - // match: (Mod64u x y) - // cond: - // result: (UMOD x y) + // match: (XORshiftLL [c] (UBFX [bfc] x) x2) + // cond: c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c) + // result: (EXTRWconst [32-c] x2 x) for { + t := v.Type + c := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UMOD) + v_0 := v.Args[0] + if v_0.Op != OpARM64UBFX { + break + } + bfc := v_0.AuxInt + x := v_0.Args[0] + x2 := v.Args[1] + if !(c < 32 && t.Size() == 4 && bfc == arm64BFAuxInt(32-c, c)) { + break + } + v.reset(OpARM64EXTRWconst) + 
v.AuxInt = 32 - c + v.AddArg(x2) v.AddArg(x) - v.AddArg(y) return true } + return false } -func rewriteValueARM64_OpMod8_0(v *Value) bool { +func rewriteValueARM64_OpARM64XORshiftRA_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod8 x y) + // match: (XORshiftRA (MOVDconst [c]) x [d]) // cond: - // result: (MODW (SignExt8to32 x) (SignExt8to32 y)) + // result: (XORconst [c] (SRAconst x [d])) for { + d := v.AuxInt _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64MODW) - v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64XORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type) + v0.AuxInt = d v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) - v1.AddArg(y) - v.AddArg(v1) return true } -} -func rewriteValueARM64_OpMod8u_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Mod8u x y) + // match: (XORshiftRA x (MOVDconst [c]) [d]) // cond: - // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (XORconst x [c>>uint64(d)]) for { + d := v.AuxInt _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64UMODW) - v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValueARM64_OpMove_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Move [0] _ _ mem) - // cond: - // result: mem - for { - if v.AuxInt != 0 { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = v.Args[2] - mem := v.Args[2] - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = c >> uint64(d) + v.AddArg(x) return true } - // match: (Move [1] dst src mem) - // cond: - // 
result: (MOVBstore dst (MOVBUload src mem) mem) + // match: (XORshiftRA x (SRAconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { - if v.AuxInt != 1 { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRAconst { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (Move [2] dst src mem) - // cond: - // result: (MOVHstore dst (MOVHUload src mem) mem) - for { - if v.AuxInt != 2 { + c := v_1.AuxInt + if x != v_1.Args[0] { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (Move [4] dst src mem) - // cond: - // result: (MOVWstore dst (MOVWUload src mem) mem) - for { - if v.AuxInt != 4 { + if !(c == d) { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVWstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (Move [8] dst src mem) + return false +} +func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (XORshiftRL (MOVDconst [c]) x [d]) // cond: - // result: (MOVDstore dst (MOVDload src mem) mem) + // result: (XORconst [c] (SRLconst x [d])) for { - if v.AuxInt != 8 { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) 
- v0.AddArg(src) - v0.AddArg(mem) + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64XORconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Move [3] dst src mem) + // match: (XORshiftRL x (MOVDconst [c]) [d]) // cond: - // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + // result: (XORconst x [int64(uint64(c)>>uint64(d))]) for { - if v.AuxInt != 3 { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = 2 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + c := v_1.AuxInt + v.reset(OpARM64XORconst) + v.AuxInt = int64(uint64(c) >> uint64(d)) + v.AddArg(x) return true } - // match: (Move [5] dst src mem) - // cond: - // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + // match: (XORshiftRL x (SRLconst x [c]) [d]) + // cond: c==d + // result: (MOVDconst [0]) for { - if v.AuxInt != 5 { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = 4 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - 
v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [6] dst src mem) - // cond: - // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) - for { - if v.AuxInt != 6 { + c := v_1.AuxInt + if x != v_1.Args[0] { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVHstore) - v.AuxInt = 4 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v0.AuxInt = 4 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [7] dst src mem) - // cond: - // result: (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))) - for { - if v.AuxInt != 7 { + if !(c == d) { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVBstore) - v.AuxInt = 6 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) - v0.AuxInt = 6 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 4 - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) - v2.AuxInt = 4 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(dst) - v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 return true } - // match: (Move [12] dst src mem) + // match: (XORshiftRL [c] (SLLconst x [64-c]) x) // cond: - // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + // result: (RORconst [ c] x) for 
{ - if v.AuxInt != 12 { + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVWstore) - v.AuxInt = 8 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) - v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - return false -} -func rewriteValueARM64_OpMove_10(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ - // match: (Move [16] dst src mem) - // cond: - // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) - for { - if v.AuxInt != 16 { + if v_0.AuxInt != 64-c { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDstore) - v.AuxInt = 8 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = 8 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [24] dst src mem) - // cond: - // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) - for { - if v.AuxInt != 24 { + x := v_0.Args[0] + if x != v.Args[1] { break } - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64MOVDstore) - v.AuxInt = 16 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = 16 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AuxInt = 8 - v1.AddArg(dst) - v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v2.AuxInt = 8 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(dst) - v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARM64RORconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (Move [s] dst src mem) - // cond: s%8 != 0 && s > 8 - // result: (Move [s%8] (OffPtr dst [s-s%8]) (OffPtr src [s-s%8]) (Move [s-s%8] dst src mem)) + // match: (XORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) + // cond: c < 32 && t.Size() == 4 + // result: (RORWconst [c] x) for { - s := v.AuxInt - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(s%8 != 0 && s > 8) { + t := v.Type + c := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - v.reset(OpMove) - v.AuxInt = s % 8 - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = s - s%8 - v0.AddArg(dst) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = s - s%8 - v1.AddArg(src) - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = s - s%8 - v2.AddArg(dst) - v2.AddArg(src) - v2.AddArg(mem) - v.AddArg(v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice - // result: (MOVDstore [s-8] dst (MOVDload [s-8] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) - for { - s := v.AuxInt - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice) { + if v_0.AuxInt != 32-c { break } - v.reset(OpARM64MOVDstore) - v.AuxInt = s - 8 - v.AddArg(dst) - v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) - v0.AuxInt = s - 8 - v0.AddArg(src) - v0.AddArg(mem) - 
v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem) - v1.AuxInt = 8 * (64 - (s-8)/16) - v1.AddArg(dst) - v1.AddArg(src) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice - // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem) - for { - s := v.AuxInt - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVWUreg { break } - v.reset(OpARM64DUFFCOPY) - v.AuxInt = 8 * (64 - s/16) - v.AddArg(dst) - v.AddArg(src) - v.AddArg(mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 24 && s%8 == 0 - // result: (LoweredMove dst src (ADDconst src [s-8]) mem) - for { - s := v.AuxInt - _ = v.Args[2] - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(s > 24 && s%8 == 0) { + if x != v_1.Args[0] { break } - v.reset(OpARM64LoweredMove) - v.AddArg(dst) - v.AddArg(src) - v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) - v0.AuxInt = s - 8 - v0.AddArg(src) - v.AddArg(v0) - v.AddArg(mem) + if !(c < 32 && t.Size() == 4) { + break + } + v.reset(OpARM64RORWconst) + v.AuxInt = c + v.AddArg(x) return true } return false } -func rewriteValueARM64_OpMul16_0(v *Value) bool { - // match: (Mul16 x y) +func rewriteValueARM64_OpAbs_0(v *Value) bool { + // match: (Abs x) // cond: - // result: (MULW x y) + // result: (FABSD x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64MULW) + v.reset(OpARM64FABSD) v.AddArg(x) - v.AddArg(y) return true } } -func rewriteValueARM64_OpMul32_0(v *Value) bool { - // match: (Mul32 x y) +func rewriteValueARM64_OpAdd16_0(v *Value) bool { + // match: (Add16 x y) // cond: - // result: (MULW x y) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64MULW) + v.reset(OpARM64ADD) v.AddArg(x) v.AddArg(y) return true } } 
-func rewriteValueARM64_OpMul32F_0(v *Value) bool { - // match: (Mul32F x y) +func rewriteValueARM64_OpAdd32_0(v *Value) bool { + // match: (Add32 x y) // cond: - // result: (FMULS x y) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64FMULS) + v.reset(OpARM64ADD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpMul64_0(v *Value) bool { - // match: (Mul64 x y) +func rewriteValueARM64_OpAdd32F_0(v *Value) bool { + // match: (Add32F x y) // cond: - // result: (MUL x y) + // result: (FADDS x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64MUL) + v.reset(OpARM64FADDS) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpMul64F_0(v *Value) bool { - // match: (Mul64F x y) +func rewriteValueARM64_OpAdd64_0(v *Value) bool { + // match: (Add64 x y) // cond: - // result: (FMULD x y) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64FMULD) + v.reset(OpARM64ADD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpMul64uhilo_0(v *Value) bool { - // match: (Mul64uhilo x y) +func rewriteValueARM64_OpAdd64F_0(v *Value) bool { + // match: (Add64F x y) // cond: - // result: (LoweredMuluhilo x y) + // result: (FADDD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64LoweredMuluhilo) + v.reset(OpARM64FADDD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpMul8_0(v *Value) bool { - // match: (Mul8 x y) +func rewriteValueARM64_OpAdd8_0(v *Value) bool { + // match: (Add8 x y) // cond: - // result: (MULW x y) + // result: (ADD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64MULW) + v.reset(OpARM64ADD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpNeg16_0(v *Value) bool { - // match: (Neg16 x) +func rewriteValueARM64_OpAddPtr_0(v *Value) bool { + // match: (AddPtr x y) // cond: - // result: (NEG x) + // result: (ADD x y) for { + _ = v.Args[1] x := v.Args[0] - 
v.reset(OpARM64NEG) + y := v.Args[1] + v.reset(OpARM64ADD) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeg32_0(v *Value) bool { - // match: (Neg32 x) +func rewriteValueARM64_OpAddr_0(v *Value) bool { + // match: (Addr {sym} base) // cond: - // result: (NEG x) + // result: (MOVDaddr {sym} base) for { - x := v.Args[0] - v.reset(OpARM64NEG) - v.AddArg(x) + sym := v.Aux + base := v.Args[0] + v.reset(OpARM64MOVDaddr) + v.Aux = sym + v.AddArg(base) return true } } -func rewriteValueARM64_OpNeg32F_0(v *Value) bool { - // match: (Neg32F x) +func rewriteValueARM64_OpAnd16_0(v *Value) bool { + // match: (And16 x y) // cond: - // result: (FNEGS x) + // result: (AND x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FNEGS) + y := v.Args[1] + v.reset(OpARM64AND) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeg64_0(v *Value) bool { - // match: (Neg64 x) +func rewriteValueARM64_OpAnd32_0(v *Value) bool { + // match: (And32 x y) // cond: - // result: (NEG x) + // result: (AND x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64NEG) + y := v.Args[1] + v.reset(OpARM64AND) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeg64F_0(v *Value) bool { - // match: (Neg64F x) +func rewriteValueARM64_OpAnd64_0(v *Value) bool { + // match: (And64 x y) // cond: - // result: (FNEGD x) + // result: (AND x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FNEGD) + y := v.Args[1] + v.reset(OpARM64AND) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeg8_0(v *Value) bool { - // match: (Neg8 x) +func rewriteValueARM64_OpAnd8_0(v *Value) bool { + // match: (And8 x y) // cond: - // result: (NEG x) + // result: (AND x y) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64NEG) + y := v.Args[1] + v.reset(OpARM64AND) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeq16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Neq16 x y) 
+func rewriteValueARM64_OpAndB_0(v *Value) bool { + // match: (AndB x y) // cond: - // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (AND x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v.reset(OpARM64AND) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM64_OpNeq32_0(v *Value) bool { - b := v.Block - _ = b - // match: (Neq32 x y) +func rewriteValueARM64_OpAtomicAdd32_0(v *Value) bool { + // match: (AtomicAdd32 ptr val mem) // cond: - // result: (NotEqual (CMPW x y)) + // result: (LoweredAtomicAdd32 ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicAdd32) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNeq32F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Neq32F x y) +func rewriteValueARM64_OpAtomicAdd32Variant_0(v *Value) bool { + // match: (AtomicAdd32Variant ptr val mem) // cond: - // result: (NotEqual (FCMPS x y)) + // result: (LoweredAtomicAdd32Variant ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicAdd32Variant) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNeq64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Neq64 x y) +func 
rewriteValueARM64_OpAtomicAdd64_0(v *Value) bool { + // match: (AtomicAdd64 ptr val mem) // cond: - // result: (NotEqual (CMP x y)) + // result: (LoweredAtomicAdd64 ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicAdd64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNeq64F_0(v *Value) bool { - b := v.Block - _ = b - // match: (Neq64F x y) +func rewriteValueARM64_OpAtomicAdd64Variant_0(v *Value) bool { + // match: (AtomicAdd64Variant ptr val mem) // cond: - // result: (NotEqual (FCMPD x y)) + // result: (LoweredAtomicAdd64Variant ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicAdd64Variant) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNeq8_0(v *Value) bool { +func rewriteValueARM64_OpAtomicAnd8_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Neq8 x y) + // match: (AtomicAnd8 ptr val mem) // cond: - // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (Select1 (LoweredAtomicAnd8 ptr val mem)) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) - v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) - v2.AddArg(y) - v0.AddArg(v2) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, 
OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg(ptr) + v0.AddArg(val) + v0.AddArg(mem) v.AddArg(v0) return true } } -func rewriteValueARM64_OpNeqB_0(v *Value) bool { - // match: (NeqB x y) +func rewriteValueARM64_OpAtomicCompareAndSwap32_0(v *Value) bool { + // match: (AtomicCompareAndSwap32 ptr old new_ mem) // cond: - // result: (XOR x y) + // result: (LoweredAtomicCas32 ptr old new_ mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64XOR) - v.AddArg(x) - v.AddArg(y) + _ = v.Args[3] + ptr := v.Args[0] + old := v.Args[1] + new_ := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64LoweredAtomicCas32) + v.AddArg(ptr) + v.AddArg(old) + v.AddArg(new_) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNeqPtr_0(v *Value) bool { - b := v.Block - _ = b - // match: (NeqPtr x y) +func rewriteValueARM64_OpAtomicCompareAndSwap64_0(v *Value) bool { + // match: (AtomicCompareAndSwap64 ptr old new_ mem) // cond: - // result: (NotEqual (CMP x y)) + // result: (LoweredAtomicCas64 ptr old new_ mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64NotEqual) - v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + _ = v.Args[3] + ptr := v.Args[0] + old := v.Args[1] + new_ := v.Args[2] + mem := v.Args[3] + v.reset(OpARM64LoweredAtomicCas64) + v.AddArg(ptr) + v.AddArg(old) + v.AddArg(new_) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpNilCheck_0(v *Value) bool { - // match: (NilCheck ptr mem) +func rewriteValueARM64_OpAtomicExchange32_0(v *Value) bool { + // match: (AtomicExchange32 ptr val mem) // cond: - // result: (LoweredNilCheck ptr mem) + // result: (LoweredAtomicExchange32 ptr val mem) for { - _ = v.Args[1] + _ = v.Args[2] ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64LoweredNilCheck) + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicExchange32) v.AddArg(ptr) + v.AddArg(val) v.AddArg(mem) return true } } -func 
rewriteValueARM64_OpNot_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Not x) +func rewriteValueARM64_OpAtomicExchange64_0(v *Value) bool { + // match: (AtomicExchange64 ptr val mem) // cond: - // result: (XOR (MOVDconst [1]) x) + // result: (LoweredAtomicExchange64 ptr val mem) for { - x := v.Args[0] - v.reset(OpARM64XOR) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 1 - v.AddArg(v0) - v.AddArg(x) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredAtomicExchange64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpOffPtr_0(v *Value) bool { - // match: (OffPtr [off] ptr:(SP)) +func rewriteValueARM64_OpAtomicLoad32_0(v *Value) bool { + // match: (AtomicLoad32 ptr mem) // cond: - // result: (MOVDaddr [off] ptr) + // result: (LDARW ptr mem) for { - off := v.AuxInt + _ = v.Args[1] ptr := v.Args[0] - if ptr.Op != OpSP { - break - } - v.reset(OpARM64MOVDaddr) - v.AuxInt = off + mem := v.Args[1] + v.reset(OpARM64LDARW) v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (OffPtr [off] ptr) +} +func rewriteValueARM64_OpAtomicLoad64_0(v *Value) bool { + // match: (AtomicLoad64 ptr mem) // cond: - // result: (ADDconst [off] ptr) + // result: (LDAR ptr mem) for { - off := v.AuxInt + _ = v.Args[1] ptr := v.Args[0] - v.reset(OpARM64ADDconst) - v.AuxInt = off + mem := v.Args[1] + v.reset(OpARM64LDAR) v.AddArg(ptr) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpOr16_0(v *Value) bool { - // match: (Or16 x y) +func rewriteValueARM64_OpAtomicLoadPtr_0(v *Value) bool { + // match: (AtomicLoadPtr ptr mem) // cond: - // result: (OR x y) + // result: (LDAR ptr mem) for { _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64OR) - v.AddArg(x) - v.AddArg(y) + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64LDAR) + v.AddArg(ptr) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpOr32_0(v *Value) 
bool { - // match: (Or32 x y) +func rewriteValueARM64_OpAtomicOr8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (AtomicOr8 ptr val mem) // cond: - // result: (OR x y) + // result: (Select1 (LoweredAtomicOr8 ptr val mem)) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64OR) - v.AddArg(x) - v.AddArg(y) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg(ptr) + v0.AddArg(val) + v0.AddArg(mem) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpOr64_0(v *Value) bool { - // match: (Or64 x y) +func rewriteValueARM64_OpAtomicStore32_0(v *Value) bool { + // match: (AtomicStore32 ptr val mem) // cond: - // result: (OR x y) + // result: (STLRW ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64OR) - v.AddArg(x) - v.AddArg(y) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64STLRW) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpOr8_0(v *Value) bool { - // match: (Or8 x y) +func rewriteValueARM64_OpAtomicStore64_0(v *Value) bool { + // match: (AtomicStore64 ptr val mem) // cond: - // result: (OR x y) + // result: (STLR ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64OR) - v.AddArg(x) - v.AddArg(y) + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64STLR) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpOrB_0(v *Value) bool { - // match: (OrB x y) +func rewriteValueARM64_OpAtomicStorePtrNoWB_0(v *Value) bool { + // match: (AtomicStorePtrNoWB ptr val mem) // cond: - // result: (OR x y) + // result: (STLR ptr val mem) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64OR) - v.AddArg(x) - v.AddArg(y) + _ = 
v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64STLR) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpPopCount16_0(v *Value) bool { +func rewriteValueARM64_OpAvg64u_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (PopCount16 x) + // match: (Avg64u x y) // cond: - // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt16to64 x))))) + // result: (ADD (SRLconst (SUB x y) [1]) y) for { t := v.Type + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FMOVDfpgp) - v.Type = t - v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) - v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) - v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) - v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v3.AddArg(x) - v2.AddArg(v3) - v1.AddArg(v2) + y := v.Args[1] + v.reset(OpARM64ADD) + v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t) + v0.AuxInt = 1 + v1 := b.NewValue0(v.Pos, OpARM64SUB, t) + v1.AddArg(x) + v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) + v.AddArg(y) return true } } -func rewriteValueARM64_OpPopCount32_0(v *Value) bool { +func rewriteValueARM64_OpBitLen64_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (PopCount32 x) + // match: (BitLen64 x) // cond: - // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt32to64 x))))) + // result: (SUB (MOVDconst [64]) (CLZ x)) for { - t := v.Type x := v.Args[0] - v.reset(OpARM64FMOVDfpgp) - v.Type = t - v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) - v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) - v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) - v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v3.AddArg(x) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) + v.reset(OpARM64SUB) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 64 v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int) + v1.AddArg(x) + v.AddArg(v1) return true } } 
-func rewriteValueARM64_OpPopCount64_0(v *Value) bool { +func rewriteValueARM64_OpBitRev16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (PopCount64 x) + // match: (BitRev16 x) // cond: - // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp x)))) + // result: (SRLconst [48] (RBIT x)) for { - t := v.Type x := v.Args[0] - v.reset(OpARM64FMOVDfpgp) - v.Type = t - v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) - v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) - v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) + v.reset(OpARM64SRLconst) + v.AuxInt = 48 + v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + v0.AddArg(x) v.AddArg(v0) return true } } -func rewriteValueARM64_OpRound_0(v *Value) bool { - // match: (Round x) +func rewriteValueARM64_OpBitRev32_0(v *Value) bool { + // match: (BitRev32 x) // cond: - // result: (FRINTAD x) + // result: (RBITW x) for { x := v.Args[0] - v.reset(OpARM64FRINTAD) + v.reset(OpARM64RBITW) v.AddArg(x) return true } } -func rewriteValueARM64_OpRound32F_0(v *Value) bool { - // match: (Round32F x) +func rewriteValueARM64_OpBitRev64_0(v *Value) bool { + // match: (BitRev64 x) // cond: - // result: (LoweredRound32F x) + // result: (RBIT x) for { x := v.Args[0] - v.reset(OpARM64LoweredRound32F) + v.reset(OpARM64RBIT) v.AddArg(x) return true } } -func rewriteValueARM64_OpRound64F_0(v *Value) bool { - // match: (Round64F x) +func rewriteValueARM64_OpBitRev8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (BitRev8 x) // cond: - // result: (LoweredRound64F x) + // result: (SRLconst [56] (RBIT x)) for { x := v.Args[0] - v.reset(OpARM64LoweredRound64F) - v.AddArg(x) + v.reset(OpARM64SRLconst) + v.AuxInt = 56 + v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { - b := v.Block - _ = b - typ := 
&b.Func.Config.Types - _ = typ - // match: (Rsh16Ux16 x y) +func rewriteValueARM64_OpBswap32_0(v *Value) bool { + // match: (Bswap32 x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (REVW x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.reset(OpARM64REVW) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16Ux32 x y) +func rewriteValueARM64_OpBswap64_0(v *Value) bool { + // match: (Bswap64 x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (REV x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.reset(OpARM64REV) + v.AddArg(x) return true } } -func 
rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16Ux64 x y) +func rewriteValueARM64_OpCeil_0(v *Value) bool { + // match: (Ceil x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (FRINTPD x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v.AddArg(v3) + v.reset(OpARM64FRINTPD) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16Ux8 x y) +func rewriteValueARM64_OpClosureCall_0(v *Value) bool { + // match: (ClosureCall [argwid] entry closure mem) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CALLclosure [argwid] entry closure mem) for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + argwid := v.AuxInt + _ = v.Args[2] + entry := v.Args[0] + closure := v.Args[1] + mem := v.Args[2] 
+ v.reset(OpARM64CALLclosure) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(closure) + v.AddArg(mem) return true } } -func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x16 x y) +func rewriteValueARM64_OpCom16_0(v *Value) bool { + // match: (Com16 x) // cond: - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (MVN x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v.reset(OpARM64MVN) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x32 x y) +func rewriteValueARM64_OpCom32_0(v *Value) bool { + // match: (Com32 x) // cond: - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (MVN x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - 
v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v.reset(OpARM64MVN) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x64 x y) +func rewriteValueARM64_OpCom64_0(v *Value) bool { + // match: (Com64 x) // cond: - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (MVN x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARM64MVN) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh16x8 x y) +func rewriteValueARM64_OpCom8_0(v *Value) bool { + // match: (Com8 x) // cond: - // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (MVN x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - 
v.AddArg(v1) + v.reset(OpARM64MVN) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { +func rewriteValueARM64_OpCondSelect_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32Ux16 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // match: (CondSelect x y bool) + // cond: flagArg(bool) != nil + // result: (CSEL {bool.Op} x y flagArg(bool)) for { - t := v.Type - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] + bool := v.Args[2] + if !(flagArg(bool) != nil) { + break + } v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.Aux = bool.Op + v.AddArg(x) + v.AddArg(y) + v.AddArg(flagArg(bool)) return true } -} -func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32Ux32 x y) - // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // match: (CondSelect x y bool) + // cond: flagArg(bool) == nil + // result: (CSEL {OpARM64NotEqual} x y (CMPWconst [0] bool)) for { - t := v.Type - _ = v.Args[1] + _ = v.Args[2] x := v.Args[0] y := v.Args[1] + bool := v.Args[2] + if !(flagArg(bool) == nil) { + break + } v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - 
v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) + v.Aux = OpARM64NotEqual + v.AddArg(x) + v.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(bool) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) return true } + return false } -func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32Ux64 x y) +func rewriteValueARM64_OpConst16_0(v *Value) bool { + // match: (Const16 [val]) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (MOVDconst [val]) for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v.AddArg(v3) + val := v.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32Ux8 x y) +func rewriteValueARM64_OpConst32_0(v *Value) bool { + // match: (Const32 [val]) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (MOVDconst [val]) for { - t := v.Type - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := 
b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + val := v.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32x16 x y) +func rewriteValueARM64_OpConst32F_0(v *Value) bool { + // match: (Const32F [val]) // cond: - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (FMOVSconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + val := v.AuxInt + v.reset(OpARM64FMOVSconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32x32 x y) +func rewriteValueARM64_OpConst64_0(v *Value) bool { + // match: (Const64 [val]) // cond: - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // 
result: (MOVDconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + val := v.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32x64 x y) +func rewriteValueARM64_OpConst64F_0(v *Value) bool { + // match: (Const64F [val]) // cond: - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (FMOVDconst [val]) for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + val := v.AuxInt + v.reset(OpARM64FMOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh32x8 x y) +func rewriteValueARM64_OpConst8_0(v *Value) bool { + // match: (Const8 [val]) // cond: - // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (MOVDconst [val]) 
for { - _ = v.Args[1] - x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + val := v.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = val return true } } -func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { +func rewriteValueARM64_OpConstBool_0(v *Value) bool { + // match: (ConstBool [b]) + // cond: + // result: (MOVDconst [b]) + for { + b := v.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = b + return true + } +} +func rewriteValueARM64_OpConstNil_0(v *Value) bool { + // match: (ConstNil) + // cond: + // result: (MOVDconst [0]) + for { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } +} +func rewriteValueARM64_OpCtz32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64Ux16 x y) + // match: (Ctz32 x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CLZW (RBITW x)) for { t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v.reset(OpARM64CLZW) + v0 := b.NewValue0(v.Pos, OpARM64RBITW, t) v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - 
v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) return true } } -func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64Ux32 x y) +func rewriteValueARM64_OpCtz32NonZero_0(v *Value) bool { + // match: (Ctz32NonZero x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (Ctz32 x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v.reset(OpCtz32) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { +func rewriteValueARM64_OpCtz64_0(v *Value) bool { b := v.Block _ = b - // match: (Rsh64Ux64 x y) + // match: (Ctz64 x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CLZ (RBIT x)) for { t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v.reset(OpARM64CLZ) + v0 := b.NewValue0(v.Pos, OpARM64RBIT, t) v0.AddArg(x) - v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v.AddArg(v2) return true } } -func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64Ux8 x y) +func rewriteValueARM64_OpCtz64NonZero_0(v *Value) bool { + // match: (Ctz64NonZero x) 
// cond: - // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (Ctz64 x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v.AddArg(v3) + v.reset(OpCtz64) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64x16 x y) +func rewriteValueARM64_OpCvt32Fto32_0(v *Value) bool { + // match: (Cvt32Fto32 x) // cond: - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (FCVTZSSW x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) + v.reset(OpARM64FCVTZSSW) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = OpARM64LessThanU - v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v0.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) return true } } -func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64x32 x y) +func rewriteValueARM64_OpCvt32Fto32U_0(v *Value) bool { + // match: (Cvt32Fto32U x) // cond: - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] 
(ZeroExt32to64 y)))) + // result: (FCVTZUSW x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) + v.reset(OpARM64FCVTZUSW) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = OpARM64LessThanU - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v0.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) return true } } -func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { - b := v.Block - _ = b - // match: (Rsh64x64 x y) +func rewriteValueARM64_OpCvt32Fto64_0(v *Value) bool { + // match: (Cvt32Fto64 x) // cond: - // result: (SRA x (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (FCVTZSS x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) + v.reset(OpARM64FCVTZSS) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = OpARM64LessThanU - v0.AddArg(y) - v1 := b.NewValue0(v.Pos, OpConst64, y.Type) - v1.AuxInt = 63 - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v2.AuxInt = 64 - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) return true } } -func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh64x8 x y) +func rewriteValueARM64_OpCvt32Fto64F_0(v *Value) bool { + // match: (Cvt32Fto64F x) // cond: - // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (FCVTSD x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) + v.reset(OpARM64FCVTSD) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v0.Aux = OpARM64LessThanU - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(y) - v0.AddArg(v1) - v2 := 
b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v0.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) - v0.AddArg(v3) - v.AddArg(v0) return true } } -func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8Ux16 x y) +func rewriteValueARM64_OpCvt32Fto64U_0(v *Value) bool { + // match: (Cvt32Fto64U x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (FCVTZUS x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.reset(OpARM64FCVTZUS) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8Ux32 x y) +func rewriteValueARM64_OpCvt32Uto32F_0(v *Value) bool { + // match: (Cvt32Uto32F x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (UCVTFWS x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, 
OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.reset(OpARM64UCVTFWS) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8Ux64 x y) +func rewriteValueARM64_OpCvt32Uto64F_0(v *Value) bool { + // match: (Cvt32Uto64F x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (UCVTFWD x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(y) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = 0 - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v.AddArg(v3) + v.reset(OpARM64UCVTFWD) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8Ux8 x y) +func rewriteValueARM64_OpCvt32to32F_0(v *Value) bool { + // match: (Cvt32to32F x) // cond: - // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (SCVTFWS x) for { - t := v.Type - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64CSEL) - v.Aux = OpARM64LessThanU - v0 := b.NewValue0(v.Pos, OpARM64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v3 := 
b.NewValue0(v.Pos, OpConst64, t) - v3.AuxInt = 0 - v.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v.AddArg(v4) + v.reset(OpARM64SCVTFWS) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8x16 x y) +func rewriteValueARM64_OpCvt32to64F_0(v *Value) bool { + // match: (Cvt32to64F x) // cond: - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SCVTFWD x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v.reset(OpARM64SCVTFWD) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8x32 x y) +func rewriteValueARM64_OpCvt64Fto32_0(v *Value) bool { + // match: (Cvt64Fto32 x) // cond: - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (FCVTZSDW x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, 
OpZeroExt32to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v.reset(OpARM64FCVTZSDW) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8x64 x y) +func rewriteValueARM64_OpCvt64Fto32F_0(v *Value) bool { + // match: (Cvt64Fto32F x) // cond: - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + // result: (FCVTDS x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) - v2.AuxInt = 63 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v3.AuxInt = 64 - v3.AddArg(y) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARM64FCVTDS) + v.AddArg(x) return true } } -func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Rsh8x8 x y) +func rewriteValueARM64_OpCvt64Fto32U_0(v *Value) bool { + // match: (Cvt64Fto32U x) // cond: - // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (FCVTZUDW x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) - v1.Aux = OpARM64LessThanU - v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v2.AddArg(y) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, 
OpConst64, y.Type) - v3.AuxInt = 63 - v1.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - v4.AuxInt = 64 - v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v5.AddArg(y) - v4.AddArg(v5) - v1.AddArg(v4) - v.AddArg(v1) + v.reset(OpARM64FCVTZUDW) + v.AddArg(x) return true } } -func rewriteValueARM64_OpSignExt16to32_0(v *Value) bool { - // match: (SignExt16to32 x) +func rewriteValueARM64_OpCvt64Fto64_0(v *Value) bool { + // match: (Cvt64Fto64 x) // cond: - // result: (MOVHreg x) + // result: (FCVTZSD x) for { x := v.Args[0] - v.reset(OpARM64MOVHreg) + v.reset(OpARM64FCVTZSD) v.AddArg(x) return true } } -func rewriteValueARM64_OpSignExt16to64_0(v *Value) bool { - // match: (SignExt16to64 x) +func rewriteValueARM64_OpCvt64Fto64U_0(v *Value) bool { + // match: (Cvt64Fto64U x) // cond: - // result: (MOVHreg x) + // result: (FCVTZUD x) for { x := v.Args[0] - v.reset(OpARM64MOVHreg) + v.reset(OpARM64FCVTZUD) v.AddArg(x) return true } } -func rewriteValueARM64_OpSignExt32to64_0(v *Value) bool { - // match: (SignExt32to64 x) +func rewriteValueARM64_OpCvt64Uto32F_0(v *Value) bool { + // match: (Cvt64Uto32F x) // cond: - // result: (MOVWreg x) + // result: (UCVTFS x) for { x := v.Args[0] - v.reset(OpARM64MOVWreg) + v.reset(OpARM64UCVTFS) v.AddArg(x) return true } } -func rewriteValueARM64_OpSignExt8to16_0(v *Value) bool { - // match: (SignExt8to16 x) +func rewriteValueARM64_OpCvt64Uto64F_0(v *Value) bool { + // match: (Cvt64Uto64F x) // cond: - // result: (MOVBreg x) + // result: (UCVTFD x) for { x := v.Args[0] - v.reset(OpARM64MOVBreg) + v.reset(OpARM64UCVTFD) v.AddArg(x) return true } } -func rewriteValueARM64_OpSignExt8to32_0(v *Value) bool { - // match: (SignExt8to32 x) +func rewriteValueARM64_OpCvt64to32F_0(v *Value) bool { + // match: (Cvt64to32F x) // cond: - // result: (MOVBreg x) + // result: (SCVTFS x) for { x := v.Args[0] - v.reset(OpARM64MOVBreg) + v.reset(OpARM64SCVTFS) v.AddArg(x) return true } } -func 
rewriteValueARM64_OpSignExt8to64_0(v *Value) bool { - // match: (SignExt8to64 x) +func rewriteValueARM64_OpCvt64to64F_0(v *Value) bool { + // match: (Cvt64to64F x) // cond: - // result: (MOVBreg x) + // result: (SCVTFD x) for { x := v.Args[0] - v.reset(OpARM64MOVBreg) + v.reset(OpARM64SCVTFD) v.AddArg(x) return true } } -func rewriteValueARM64_OpSlicemask_0(v *Value) bool { +func rewriteValueARM64_OpDiv16_0(v *Value) bool { b := v.Block _ = b - // match: (Slicemask x) + typ := &b.Func.Config.Types + _ = typ + // match: (Div16 x y) // cond: - // result: (SRAconst (NEG x) [63]) + // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) for { - t := v.Type + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64SRAconst) - v.AuxInt = 63 - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + y := v.Args[1] + v.reset(OpARM64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) v0.AddArg(x) v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM64_OpSqrt_0(v *Value) bool { - // match: (Sqrt x) +func rewriteValueARM64_OpDiv16u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Div16u x y) // cond: - // result: (FSQRTD x) + // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FSQRTD) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpStaticCall_0(v *Value) bool { - // match: (StaticCall [argwid] {target} mem) - // cond: - // result: (CALLstatic [argwid] {target} mem) - for { - argwid := v.AuxInt - target := v.Aux - mem := v.Args[0] - v.reset(OpARM64CALLstatic) - v.AuxInt = argwid - v.Aux = target - v.AddArg(mem) - return true - } -} -func rewriteValueARM64_OpStore_0(v *Value) bool { - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 1 - // result: (MOVBstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if 
!(t.(*types.Type).Size() == 1) { - break - } - v.reset(OpARM64MOVBstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 2 - // result: (MOVHstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 2) { - break - } - v.reset(OpARM64MOVHstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) - // result: (MOVWstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { - break - } - v.reset(OpARM64MOVWstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) - // result: (MOVDstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { - break - } - v.reset(OpARM64MOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) - // result: (FMOVSstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { - break - } - v.reset(OpARM64FMOVSstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (Store {t} ptr val mem) - // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) - // result: (FMOVDstore ptr val mem) - for { - t := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { - 
break - } - v.reset(OpARM64FMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + y := v.Args[1] + v.reset(OpARM64UDIVW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) return true } - return false } -func rewriteValueARM64_OpSub16_0(v *Value) bool { - // match: (Sub16 x y) +func rewriteValueARM64_OpDiv32_0(v *Value) bool { + // match: (Div32 x y) // cond: - // result: (SUB x y) + // result: (DIVW x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64SUB) + v.reset(OpARM64DIVW) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSub32_0(v *Value) bool { - // match: (Sub32 x y) +func rewriteValueARM64_OpDiv32F_0(v *Value) bool { + // match: (Div32F x y) // cond: - // result: (SUB x y) + // result: (FDIVS x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64SUB) + v.reset(OpARM64FDIVS) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSub32F_0(v *Value) bool { - // match: (Sub32F x y) +func rewriteValueARM64_OpDiv32u_0(v *Value) bool { + // match: (Div32u x y) // cond: - // result: (FSUBS x y) + // result: (UDIVW x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64FSUBS) + v.reset(OpARM64UDIVW) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSub64_0(v *Value) bool { - // match: (Sub64 x y) +func rewriteValueARM64_OpDiv64_0(v *Value) bool { + // match: (Div64 x y) // cond: - // result: (SUB x y) + // result: (DIV x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64SUB) + v.reset(OpARM64DIV) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSub64F_0(v *Value) bool { - // match: (Sub64F x y) +func rewriteValueARM64_OpDiv64F_0(v *Value) bool { + // match: (Div64F x y) // cond: - // result: (FSUBD x y) + // result: (FDIVD x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64FSUBD) + 
v.reset(OpARM64FDIVD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSub8_0(v *Value) bool { - // match: (Sub8 x y) +func rewriteValueARM64_OpDiv64u_0(v *Value) bool { + // match: (Div64u x y) // cond: - // result: (SUB x y) + // result: (UDIV x y) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64SUB) + v.reset(OpARM64UDIV) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM64_OpSubPtr_0(v *Value) bool { - // match: (SubPtr x y) +func rewriteValueARM64_OpDiv8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Div8 x y) // cond: - // result: (SUB x y) + // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64SUB) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM64_OpTrunc_0(v *Value) bool { - // match: (Trunc x) +func rewriteValueARM64_OpDiv8u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Div8u x y) // cond: - // result: (FRINTZD x) + // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpARM64FRINTZD) - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64UDIVW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM64_OpTrunc16to8_0(v *Value) bool { - // match: (Trunc16to8 x) +func rewriteValueARM64_OpEq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Eq16 x y) // cond: - // result: x + // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpTrunc32to16_0(v *Value) bool { - // match: (Trunc32to16 x) +func rewriteValueARM64_OpEq32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Eq32 x y) // cond: - // result: x + // result: (Equal (CMPW x y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpTrunc32to8_0(v *Value) bool { - // match: (Trunc32to8 x) +func rewriteValueARM64_OpEq32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Eq32F x y) // cond: - // result: x + // result: (Equal (FCMPS x y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpTrunc64to16_0(v *Value) bool { - // match: (Trunc64to16 x) +func rewriteValueARM64_OpEq64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Eq64 x y) // cond: - // result: x + // result: (Equal (CMP x y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpTrunc64to32_0(v *Value) bool { - // match: (Trunc64to32 x) +func rewriteValueARM64_OpEq64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Eq64F x y) // cond: - // result: x + // result: (Equal (FCMPD x 
y)) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpTrunc64to8_0(v *Value) bool { - // match: (Trunc64to8 x) +func rewriteValueARM64_OpEq8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Eq8 x y) // cond: - // result: x + // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { + _ = v.Args[1] x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + y := v.Args[1] + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpWB_0(v *Value) bool { - // match: (WB {fn} destptr srcptr mem) +func rewriteValueARM64_OpEqB_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (EqB x y) // cond: - // result: (LoweredWB {fn} destptr srcptr mem) + // result: (XOR (MOVDconst [1]) (XOR x y)) for { - fn := v.Aux - _ = v.Args[2] - destptr := v.Args[0] - srcptr := v.Args[1] - mem := v.Args[2] - v.reset(OpARM64LoweredWB) - v.Aux = fn - v.AddArg(destptr) - v.AddArg(srcptr) - v.AddArg(mem) + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64XOR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 1 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool) + v1.AddArg(x) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM64_OpXor16_0(v *Value) bool { - // match: (Xor16 x y) +func rewriteValueARM64_OpEqPtr_0(v *Value) bool { + b := v.Block + _ = b + // match: (EqPtr x y) // cond: - // result: (XOR x y) + // result: (Equal (CMP x y)) for { _ = v.Args[1] x := 
v.Args[0] y := v.Args[1] - v.reset(OpARM64XOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64Equal) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpXor32_0(v *Value) bool { - // match: (Xor32 x y) +func rewriteValueARM64_OpFloor_0(v *Value) bool { + // match: (Floor x) // cond: - // result: (XOR x y) + // result: (FRINTMD x) for { - _ = v.Args[1] x := v.Args[0] - y := v.Args[1] - v.reset(OpARM64XOR) + v.reset(OpARM64FRINTMD) v.AddArg(x) - v.AddArg(y) return true } } -func rewriteValueARM64_OpXor64_0(v *Value) bool { - // match: (Xor64 x y) +func rewriteValueARM64_OpGeq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Geq16 x y) // cond: - // result: (XOR x y) + // result: (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64XOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM64_OpXor8_0(v *Value) bool { - // match: (Xor8 x y) +func rewriteValueARM64_OpGeq16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Geq16U x y) // cond: - // result: (XOR x y) + // result: (GreaterEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { _ = v.Args[1] x := v.Args[0] y := v.Args[1] - v.reset(OpARM64XOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARM64GreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func 
rewriteValueARM64_OpZero_0(v *Value) bool { +func rewriteValueARM64_OpGeq32_0(v *Value) bool { b := v.Block _ = b - typ := &b.Func.Config.Types - _ = typ - // match: (Zero [0] _ mem) + // match: (Geq32 x y) // cond: - // result: mem + // result: (GreaterEqual (CMPW x y)) for { - if v.AuxInt != 0 { - break - } _ = v.Args[1] - mem := v.Args[1] - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (Zero [1] ptr mem) +} +func rewriteValueARM64_OpGeq32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Geq32F x y) // cond: - // result: (MOVBstore ptr (MOVDconst [0]) mem) + // result: (GreaterEqual (FCMPS x y)) for { - if v.AuxInt != 1 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [2] ptr mem) +} +func rewriteValueARM64_OpGeq32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Geq32U x y) // cond: - // result: (MOVHstore ptr (MOVDconst [0]) mem) + // result: (GreaterEqualU (CMPW x y)) for { - if v.AuxInt != 2 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVHstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [4] ptr mem) +} +func rewriteValueARM64_OpGeq64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Geq64 x y) // cond: - // 
result: (MOVWstore ptr (MOVDconst [0]) mem) + // result: (GreaterEqual (CMP x y)) for { - if v.AuxInt != 4 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVWstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [8] ptr mem) +} +func rewriteValueARM64_OpGeq64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Geq64F x y) // cond: - // result: (MOVDstore ptr (MOVDconst [0]) mem) + // result: (GreaterEqual (FCMPD x y)) for { - if v.AuxInt != 8 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVDstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [3] ptr mem) +} +func rewriteValueARM64_OpGeq64U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Geq64U x y) // cond: - // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) + // result: (GreaterEqualU (CMP x y)) for { - if v.AuxInt != 3 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 2 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: 
(Zero [5] ptr mem) +} +func rewriteValueARM64_OpGeq8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Geq8 x y) // cond: - // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + // result: (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - if v.AuxInt != 5 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 4 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Zero [6] ptr mem) +} +func rewriteValueARM64_OpGeq8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Geq8U x y) // cond: - // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + // result: (GreaterEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - if v.AuxInt != 6 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVHstore) - v.AuxInt = 4 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - 
v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Zero [7] ptr mem) +} +func rewriteValueARM64_OpGetCallerPC_0(v *Value) bool { + // match: (GetCallerPC) // cond: - // result: (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) + // result: (LoweredGetCallerPC) for { - if v.AuxInt != 7 { - break - } - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 6 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 4 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARM64LoweredGetCallerPC) return true } - // match: (Zero [9] ptr mem) +} +func rewriteValueARM64_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) // cond: - // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + // result: (LoweredGetCallerSP) for { - if v.AuxInt != 9 { - break - } - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 8 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v.reset(OpARM64LoweredGetCallerSP) return true } - return false } -func rewriteValueARM64_OpZero_10(v *Value) bool { +func rewriteValueARM64_OpGetClosurePtr_0(v *Value) bool { + // match: 
(GetClosurePtr) + // cond: + // result: (LoweredGetClosurePtr) + for { + v.reset(OpARM64LoweredGetClosurePtr) + return true + } +} +func rewriteValueARM64_OpGreater16_0(v *Value) bool { b := v.Block _ = b typ := &b.Func.Config.Types _ = typ - // match: (Zero [10] ptr mem) + // match: (Greater16 x y) // cond: - // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + // result: (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) for { - if v.AuxInt != 10 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVHstore) - v.AuxInt = 8 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Zero [11] ptr mem) +} +func rewriteValueARM64_OpGreater16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater16U x y) // cond: - // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + // result: (GreaterThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) for { - if v.AuxInt != 11 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 10 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + 
v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 8 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) return true } - // match: (Zero [12] ptr mem) +} +func rewriteValueARM64_OpGreater32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater32 x y) // cond: - // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + // result: (GreaterThan (CMPW x y)) for { - if v.AuxInt != 12 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVWstore) - v.AuxInt = 8 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Zero [13] ptr mem) +} +func rewriteValueARM64_OpGreater32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater32F x y) // cond: - // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + // result: (GreaterThan (FCMPS x y)) for { - if v.AuxInt != 13 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 12 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + 
v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AuxInt = 8 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) return true } - // match: (Zero [14] ptr mem) +} +func rewriteValueARM64_OpGreater32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater32U x y) // cond: - // result: (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + // result: (GreaterThanU (CMPW x y)) for { - if v.AuxInt != 14 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVHstore) - v.AuxInt = 12 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v1.AuxInt = 8 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) return true } - // match: (Zero [15] ptr mem) - // cond: - // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) +} +func rewriteValueARM64_OpGreater64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater64 x y) + // cond: + // result: (GreaterThan 
(CMP x y)) for { - if v.AuxInt != 15 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVBstore) - v.AuxInt = 14 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) - v1.AuxInt = 12 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) - v3.AuxInt = 8 - v3.AddArg(ptr) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v5.AddArg(ptr) - v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) return true } - // match: (Zero [16] ptr mem) +} +func rewriteValueARM64_OpGreater64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater64F x y) // cond: - // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) + // result: (GreaterThan (FCMPD x y)) for { - if v.AuxInt != 16 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64STP) - v.AuxInt = 0 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) return true } - // match: (Zero [32] ptr mem) +} +func rewriteValueARM64_OpGreater64U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Greater64U x y) // cond: - // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst 
[0]) (MOVDconst [0]) mem)) + // result: (GreaterThanU (CMP x y)) for { - if v.AuxInt != 32 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64STP) - v.AuxInt = 16 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v2.AuxInt = 0 - v2.AddArg(ptr) - v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v3.AuxInt = 0 - v2.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v2.AddArg(v4) - v2.AddArg(mem) - v.AddArg(v2) return true } - // match: (Zero [48] ptr mem) +} +func rewriteValueARM64_OpGreater8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater8 x y) // cond: - // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + // result: (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) for { - if v.AuxInt != 48 { - break - } _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64STP) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpGreater8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Greater8U x y) + // cond: + // result: (GreaterThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + 
v.reset(OpARM64GreaterThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpHmul32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Hmul32 x y) + // cond: + // result: (SRAconst (MULL x y) [32]) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRAconst) v.AuxInt = 32 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 + v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v2.AuxInt = 16 - v2.AddArg(ptr) - v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v3.AuxInt = 0 - v2.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v2.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v5.AuxInt = 0 - v5.AddArg(ptr) - v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v6.AuxInt = 0 - v5.AddArg(v6) - v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v7.AuxInt = 0 - v5.AddArg(v7) - v5.AddArg(mem) - v2.AddArg(v5) - v.AddArg(v2) return true } - // match: (Zero [64] ptr mem) +} +func rewriteValueARM64_OpHmul32u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Hmul32u x y) // cond: - // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) + // result: (SRAconst (UMULL x y) [32]) for { - if v.AuxInt != 64 { - break - } _ = v.Args[1] - ptr := v.Args[0] + x := v.Args[0] + y := v.Args[1] + 
v.reset(OpARM64SRAconst) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpHmul64_0(v *Value) bool { + // match: (Hmul64 x y) + // cond: + // result: (MULH x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MULH) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpHmul64u_0(v *Value) bool { + // match: (Hmul64u x y) + // cond: + // result: (UMULH x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64UMULH) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpInterCall_0(v *Value) bool { + // match: (InterCall [argwid] entry mem) + // cond: + // result: (CALLinter [argwid] entry mem) + for { + argwid := v.AuxInt + _ = v.Args[1] + entry := v.Args[0] mem := v.Args[1] - v.reset(OpARM64STP) - v.AuxInt = 48 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v.reset(OpARM64CALLinter) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(mem) + return true + } +} +func rewriteValueARM64_OpIsInBounds_0(v *Value) bool { + b := v.Block + _ = b + // match: (IsInBounds idx len) + // cond: + // result: (LessThanU (CMP idx len)) + for { + _ = v.Args[1] + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(idx) + v0.AddArg(len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpIsNonNil_0(v *Value) bool { + b := v.Block + _ = b + // match: (IsNonNil ptr) + // cond: + // result: (NotEqual (CMPconst [0] ptr)) + for { + ptr := v.Args[0] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) v0.AuxInt = 0 + v0.AddArg(ptr) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v1.AuxInt = 0 - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v2.AuxInt = 32 - v2.AddArg(ptr) - v3 := b.NewValue0(v.Pos, 
OpARM64MOVDconst, typ.UInt64) - v3.AuxInt = 0 - v2.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v4.AuxInt = 0 - v2.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v5.AuxInt = 16 - v5.AddArg(ptr) - v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v6.AuxInt = 0 - v5.AddArg(v6) - v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v7.AuxInt = 0 - v5.AddArg(v7) - v8 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) - v8.AuxInt = 0 - v8.AddArg(ptr) - v9 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v9.AuxInt = 0 - v8.AddArg(v9) - v10 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v10.AuxInt = 0 - v8.AddArg(v10) - v8.AddArg(mem) - v5.AddArg(v8) - v2.AddArg(v5) - v.AddArg(v2) return true } - return false } -func rewriteValueARM64_OpZero_20(v *Value) bool { +func rewriteValueARM64_OpIsSliceInBounds_0(v *Value) bool { b := v.Block _ = b - config := b.Func.Config - _ = config - // match: (Zero [s] ptr mem) - // cond: s%16 != 0 && s%16 <= 8 && s > 16 - // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) + // match: (IsSliceInBounds idx len) + // cond: + // result: (LessEqualU (CMP idx len)) for { - s := v.AuxInt _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(s%16 != 0 && s%16 <= 8 && s > 16) { - break + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(idx) + v0.AddArg(len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq16 x y) + // cond: + // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) 
+ v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq16U x y) + // cond: + // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq32 x y) + // cond: + // result: (LessEqual (CMPW x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq32F x y) + // cond: + // result: (GreaterEqual (FCMPS y x)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq32U x y) + // cond: + // result: (LessEqualU (CMPW x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq64 x y) + // cond: + // result: (LessEqual (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, 
types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq64F x y) + // cond: + // result: (GreaterEqual (FCMPD y x)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq64U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Leq64U x y) + // cond: + // result: (LessEqualU (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq8 x y) + // cond: + // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLeq8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Leq8U x y) + // cond: + // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessEqualU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess16_0(v *Value) bool { + b := v.Block 
+ _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Less16 x y) + // cond: + // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess16U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Less16U x y) + // cond: + // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32 x y) + // cond: + // result: (LessThan (CMPW x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32F x y) + // cond: + // result: (GreaterThan (FCMPS y x)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess32U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less32U x y) + // cond: + // result: (LessThanU (CMPW x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] 
+ v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less64 x y) + // cond: + // result: (LessThan (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less64F x y) + // cond: + // result: (GreaterThan (FCMPD y x)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64GreaterThan) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess64U_0(v *Value) bool { + b := v.Block + _ = b + // match: (Less64U x y) + // cond: + // result: (LessThanU (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Less8 x y) + // cond: + // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThan) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLess8U_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Less8U x y) + // cond: + // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 
y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64LessThanU) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpLoad_0(v *Value) bool { + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsBoolean()) { + break + } + v.reset(OpARM64MOVBUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && isSigned(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is8BitInt(t) && isSigned(t)) { + break + } + v.reset(OpARM64MOVBload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !isSigned(t)) + // result: (MOVBUload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is8BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpARM64MOVBUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && isSigned(t)) + // result: (MOVHload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is16BitInt(t) && isSigned(t)) { + break + } + v.reset(OpARM64MOVHload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !isSigned(t)) + // result: (MOVHUload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is16BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpARM64MOVHUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && isSigned(t)) + // result: (MOVWload 
ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitInt(t) && isSigned(t)) { + break + } + v.reset(OpARM64MOVWload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !isSigned(t)) + // result: (MOVWUload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpARM64MOVWUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpARM64MOVDload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVSload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitFloat(t)) { + break + } + v.reset(OpARM64FMOVSload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(is64BitFloat(t)) { + break + } + v.reset(OpARM64FMOVDload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpLocalAddr_0(v *Value) bool { + // match: (LocalAddr {sym} base _) + // cond: + // result: (MOVDaddr {sym} base) + for { + sym := v.Aux + _ = v.Args[1] + base := v.Args[0] + v.reset(OpARM64MOVDaddr) + v.Aux = sym + v.AddArg(base) + return true + } +} +func rewriteValueARM64_OpLsh16x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh16x16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y 
:= v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh16x32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh16x64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { + b := v.Block + _ = b 
+ typ := &b.Func.Config.Types + _ = typ + // match: (Lsh16x8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh32x16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh32x32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + 
v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh32x64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh32x8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh64x16 x y) + // cond: + // result: (CSEL 
{OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh64x32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh64x64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, 
types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh64x8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh8x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh8x16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh8x32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t 
:= v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpLsh8x64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Lsh8x64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Lsh8x8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SLL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpMod16_0(v 
*Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Mod16 x y) + // cond: + // result: (MODW (SignExt16to32 x) (SignExt16to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MODW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpMod16u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Mod16u x y) + // cond: + // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64UMODW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpMod32_0(v *Value) bool { + // match: (Mod32 x y) + // cond: + // result: (MODW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MODW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMod32u_0(v *Value) bool { + // match: (Mod32u x y) + // cond: + // result: (UMODW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64UMODW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMod64_0(v *Value) bool { + // match: (Mod64 x y) + // cond: + // result: (MOD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MOD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMod64u_0(v *Value) bool { + // match: (Mod64u x y) + // cond: + // result: (UMOD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64UMOD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMod8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = 
typ + // match: (Mod8 x y) + // cond: + // result: (MODW (SignExt8to32 x) (SignExt8to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MODW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpMod8u_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Mod8u x y) + // cond: + // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64UMODW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpMove_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Move [0] _ _ mem) + // cond: + // result: mem + for { + if v.AuxInt != 0 { + break + } + _ = v.Args[2] + mem := v.Args[2] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Move [1] dst src mem) + // cond: + // result: (MOVBstore dst (MOVBUload src mem) mem) + for { + if v.AuxInt != 1 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [2] dst src mem) + // cond: + // result: (MOVHstore dst (MOVHUload src mem) mem) + for { + if v.AuxInt != 2 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [4] dst src mem) + // cond: + 
// result: (MOVWstore dst (MOVWUload src mem) mem) + for { + if v.AuxInt != 4 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVWstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [8] dst src mem) + // cond: + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if v.AuxInt != 8 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVDstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [3] dst src mem) + // cond: + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + for { + if v.AuxInt != 3 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [5] dst src mem) + // cond: + // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + for { + if v.AuxInt != 5 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = 4 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = 4 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + 
v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [6] dst src mem) + // cond: + // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)) + for { + if v.AuxInt != 6 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVHstore) + v.AuxInt = 4 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v0.AuxInt = 4 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [7] dst src mem) + // cond: + // result: (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))) + for { + if v.AuxInt != 7 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVBstore) + v.AuxInt = 6 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8) + v0.AuxInt = 6 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 4 + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16) + v2.AuxInt = 4 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v3.AddArg(dst) + v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move [12] dst src mem) + // cond: + // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if v.AuxInt != 12 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + 
v.reset(OpARM64MOVWstore) + v.AuxInt = 8 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32) + v0.AuxInt = 8 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueARM64_OpMove_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ + // match: (Move [16] dst src mem) + // cond: + // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)) + for { + if v.AuxInt != 16 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVDstore) + v.AuxInt = 8 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = 8 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [24] dst src mem) + // cond: + // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))) + for { + if v.AuxInt != 24 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64MOVDstore) + v.AuxInt = 16 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = 16 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v2.AuxInt = 8 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, 
types.TypeMem) + v3.AddArg(dst) + v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s%8 != 0 && s > 8 + // result: (Move [s%8] (OffPtr dst [s-s%8]) (OffPtr src [s-s%8]) (Move [s-s%8] dst src mem)) + for { + s := v.AuxInt + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s%8 != 0 && s > 8) { + break + } + v.reset(OpMove) + v.AuxInt = s % 8 + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = s - s%8 + v0.AddArg(dst) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = s - s%8 + v1.AddArg(src) + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = s - s%8 + v2.AddArg(dst) + v2.AddArg(src) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice + // result: (MOVDstore [s-8] dst (MOVDload [s-8] src mem) (DUFFCOPY [8*(64-(s-8)/16)] dst src mem)) + for { + s := v.AuxInt + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice) { + break + } + v.reset(OpARM64MOVDstore) + v.AuxInt = s - 8 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64) + v0.AuxInt = s - 8 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem) + v1.AuxInt = 8 * (64 - (s-8)/16) + v1.AddArg(dst) + v1.AddArg(src) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice + // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem) + for { + s := v.AuxInt + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) { + break + } + 
v.reset(OpARM64DUFFCOPY) + v.AuxInt = 8 * (64 - s/16) + v.AddArg(dst) + v.AddArg(src) + v.AddArg(mem) + return true + } + // match: (Move [s] dst src mem) + // cond: s > 24 && s%8 == 0 + // result: (LoweredMove dst src (ADDconst src [s-8]) mem) + for { + s := v.AuxInt + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s > 24 && s%8 == 0) { + break + } + v.reset(OpARM64LoweredMove) + v.AddArg(dst) + v.AddArg(src) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type) + v0.AuxInt = s - 8 + v0.AddArg(src) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpMul16_0(v *Value) bool { + // match: (Mul16 x y) + // cond: + // result: (MULW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MULW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul32_0(v *Value) bool { + // match: (Mul32 x y) + // cond: + // result: (MULW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MULW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul32F_0(v *Value) bool { + // match: (Mul32F x y) + // cond: + // result: (FMULS x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64FMULS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul64_0(v *Value) bool { + // match: (Mul64 x y) + // cond: + // result: (MUL x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MUL) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul64F_0(v *Value) bool { + // match: (Mul64F x y) + // cond: + // result: (FMULD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64FMULD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul64uhilo_0(v *Value) bool { + // match: (Mul64uhilo x y) + // cond: + // result: (LoweredMuluhilo x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + 
v.reset(OpARM64LoweredMuluhilo) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpMul8_0(v *Value) bool { + // match: (Mul8 x y) + // cond: + // result: (MULW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64MULW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpNeg16_0(v *Value) bool { + // match: (Neg16 x) + // cond: + // result: (NEG x) + for { + x := v.Args[0] + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeg32_0(v *Value) bool { + // match: (Neg32 x) + // cond: + // result: (NEG x) + for { + x := v.Args[0] + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeg32F_0(v *Value) bool { + // match: (Neg32F x) + // cond: + // result: (FNEGS x) + for { + x := v.Args[0] + v.reset(OpARM64FNEGS) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeg64_0(v *Value) bool { + // match: (Neg64 x) + // cond: + // result: (NEG x) + for { + x := v.Args[0] + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeg64F_0(v *Value) bool { + // match: (Neg64F x) + // cond: + // result: (FNEGD x) + for { + x := v.Args[0] + v.reset(OpARM64FNEGD) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeg8_0(v *Value) bool { + // match: (Neg8 x) + // cond: + // result: (NEG x) + for { + x := v.Args[0] + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpNeq16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Neq16 x y) + // cond: + // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + 
return true + } +} +func rewriteValueARM64_OpNeq32_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq32 x y) + // cond: + // result: (NotEqual (CMPW x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq32F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq32F x y) + // cond: + // result: (NotEqual (FCMPS x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq64 x y) + // cond: + // result: (NotEqual (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq64F_0(v *Value) bool { + b := v.Block + _ = b + // match: (Neq64F x y) + // cond: + // result: (NotEqual (FCMPD x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNeq8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Neq8 x y) + // cond: + // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} 
+func rewriteValueARM64_OpNeqB_0(v *Value) bool { + // match: (NeqB x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpNeqPtr_0(v *Value) bool { + b := v.Block + _ = b + // match: (NeqPtr x y) + // cond: + // result: (NotEqual (CMP x y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64NotEqual) + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpNilCheck_0(v *Value) bool { + // match: (NilCheck ptr mem) + // cond: + // result: (LoweredNilCheck ptr mem) + for { + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64LoweredNilCheck) + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValueARM64_OpNot_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Not x) + // cond: + // result: (XOR (MOVDconst [1]) x) + for { + x := v.Args[0] + v.reset(OpARM64XOR) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpOffPtr_0(v *Value) bool { + // match: (OffPtr [off] ptr:(SP)) + // cond: + // result: (MOVDaddr [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + if ptr.Op != OpSP { + break + } + v.reset(OpARM64MOVDaddr) + v.AuxInt = off + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // cond: + // result: (ADDconst [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + v.reset(OpARM64ADDconst) + v.AuxInt = off + v.AddArg(ptr) + return true + } +} +func rewriteValueARM64_OpOr16_0(v *Value) bool { + // match: (Or16 x y) + // cond: + // result: (OR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpOr32_0(v *Value) bool { + 
// match: (Or32 x y) + // cond: + // result: (OR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpOr64_0(v *Value) bool { + // match: (Or64 x y) + // cond: + // result: (OR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpOr8_0(v *Value) bool { + // match: (Or8 x y) + // cond: + // result: (OR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpOrB_0(v *Value) bool { + // match: (OrB x y) + // cond: + // result: (OR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpPopCount16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount16 x) + // cond: + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt16to64 x))))) + for { + t := v.Type + x := v.Args[0] + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpPopCount32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount32 x) + // cond: + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt32to64 x))))) + for { + t := v.Type + x := v.Args[0] + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v3 := 
b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(x) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpPopCount64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount64 x) + // cond: + // result: (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp x)))) + for { + t := v.Type + x := v.Args[0] + v.reset(OpARM64FMOVDfpgp) + v.Type = t + v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64) + v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64) + v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRotateLeft32_0(v *Value) bool { + b := v.Block + _ = b + // match: (RotateLeft32 x y) + // cond: + // result: (RORW x (NEG y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64RORW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRotateLeft64_0(v *Value) bool { + b := v.Block + _ = b + // match: (RotateLeft64 x y) + // cond: + // result: (ROR x (NEG y)) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64ROR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRound_0(v *Value) bool { + // match: (Round x) + // cond: + // result: (FRINTAD x) + for { + x := v.Args[0] + v.reset(OpARM64FRINTAD) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpRound32F_0(v *Value) bool { + // match: (Round32F x) + // cond: + // result: (LoweredRound32F x) + for { + x := v.Args[0] + v.reset(OpARM64LoweredRound32F) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpRound64F_0(v *Value) bool { + // match: (Round64F x) + // cond: + // result: (LoweredRound64F x) + for { + x := v.Args[0] + v.reset(OpARM64LoweredRound64F) + v.AddArg(x) + return 
true + } +} +func rewriteValueARM64_OpRoundToEven_0(v *Value) bool { + // match: (RoundToEven x) + // cond: + // result: (FRINTND x) + for { + x := v.Args[0] + v.reset(OpARM64FRINTND) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(y) + 
v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16Ux8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x16 x y) + // cond: + // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + _ = 
v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x32 x y) + // cond: + // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x64 x y) + // cond: + // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = 
OpARM64LessThanU + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = 63 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v1.AddArg(v3) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh16x8 x y) + // cond: + // result: (SRA (SignExt16to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32Ux16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, 
typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32Ux32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32Ux64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32Ux8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] 
(ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32x16 x y) + // cond: + // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32x32 x y) + // cond: + // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + 
v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32x64 x y) + // cond: + // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = 63 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v1.AddArg(v3) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh32x8 x y) + // cond: + // result: (SRA (SignExt32to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, 
OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64Ux16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64Ux32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Rsh64Ux64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL x y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x 
:= v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpConst64, t) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v.AddArg(v2) + return true + } +} +func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64Ux8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64x16 x y) + // cond: + // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.Aux = OpARM64LessThanU + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = 63 + v0.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v0.AddArg(v3) + v.AddArg(v0) + return 
true + } +} +func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64x32 x y) + // cond: + // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.Aux = OpARM64LessThanU + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = 63 + v0.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v0.AddArg(v3) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { + b := v.Block + _ = b + // match: (Rsh64x64 x y) + // cond: + // result: (SRA x (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.Aux = OpARM64LessThanU + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1.AuxInt = 63 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v2.AuxInt = 64 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh64x8 x y) + // cond: + // result: (SRA x (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v0.Aux = OpARM64LessThanU + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpConst64, 
y.Type) + v2.AuxInt = 63 + v0.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v0.AddArg(v3) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux16 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux32 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, 
typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux64 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpConst64, t) + v2.AuxInt = 0 + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v.AddArg(v3) + return true + } +} +func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8Ux8 x y) + // cond: + // result: (CSEL {OpARM64LessThanU} (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64CSEL) + v.Aux = OpARM64LessThanU + v0 := b.NewValue0(v.Pos, OpARM64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpConst64, t) + v3.AuxInt = 0 + v.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v.AddArg(v4) + return true + } +} +func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x16 x y) + // cond: + // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + 
for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x32 x y) + // cond: + // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x64 x y) + // cond: + // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} y (Const64 [63]) (CMPconst [64] y))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = 
OpARM64LessThanU + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2.AuxInt = 63 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v3.AuxInt = 64 + v3.AddArg(y) + v1.AddArg(v3) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Rsh8x8 x y) + // cond: + // result: (SRA (SignExt8to64 x) (CSEL {OpARM64LessThanU} (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SRA) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type) + v1.Aux = OpARM64LessThanU + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3.AuxInt = 63 + v1.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) + v4.AuxInt = 64 + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(y) + v4.AddArg(v5) + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueARM64_OpSignExt16to32_0(v *Value) bool { + // match: (SignExt16to32 x) + // cond: + // result: (MOVHreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVHreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpSignExt16to64_0(v *Value) bool { + // match: (SignExt16to64 x) + // cond: + // result: (MOVHreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVHreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpSignExt32to64_0(v *Value) bool { + // match: (SignExt32to64 x) + // cond: + // result: (MOVWreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVWreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpSignExt8to16_0(v *Value) bool { + // match: (SignExt8to16 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBreg) + v.AddArg(x) + return true + } +} 
+func rewriteValueARM64_OpSignExt8to32_0(v *Value) bool { + // match: (SignExt8to32 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpSignExt8to64_0(v *Value) bool { + // match: (SignExt8to64 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpSlicemask_0(v *Value) bool { + b := v.Block + _ = b + // match: (Slicemask x) + // cond: + // result: (SRAconst (NEG x) [63]) + for { + t := v.Type + x := v.Args[0] + v.reset(OpARM64SRAconst) + v.AuxInt = 63 + v0 := b.NewValue0(v.Pos, OpARM64NEG, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM64_OpSqrt_0(v *Value) bool { + // match: (Sqrt x) + // cond: + // result: (FSQRTD x) + for { + x := v.Args[0] + v.reset(OpARM64FSQRTD) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpStaticCall_0(v *Value) bool { + // match: (StaticCall [argwid] {target} mem) + // cond: + // result: (CALLstatic [argwid] {target} mem) + for { + argwid := v.AuxInt + target := v.Aux + mem := v.Args[0] + v.reset(OpARM64CALLstatic) + v.AuxInt = argwid + v.Aux = target + v.AddArg(mem) + return true + } +} +func rewriteValueARM64_OpStore_0(v *Value) bool { + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 1) { + break + } + v.reset(OpARM64MOVBstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 2) { + break + } + v.reset(OpARM64MOVHstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) 
+ return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // result: (MOVWstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type)) { + break + } + v.reset(OpARM64MOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) + // result: (MOVDstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { + break + } + v.reset(OpARM64MOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // result: (FMOVSstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + break + } + v.reset(OpARM64FMOVSstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // result: (FMOVDstore ptr val mem) + for { + t := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + break + } + v.reset(OpARM64FMOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpSub16_0(v *Value) bool { + // match: (Sub16 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSub32_0(v *Value) bool { + // match: (Sub32 x y) + // cond: + // result: (SUB x y) 
+ for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSub32F_0(v *Value) bool { + // match: (Sub32F x y) + // cond: + // result: (FSUBS x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64FSUBS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSub64_0(v *Value) bool { + // match: (Sub64 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSub64F_0(v *Value) bool { + // match: (Sub64F x y) + // cond: + // result: (FSUBD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64FSUBD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSub8_0(v *Value) bool { + // match: (Sub8 x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpSubPtr_0(v *Value) bool { + // match: (SubPtr x y) + // cond: + // result: (SUB x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpTrunc_0(v *Value) bool { + // match: (Trunc x) + // cond: + // result: (FRINTZD x) + for { + x := v.Args[0] + v.reset(OpARM64FRINTZD) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc16to8_0(v *Value) bool { + // match: (Trunc16to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc32to16_0(v *Value) bool { + // match: (Trunc32to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc32to8_0(v *Value) bool { + // 
match: (Trunc32to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc64to16_0(v *Value) bool { + // match: (Trunc64to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc64to32_0(v *Value) bool { + // match: (Trunc64to32 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpTrunc64to8_0(v *Value) bool { + // match: (Trunc64to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpWB_0(v *Value) bool { + // match: (WB {fn} destptr srcptr mem) + // cond: + // result: (LoweredWB {fn} destptr srcptr mem) + for { + fn := v.Aux + _ = v.Args[2] + destptr := v.Args[0] + srcptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARM64LoweredWB) + v.Aux = fn + v.AddArg(destptr) + v.AddArg(srcptr) + v.AddArg(mem) + return true + } +} +func rewriteValueARM64_OpXor16_0(v *Value) bool { + // match: (Xor16 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpXor32_0(v *Value) bool { + // match: (Xor32 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpXor64_0(v *Value) bool { + // match: (Xor64 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpARM64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpXor8_0(v *Value) bool { + // match: (Xor8 x y) + // cond: + // result: (XOR x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := 
v.Args[1] + v.reset(OpARM64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM64_OpZero_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Zero [0] _ mem) + // cond: + // result: mem + for { + if v.AuxInt != 0 { + break + } + _ = v.Args[1] + mem := v.Args[1] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Zero [1] ptr mem) + // cond: + // result: (MOVBstore ptr (MOVDconst [0]) mem) + for { + if v.AuxInt != 1 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [2] ptr mem) + // cond: + // result: (MOVHstore ptr (MOVDconst [0]) mem) + for { + if v.AuxInt != 2 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [4] ptr mem) + // cond: + // result: (MOVWstore ptr (MOVDconst [0]) mem) + for { + if v.AuxInt != 4 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVWstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [8] ptr mem) + // cond: + // result: (MOVDstore ptr (MOVDconst [0]) mem) + for { + if v.AuxInt != 8 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVDstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [3] ptr mem) + // cond: + // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 3 { + break + } + _ = v.Args[1] + ptr := 
v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 2 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [5] ptr mem) + // cond: + // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 5 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 4 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [6] ptr mem) + // cond: + // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 6 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AuxInt = 4 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [7] ptr mem) + // cond: + // result: (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 7 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 6 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, 
OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 4 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [9] ptr mem) + // cond: + // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 9 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 8 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueARM64_OpZero_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Zero [10] ptr mem) + // cond: + // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 10 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AuxInt = 8 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [11] ptr mem) + // cond: + // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 11 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 10 + 
v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [12] ptr mem) + // cond: + // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 12 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVWstore) + v.AuxInt = 8 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [13] ptr mem) + // cond: + // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 13 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 12 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [14] ptr mem) + // cond: + // result: (MOVHstore [12] 
ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 14 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AuxInt = 12 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [15] ptr mem) + // cond: + // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) + for { + if v.AuxInt != 15 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 14 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 12 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v3.AuxInt = 8 + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [16] ptr mem) + // cond: + // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) + for { + if v.AuxInt != 16 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem 
:= v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 0 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + // match: (Zero [32] ptr mem) + // cond: + // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) + for { + if v.AuxInt != 32 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 16 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 0 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Zero [48] ptr mem) + // cond: + // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + for { + if v.AuxInt != 48 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 32 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 16 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v5.AuxInt = 0 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, 
typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v7.AuxInt = 0 + v5.AddArg(v7) + v5.AddArg(mem) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + // match: (Zero [64] ptr mem) + // cond: + // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) + for { + if v.AuxInt != 64 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 48 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 32 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v5.AuxInt = 16 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v7.AuxInt = 0 + v5.AddArg(v7) + v8 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v8.AuxInt = 0 + v8.AddArg(ptr) + v9 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v9.AuxInt = 0 + v8.AddArg(v9) + v10 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v10.AuxInt = 0 + v8.AddArg(v10) + v8.AddArg(mem) + v5.AddArg(v8) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueARM64_OpZero_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (Zero [s] ptr mem) + // cond: s%16 != 0 && s%16 <= 8 && s > 16 + // result: (Zero [8] (OffPtr ptr [s-8]) (Zero [s-s%16] ptr mem)) + for { + s := v.AuxInt + _ = v.Args[1] + ptr := v.Args[0] + mem := 
v.Args[1] + if !(s%16 != 0 && s%16 <= 8 && s > 16) { + break + } + v.reset(OpZero) + v.AuxInt = 8 + v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) + v0.AuxInt = s - 8 + v0.AddArg(ptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = s - s%16 + v1.AddArg(ptr) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 != 0 && s%16 > 8 && s > 16 + // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) + for { + s := v.AuxInt + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(s%16 != 0 && s%16 > 8 && s > 16) { + break + } + v.reset(OpZero) + v.AuxInt = 16 + v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) + v0.AuxInt = s - 16 + v0.AddArg(ptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) + v1.AuxInt = s - s%16 + v1.AddArg(ptr) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice + // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) + for { + s := v.AuxInt + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { + break + } + v.reset(OpARM64DUFFZERO) + v.AuxInt = 4 * (64 - s/16) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) + // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) + for { + s := v.AuxInt + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { + break + } + v.reset(OpARM64LoweredZero) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) + v0.AuxInt = s - 16 + v0.AddArg(ptr) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM64_OpZeroExt16to32_0(v *Value) bool { + // match: (ZeroExt16to32 x) + // cond: + // result: (MOVHUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVHUreg) + v.AddArg(x) + 
return true + } +} +func rewriteValueARM64_OpZeroExt16to64_0(v *Value) bool { + // match: (ZeroExt16to64 x) + // cond: + // result: (MOVHUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVHUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpZeroExt32to64_0(v *Value) bool { + // match: (ZeroExt32to64 x) + // cond: + // result: (MOVWUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVWUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpZeroExt8to16_0(v *Value) bool { + // match: (ZeroExt8to16 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpZeroExt8to32_0(v *Value) bool { + // match: (ZeroExt8to32 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM64_OpZeroExt8to64_0(v *Value) bool { + // match: (ZeroExt8to64 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARM64MOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteBlockARM64(b *Block) bool { + config := b.Func.Config + _ = config + fe := b.Func.fe + _ = fe + typ := &config.Types + _ = typ + switch b.Kind { + case BlockARM64EQ: + // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (TSTWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := 
v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (TSTW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (TSTconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (CMNconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (EQ (CMNWconst [c] y) yes no) + for { + v := b.Control + if 
v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMP { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (CMNW x y) yes no) + for { + v := b.Control + if v.Op != 
OpARM64CMPW { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] x) yes no) + // cond: + // result: (Z x yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + b.Kind = BlockARM64Z + b.SetControl(x) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] x) yes no) + // cond: + // result: (ZW x yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + b.Kind = BlockARM64ZW + b.SetControl(x) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, 
x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMNW a (MULW x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADDW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (EQ (CMPW a (MULW x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUBW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64EQ + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (TSTconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBZ {ntz(c)} x yes no) + for { + v := b.Control + if v.Op != OpARM64TSTconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(c)) { + break + } + b.Kind = BlockARM64TBZ + b.SetControl(x) + b.Aux = ntz(c) + return true + } + // match: (EQ (TSTWconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBZ {ntz(int64(uint32(c)))} x yes no) + for { + v := b.Control + if v.Op != OpARM64TSTWconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.Kind = BlockARM64TBZ + b.SetControl(x) + b.Aux = 
ntz(int64(uint32(c))) + return true + } + // match: (EQ (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARM64FlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (EQ (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARM64FlagLT_ULT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true } - v.reset(OpZero) - v.AuxInt = 8 - v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) - v0.AuxInt = s - 8 - v0.AddArg(ptr) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) - v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Zero [s] ptr mem) - // cond: s%16 != 0 && s%16 > 8 && s > 16 - // result: (Zero [16] (OffPtr ptr [s-16]) (Zero [s-s%16] ptr mem)) - for { - s := v.AuxInt - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(s%16 != 0 && s%16 > 8 && s > 16) { - break + // match: (EQ (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARM64FlagLT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true } - v.reset(OpZero) - v.AuxInt = 16 - v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) - v0.AuxInt = s - 16 - v0.AddArg(ptr) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) - v1.AuxInt = s - s%16 - v1.AddArg(ptr) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Zero [s] ptr mem) - // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice - // result: (DUFFZERO [4 * (64 - s/16)] ptr mem) - for { - s := v.AuxInt - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { - break + // match: (EQ (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARM64FlagGT_ULT 
{ + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true } - v.reset(OpARM64DUFFZERO) - v.AuxInt = 4 * (64 - s/16) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (Zero [s] ptr mem) - // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) - // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) - for { - s := v.AuxInt - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { - break + // match: (EQ (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARM64FlagGT_UGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true } - v.reset(OpARM64LoweredZero) - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) - v0.AuxInt = s - 16 - v0.AddArg(ptr) - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueARM64_OpZeroExt16to32_0(v *Value) bool { - // match: (ZeroExt16to32 x) - // cond: - // result: (MOVHUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVHUreg) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpZeroExt16to64_0(v *Value) bool { - // match: (ZeroExt16to64 x) - // cond: - // result: (MOVHUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVHUreg) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpZeroExt32to64_0(v *Value) bool { - // match: (ZeroExt32to64 x) - // cond: - // result: (MOVWUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVWUreg) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpZeroExt8to16_0(v *Value) bool { - // match: (ZeroExt8to16 x) - // cond: - // result: (MOVBUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVBUreg) - v.AddArg(x) - return true - } -} -func rewriteValueARM64_OpZeroExt8to32_0(v *Value) bool { - // match: (ZeroExt8to32 x) - // cond: - // result: (MOVBUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVBUreg) - v.AddArg(x) - 
return true - } -} -func rewriteValueARM64_OpZeroExt8to64_0(v *Value) bool { - // match: (ZeroExt8to64 x) - // cond: - // result: (MOVBUreg x) - for { - x := v.Args[0] - v.reset(OpARM64MOVBUreg) - v.AddArg(x) - return true - } -} -func rewriteBlockARM64(b *Block) bool { - config := b.Func.Config - _ = config - fe := b.Func.fe - _ = fe - typ := &config.Types - _ = typ - switch b.Kind { - case BlockARM64EQ: - // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // match: (EQ (InvertFlags cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpARM64InvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockARM64EQ + b.SetControl(cmp) + b.Aux = nil + return true + } + case BlockARM64GE: + // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 - // result: (EQ (TSTWconst [c] y) yes no) + // result: (GE (TSTWconst [c] y) yes no) for { v := b.Control if v.Op != OpARM64CMPWconst { @@ -32153,7 +39294,7 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) @@ -32161,9 +39302,9 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] z:(AND x y)) yes no) + // match: (GE (CMPconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (TST x y) yes no) + // result: (GE (TST x y) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32182,7 +39323,7 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) @@ -32190,9 +39331,9 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPWconst [0] z:(AND x y)) yes no) + // match: (GE (CMPWconst [0] z:(AND x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (TSTW x y) yes no) + // result: 
(GE (TSTW x y) yes no) for { v := b.Control if v.Op != OpARM64CMPWconst { @@ -32211,7 +39352,7 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) @@ -32219,9 +39360,9 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) + // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 - // result: (EQ (TSTconst [c] y) yes no) + // result: (GE (TSTconst [c] y) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32239,7 +39380,7 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) @@ -32247,9 +39388,65 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] z:(ADD x y)) yes no) + // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GE (CMNconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GE (CMNWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + 
v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (CMN x y) yes no) + // result: (GE (CMN x y) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32268,7 +39465,7 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) @@ -32276,9 +39473,38 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMP x z:(NEG y)) yes no) + // match: (GE (CMPWconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 - // result: (EQ (CMN x y) yes no) + // result: (GE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (GE (CMN x y) yes no) for { v := b.Control if v.Op != OpARM64CMP { @@ -32294,7 +39520,7 @@ func rewriteBlockARM64(b *Block) bool { if !(z.Uses == 1) { break } - b.Kind = BlockARM64EQ + b.Kind = BlockARM64GE v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) v0.AddArg(x) v0.AddArg(y) @@ -32302,9 +39528,35 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (CMPconst [0] x) yes no) - // cond: - // result: (Z x yes no) + // match: (GE (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (GE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPW { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + 
b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (GE (CMN a (MUL x y)) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32313,15 +39565,64 @@ func rewriteBlockARM64(b *Block) bool { if v.AuxInt != 0 { break } - x := v.Args[0] - b.Kind = BlockARM64Z - b.SetControl(x) + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - // match: (EQ (CMPWconst [0] x) yes no) - // cond: - // result: (ZW x yes no) + // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (GE (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (GE (CMNW a (MULW x y)) yes no) for { v := b.Control if v.Op != OpARM64CMPWconst { @@ -32330,49 +39631,96 @@ func rewriteBlockARM64(b *Block) bool { if v.AuxInt != 0 { break } - x := v.Args[0] - b.Kind = BlockARM64ZW - b.SetControl(x) + z := v.Args[0] + if z.Op != OpARM64MADDW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y 
:= z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - // match: (EQ (TSTconst [c] x) yes no) - // cond: oneBit(c) - // result: (TBZ {ntz(c)} x yes no) + // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (GE (CMPW a (MULW x y)) yes no) for { v := b.Control - if v.Op != OpARM64TSTconst { + if v.Op != OpARM64CMPWconst { break } - c := v.AuxInt - x := v.Args[0] - if !(oneBit(c)) { + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUBW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GE + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPWconst [0] x) yes no) + // cond: + // result: (TBZ {int64(31)} x yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { break } + if v.AuxInt != 0 { + break + } + x := v.Args[0] b.Kind = BlockARM64TBZ b.SetControl(x) - b.Aux = ntz(c) + b.Aux = int64(31) return true } - // match: (EQ (TSTWconst [c] x) yes no) - // cond: oneBit(int64(uint32(c))) - // result: (TBZ {ntz(int64(uint32(c)))} x yes no) + // match: (GE (CMPconst [0] x) yes no) + // cond: + // result: (TBZ {int64(63)} x yes no) for { v := b.Control - if v.Op != OpARM64TSTWconst { + if v.Op != OpARM64CMPconst { break } - c := v.AuxInt - x := v.Args[0] - if !(oneBit(int64(uint32(c)))) { + if v.AuxInt != 0 { break } + x := v.Args[0] b.Kind = BlockARM64TBZ b.SetControl(x) - b.Aux = ntz(int64(uint32(c))) + b.Aux = int64(63) return true } - // match: (EQ (FlagEQ) yes no) + // match: (GE (FlagEQ) yes no) // cond: // result: 
(First nil yes no) for { @@ -32385,7 +39733,7 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (FlagLT_ULT) yes no) + // match: (GE (FlagLT_ULT) yes no) // cond: // result: (First nil no yes) for { @@ -32399,7 +39747,7 @@ func rewriteBlockARM64(b *Block) bool { b.swapSuccessors() return true } - // match: (EQ (FlagLT_UGT) yes no) + // match: (GE (FlagLT_UGT) yes no) // cond: // result: (First nil no yes) for { @@ -32413,9 +39761,9 @@ func rewriteBlockARM64(b *Block) bool { b.swapSuccessors() return true } - // match: (EQ (FlagGT_ULT) yes no) + // match: (GE (FlagGT_ULT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARM64FlagGT_ULT { @@ -32424,12 +39772,11 @@ func rewriteBlockARM64(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (EQ (FlagGT_UGT) yes no) + // match: (GE (FlagGT_UGT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpARM64FlagGT_UGT { @@ -32438,30 +39785,115 @@ func rewriteBlockARM64(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (EQ (InvertFlags cmp) yes no) + // match: (GE (InvertFlags cmp) yes no) // cond: - // result: (EQ cmp yes no) + // result: (LE cmp yes no) for { v := b.Control if v.Op != OpARM64InvertFlags { break } - cmp := v.Args[0] - b.Kind = BlockARM64EQ - b.SetControl(cmp) + cmp := v.Args[0] + b.Kind = BlockARM64LE + b.SetControl(cmp) + b.Aux = nil + return true + } + case BlockARM64GT: + // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GT (TSTWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + 
b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (TSTW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - case BlockARM64GE: - // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 - // result: (GE (TSTWconst [c] y) yes no) + // result: (GT (TSTconst [c] y) yes no) for { v := b.Control - if v.Op != OpARM64CMPWconst { + if v.Op != OpARM64CMPconst { break } if v.AuxInt != 0 { @@ -32476,17 +39908,17 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Kind = BlockARM64GE - v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) b.SetControl(v0) b.Aux = nil return true } - // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no) + // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no) // cond: 
x.Uses == 1 - // result: (GE (TSTconst [c] y) yes no) + // result: (GT (CMNconst [c] y) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32496,7 +39928,7 @@ func rewriteBlockARM64(b *Block) bool { break } x := v.Args[0] - if x.Op != OpARM64ANDconst { + if x.Op != OpARM64ADDconst { break } c := x.AuxInt @@ -32504,17 +39936,17 @@ func rewriteBlockARM64(b *Block) bool { if !(x.Uses == 1) { break } - b.Kind = BlockARM64GE - v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) v0.AuxInt = c v0.AddArg(y) b.SetControl(v0) b.Aux = nil return true } - // match: (GE (CMPWconst [0] x) yes no) - // cond: - // result: (TBZ {int64(31)} x yes no) + // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (GT (CMNWconst [c] y) yes no) for { v := b.Control if v.Op != OpARM64CMPWconst { @@ -32524,14 +39956,25 @@ func rewriteBlockARM64(b *Block) bool { break } x := v.Args[0] - b.Kind = BlockARM64TBZ - b.SetControl(x) - b.Aux = int64(31) + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil return true } - // match: (GE (CMPconst [0] x) yes no) - // cond: - // result: (TBZ {int64(63)} x yes no) + // match: (GT (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (CMN x y) yes no) for { v := b.Control if v.Op != OpARM64CMPconst { @@ -32540,97 +39983,174 @@ func rewriteBlockARM64(b *Block) bool { if v.AuxInt != 0 { break } - x := v.Args[0] - b.Kind = BlockARM64TBZ - b.SetControl(x) - b.Aux = int64(63) - return true - } - // match: (GE (FlagEQ) yes no) - // cond: - // result: (First nil yes no) - for { - v := b.Control - if v.Op != OpARM64FlagEQ { + z := v.Args[0] + if z.Op != OpARM64ADD { break } - b.Kind = BlockFirst - 
b.SetControl(nil) + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (GE (FlagLT_ULT) yes no) - // cond: - // result: (First nil no yes) + // match: (GT (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (CMNW x y) yes no) for { v := b.Control - if v.Op != OpARM64FlagLT_ULT { + if v.Op != OpARM64CMPWconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (GE (FlagLT_UGT) yes no) - // cond: - // result: (First nil no yes) + // match: (GT (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (GT (CMN x y) yes no) for { v := b.Control - if v.Op != OpARM64FlagLT_UGT { + if v.Op != OpARM64CMP { break } - b.Kind = BlockFirst - b.SetControl(nil) + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (GE (FlagGT_ULT) yes no) - // cond: - // result: (First nil yes no) + // match: (GT (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (GT (CMNW x y) yes no) for { v := b.Control - if v.Op != OpARM64FlagGT_ULT { + if v.Op != OpARM64CMPW { break } - b.Kind = BlockFirst - b.SetControl(nil) + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + 
break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (GE (FlagGT_UGT) yes no) - // cond: - // result: (First nil yes no) + // match: (GT (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (GT (CMN a (MUL x y)) yes no) for { v := b.Control - if v.Op != OpARM64FlagGT_UGT { + if v.Op != OpARM64CMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - // match: (GE (InvertFlags cmp) yes no) - // cond: - // result: (LE cmp yes no) + // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (GT (CMP a (MUL x y)) yes no) for { v := b.Control - if v.Op != OpARM64InvertFlags { + if v.Op != OpARM64CMPconst { break } - cmp := v.Args[0] - b.Kind = BlockARM64LE - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64GT + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - case BlockARM64GT: - // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) - // cond: x.Uses == 1 - // result: (GT (TSTWconst [c] y) yes no) + // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (GT (CMNW a (MULW x y)) yes no) for { v := b.Control if v.Op 
!= OpARM64CMPWconst { @@ -32639,47 +40159,57 @@ func rewriteBlockARM64(b *Block) bool { if v.AuxInt != 0 { break } - x := v.Args[0] - if x.Op != OpARM64ANDconst { + z := v.Args[0] + if z.Op != OpARM64MADDW { break } - c := x.AuxInt - y := x.Args[0] - if !(x.Uses == 1) { + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { break } b.Kind = BlockARM64GT - v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) b.SetControl(v0) b.Aux = nil return true } - // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no) - // cond: x.Uses == 1 - // result: (GT (TSTconst [c] y) yes no) + // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (GT (CMPW a (MULW x y)) yes no) for { v := b.Control - if v.Op != OpARM64CMPconst { + if v.Op != OpARM64CMPWconst { break } if v.AuxInt != 0 { break } - x := v.Args[0] - if x.Op != OpARM64ANDconst { + z := v.Args[0] + if z.Op != OpARM64MSUBW { break } - c := x.AuxInt - y := x.Args[0] - if !(x.Uses == 1) { + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { break } b.Kind = BlockARM64GT - v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) b.SetControl(v0) b.Aux = nil return true @@ -32901,28 +40431,374 @@ func rewriteBlockARM64(b *Block) bool { if v.Op != OpARM64GreaterEqualU { break } - cc := v.Args[0] - b.Kind = BlockARM64UGE - b.SetControl(cc) + cc := v.Args[0] + b.Kind = BlockARM64UGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If cond yes no) + // cond: + // result: (NZ cond yes no) + for { + v := b.Control + _ = v + 
cond := b.Control + b.Kind = BlockARM64NZ + b.SetControl(cond) + b.Aux = nil + return true + } + case BlockARM64LE: + // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (TSTWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (TSTW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (TSTconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ANDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) 
{ + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (CMNconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LE (CMNWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } 
+ _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (LE (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMP { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (LE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPW { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (LE (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - // match: (If cond yes no) - // cond: - // result: (NZ cond yes no) + // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (LE (CMP a (MUL x y)) yes no) for { v := b.Control - _ = 
v - cond := b.Control - b.Kind = BlockARM64NZ - b.SetControl(cond) + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LE + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) b.Aux = nil return true } - case BlockARM64LE: - // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) - // cond: x.Uses == 1 - // result: (LE (TSTWconst [c] y) yes no) + // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (LE (CMNW a (MULW x y)) yes no) for { v := b.Control if v.Op != OpARM64CMPWconst { @@ -32931,47 +40807,57 @@ func rewriteBlockARM64(b *Block) bool { if v.AuxInt != 0 { break } - x := v.Args[0] - if x.Op != OpARM64ANDconst { + z := v.Args[0] + if z.Op != OpARM64MADDW { break } - c := x.AuxInt - y := x.Args[0] - if !(x.Uses == 1) { + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { break } b.Kind = BlockARM64LE - v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) b.SetControl(v0) b.Aux = nil return true } - // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no) - // cond: x.Uses == 1 - // result: (LE (TSTconst [c] y) yes no) + // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (LE (CMPW a (MULW x y)) yes no) for { v := b.Control - if v.Op != OpARM64CMPconst { + if v.Op != OpARM64CMPWconst { break } if v.AuxInt != 0 { break } - x := v.Args[0] - if x.Op != OpARM64ANDconst { + z := v.Args[0] + if z.Op != OpARM64MSUBW { break } - c := 
x.AuxInt - y := x.Args[0] - if !(x.Uses == 1) { + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { break } b.Kind = BlockARM64LE - v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags) - v0.AuxInt = c - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) b.SetControl(v0) b.Aux = nil return true @@ -33086,6 +40972,64 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } + // match: (LT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (TST x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPWconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (TSTW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no) // cond: x.Uses == 1 // result: (LT (TSTconst [c] y) yes no) @@ -33114,6 +41058,304 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } + // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LT (CMNconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 
{ + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (LT (CMNWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (CMN x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMP x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (LT (CMN x y) yes no) + for { + v := b.Control + if v.Op != 
OpARM64CMP { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (LT (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPW { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (LT (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (LT (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = 
nil + return true + } + // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (LT (CMNW a (MULW x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADDW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (LT (CMPW a (MULW x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUBW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64LT + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (LT (CMPWconst [0] x) yes no) // cond: // result: (TBNZ {int64(31)} x yes no) @@ -33345,6 +41587,62 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } + // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (CMNconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPWconst [0] 
x:(ADDconst [c] y)) yes no) + // cond: x.Uses == 1 + // result: (NE (CMNWconst [c] y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + x := v.Args[0] + if x.Op != OpARM64ADDconst { + break + } + c := x.AuxInt + y := x.Args[0] + if !(x.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (NE (CMPconst [0] z:(ADD x y)) yes no) // cond: z.Uses == 1 // result: (NE (CMN x y) yes no) @@ -33374,6 +41672,35 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } + // match: (NE (CMPWconst [0] z:(ADD x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64ADD { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (NE (CMP x z:(NEG y)) yes no) // cond: z.Uses == 1 // result: (NE (CMN x y) yes no) @@ -33400,6 +41727,32 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = nil return true } + // match: (NE (CMPW x z:(NEG y)) yes no) + // cond: z.Uses == 1 + // result: (NE (CMNW x y) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPW { + break + } + _ = v.Args[1] + x := v.Args[0] + z := v.Args[1] + if z.Op != OpARM64NEG { + break + } + y := z.Args[0] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (NE (CMPconst [0] x) yes no) // cond: // result: (NZ x yes no) @@ -33434,6 +41787,138 @@ func rewriteBlockARM64(b *Block) bool { b.Aux = 
nil return true } + // match: (NE (CMPconst [0] z:(MADD a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMN a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADD { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMP a (MUL x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUB { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMNW a (MULW x y)) yes no) + for { + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MADDW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no) + // cond: z.Uses==1 + // result: (NE (CMPW a (MULW x y)) yes no) + for 
{ + v := b.Control + if v.Op != OpARM64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpARM64MSUBW { + break + } + _ = z.Args[2] + a := z.Args[0] + x := z.Args[1] + y := z.Args[2] + if !(z.Uses == 1) { + break + } + b.Kind = BlockARM64NE + v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags) + v0.AddArg(a) + v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + b.SetControl(v0) + b.Aux = nil + return true + } // match: (NE (TSTconst [c] x) yes no) // cond: oneBit(c) // result: (TBNZ {ntz(c)} x yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 231949644ee8d..55bef5a792b2d 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -3500,7 +3502,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, t) + v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -3661,7 +3663,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpMIPSMOVBload, t) + v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -4357,7 +4359,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, t) + v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -4568,7 +4570,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg_0(v *Value) bool { break } b = x.Block - v0 := 
b.NewValue0(v.Pos, OpMIPSMOVHload, t) + v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -5623,7 +5625,7 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { return true } // match: (SGTUconst [c] (SRLconst _ [d])) - // cond: uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c) + // cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) // result: (MOVWconst [1]) for { c := v.AuxInt @@ -5632,7 +5634,7 @@ func rewriteValueMIPS_OpMIPSSGTUconst_0(v *Value) bool { break } d := v_0.AuxInt - if !(uint32(d) <= 31 && 1<<(32-uint32(d)) <= uint32(c)) { + if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) { break } v.reset(OpMIPSMOVWconst) @@ -5860,7 +5862,7 @@ func rewriteValueMIPS_OpMIPSSGTconst_10(v *Value) bool { return true } // match: (SGTconst [c] (SRLconst _ [d])) - // cond: 0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c) + // cond: 0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) // result: (MOVWconst [1]) for { c := v.AuxInt @@ -5869,7 +5871,7 @@ func rewriteValueMIPS_OpMIPSSGTconst_10(v *Value) bool { break } d := v_0.AuxInt - if !(0 <= int32(c) && uint32(d) <= 31 && 1<<(32-uint32(d)) <= int32(c)) { + if !(0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) { break } v.reset(OpMIPSMOVWconst) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 9cd0050e26d1a..9e12780664d60 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -6003,7 +6005,7 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v 
*Value) bool { return true } // match: (SGTUconst [c] (SRLVconst _ [d])) - // cond: 0 < d && d <= 63 && 1<>uint64(d) < uint64(c) // result: (MOVVconst [1]) for { c := v.AuxInt @@ -6012,7 +6014,7 @@ func rewriteValueMIPS64_OpMIPS64SGTUconst_0(v *Value) bool { break } d := v_0.AuxInt - if !(0 < d && d <= 63 && 1<>uint64(d) < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) @@ -6221,7 +6223,7 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { return true } // match: (SGTconst [c] (SRLVconst _ [d])) - // cond: 0 <= c && 0 < d && d <= 63 && 1<>uint64(d) < uint64(c) // result: (MOVVconst [1]) for { c := v.AuxInt @@ -6230,7 +6232,7 @@ func rewriteValueMIPS64_OpMIPS64SGTconst_10(v *Value) bool { break } d := v_0.AuxInt - if !(0 <= c && 0 < d && d <= 63 && 1<>uint64(d) < uint64(c)) { break } v.reset(OpMIPS64MOVVconst) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index ba6a862989ff6..fdb34aec0aab0 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -53,6 +55,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v) case OpAtomicCompareAndSwap64: return rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v) + case OpAtomicCompareAndSwapRel32: + return rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v) case OpAtomicExchange32: return rewriteValuePPC64_OpAtomicExchange32_0(v) case OpAtomicExchange64: @@ -61,6 +65,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpAtomicLoad32_0(v) case OpAtomicLoad64: return rewriteValuePPC64_OpAtomicLoad64_0(v) + 
case OpAtomicLoadAcq32: + return rewriteValuePPC64_OpAtomicLoadAcq32_0(v) case OpAtomicLoadPtr: return rewriteValuePPC64_OpAtomicLoadPtr_0(v) case OpAtomicOr8: @@ -69,6 +75,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpAtomicStore32_0(v) case OpAtomicStore64: return rewriteValuePPC64_OpAtomicStore64_0(v) + case OpAtomicStoreRel32: + return rewriteValuePPC64_OpAtomicStoreRel32_0(v) case OpAvg64u: return rewriteValuePPC64_OpAvg64u_0(v) case OpBitLen32: @@ -105,6 +113,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpConstNil_0(v) case OpCopysign: return rewriteValuePPC64_OpCopysign_0(v) + case OpCtz16: + return rewriteValuePPC64_OpCtz16_0(v) case OpCtz32: return rewriteValuePPC64_OpCtz32_0(v) case OpCtz32NonZero: @@ -113,6 +123,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpCtz64_0(v) case OpCtz64NonZero: return rewriteValuePPC64_OpCtz64NonZero_0(v) + case OpCtz8: + return rewriteValuePPC64_OpCtz8_0(v) case OpCvt32Fto32: return rewriteValuePPC64_OpCvt32Fto32_0(v) case OpCvt32Fto64: @@ -337,6 +349,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpMul64_0(v) case OpMul64F: return rewriteValuePPC64_OpMul64F_0(v) + case OpMul64uhilo: + return rewriteValuePPC64_OpMul64uhilo_0(v) case OpMul8: return rewriteValuePPC64_OpMul8_0(v) case OpNeg16: @@ -388,9 +402,9 @@ func rewriteValuePPC64(v *Value) bool { case OpPPC64ADDconst: return rewriteValuePPC64_OpPPC64ADDconst_0(v) case OpPPC64AND: - return rewriteValuePPC64_OpPPC64AND_0(v) + return rewriteValuePPC64_OpPPC64AND_0(v) || rewriteValuePPC64_OpPPC64AND_10(v) case OpPPC64ANDconst: - return rewriteValuePPC64_OpPPC64ANDconst_0(v) + return rewriteValuePPC64_OpPPC64ANDconst_0(v) || rewriteValuePPC64_OpPPC64ANDconst_10(v) case OpPPC64CMP: return rewriteValuePPC64_OpPPC64CMP_0(v) case OpPPC64CMPU: @@ -449,46 +463,66 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MFVSRD_0(v) case OpPPC64MOVBZload: return 
rewriteValuePPC64_OpPPC64MOVBZload_0(v) + case OpPPC64MOVBZloadidx: + return rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v) case OpPPC64MOVBZreg: - return rewriteValuePPC64_OpPPC64MOVBZreg_0(v) + return rewriteValuePPC64_OpPPC64MOVBZreg_0(v) || rewriteValuePPC64_OpPPC64MOVBZreg_10(v) case OpPPC64MOVBreg: - return rewriteValuePPC64_OpPPC64MOVBreg_0(v) + return rewriteValuePPC64_OpPPC64MOVBreg_0(v) || rewriteValuePPC64_OpPPC64MOVBreg_10(v) case OpPPC64MOVBstore: return rewriteValuePPC64_OpPPC64MOVBstore_0(v) || rewriteValuePPC64_OpPPC64MOVBstore_10(v) || rewriteValuePPC64_OpPPC64MOVBstore_20(v) + case OpPPC64MOVBstoreidx: + return rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v) || rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v) case OpPPC64MOVBstorezero: return rewriteValuePPC64_OpPPC64MOVBstorezero_0(v) case OpPPC64MOVDload: return rewriteValuePPC64_OpPPC64MOVDload_0(v) + case OpPPC64MOVDloadidx: + return rewriteValuePPC64_OpPPC64MOVDloadidx_0(v) case OpPPC64MOVDstore: return rewriteValuePPC64_OpPPC64MOVDstore_0(v) + case OpPPC64MOVDstoreidx: + return rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v) case OpPPC64MOVDstorezero: return rewriteValuePPC64_OpPPC64MOVDstorezero_0(v) case OpPPC64MOVHBRstore: return rewriteValuePPC64_OpPPC64MOVHBRstore_0(v) case OpPPC64MOVHZload: return rewriteValuePPC64_OpPPC64MOVHZload_0(v) + case OpPPC64MOVHZloadidx: + return rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v) case OpPPC64MOVHZreg: - return rewriteValuePPC64_OpPPC64MOVHZreg_0(v) + return rewriteValuePPC64_OpPPC64MOVHZreg_0(v) || rewriteValuePPC64_OpPPC64MOVHZreg_10(v) case OpPPC64MOVHload: return rewriteValuePPC64_OpPPC64MOVHload_0(v) + case OpPPC64MOVHloadidx: + return rewriteValuePPC64_OpPPC64MOVHloadidx_0(v) case OpPPC64MOVHreg: - return rewriteValuePPC64_OpPPC64MOVHreg_0(v) + return rewriteValuePPC64_OpPPC64MOVHreg_0(v) || rewriteValuePPC64_OpPPC64MOVHreg_10(v) case OpPPC64MOVHstore: return rewriteValuePPC64_OpPPC64MOVHstore_0(v) + case OpPPC64MOVHstoreidx: + return 
rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v) case OpPPC64MOVHstorezero: return rewriteValuePPC64_OpPPC64MOVHstorezero_0(v) case OpPPC64MOVWBRstore: return rewriteValuePPC64_OpPPC64MOVWBRstore_0(v) case OpPPC64MOVWZload: return rewriteValuePPC64_OpPPC64MOVWZload_0(v) + case OpPPC64MOVWZloadidx: + return rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v) case OpPPC64MOVWZreg: - return rewriteValuePPC64_OpPPC64MOVWZreg_0(v) + return rewriteValuePPC64_OpPPC64MOVWZreg_0(v) || rewriteValuePPC64_OpPPC64MOVWZreg_10(v) || rewriteValuePPC64_OpPPC64MOVWZreg_20(v) case OpPPC64MOVWload: return rewriteValuePPC64_OpPPC64MOVWload_0(v) + case OpPPC64MOVWloadidx: + return rewriteValuePPC64_OpPPC64MOVWloadidx_0(v) case OpPPC64MOVWreg: - return rewriteValuePPC64_OpPPC64MOVWreg_0(v) + return rewriteValuePPC64_OpPPC64MOVWreg_0(v) || rewriteValuePPC64_OpPPC64MOVWreg_10(v) case OpPPC64MOVWstore: return rewriteValuePPC64_OpPPC64MOVWstore_0(v) + case OpPPC64MOVWstoreidx: + return rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v) case OpPPC64MOVWstorezero: return rewriteValuePPC64_OpPPC64MOVWstorezero_0(v) case OpPPC64MTVSRD: @@ -544,7 +578,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh32Ux32: return rewriteValuePPC64_OpRsh32Ux32_0(v) case OpRsh32Ux64: - return rewriteValuePPC64_OpRsh32Ux64_0(v) + return rewriteValuePPC64_OpRsh32Ux64_0(v) || rewriteValuePPC64_OpRsh32Ux64_10(v) case OpRsh32Ux8: return rewriteValuePPC64_OpRsh32Ux8_0(v) case OpRsh32x16: @@ -552,7 +586,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh32x32: return rewriteValuePPC64_OpRsh32x32_0(v) case OpRsh32x64: - return rewriteValuePPC64_OpRsh32x64_0(v) + return rewriteValuePPC64_OpRsh32x64_0(v) || rewriteValuePPC64_OpRsh32x64_10(v) case OpRsh32x8: return rewriteValuePPC64_OpRsh32x8_0(v) case OpRsh64Ux16: @@ -560,7 +594,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh64Ux32: return rewriteValuePPC64_OpRsh64Ux32_0(v) case OpRsh64Ux64: - return rewriteValuePPC64_OpRsh64Ux64_0(v) + return rewriteValuePPC64_OpRsh64Ux64_0(v) 
|| rewriteValuePPC64_OpRsh64Ux64_10(v) case OpRsh64Ux8: return rewriteValuePPC64_OpRsh64Ux8_0(v) case OpRsh64x16: @@ -568,7 +602,7 @@ func rewriteValuePPC64(v *Value) bool { case OpRsh64x32: return rewriteValuePPC64_OpRsh64x32_0(v) case OpRsh64x64: - return rewriteValuePPC64_OpRsh64x64_0(v) + return rewriteValuePPC64_OpRsh64x64_0(v) || rewriteValuePPC64_OpRsh64x64_10(v) case OpRsh64x8: return rewriteValuePPC64_OpRsh64x8_0(v) case OpRsh8Ux16: @@ -905,7 +939,7 @@ func rewriteValuePPC64_OpAtomicAnd8_0(v *Value) bool { func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool { // match: (AtomicCompareAndSwap32 ptr old new_ mem) // cond: - // result: (LoweredAtomicCas32 ptr old new_ mem) + // result: (LoweredAtomicCas32 [1] ptr old new_ mem) for { _ = v.Args[3] ptr := v.Args[0] @@ -913,6 +947,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool { new_ := v.Args[2] mem := v.Args[3] v.reset(OpPPC64LoweredAtomicCas32) + v.AuxInt = 1 v.AddArg(ptr) v.AddArg(old) v.AddArg(new_) @@ -923,7 +958,7 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap32_0(v *Value) bool { func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool { // match: (AtomicCompareAndSwap64 ptr old new_ mem) // cond: - // result: (LoweredAtomicCas64 ptr old new_ mem) + // result: (LoweredAtomicCas64 [1] ptr old new_ mem) for { _ = v.Args[3] ptr := v.Args[0] @@ -931,6 +966,26 @@ func rewriteValuePPC64_OpAtomicCompareAndSwap64_0(v *Value) bool { new_ := v.Args[2] mem := v.Args[3] v.reset(OpPPC64LoweredAtomicCas64) + v.AuxInt = 1 + v.AddArg(ptr) + v.AddArg(old) + v.AddArg(new_) + v.AddArg(mem) + return true + } +} +func rewriteValuePPC64_OpAtomicCompareAndSwapRel32_0(v *Value) bool { + // match: (AtomicCompareAndSwapRel32 ptr old new_ mem) + // cond: + // result: (LoweredAtomicCas32 [0] ptr old new_ mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + old := v.Args[1] + new_ := v.Args[2] + mem := v.Args[3] + v.reset(OpPPC64LoweredAtomicCas32) + v.AuxInt = 0 v.AddArg(ptr) v.AddArg(old) 
v.AddArg(new_) @@ -973,12 +1028,13 @@ func rewriteValuePPC64_OpAtomicExchange64_0(v *Value) bool { func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool { // match: (AtomicLoad32 ptr mem) // cond: - // result: (LoweredAtomicLoad32 ptr mem) + // result: (LoweredAtomicLoad32 [1] ptr mem) for { _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] v.reset(OpPPC64LoweredAtomicLoad32) + v.AuxInt = 1 v.AddArg(ptr) v.AddArg(mem) return true @@ -987,12 +1043,28 @@ func rewriteValuePPC64_OpAtomicLoad32_0(v *Value) bool { func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool { // match: (AtomicLoad64 ptr mem) // cond: - // result: (LoweredAtomicLoad64 ptr mem) + // result: (LoweredAtomicLoad64 [1] ptr mem) for { _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] v.reset(OpPPC64LoweredAtomicLoad64) + v.AuxInt = 1 + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValuePPC64_OpAtomicLoadAcq32_0(v *Value) bool { + // match: (AtomicLoadAcq32 ptr mem) + // cond: + // result: (LoweredAtomicLoad32 [0] ptr mem) + for { + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpPPC64LoweredAtomicLoad32) + v.AuxInt = 0 v.AddArg(ptr) v.AddArg(mem) return true @@ -1001,12 +1073,13 @@ func rewriteValuePPC64_OpAtomicLoad64_0(v *Value) bool { func rewriteValuePPC64_OpAtomicLoadPtr_0(v *Value) bool { // match: (AtomicLoadPtr ptr mem) // cond: - // result: (LoweredAtomicLoadPtr ptr mem) + // result: (LoweredAtomicLoadPtr [1] ptr mem) for { _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] v.reset(OpPPC64LoweredAtomicLoadPtr) + v.AuxInt = 1 v.AddArg(ptr) v.AddArg(mem) return true @@ -1031,13 +1104,14 @@ func rewriteValuePPC64_OpAtomicOr8_0(v *Value) bool { func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool { // match: (AtomicStore32 ptr val mem) // cond: - // result: (LoweredAtomicStore32 ptr val mem) + // result: (LoweredAtomicStore32 [1] ptr val mem) for { _ = v.Args[2] ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpPPC64LoweredAtomicStore32) + v.AuxInt = 1 
v.AddArg(ptr) v.AddArg(val) v.AddArg(mem) @@ -1047,13 +1121,31 @@ func rewriteValuePPC64_OpAtomicStore32_0(v *Value) bool { func rewriteValuePPC64_OpAtomicStore64_0(v *Value) bool { // match: (AtomicStore64 ptr val mem) // cond: - // result: (LoweredAtomicStore64 ptr val mem) + // result: (LoweredAtomicStore64 [1] ptr val mem) for { _ = v.Args[2] ptr := v.Args[0] val := v.Args[1] mem := v.Args[2] v.reset(OpPPC64LoweredAtomicStore64) + v.AuxInt = 1 + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValuePPC64_OpAtomicStoreRel32_0(v *Value) bool { + // match: (AtomicStoreRel32 ptr val mem) + // cond: + // result: (LoweredAtomicStore32 [0] ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpPPC64LoweredAtomicStore32) + v.AuxInt = 0 v.AddArg(ptr) v.AddArg(val) v.AddArg(mem) @@ -1301,6 +1393,29 @@ func rewriteValuePPC64_OpCopysign_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpCtz16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Ctz16 x) + // cond: + // result: (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) + for { + x := v.Args[0] + v.reset(OpPPC64POPCNTW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16) + v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16) + v2.AuxInt = -1 + v2.AddArg(x) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} func rewriteValuePPC64_OpCtz32_0(v *Value) bool { b := v.Block _ = b @@ -1367,6 +1482,29 @@ func rewriteValuePPC64_OpCtz64NonZero_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpCtz8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Ctz8 x) + // cond: + // result: (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x))) + for { + x := v.Args[0] + v.reset(OpPPC64POPCNTB) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v1 := b.NewValue0(v.Pos, 
OpPPC64ANDN, typ.UInt8) + v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8) + v2.AuxInt = -1 + v2.AddArg(x) + v1.AddArg(v2) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool { b := v.Block _ = b @@ -3070,6 +3208,21 @@ func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh16x16 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { @@ -3136,6 +3289,21 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool { return true } // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh16x32 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { @@ -3219,6 +3387,21 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool { return true } // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh16x64 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] y)))) for { @@ -3245,6 +3428,21 @@ func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + 
return true + } + // match: (Lsh16x8 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { @@ -3273,6 +3471,21 @@ func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh32x16 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { @@ -3339,6 +3552,21 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool { return true } // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh32x32 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { @@ -3421,6 +3649,21 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Lsh32x64 x (AND y (MOVDconst [31]))) // cond: // result: (SLW x (ANDconst [31] y)) @@ -3527,6 +3770,21 @@ func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh32x8 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 
y))))) for { @@ -3555,6 +3813,21 @@ func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh64x16 x y) // cond: // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { @@ -3621,6 +3894,21 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool { return true } // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh64x32 x y) // cond: // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) for { @@ -3703,6 +3991,21 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Lsh64x64 x (AND y (MOVDconst [63]))) // cond: // result: (SLD x (ANDconst [63] y)) @@ -3809,6 +4112,21 @@ func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SLD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh64x8 x y) // cond: // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { @@ -3837,6 +4155,21 @@ func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // 
match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh8x16 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { @@ -3903,6 +4236,21 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool { return true } // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh8x32 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { @@ -3986,8 +4334,23 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool { return true } // match: (Lsh8x64 x y) - // cond: - // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh8x64 x y) + // cond: + // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { _ = v.Args[1] x := v.Args[0] @@ -4012,6 +4375,21 @@ func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SLW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Lsh8x8 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { @@ -4569,6 +4947,20 @@ func rewriteValuePPC64_OpMul64F_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpMul64uhilo_0(v *Value) 
bool { + // match: (Mul64uhilo x y) + // cond: + // result: (LoweredMuluhilo x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpPPC64LoweredMuluhilo) + v.AddArg(x) + v.AddArg(y) + return true + } +} func rewriteValuePPC64_OpMul8_0(v *Value) bool { // match: (Mul8 x y) // cond: @@ -5533,6 +5925,95 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool { v.AddArg(x) return true } + // match: (AND (MOVDconst [c]) y:(MOVWZreg _)) + // cond: c&0xFFFFFFFF == 0xFFFFFFFF + // result: y + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + y := v.Args[1] + if y.Op != OpPPC64MOVWZreg { + break + } + if !(c&0xFFFFFFFF == 0xFFFFFFFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (AND y:(MOVWZreg _) (MOVDconst [c])) + // cond: c&0xFFFFFFFF == 0xFFFFFFFF + // result: y + for { + _ = v.Args[1] + y := v.Args[0] + if y.Op != OpPPC64MOVWZreg { + break + } + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + if !(c&0xFFFFFFFF == 0xFFFFFFFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) + // cond: + // result: (MOVWZreg x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + if v_0.AuxInt != 0xFFFFFFFF { + break + } + y := v.Args[1] + if y.Op != OpPPC64MOVWreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } + // match: (AND y:(MOVWreg x) (MOVDconst [0xFFFFFFFF])) + // cond: + // result: (MOVWZreg x) + for { + _ = v.Args[1] + y := v.Args[0] + if y.Op != OpPPC64MOVWreg { + break + } + x := y.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + if v_1.AuxInt != 0xFFFFFFFF { + break + } + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64AND_10(v *Value) bool { // match: (AND (MOVDconst 
[c]) x:(MOVBZload _ _)) // cond: // result: (ANDconst [c&0xFF] x) @@ -5673,6 +6154,22 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { v.AddArg(y) return true } + // match: (ANDconst [0xFF] y:(MOVBreg _)) + // cond: + // result: y + for { + if v.AuxInt != 0xFF { + break + } + y := v.Args[0] + if y.Op != OpPPC64MOVBreg { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } // match: (ANDconst [c] y:(MOVHZreg _)) // cond: c&0xFFFF == 0xFFFF // result: y @@ -5690,16 +6187,15 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { v.AddArg(y) return true } - // match: (ANDconst [c] y:(MOVWZreg _)) - // cond: c&0xFFFFFFFF == 0xFFFFFFFF + // match: (ANDconst [0xFFFF] y:(MOVHreg _)) + // cond: // result: y for { - c := v.AuxInt - y := v.Args[0] - if y.Op != OpPPC64MOVWZreg { + if v.AuxInt != 0xFFFF { break } - if !(c&0xFFFFFFFF == 0xFFFFFFFF) { + y := v.Args[0] + if y.Op != OpPPC64MOVHreg { break } v.reset(OpCopy) @@ -5707,6 +6203,21 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { v.AddArg(y) return true } + // match: (ANDconst [c] (MOVBreg x)) + // cond: + // result: (ANDconst [c&0xFF] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVBreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = c & 0xFF + v.AddArg(x) + return true + } // match: (ANDconst [c] (MOVBZreg x)) // cond: // result: (ANDconst [c&0xFF] x) @@ -5722,6 +6233,24 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (ANDconst [c] (MOVHreg x)) + // cond: + // result: (ANDconst [c&0xFFFF] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVHreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = c & 0xFFFF + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool { // match: (ANDconst [c] (MOVHZreg x)) // cond: // result: (ANDconst [c&0xFFFF] x) @@ -5737,6 +6266,21 @@ func 
rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool { v.AddArg(x) return true } + // match: (ANDconst [c] (MOVWreg x)) + // cond: + // result: (ANDconst [c&0xFFFFFFFF] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVWreg { + break + } + x := v_0.Args[0] + v.reset(OpPPC64ANDconst) + v.AuxInt = c & 0xFFFFFFFF + v.AddArg(x) + return true + } // match: (ANDconst [c] (MOVWZreg x)) // cond: // result: (ANDconst [c&0xFFFFFFFF] x) @@ -6257,7 +6801,7 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { // match: (FABS (FMOVDconst [x])) // cond: - // result: (FMOVDconst [f2i(math.Abs(i2f(x)))]) + // result: (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { @@ -6265,7 +6809,7 @@ func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { } x := v_0.AuxInt v.reset(OpPPC64FMOVDconst) - v.AuxInt = f2i(math.Abs(i2f(x))) + v.AuxInt = auxFrom64F(math.Abs(auxTo64F(x))) return true } return false @@ -6355,7 +6899,7 @@ func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { // match: (FCEIL (FMOVDconst [x])) // cond: - // result: (FMOVDconst [f2i(math.Ceil(i2f(x)))]) + // result: (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { @@ -6363,7 +6907,7 @@ func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { } x := v_0.AuxInt v.reset(OpPPC64FMOVDconst) - v.AuxInt = f2i(math.Ceil(i2f(x))) + v.AuxInt = auxFrom64F(math.Ceil(auxTo64F(x))) return true } return false @@ -6371,7 +6915,7 @@ func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool { // match: (FFLOOR (FMOVDconst [x])) // cond: - // result: (FMOVDconst [f2i(math.Floor(i2f(x)))]) + // result: (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { @@ -6379,7 +6923,7 @@ func 
rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool { } x := v_0.AuxInt v.reset(OpPPC64FMOVDconst) - v.AuxInt = f2i(math.Floor(i2f(x))) + v.AuxInt = auxFrom64F(math.Floor(auxTo64F(x))) return true } return false @@ -6681,7 +7225,7 @@ func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool { // match: (FSQRT (FMOVDconst [x])) // cond: - // result: (FMOVDconst [f2i(math.Sqrt(i2f(x)))]) + // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { @@ -6689,7 +7233,7 @@ func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool { } x := v_0.AuxInt v.reset(OpPPC64FMOVDconst) - v.AuxInt = f2i(math.Sqrt(i2f(x))) + v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x))) return true } return false @@ -6741,7 +7285,7 @@ func rewriteValuePPC64_OpPPC64FSUBS_0(v *Value) bool { func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool { // match: (FTRUNC (FMOVDconst [x])) // cond: - // result: (FMOVDconst [f2i(math.Trunc(i2f(x)))]) + // result: (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64FMOVDconst { @@ -6749,7 +7293,7 @@ func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool { } x := v_0.AuxInt v.reset(OpPPC64FMOVDconst) - v.AuxInt = f2i(math.Trunc(i2f(x))) + v.AuxInt = auxFrom64F(math.Trunc(auxTo64F(x))) return true } return false @@ -6997,7 +7541,7 @@ func rewriteValuePPC64_OpPPC64MFVSRD_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64) + v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -7058,9 +7602,84 @@ func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVBZloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = 
p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVBZloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v *Value) bool { + // match: (MOVBZloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVBZload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBZloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVBZload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVBZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } return false } func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (MOVBZreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0xFF // result: y @@ -7078,22 +7697,97 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVBZreg y:(MOVBZreg _)) + // match: (MOVBZreg (SRWconst [c] (MOVBZreg x))) // cond: - // result: y + // result: (SRWconst [c] (MOVBZreg x)) for { - y := v.Args[0] - if y.Op != OpPPC64MOVBZreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVBZreg (MOVBreg x)) - // cond: - // result: (MOVBZreg x) + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + 
v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVBZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) == 8 + // result: (SRWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(sizeof(x.Type) == 8) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBZreg (SRDconst [c] x)) + // cond: c>=56 + // result: (SRDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c >= 56) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBZreg (SRWconst [c] x)) + // cond: c>=24 + // result: (SRWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c >= 24) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBZreg y:(MOVBZreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVBZreg { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVBZreg (MOVBreg x)) + // cond: + // result: (MOVBZreg x) for { v_0 := v.Args[0] if v_0.Op != OpPPC64MOVBreg { @@ -7118,6 +7812,40 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBZreg x:(MOVBZloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVBZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVBZreg x:(Arg )) + // cond: is8BitInt(t) && !isSigned(t) + // result: x + for { + x := v.Args[0] + if x.Op != OpArg { + break + } + t := x.Type + if !(is8BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBZreg_10(v *Value) bool { // match: 
(MOVBZreg (MOVDconst [c])) // cond: // result: (MOVDconst [int64(uint8(c))]) @@ -7134,6 +7862,10 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (MOVBreg y:(ANDconst [c] _)) // cond: uint64(c) <= 0x7F // result: y @@ -7151,6 +7883,117 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { v.AddArg(y) return true } + // match: (MOVBreg (SRAWconst [c] (MOVBreg x))) + // cond: + // result: (SRAWconst [c] (MOVBreg x)) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { + break + } + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVBreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) == 8 + // result: (SRAWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(sizeof(x.Type) == 8) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBreg (SRDconst [c] x)) + // cond: c>56 + // result: (SRDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c > 56) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBreg (SRDconst [c] x)) + // cond: c==56 + // result: (SRADconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c == 56) { + break + } + v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBreg (SRWconst [c] x)) + // cond: c>24 + // result: (SRWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := 
v_0.AuxInt + x := v_0.Args[0] + if !(c > 24) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVBreg (SRWconst [c] x)) + // cond: c==24 + // result: (SRAWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c == 24) { + break + } + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) + return true + } // match: (MOVBreg y:(MOVBreg _)) // cond: // result: y @@ -7177,6 +8020,26 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBreg x:(Arg )) + // cond: is8BitInt(t) && isSigned(t) + // result: x + for { + x := v.Args[0] + if x.Op != OpArg { + break + } + t := x.Type + if !(is8BitInt(t) && isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBreg_10(v *Value) bool { // match: (MOVBreg (MOVDconst [c])) // cond: // result: (MOVDconst [int64(int8(c))]) @@ -7193,10 +8056,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) // cond: is16Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} x val mem) @@ -7250,8 +8109,8 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: c == 0 + // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) + // cond: // result: (MOVBstorezero [off] {sym} ptr mem) for { off := v.AuxInt @@ -7262,11 +8121,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { if v_1.Op != OpPPC64MOVDconst { break } - c := v_1.AuxInt - mem := v.Args[2] - if !(c == 0) { + if v_1.AuxInt != 0 { break } + mem := v.Args[2] v.reset(OpPPC64MOVBstorezero) v.AuxInt = off v.Aux = 
sym @@ -7274,6 +8132,32 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [off] {sym} p:(ADD ptr idx) val mem) + // cond: off == 0 && sym == nil && p.Uses == 1 + // result: (MOVBstoreidx ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // cond: // result: (MOVBstore [off] {sym} ptr x mem) @@ -7406,6 +8290,15 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem) // cond: c <= 8 // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) @@ -7439,15 +8332,6 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem) // cond: c <= 8 // result: (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) @@ -7593,7 +8477,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { v.AuxInt = i0 v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt16) + v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) v.AddArg(v0) @@ -7646,7 +8530,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { v.AuxInt = i0 v.Aux 
= s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt16) + v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16) v0.AuxInt = 16 v0.AddArg(w) v.AddArg(v0) @@ -7819,7 +8703,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } v.reset(OpPPC64MOVWBRstore) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) @@ -7864,7 +8748,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { break } v.reset(OpPPC64MOVHBRstore) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) @@ -7873,6 +8757,15 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem))))) // cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) // result: (MOVDstore [i0] {s} p w mem) @@ -7982,15 +8875,6 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ // match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] 
{s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem)))))))) // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) // result: (MOVDBRstore (MOVDaddr [i0] {s} p) w mem) @@ -8159,7 +9043,7 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { break } v.reset(OpPPC64MOVDBRstore) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v0.AuxInt = i0 v0.Aux = s v0.AddArg(p) @@ -8170,6 +9054,350 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem) + // cond: is16Bit(c) + // result: (MOVBstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem) + // cond: is16Bit(c) + // result: (MOVBstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVBstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { 
+ off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVBreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBZreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVBZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVHreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVHZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != 
OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) + // cond: + // result: (MOVBstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst x [c]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64SRWconst { + break + } + c := v_2.AuxInt + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVHreg { + break + } + x := v_2_0.Args[0] + mem := v.Args[3] + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHZreg x) [c]) mem) + // cond: c <= 8 + // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst x [c]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64SRWconst { + break + } + c := v_2.AuxInt + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVHZreg { + break + } + x := v_2_0.Args[0] + mem := v.Args[3] + if !(c <= 8) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, 
OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst x [c]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64SRWconst { + break + } + c := v_2.AuxInt + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVWreg { + break + } + x := v_2_0.Args[0] + mem := v.Args[3] + if !(c <= 24) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWZreg x) [c]) mem) + // cond: c <= 24 + // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst x [c]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64SRWconst { + break + } + c := v_2.AuxInt + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpPPC64MOVWZreg { + break + } + x := v_2_0.Args[0] + mem := v.Args[3] + if !(c <= 24) { + break + } + v.reset(OpPPC64MOVBstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) @@ -8299,15 +9527,86 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func 
rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { - // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem) - // cond: - // result: (FMOVDstore [off] {sym} ptr x mem) + // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVDloadidx ptr idx mem) for { - off := v.AuxInt - sym := v.Aux + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVDloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool { + // match: (MOVDloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVDload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVDloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVDload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVDload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { + // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem) + // cond: + // result: (FMOVDstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] @@ -8377,8 +9676,8 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: c == 0 + // match: (MOVDstore 
[off] {sym} ptr (MOVDconst [0]) mem) + // cond: // result: (MOVDstorezero [off] {sym} ptr mem) for { off := v.AuxInt @@ -8389,11 +9688,10 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { if v_1.Op != OpPPC64MOVDconst { break } - c := v_1.AuxInt - mem := v.Args[2] - if !(c == 0) { + if v_1.AuxInt != 0 { break } + mem := v.Args[2] v.reset(OpPPC64MOVDstorezero) v.AuxInt = off v.Aux = sym @@ -8401,6 +9699,81 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVDstore [off] {sym} p:(ADD ptr idx) val mem) + // cond: off == 0 && sym == nil && p.Uses == 1 + // result: (MOVDstoreidx ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVDstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool { + // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem) + // cond: is16Bit(c) + // result: (MOVDstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem) + // cond: is16Bit(c) + // result: (MOVDstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func 
rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { @@ -8588,171 +9961,203 @@ func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { - // match: (MOVHZreg y:(ANDconst [c] _)) - // cond: uint64(c) <= 0xFFFF - // result: y + // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVHZloadidx ptr idx mem) for { - y := v.Args[0] - if y.Op != OpPPC64ANDconst { + if v.AuxInt != 0 { break } - c := y.AuxInt - if !(uint64(c) <= 0xFFFF) { + sym := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64ADD { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVHZloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (MOVHZreg y:(MOVHZreg _)) - // cond: - // result: y + return false +} +func rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v *Value) bool { + // match: (MOVHZloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVHZload [c] ptr mem) for { - y := v.Args[0] - if y.Op != OpPPC64MOVHZreg { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + c := v_1.AuxInt + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVHZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHZreg y:(MOVBZreg _)) - // cond: - // result: y + // match: (MOVHZloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVHZload [c] ptr mem) for { - y := v.Args[0] - if y.Op != OpPPC64MOVBZreg { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + if !(is16Bit(c)) { + break + } 
+ v.reset(OpPPC64MOVHZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (MOVHZreg y:(MOVHBRload _ _)) - // cond: + return false +} +func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVHZreg y:(ANDconst [c] _)) + // cond: uint64(c) <= 0xFFFF // result: y for { y := v.Args[0] - if y.Op != OpPPC64MOVHBRload { + if y.Op != OpPPC64ANDconst { + break + } + c := y.AuxInt + if !(uint64(c) <= 0xFFFF) { break } - _ = y.Args[1] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) return true } - // match: (MOVHZreg y:(MOVHreg x)) + // match: (MOVHZreg (SRWconst [c] (MOVBZreg x))) // cond: - // result: (MOVHZreg x) + // result: (SRWconst [c] (MOVBZreg x)) for { - y := v.Args[0] - if y.Op != OpPPC64MOVHreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { break } - x := y.Args[0] - v.reset(OpPPC64MOVHZreg) - v.AddArg(x) + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVHZreg x:(MOVHZload _ _)) + // match: (MOVHZreg (SRWconst [c] (MOVHZreg x))) // cond: - // result: x + // result: (SRWconst [c] (MOVHZreg x)) for { - x := v.Args[0] - if x.Op != OpPPC64MOVHZload { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { break } - _ = x.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVHZreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(uint16(c))]) + // match: (MOVHZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) <= 16 + // result: (SRWconst [c] x) for { v_0 := v.Args[0] - if v_0.Op != 
OpPPC64MOVDconst { + if v_0.Op != OpPPC64SRWconst { break } c := v_0.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64(uint16(c)) - return true + x := v_0.Args[0] + if !(sizeof(x.Type) <= 16) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { - // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (MOVHZreg (SRDconst [c] x)) + // cond: c>=48 + // result: (SRDconst [c] x) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - p := v.Args[0] - if p.Op != OpPPC64MOVDaddr { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { break } - off2 := p.AuxInt - sym2 := p.Aux - ptr := p.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c >= 48) { break } - v.reset(OpPPC64MOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(off1+off2) - // result: (MOVHload [off1+off2] {sym} x mem) + // match: (MOVHZreg (SRWconst [c] x)) + // cond: c>=16 + // result: (SRWconst [c] x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpPPC64ADDconst { + if v_0.Op != OpPPC64SRWconst { break } - off2 := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] - mem := v.Args[1] - if !(is16Bit(off1 + off2)) { + if !(c >= 16) { break } - v.reset(OpPPC64MOVHload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(OpPPC64SRWconst) + v.AuxInt = c v.AddArg(x) - v.AddArg(mem) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { - // match: (MOVHreg y:(ANDconst [c] _)) - // cond: uint64(c) 
<= 0x7FFF + // match: (MOVHZreg y:(MOVHZreg _)) + // cond: // result: y for { y := v.Args[0] - if y.Op != OpPPC64ANDconst { - break - } - c := y.AuxInt - if !(uint64(c) <= 0x7FFF) { + if y.Op != OpPPC64MOVHZreg { break } v.reset(OpCopy) @@ -8760,12 +10165,12 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVHreg y:(MOVHreg _)) + // match: (MOVHZreg y:(MOVBZreg _)) // cond: // result: y for { y := v.Args[0] - if y.Op != OpPPC64MOVHreg { + if y.Op != OpPPC64MOVBZreg { break } v.reset(OpCopy) @@ -8773,38 +10178,42 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVHreg y:(MOVBreg _)) + // match: (MOVHZreg y:(MOVHBRload _ _)) // cond: // result: y for { y := v.Args[0] - if y.Op != OpPPC64MOVBreg { + if y.Op != OpPPC64MOVHBRload { break } + _ = y.Args[1] v.reset(OpCopy) v.Type = y.Type v.AddArg(y) return true } - // match: (MOVHreg y:(MOVHZreg x)) + // match: (MOVHZreg y:(MOVHreg x)) // cond: - // result: (MOVHreg x) + // result: (MOVHZreg x) for { y := v.Args[0] - if y.Op != OpPPC64MOVHZreg { + if y.Op != OpPPC64MOVHreg { break } x := y.Args[0] - v.reset(OpPPC64MOVHreg) + v.reset(OpPPC64MOVHZreg) v.AddArg(x) return true } - // match: (MOVHreg x:(MOVHload _ _)) + return false +} +func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool { + // match: (MOVHZreg x:(MOVBZload _ _)) // cond: // result: x for { x := v.Args[0] - if x.Op != OpPPC64MOVHload { + if x.Op != OpPPC64MOVBZload { break } _ = x.Args[1] @@ -8813,9 +10222,68 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg (MOVDconst [c])) + // match: (MOVHZreg x:(MOVBZloadidx _ _ _)) // cond: - // result: (MOVDconst [int64(int16(c))]) + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVBZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVHZload _ _)) + // cond: + // 
result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHZload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(MOVHZloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) + // result: x + for { + x := v.Args[0] + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHZreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(uint16(c))]) for { v_0 := v.Args[0] if v_0.Op != OpPPC64MOVDconst { @@ -8823,75 +10291,94 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpPPC64MOVDconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int64(uint16(c)) return true } return false } -func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) +func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { + // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem) // cond: 
is16Bit(off1+off2) - // result: (MOVHstore [off1+off2] {sym} x val mem) + // result: (MOVHload [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := v.Aux - _ = v.Args[2] + _ = v.Args[1] v_0 := v.Args[0] if v_0.Op != OpPPC64ADDconst { break } off2 := v_0.AuxInt x := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is16Bit(off1 + off2)) { break } - v.reset(OpPPC64MOVHstore) + v.reset(OpPPC64MOVHload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(x) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVHloadidx ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[2] + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] p := v.Args[0] - if p.Op != OpPPC64MOVDaddr { + if p.Op != OpPPC64ADD { break } - off2 := p.AuxInt - sym2 := p.Aux + _ = p.Args[1] ptr := p.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { break } - v.reset(OpPPC64MOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.reset(OpPPC64MOVHloadidx) v.AddArg(ptr) - v.AddArg(val) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: c == 0 - // result: (MOVHstorezero [off] {sym} ptr mem) + return false +} +func rewriteValuePPC64_OpPPC64MOVHloadidx_0(v *Value) bool { + // match: (MOVHloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVHload [c] ptr mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] ptr := v.Args[0] v_1 := v.Args[1] @@ -8900,291 +10387,1311 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool 
{ } c := v_1.AuxInt mem := v.Args[2] - if !(c == 0) { + if !(is16Bit(c)) { break } - v.reset(OpPPC64MOVHstorezero) - v.AuxInt = off - v.Aux = sym + v.reset(OpPPC64MOVHload) + v.AuxInt = c v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // match: (MOVHloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVHload [c] ptr mem) for { - off := v.AuxInt - sym := v.Aux _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVHreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { break } - x := v_1.Args[0] + c := v_0.AuxInt + ptr := v.Args[1] mem := v.Args[2] - v.reset(OpPPC64MOVHstore) - v.AuxInt = off - v.Aux = sym + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVHload) + v.AuxInt = c v.AddArg(ptr) - v.AddArg(x) v.AddArg(mem) return true } - // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + return false +} +func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVHreg y:(ANDconst [c] _)) + // cond: uint64(c) <= 0x7FFF + // result: y for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVHZreg { + y := v.Args[0] + if y.Op != OpPPC64ANDconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpPPC64MOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + c := y.AuxInt + if !(uint64(c) <= 0x7FFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } - // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // match: (MOVHreg (SRAWconst [c] (MOVBreg x))) // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // result: (SRAWconst [c] (MOVBreg x)) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpPPC64MOVWreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpPPC64MOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem) + // match: (MOVHreg (SRAWconst [c] (MOVHreg x))) // cond: - // result: (MOVHstore [off] {sym} ptr x mem) + // result: (SRAWconst [c] (MOVHreg x)) for { - off := v.AuxInt - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVWZreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpPPC64MOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) - // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0) - // result: (MOVWstore [i0] {s} p w mem) + // match: (MOVHreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) <= 16 + // result: (SRAWconst [c] x) for { - i1 := v.AuxInt - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRWconst { - break - } - if v_1.AuxInt != 16 { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - w := v_1.Args[0] - x0 := v.Args[2] - if x0.Op != OpPPC64MOVHstore { + c := v_0.AuxInt + x := v_0.Args[0] + if !(sizeof(x.Type) <= 16) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpPPC64SRAWconst) + v.AuxInt = 
c + v.AddArg(x) + return true + } + // match: (MOVHreg (SRDconst [c] x)) + // cond: c>48 + // result: (SRDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { break } - _ = x0.Args[2] - if p != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c > 48) { break } - if w != x0.Args[1] { + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVHreg (SRDconst [c] x)) + // cond: c==48 + // result: (SRADconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { break } - mem := x0.Args[2] - if !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c == 48) { break } - v.reset(OpPPC64MOVWstore) - v.AuxInt = i0 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (MOVHstore [i1] {s} p (SRDconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) - // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0) - // result: (MOVWstore [i0] {s} p w mem) + // match: (MOVHreg (SRWconst [c] x)) + // cond: c>16 + // result: (SRWconst [c] x) for { - i1 := v.AuxInt - s := v.Aux - _ = v.Args[2] - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64SRDconst { - break - } - if v_1.AuxInt != 16 { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { break } - w := v_1.Args[0] - x0 := v.Args[2] - if x0.Op != OpPPC64MOVHstore { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c > 16) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVHreg (SRWconst [c] x)) + // cond: c==16 + // result: (SRAWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { break } - _ = x0.Args[2] - if p != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c == 16) { break } - if w != x0.Args[1] { + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVHreg 
y:(MOVHreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVHreg { break } - mem := x0.Args[2] - if !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) { + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVHreg y:(MOVBreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVBreg { break } - v.reset(OpPPC64MOVWstore) - v.AuxInt = i0 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) return true } return false } -func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { - // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(off1+off2) - // result: (MOVHstorezero [off1+off2] {sym} x mem) +func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool { + // match: (MOVHreg y:(MOVHZreg x)) + // cond: + // result: (MOVHreg x) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64ADDconst { - break - } - off2 := v_0.AuxInt - x := v_0.Args[0] - mem := v.Args[1] - if !(is16Bit(off1 + off2)) { + y := v.Args[0] + if y.Op != OpPPC64MOVHZreg { break } - v.reset(OpPPC64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = sym + x := y.Args[0] + v.reset(OpPPC64MOVHreg) v.AddArg(x) - v.AddArg(mem) return true } - // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) - // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + // match: (MOVHreg x:(MOVHload _ _)) + // cond: + // result: x for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - p := v.Args[0] - if p.Op != OpPPC64MOVDaddr { - break - } - off2 := p.AuxInt - sym2 := p.Aux - x := p.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { + x := v.Args[0] + if x.Op != OpPPC64MOVHload { break } - v.reset(OpPPC64MOVHstorezero) - v.AuxInt = off1 + off2 - v.Aux = 
mergeSym(sym1, sym2) + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) - v.AddArg(mem) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVWBRstore_0(v *Value) bool { - // match: (MOVWBRstore {sym} ptr (MOVWreg x) mem) + // match: (MOVHreg x:(MOVHloadidx _ _ _)) // cond: - // result: (MOVWBRstore {sym} ptr x mem) + // result: x for { - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] + x := v.Args[0] + if x.Op != OpPPC64MOVHloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t) + // result: x + for { + x := v.Args[0] + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int16(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64(int16(c)) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) + // cond: is16Bit(off1+off2) + // result: (MOVHstore [off1+off2] {sym} x val mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := v_0.AuxInt + x := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is16Bit(off1 + off2)) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) + // result: (MOVHstore [off1+off2] 
{mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) + // cond: + // result: (MOVHstorezero [off] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + if v_1.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} p:(ADD ptr idx) val mem) + // cond: off == 0 && sym == nil && p.Uses == 1 + // result: (MOVHstoreidx ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVHreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := 
v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVHZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] if v_1.Op != OpPPC64MOVWreg { break } x := v_1.Args[0] mem := v.Args[2] - v.reset(OpPPC64MOVWBRstore) + v.reset(OpPPC64MOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVHstore) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(x) - v.AddArg(mem) + v.AddArg(mem) + return true + } + // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem)) + // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0) + // result: (MOVWstore [i0] {s} p w mem) + for { + i1 := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRWconst { + break + } + if v_1.AuxInt != 16 { + break + } + w := v_1.Args[0] + x0 := v.Args[2] + if x0.Op != OpPPC64MOVHstore { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[2] + if p != x0.Args[0] { + break + } + if w != x0.Args[1] { + break + } + mem := x0.Args[2] + if !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = i0 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVHstore [i1] {s} p (SRDconst w [16]) 
x0:(MOVHstore [i0] {s} p w mem)) + // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0) + // result: (MOVWstore [i0] {s} p w mem) + for { + i1 := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRDconst { + break + } + if v_1.AuxInt != 16 { + break + } + w := v_1.Args[0] + x0 := v.Args[2] + if x0.Op != OpPPC64MOVHstore { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[2] + if p != x0.Args[0] { + break + } + if w != x0.Args[1] { + break + } + mem := x0.Args[2] + if !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = i0 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool { + // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem) + // cond: is16Bit(c) + // result: (MOVHstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem) + // cond: is16Bit(c) + // result: (MOVHstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHreg x) mem) + // cond: + // result: (MOVHstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVHreg { + break + } + x := v_2.Args[0] + 
mem := v.Args[3] + v.reset(OpPPC64MOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHZreg x) mem) + // cond: + // result: (MOVHstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVHZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWreg x) mem) + // cond: + // result: (MOVHstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) + // cond: + // result: (MOVHstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { + // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) + // cond: is16Bit(off1+off2) + // result: (MOVHstorezero [off1+off2] {sym} x mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := v_0.AuxInt + x := v_0.Args[0] + mem := v.Args[1] + if !(is16Bit(off1 + 
off2)) { + break + } + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) + // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := p.AuxInt + sym2 := p.Aux + x := p.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { + break + } + v.reset(OpPPC64MOVHstorezero) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWBRstore_0(v *Value) bool { + // match: (MOVWBRstore {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVWBRstore {sym} ptr x mem) + for { + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVWreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVWBRstore) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem) + // cond: + // result: (MOVWBRstore {sym} ptr x mem) + for { + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVWZreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVWBRstore) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { + // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) + // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { + break + } + off2 := 
p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) + // cond: is16Bit(off1+off2) + // result: (MOVWZload [off1+off2] {sym} x mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64ADDconst { + break + } + off2 := v_0.AuxInt + x := v_0.Args[0] + mem := v.Args[1] + if !(is16Bit(off1 + off2)) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVWZloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWZloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v *Value) bool { + // match: (MOVWZloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVWZload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVWZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWZloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVWZload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + 
v.reset(OpPPC64MOVWZload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVWZreg y:(ANDconst [c] _)) + // cond: uint64(c) <= 0xFFFFFFFF + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64ANDconst { + break + } + c := y.AuxInt + if !(uint64(c) <= 0xFFFFFFFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(AND (MOVDconst [c]) _)) + // cond: uint64(c) <= 0xFFFFFFFF + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64AND { + break + } + _ = y.Args[1] + y_0 := y.Args[0] + if y_0.Op != OpPPC64MOVDconst { + break + } + c := y_0.AuxInt + if !(uint64(c) <= 0xFFFFFFFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(AND _ (MOVDconst [c]))) + // cond: uint64(c) <= 0xFFFFFFFF + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64AND { + break + } + _ = y.Args[1] + y_1 := y.Args[1] + if y_1.Op != OpPPC64MOVDconst { + break + } + c := y_1.AuxInt + if !(uint64(c) <= 0xFFFFFFFF) { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg (SRWconst [c] (MOVBZreg x))) + // cond: + // result: (SRWconst [c] (MOVBZreg x)) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] (MOVHZreg x))) + // cond: + // result: (SRWconst [c] (MOVHZreg x)) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + 
v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] (MOVWZreg x))) + // cond: + // result: (SRWconst [c] (MOVWZreg x)) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MOVWZreg (SRWconst [c] x)) + // cond: sizeof(x.Type) <= 32 + // result: (SRWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRWconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(sizeof(x.Type) <= 32) { + break + } + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVWZreg (SRDconst [c] x)) + // cond: c>=32 + // result: (SRDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c >= 32) { + break + } + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVWZreg y:(MOVWZreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVWZreg { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(MOVHZreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVHZreg { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool { + // match: (MOVWZreg y:(MOVBZreg _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVBZreg { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(MOVHBRload _ _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != 
OpPPC64MOVHBRload { + break + } + _ = y.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(MOVWBRload _ _)) + // cond: + // result: y + for { + y := v.Args[0] + if y.Op != OpPPC64MOVWBRload { + break + } + _ = y.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVWZreg y:(MOVWreg x)) + // cond: + // result: (MOVWZreg x) + for { + y := v.Args[0] + if y.Op != OpPPC64MOVWreg { + break + } + x := y.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVBZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVBZload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVBZloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVBZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVHZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHZload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVHZloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVWZload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVWZloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool { + // match: (MOVWZreg 
x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) + // result: x + for { + x := v.Args[0] + if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem) + // match: (MOVWZreg (MOVDconst [c])) // cond: - // result: (MOVWBRstore {sym} ptr x mem) + // result: (MOVDconst [int64(uint32(c))]) for { - sym := v.Aux - _ = v.Args[2] - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVWZreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { break } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpPPC64MOVWBRstore) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) + c := v_0.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64(uint32(c)) return true } return false } -func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { - // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) +func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { + // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) - // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -9200,16 +11707,16 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } - v.reset(OpPPC64MOVWZload) + v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem) + // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem) // cond: is16Bit(off1+off2) - // result: (MOVWZload [off1+off2] {sym} x mem) + // result: (MOVWload [off1+off2] {sym} x mem) for { off1 := v.AuxInt sym := 
v.Aux @@ -9224,18 +11731,93 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { if !(is16Bit(off1 + off2)) { break } - v.reset(OpPPC64MOVWZload) + v.reset(OpPPC64MOVWload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(x) v.AddArg(mem) return true } + // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem) + // cond: sym == nil && p.Uses == 1 + // result: (MOVWloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + mem := v.Args[1] + if !(sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } -func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { - // match: (MOVWZreg y:(ANDconst [c] _)) - // cond: uint64(c) <= 0xFFFFFFFF +func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool { + // match: (MOVWloadidx ptr (MOVDconst [c]) mem) + // cond: is16Bit(c) + // result: (MOVWload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx (MOVDconst [c]) ptr mem) + // cond: is16Bit(c) + // result: (MOVWload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVWload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVWreg y:(ANDconst [c] _)) + // cond: uint64(c) <= 0xFFFF // result: y for { y := v.Args[0] @@ -9243,7 +11825,7 @@ func 
rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { break } c := y.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { + if !(uint64(c) <= 0xFFFF) { break } v.reset(OpCopy) @@ -9251,8 +11833,8 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVWZreg y:(AND (MOVDconst [c]) _)) - // cond: uint64(c) <= 0xFFFFFFFF + // match: (MOVWreg y:(AND (MOVDconst [c]) _)) + // cond: uint64(c) <= 0x7FFFFFFF // result: y for { y := v.Args[0] @@ -9265,7 +11847,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { break } c := y_0.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { + if !(uint64(c) <= 0x7FFFFFFF) { break } v.reset(OpCopy) @@ -9273,8 +11855,8 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVWZreg y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0xFFFFFFFF + // match: (MOVWreg y:(AND _ (MOVDconst [c]))) + // cond: uint64(c) <= 0x7FFFFFFF // result: y for { y := v.Args[0] @@ -9287,20 +11869,7 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { break } c := y_1.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVWZreg y:(MOVWZreg _)) - // cond: - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64MOVWZreg { + if !(uint64(c) <= 0x7FFFFFFF) { break } v.reset(OpCopy) @@ -9308,187 +11877,121 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { v.AddArg(y) return true } - // match: (MOVWZreg y:(MOVHZreg _)) + // match: (MOVWreg (SRAWconst [c] (MOVBreg x))) // cond: - // result: y + // result: (SRAWconst [c] (MOVBreg x)) for { - y := v.Args[0] - if y.Op != OpPPC64MOVHZreg { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVWZreg y:(MOVBZreg _)) - // cond: - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64MOVBZreg { + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != 
OpPPC64MOVBreg { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVWZreg y:(MOVHBRload _ _)) + // match: (MOVWreg (SRAWconst [c] (MOVHreg x))) // cond: - // result: y + // result: (SRAWconst [c] (MOVHreg x)) for { - y := v.Args[0] - if y.Op != OpPPC64MOVHBRload { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVWZreg y:(MOVWBRload _ _)) - // cond: - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64MOVWBRload { + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { break } - _ = y.Args[1] - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (MOVWZreg y:(MOVWreg x)) + // match: (MOVWreg (SRAWconst [c] (MOVWreg x))) // cond: - // result: (MOVWZreg x) - for { - y := v.Args[0] - if y.Op != OpPPC64MOVWreg { - break - } - x := y.Args[0] - v.reset(OpPPC64MOVWZreg) - v.AddArg(x) - return true - } - return false -} -func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { - // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - _ = v.Args[1] - p := v.Args[0] - if p.Op != OpPPC64MOVDaddr { - break - } - off2 := p.AuxInt - sym2 := p.Aux - ptr := p.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { - break - } - v.reset(OpPPC64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: 
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) - // cond: is16Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} x mem) + // result: (SRAWconst [c] (MOVWreg x)) for { - off1 := v.AuxInt - sym := v.Aux - _ = v.Args[1] v_0 := v.Args[0] - if v_0.Op != OpPPC64ADDconst { + if v_0.Op != OpPPC64SRAWconst { break } - off2 := v_0.AuxInt - x := v_0.Args[0] - mem := v.Args[1] - if !(is16Bit(off1 + off2)) { + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWreg { break } - v.reset(OpPPC64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(x) - v.AddArg(mem) + x := v_0_0.Args[0] + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) return true } - return false -} -func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { - // match: (MOVWreg y:(ANDconst [c] _)) - // cond: uint64(c) <= 0xFFFF - // result: y + // match: (MOVWreg (SRAWconst [c] x)) + // cond: sizeof(x.Type) <= 32 + // result: (SRAWconst [c] x) for { - y := v.Args[0] - if y.Op != OpPPC64ANDconst { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRAWconst { break } - c := y.AuxInt - if !(uint64(c) <= 0xFFFF) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(sizeof(x.Type) <= 32) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.reset(OpPPC64SRAWconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (MOVWreg y:(AND (MOVDconst [c]) _)) - // cond: uint64(c) <= 0x7FFFFFFF - // result: y + // match: (MOVWreg (SRDconst [c] x)) + // cond: c>32 + // result: (SRDconst [c] x) for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - _ = y.Args[1] - y_0 := y.Args[0] - if y_0.Op != OpPPC64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { break } - c := y_0.AuxInt - if !(uint64(c) <= 0x7FFFFFFF) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c > 32) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.reset(OpPPC64SRDconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (MOVWreg 
y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0x7FFFFFFF - // result: y + // match: (MOVWreg (SRDconst [c] x)) + // cond: c==32 + // result: (SRADconst [c] x) for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - _ = y.Args[1] - y_1 := y.Args[1] - if y_1.Op != OpPPC64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRDconst { break } - c := y_1.AuxInt - if !(uint64(c) <= 0x7FFFFFFF) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c == 32) { break } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) return true } // match: (MOVWreg y:(MOVWreg _)) @@ -9504,6 +12007,9 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { v.AddArg(y) return true } + return false +} +func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool { // match: (MOVWreg y:(MOVHreg _)) // cond: // result: y @@ -9543,6 +12049,92 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVWreg x:(MOVHload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVHloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVWload { + break + } + _ = x.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWloadidx _ _ _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpPPC64MOVWloadidx { + break + } + _ = x.Args[2] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) + // result: x + for { + x := v.Args[0] 
+ if x.Op != OpArg { + break + } + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64(int32(c)) + return true + } return false } func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { @@ -9599,8 +12191,8 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: c == 0 + // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) + // cond: // result: (MOVWstorezero [off] {sym} ptr mem) for { off := v.AuxInt @@ -9611,11 +12203,10 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { if v_1.Op != OpPPC64MOVDconst { break } - c := v_1.AuxInt - mem := v.Args[2] - if !(c == 0) { + if v_1.AuxInt != 0 { break } + mem := v.Args[2] v.reset(OpPPC64MOVWstorezero) v.AuxInt = off v.Aux = sym @@ -9623,6 +12214,32 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWstore [off] {sym} p:(ADD ptr idx) val mem) + // cond: off == 0 && sym == nil && p.Uses == 1 + // result: (MOVWstoreidx ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + p := v.Args[0] + if p.Op != OpPPC64ADD { + break + } + _ = p.Args[1] + ptr := p.Args[0] + idx := p.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(off == 0 && sym == nil && p.Uses == 1) { + break + } + v.reset(OpPPC64MOVWstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) // cond: // result: (MOVWstore [off] {sym} ptr x mem) @@ -9669,6 +12286,103 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { } return false } 
+func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool { + // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem) + // cond: is16Bit(c) + // result: (MOVWstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem) + // cond: is16Bit(c) + // result: (MOVWstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is16Bit(c)) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWreg x) mem) + // cond: + // result: (MOVWstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVWreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVWstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWZreg x) mem) + // cond: + // result: (MOVWstoreidx [off] {sym} ptr idx x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpPPC64MOVWZreg { + break + } + x := v_2.Args[0] + mem := v.Args[3] + v.reset(OpPPC64MOVWstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) // cond: 
is16Bit(off1+off2) @@ -9756,7 +12470,7 @@ func rewriteValuePPC64_OpPPC64MTVSRD_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpPPC64FMOVDload, typ.Float64) + v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -10334,7 +13048,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10384,7 +13098,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10434,7 +13148,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10484,7 +13198,7 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -10534,10 +13248,10 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -10586,10 +13300,10 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := 
b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -10638,10 +13352,10 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -10690,10 +13404,10 @@ func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -10754,12 +13468,12 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = n1 - v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -10812,12 +13526,12 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = n1 - v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -10870,12 +13584,12 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, 
x1) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = n1 - v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -10928,12 +13642,12 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = n1 - v1 := b.NewValue0(v.Pos, OpPPC64MOVHBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVHBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -11013,7 +13727,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11093,7 +13807,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11173,7 +13887,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11253,7 +13967,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11333,7 +14047,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool 
{ break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11413,7 +14127,7 @@ func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11502,7 +14216,7 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11582,7 +14296,7 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -11672,10 +14386,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -11764,10 +14478,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -11856,10 +14570,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -11948,10 +14662,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12040,10 +14754,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12132,10 +14846,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12224,10 +14938,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12316,10 +15030,10 @@ func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12417,10 +15131,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12509,10 +15223,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12601,10 +15315,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12693,10 +15407,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12785,10 +15499,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12877,10 +15591,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -12969,10 +15683,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -13061,10 +15775,10 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -13160,12 +15874,12 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13262,12 +15976,12 @@ func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, 
OpPPC64SLDconst, t) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13373,12 +16087,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13475,12 +16189,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13577,12 +16291,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13679,12 +16393,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, 
x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13781,12 +16495,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -13883,12 +16597,12 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, OpPPC64SLDconst, t) + v0 := b.NewValue0(x2.Pos, OpPPC64SLDconst, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = 32 - v1 := b.NewValue0(v.Pos, OpPPC64MOVWBRload, t) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t) + v2 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -14028,7 +16742,7 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14168,7 +16882,7 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x4.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14308,7 +17022,7 @@ func 
rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14448,7 +17162,7 @@ func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14595,7 +17309,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14735,7 +17449,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -14875,7 +17589,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15015,7 +17729,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15155,7 +17869,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15295,7 +18009,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := 
b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15435,7 +18149,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15575,7 +18289,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15715,7 +18429,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -15855,7 +18569,7 @@ func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -16004,7 +18718,7 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -16144,7 +18858,7 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -16291,10 +19005,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x4.Pos, 
OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -16440,10 +19154,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -16589,10 +19303,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -16738,10 +19452,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x2.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -16887,10 +19601,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17036,10 +19750,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, 
typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17185,10 +19899,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17334,10 +20048,10 @@ func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x1.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17492,10 +20206,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17641,10 +20355,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17790,10 +20504,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -17939,10 +20653,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18088,10 +20802,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18237,10 +20951,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18386,10 +21100,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18535,10 +21249,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x0, x1, x2, x3, x4) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x0.Pos, OpPPC64MOVDBRload, t) 
v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18687,10 +21401,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -18839,10 +21553,10 @@ func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19000,10 +21714,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19152,10 +21866,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19304,10 +22018,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := 
b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19456,10 +22170,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19608,10 +22322,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19760,10 +22474,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -19912,10 +22626,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20064,10 +22778,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := 
b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20216,10 +22930,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20368,10 +23082,10 @@ func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20529,10 +23243,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20681,10 +23395,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20833,10 +23547,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = 
mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -20985,10 +23699,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21137,10 +23851,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21289,10 +24003,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21441,10 +24155,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21593,10 +24307,10 @@ func 
rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x5.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x5.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21745,10 +24459,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -21897,10 +24611,10 @@ func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22058,10 +24772,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22210,10 +24924,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x6.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s 
v1.AddArg(p) @@ -22362,10 +25076,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22514,10 +25228,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22666,10 +25380,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22818,10 +25532,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -22970,10 +25684,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, 
OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -23122,10 +25836,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -23274,10 +25988,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -23426,10 +26140,10 @@ func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { break } b = mergePoint(b, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRload, t) + v0 := b.NewValue0(x7.Pos, OpPPC64MOVDBRload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, typ.Uintptr) + v1 := b.NewValue0(x7.Pos, OpPPC64MOVDaddr, typ.Uintptr) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -24055,11 +26769,11 @@ func rewriteValuePPC64_OpPopCount8_0(v *Value) bool { _ = typ // match: (PopCount8 x) // cond: - // result: (POPCNTB (MOVBreg x)) + // result: (POPCNTB (MOVBZreg x)) for { x := v.Args[0] v.reset(OpPPC64POPCNTB) - v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) v0.AddArg(x) v.AddArg(v0) return true @@ -24104,6 +26818,23 @@ func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh16Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + 
v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16Ux16 x y) // cond: // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { @@ -24176,6 +26907,23 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool { return true } // match: (Rsh16Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16Ux32 x y) // cond: // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { @@ -24257,11 +27005,28 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool { if !(uint64(c) < 16) { break } - v.reset(OpPPC64SRWconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v.reset(OpPPC64SRWconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Rsh16Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) v0.AddArg(x) v.AddArg(v0) + v.AddArg(y) return true } // match: (Rsh16Ux64 x y) @@ -24293,6 +27058,23 @@ func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh16Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVHZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: 
(Rsh16Ux8 x y) // cond: // result: (SRW (ZeroExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { @@ -24323,6 +27105,23 @@ func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16x16 x y) // cond: // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y))))) for { @@ -24395,6 +27194,23 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool { return true } // match: (Rsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16x32 x y) // cond: // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y))))) for { @@ -24488,6 +27304,23 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool { return true } // match: (Rsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVHreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16x64 x y) // cond: // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] y)))) for { @@ -24516,6 +27349,23 @@ func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh16x8 x y) + // cond: shiftIsBounded(v) + // 
result: (SRAW (MOVHreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh16x8 x y) // cond: // result: (SRAW (SignExt16to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y))))) for { @@ -24546,6 +27396,21 @@ func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh32Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32Ux16 x y) // cond: // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { @@ -24612,6 +27477,21 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool { return true } // match: (Rsh32Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32Ux32 x y) // cond: // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { @@ -24694,6 +27574,21 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh32Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) // cond: // result: (SRW x (ANDconst [31] y)) @@ -24918,6 +27813,13 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuePPC64_OpRsh32Ux64_10(v *Value) bool { + b := 
v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (Rsh32Ux64 x y) // cond: // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -24945,6 +27847,21 @@ func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh32Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32Ux8 x y) // cond: // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y))))) for { @@ -24973,6 +27890,21 @@ func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32x16 x y) // cond: // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y))))) for { @@ -25039,6 +27971,21 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool { return true } // match: (Rsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32x32 x y) // cond: // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y))))) for { @@ -25123,6 +28070,21 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Rsh32x64 x (AND y (MOVDconst 
[31]))) // cond: // result: (SRAW x (ANDconst [31] y)) @@ -25347,6 +28309,13 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuePPC64_OpRsh32x64_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (Rsh32x64 x y) // cond: // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -25374,6 +28343,21 @@ func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh32x8 x y) // cond: // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y))))) for { @@ -25402,6 +28386,21 @@ func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh64Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh64Ux16 x y) // cond: // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { @@ -25468,6 +28467,21 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool { return true } // match: (Rsh64Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh64Ux32 x y) // cond: // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) for { @@ -25550,6 +28564,21 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh64Ux64 x y) + // cond: 
shiftIsBounded(v) + // result: (SRD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) // cond: // result: (SRD x (ANDconst [63] y)) @@ -25774,6 +28803,13 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuePPC64_OpRsh64Ux64_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (Rsh64Ux64 x y) // cond: // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) @@ -25801,6 +28837,21 @@ func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh64Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh64Ux8 x y) // cond: // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { @@ -25829,6 +28880,21 @@ func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh64x16 x y) // cond: // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y))))) for { @@ -25889,9 +28955,24 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool { if !(uint32(c) < 64) { break } - v.reset(OpPPC64SRADconst) - v.AuxInt = c + v.reset(OpPPC64SRADconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (Rsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if 
!(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) v.AddArg(x) + v.AddArg(y) return true } // match: (Rsh64x32 x y) @@ -25979,6 +29060,21 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (Rsh64x64 x (AND y (MOVDconst [63]))) // cond: // result: (SRAD x (ANDconst [63] y)) @@ -26203,6 +29299,13 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuePPC64_OpRsh64x64_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (Rsh64x64 x y) // cond: // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) @@ -26230,6 +29333,21 @@ func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAD x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Rsh64x8 x y) // cond: // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y))))) for { @@ -26258,6 +29376,23 @@ func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh8Ux16 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8Ux16 x y) // cond: // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { @@ -26330,6 
+29465,23 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool { return true } // match: (Rsh8Ux32 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8Ux32 x y) // cond: // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { @@ -26419,6 +29571,23 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool { return true } // match: (Rsh8Ux64 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8Ux64 x y) // cond: // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { @@ -26447,6 +29616,23 @@ func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh8Ux8 x y) + // cond: shiftIsBounded(v) + // result: (SRW (MOVBZreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8Ux8 x y) // cond: // result: (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { @@ -26477,6 +29663,23 @@ func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := 
b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8x16 x y) // cond: // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y))))) for { @@ -26549,6 +29752,23 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool { return true } // match: (Rsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8x32 x y) // cond: // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y))))) for { @@ -26642,6 +29862,23 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool { return true } // match: (Rsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8x64 x y) // cond: // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) for { @@ -26670,6 +29907,23 @@ func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (Rsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SRAW (MOVBreg x) y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + if !(shiftIsBounded(v)) { + break + } + v.reset(OpPPC64SRAW) + v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } + // match: (Rsh8x8 x y) // cond: // result: (SRAW (SignExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y))))) for { @@ -27042,70 +30296,148 @@ func rewriteValuePPC64_OpTrunc_0(v *Value) 
bool { } } func rewriteValuePPC64_OpTrunc16to8_0(v *Value) bool { - // match: (Trunc16to8 x) - // cond: + // match: (Trunc16to8 x) + // cond: isSigned(t) // result: (MOVBreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } + // match: (Trunc16to8 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpTrunc32to16_0(v *Value) bool { - // match: (Trunc32to16 x) - // cond: + // match: (Trunc32to16 x) + // cond: isSigned(t) // result: (MOVHreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } v.reset(OpPPC64MOVHreg) v.AddArg(x) return true } + // match: (Trunc32to16 x) + // cond: + // result: (MOVHZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpTrunc32to8_0(v *Value) bool { - // match: (Trunc32to8 x) - // cond: + // match: (Trunc32to8 x) + // cond: isSigned(t) // result: (MOVBreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } + // match: (Trunc32to8 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpTrunc64to16_0(v *Value) bool { - // match: (Trunc64to16 x) - // cond: + // match: (Trunc64to16 x) + // cond: isSigned(t) // result: (MOVHreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } v.reset(OpPPC64MOVHreg) v.AddArg(x) return true } + // match: (Trunc64to16 x) + // cond: + // result: (MOVHZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVHZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpTrunc64to32_0(v *Value) bool { - // match: (Trunc64to32 x) - // cond: + // match: (Trunc64to32 x) + // cond: isSigned(t) // result: (MOVWreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } 
v.reset(OpPPC64MOVWreg) v.AddArg(x) return true } + // match: (Trunc64to32 x) + // cond: + // result: (MOVWZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVWZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpTrunc64to8_0(v *Value) bool { - // match: (Trunc64to8 x) - // cond: + // match: (Trunc64to8 x) + // cond: isSigned(t) // result: (MOVBreg x) for { + t := v.Type x := v.Args[0] + if !(isSigned(t)) { + break + } v.reset(OpPPC64MOVBreg) v.AddArg(x) return true } + // match: (Trunc64to8 x) + // cond: + // result: (MOVBZreg x) + for { + x := v.Args[0] + v.reset(OpPPC64MOVBZreg) + v.AddArg(x) + return true + } } func rewriteValuePPC64_OpWB_0(v *Value) bool { // match: (WB {fn} destptr srcptr mem) @@ -27615,7 +30947,200 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (FlagEQ) yes no) + // match: (EQ (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpPPC64FlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (EQ (FlagLT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpPPC64FlagLT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (FlagGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpPPC64FlagGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (EQ (InvertFlags cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpPPC64InvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockPPC64EQ + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (EQ (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + 
v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64EQ + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (EQ (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64EQ + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (ANDCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64EQ + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (ORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64EQ + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (EQ (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (EQ (XORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := 
v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64EQ + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockPPC64GE: + // match: (GE (FlagEQ) yes no) // cond: // result: (First nil yes no) for { @@ -27628,7 +31153,7 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } - // match: (EQ (FlagLT) yes no) + // match: (GE (FlagLT) yes no) // cond: // result: (First nil no yes) for { @@ -27642,9 +31167,9 @@ func rewriteBlockPPC64(b *Block) bool { b.swapSuccessors() return true } - // match: (EQ (FlagGT) yes no) + // match: (GE (FlagGT) yes no) // cond: - // result: (First nil no yes) + // result: (First nil yes no) for { v := b.Control if v.Op != OpPPC64FlagGT { @@ -27653,27 +31178,163 @@ func rewriteBlockPPC64(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil - b.swapSuccessors() return true } - // match: (EQ (InvertFlags cmp) yes no) + // match: (GE (InvertFlags cmp) yes no) // cond: - // result: (EQ cmp yes no) + // result: (LE cmp yes no) for { v := b.Control if v.Op != OpPPC64InvertFlags { break } cmp := v.Args[0] - b.Kind = BlockPPC64EQ + b.Kind = BlockPPC64LE b.SetControl(cmp) b.Aux = nil return true } - case BlockPPC64GE: - // match: (GE (FlagEQ) yes no) + // match: (GE (CMPconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (First nil yes no) + // result: (GE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64GE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: 
(GE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64GE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (ANDCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (ORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GE + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (GE (XORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GE + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockPPC64GT: + // match: (GT (FlagEQ) yes no) + // 
cond: + // result: (First nil no yes) for { v := b.Control if v.Op != OpPPC64FlagEQ { @@ -27682,9 +31343,10 @@ func rewriteBlockPPC64(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil + b.swapSuccessors() return true } - // match: (GE (FlagLT) yes no) + // match: (GT (FlagLT) yes no) // cond: // result: (First nil no yes) for { @@ -27698,7 +31360,7 @@ func rewriteBlockPPC64(b *Block) bool { b.swapSuccessors() return true } - // match: (GE (FlagGT) yes no) + // match: (GT (FlagGT) yes no) // cond: // result: (First nil yes no) for { @@ -27711,73 +31373,154 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } - // match: (GE (InvertFlags cmp) yes no) + // match: (GT (InvertFlags cmp) yes no) // cond: - // result: (LE cmp yes no) + // result: (LT cmp yes no) for { v := b.Control if v.Op != OpPPC64InvertFlags { break } cmp := v.Args[0] - b.Kind = BlockPPC64LE + b.Kind = BlockPPC64LT b.SetControl(cmp) b.Aux = nil return true } - case BlockPPC64GT: - // match: (GT (FlagEQ) yes no) + // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (First nil no yes) + // result: (GT (ANDCCconst [c] x) yes no) for { v := b.Control - if v.Op != OpPPC64FlagEQ { + if v.Op != OpPPC64CMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64GT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (GT (FlagLT) yes no) + // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (First nil no yes) + // result: (GT (ANDCCconst [c] x) yes no) for { v := b.Control - if v.Op != OpPPC64FlagLT { + if v.Op != OpPPC64CMPWconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != 
OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64GT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (GT (FlagGT) yes no) - // cond: - // result: (First nil yes no) + // match: (GT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (ANDCC x y) yes no) for { v := b.Control - if v.Op != OpPPC64FlagGT { + if v.Op != OpPPC64CMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (GT (InvertFlags cmp) yes no) - // cond: - // result: (LT cmp yes no) + // match: (GT (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (ORCC x y) yes no) for { v := b.Control - if v.Op != OpPPC64InvertFlags { + if v.Op != OpPPC64CMPconst { break } - cmp := v.Args[0] - b.Kind = BlockPPC64LT - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GT + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (GT (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (GT (XORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64GT + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, 
types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } @@ -27908,39 +31651,231 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } - // match: (If (FGreaterEqual cc) yes no) - // cond: - // result: (FGE cc yes no) + // match: (If (FGreaterEqual cc) yes no) + // cond: + // result: (FGE cc yes no) + for { + v := b.Control + if v.Op != OpPPC64FGreaterEqual { + break + } + cc := v.Args[0] + b.Kind = BlockPPC64FGE + b.SetControl(cc) + b.Aux = nil + return true + } + // match: (If cond yes no) + // cond: + // result: (NE (CMPWconst [0] cond) yes no) + for { + v := b.Control + _ = v + cond := b.Control + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags) + v0.AuxInt = 0 + v0.AddArg(cond) + b.SetControl(v0) + b.Aux = nil + return true + } + case BlockPPC64LE: + // match: (LE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpPPC64FlagEQ { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagLT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpPPC64FlagLT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + return true + } + // match: (LE (FlagGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpPPC64FlagGT { + break + } + b.Kind = BlockFirst + b.SetControl(nil) + b.Aux = nil + b.swapSuccessors() + return true + } + // match: (LE (InvertFlags cmp) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpPPC64InvertFlags { + break + } + cmp := v.Args[0] + b.Kind = BlockPPC64GE + b.SetControl(cmp) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (LE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + 
if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64LE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (LE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64LE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (ANDCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (LE (ORCC x y) yes no) for { v := b.Control - if v.Op != OpPPC64FGreaterEqual { + if v.Op != OpPPC64CMPconst { break } - cc := v.Args[0] - b.Kind = BlockPPC64FGE - b.SetControl(cc) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LE + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } - // match: (If cond yes no) - // cond: - // result: (NE (CMPWconst [0] cond) yes no) + // match: (LE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 
1 + // result: (LE (XORCC x y) yes no) for { v := b.Control - _ = v - cond := b.Control - b.Kind = BlockPPC64NE - v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags) - v0.AuxInt = 0 - v0.AddArg(cond) + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LE + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) b.SetControl(v0) b.Aux = nil return true } - case BlockPPC64LE: - // match: (LE (FlagEQ) yes no) + case BlockPPC64LT: + // match: (LT (FlagEQ) yes no) // cond: - // result: (First nil yes no) + // result: (First nil no yes) for { v := b.Control if v.Op != OpPPC64FlagEQ { @@ -27949,9 +31884,10 @@ func rewriteBlockPPC64(b *Block) bool { b.Kind = BlockFirst b.SetControl(nil) b.Aux = nil + b.swapSuccessors() return true } - // match: (LE (FlagLT) yes no) + // match: (LT (FlagLT) yes no) // cond: // result: (First nil yes no) for { @@ -27964,7 +31900,7 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } - // match: (LE (FlagGT) yes no) + // match: (LT (FlagGT) yes no) // cond: // result: (First nil no yes) for { @@ -27978,73 +31914,154 @@ func rewriteBlockPPC64(b *Block) bool { b.swapSuccessors() return true } - // match: (LE (InvertFlags cmp) yes no) + // match: (LT (InvertFlags cmp) yes no) // cond: - // result: (GE cmp yes no) + // result: (GT cmp yes no) for { v := b.Control if v.Op != OpPPC64InvertFlags { break } cmp := v.Args[0] - b.Kind = BlockPPC64GE + b.Kind = BlockPPC64GT b.SetControl(cmp) b.Aux = nil return true } - case BlockPPC64LT: - // match: (LT (FlagEQ) yes no) + // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (First nil no yes) + // result: (LT (ANDCCconst [c] x) yes no) for { v := b.Control - if v.Op != OpPPC64FlagEQ { + if v.Op != OpPPC64CMPconst { break } - b.Kind = BlockFirst - 
b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64LT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (FlagLT) yes no) + // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no) // cond: - // result: (First nil yes no) + // result: (LT (ANDCCconst [c] x) yes no) for { v := b.Control - if v.Op != OpPPC64FlagLT { + if v.Op != OpPPC64CMPWconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64LT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) b.Aux = nil return true } - // match: (LT (FlagGT) yes no) - // cond: - // result: (First nil no yes) + // match: (LT (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (ANDCC x y) yes no) for { v := b.Control - if v.Op != OpPPC64FlagGT { + if v.Op != OpPPC64CMPconst { break } - b.Kind = BlockFirst - b.SetControl(nil) + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LT + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil - b.swapSuccessors() return true } - // match: (LT (InvertFlags cmp) yes no) - // cond: - // result: (GT cmp yes no) + // match: (LT (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (ORCC x y) yes no) for { v := b.Control - if v.Op != OpPPC64InvertFlags { + if v.Op != OpPPC64CMPconst { break } - cmp := v.Args[0] - b.Kind = BlockPPC64GT - b.SetControl(cmp) + if v.AuxInt != 0 { + break + } + z := 
v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LT + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (LT (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (LT (XORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64LT + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) b.Aux = nil return true } @@ -28363,6 +32380,143 @@ func rewriteBlockPPC64(b *Block) bool { b.Aux = nil return true } + // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (NE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) + // cond: + // result: (NE (ANDCCconst [c] x) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPWconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpPPC64ANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] z:(AND x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (ANDCC x y) yes no) + for { + v := b.Control + if 
v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] z:(OR x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (ORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64OR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64ORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } + // match: (NE (CMPconst [0] z:(XOR x y)) yes no) + // cond: z.Uses == 1 + // result: (NE (XORCC x y) yes no) + for { + v := b.Control + if v.Op != OpPPC64CMPconst { + break + } + if v.AuxInt != 0 { + break + } + z := v.Args[0] + if z.Op != OpPPC64XOR { + break + } + _ = z.Args[1] + x := z.Args[0] + y := z.Args[1] + if !(z.Uses == 1) { + break + } + b.Kind = BlockPPC64NE + v0 := b.NewValue0(v.Pos, OpPPC64XORCC, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + b.SetControl(v0) + b.Aux = nil + return true + } } return false } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 7125b888bdbfd..ce501a74efb4c 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not 
otherwise used @@ -383,6 +385,18 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpOr8_0(v) case OpOrB: return rewriteValueS390X_OpOrB_0(v) + case OpPopCount16: + return rewriteValueS390X_OpPopCount16_0(v) + case OpPopCount32: + return rewriteValueS390X_OpPopCount32_0(v) + case OpPopCount64: + return rewriteValueS390X_OpPopCount64_0(v) + case OpPopCount8: + return rewriteValueS390X_OpPopCount8_0(v) + case OpRotateLeft32: + return rewriteValueS390X_OpRotateLeft32_0(v) + case OpRotateLeft64: + return rewriteValueS390X_OpRotateLeft64_0(v) case OpRound: return rewriteValueS390X_OpRound_0(v) case OpRound32F: @@ -657,6 +671,10 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XORconst_0(v) case OpS390XORload: return rewriteValueS390X_OpS390XORload_0(v) + case OpS390XRLL: + return rewriteValueS390X_OpS390XRLL_0(v) + case OpS390XRLLG: + return rewriteValueS390X_OpS390XRLLG_0(v) case OpS390XSLD: return rewriteValueS390X_OpS390XSLD_0(v) || rewriteValueS390X_OpS390XSLD_10(v) case OpS390XSLW: @@ -691,6 +709,12 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XSUBconst_0(v) case OpS390XSUBload: return rewriteValueS390X_OpS390XSUBload_0(v) + case OpS390XSumBytes2: + return rewriteValueS390X_OpS390XSumBytes2_0(v) + case OpS390XSumBytes4: + return rewriteValueS390X_OpS390XSumBytes4_0(v) + case OpS390XSumBytes8: + return rewriteValueS390X_OpS390XSumBytes8_0(v) case OpS390XXOR: return rewriteValueS390X_OpS390XXOR_0(v) || rewriteValueS390X_OpS390XXOR_10(v) case OpS390XXORW: @@ -4775,7 +4799,7 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { } // match: (Move [s] dst src mem) // cond: s > 1024 - // result: (LoweredMove [s%256] dst src (ADDconst src [(s/256)*256]) mem) + // result: (LoweredMove [s%256] dst src (ADD src (MOVDconst [(s/256)*256])) mem) for { s := v.AuxInt _ = v.Args[2] @@ -4789,9 +4813,11 @@ func rewriteValueS390X_OpMove_10(v *Value) bool { v.AuxInt = s % 256 v.AddArg(dst) v.AddArg(src) - v0 := 
b.NewValue0(v.Pos, OpS390XADDconst, src.Type) - v0.AuxInt = (s / 256) * 256 + v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type) v0.AddArg(src) + v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v1.AuxInt = (s / 256) * 256 + v0.AddArg(v1) v.AddArg(v0) v.AddArg(mem) return true @@ -5311,6 +5337,108 @@ func rewriteValueS390X_OpOrB_0(v *Value) bool { return true } } +func rewriteValueS390X_OpPopCount16_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount16 x) + // cond: + // result: (MOVBZreg (SumBytes2 (POPCNT x))) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount32_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount32 x) + // cond: + // result: (MOVBZreg (SumBytes4 (POPCNT x))) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount64_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount64 x) + // cond: + // result: (MOVBZreg (SumBytes8 (POPCNT x))) + for { + x := v.Args[0] + v.reset(OpS390XMOVBZreg) + v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8) + v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpPopCount8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (PopCount8 x) + // cond: + // result: (POPCNT (MOVBZreg x)) + for { + x := v.Args[0] + v.reset(OpS390XPOPCNT) + v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + 
return true + } +} +func rewriteValueS390X_OpRotateLeft32_0(v *Value) bool { + // match: (RotateLeft32 x y) + // cond: + // result: (RLL x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XRLL) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueS390X_OpRotateLeft64_0(v *Value) bool { + // match: (RotateLeft64 x y) + // cond: + // result: (RLLG x y) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpS390XRLLG) + v.AddArg(x) + v.AddArg(y) + return true + } +} func rewriteValueS390X_OpRound_0(v *Value) bool { // match: (Round x) // cond: @@ -6993,7 +7121,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { return true } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7008,7 +7136,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7021,7 +7149,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { return true } // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7036,7 +7164,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7052,7 
+7180,7 @@ func rewriteValueS390X_OpS390XADD_0(v *Value) bool { } func rewriteValueS390X_OpS390XADD_10(v *Value) bool { // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7067,7 +7195,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7080,7 +7208,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { return true } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) for { t := v.Type @@ -7095,7 +7223,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDload) @@ -7229,7 +7357,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7244,7 +7372,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + 
if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7257,7 +7385,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7272,7 +7400,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7285,7 +7413,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7300,7 +7428,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7313,7 +7441,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { return true } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7328,7 +7456,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem 
:= g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7344,7 +7472,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { } func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7359,7 +7487,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7372,7 +7500,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7387,7 +7515,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7400,7 +7528,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] 
{sym} x ptr mem) for { t := v.Type @@ -7415,7 +7543,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7428,7 +7556,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { return true } // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) for { t := v.Type @@ -7443,7 +7571,7 @@ func rewriteValueS390X_OpS390XADDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XADDWload) @@ -7709,7 +7837,7 @@ func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { } v.reset(OpS390XADD) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true @@ -8012,7 +8140,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8027,7 +8155,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ 
-8040,7 +8168,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8055,7 +8183,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8068,7 +8196,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8083,7 +8211,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8096,7 +8224,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { return true } // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDload [off] {sym} x ptr mem) for { t := v.Type @@ -8111,7 +8239,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) 
&& canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDload) @@ -8173,7 +8301,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8188,7 +8316,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8201,7 +8329,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8216,7 +8344,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8229,7 +8357,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8244,7 +8372,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && 
is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8257,7 +8385,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8272,7 +8400,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8285,7 +8413,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8300,7 +8428,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8313,7 +8441,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8328,7 +8456,7 @@ func 
rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8341,7 +8469,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { return true } // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8356,7 +8484,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8372,7 +8500,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { } func rewriteValueS390X_OpS390XANDW_10(v *Value) bool { // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) for { t := v.Type @@ -8387,7 +8515,7 @@ func rewriteValueS390X_OpS390XANDW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XANDWload) @@ -8628,7 +8756,7 @@ func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { } v.reset(OpS390XAND) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true @@ -10756,11 
+10884,11 @@ func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XLNDFR, t) + v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XLDGR, t) - v2 := b.NewValue0(v.Pos, OpS390XMOVDload, t1) + v1 := b.NewValue0(x.Pos, OpS390XLDGR, t) + v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1) v2.AuxInt = off v2.Aux = sym v2.AddArg(ptr) @@ -11411,7 +11539,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -11437,7 +11565,7 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -11851,7 +11979,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVBload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -11877,7 +12005,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVBload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15231,7 +15359,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15258,7 +15386,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15285,7 +15413,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool 
{ break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15312,7 +15440,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15339,7 +15467,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15366,7 +15494,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15393,7 +15521,7 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15672,7 +15800,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15699,7 +15827,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15726,7 +15854,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15753,7 +15881,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) + 
v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15780,7 +15908,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15807,7 +15935,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -15834,7 +15962,7 @@ func rewriteValueS390X_OpS390XMOVDreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) + v0 := b.NewValue0(x.Pos, OpS390XMOVDload, t) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -17869,7 +17997,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -17895,7 +18023,7 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18365,7 +18493,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVHload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18398,7 +18526,7 @@ func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVHload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -18943,7 +19071,7 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { v.AuxInt = ValAndOff(a).Off() v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, 
OpS390XMOVDconst, typ.UInt64) + v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = int64(int32(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16)) v.AddArg(v0) v.AddArg(mem) @@ -20809,7 +20937,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -20840,7 +20968,7 @@ func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -21339,7 +21467,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -21365,7 +21493,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { break } b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, v.Type) + v0 := b.NewValue0(x.Pos, OpS390XMOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = off @@ -21912,7 +22040,7 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { v.AuxInt = ValAndOff(a).Off() v.Aux = s v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64) + v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64) v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 v.AddArg(v0) v.AddArg(mem) @@ -22498,7 +22626,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22513,7 +22641,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) 
bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22526,7 +22654,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22541,7 +22669,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22554,7 +22682,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22569,7 +22697,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22582,7 +22710,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { return true } // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: 
(MULLDload [off] {sym} x ptr mem) for { t := v.Type @@ -22597,7 +22725,7 @@ func rewriteValueS390X_OpS390XMULLD_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLDload) @@ -22745,7 +22873,7 @@ func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { } v.reset(OpS390XMULLD) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true @@ -22839,7 +22967,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22854,7 +22982,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22867,7 +22995,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22882,7 +23010,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && 
clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22895,7 +23023,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22910,7 +23038,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22923,7 +23051,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22938,7 +23066,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22951,7 +23079,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22966,7 +23094,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && 
is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -22979,7 +23107,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -22994,7 +23122,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23007,7 +23135,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23022,7 +23150,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23035,7 +23163,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { return true } // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (MULLWload [off] {sym} x ptr mem) for { t := v.Type @@ -23050,7 +23178,7 @@ func 
rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XMULLWload) @@ -23684,7 +23812,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23699,7 +23827,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23712,7 +23840,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23727,7 +23855,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23740,7 +23868,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // 
result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23755,7 +23883,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23768,7 +23896,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { return true } // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) for { t := v.Type @@ -23783,7 +23911,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORload) @@ -23835,7 +23963,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -23884,7 +24012,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -23933,7 +24061,7 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -23989,7 +24117,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 
:= b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -24038,7 +24166,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -24087,7 +24215,7 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64) + v0 := b.NewValue0(x1.Pos, OpS390XMOVDload, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -24145,12 +24273,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24209,12 +24337,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24273,12 +24401,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 
:= b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24337,12 +24465,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24401,12 +24529,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24465,12 +24593,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24529,12 +24657,12 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -24600,12 +24728,12 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v2 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -28194,10 +28322,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28245,10 +28373,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v1 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28304,10 +28432,10 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s 
v1.AddArg(p) @@ -28370,10 +28498,10 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v1 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -28429,7 +28557,7 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, typ.UInt64) + v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -28486,7 +28614,7 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, typ.UInt64) + v0 := b.NewValue0(x0.Pos, OpS390XMOVDBRload, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -28544,13 +28672,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28610,13 +28738,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, 
typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28676,13 +28804,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28742,13 +28870,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28816,13 +28944,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) 
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28890,13 +29018,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -28964,13 +29092,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -29045,13 +29173,13 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v2 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -33057,7 +33185,7 @@ func 
rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33072,7 +33200,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33085,7 +33213,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33100,7 +33228,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33113,7 +33241,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33128,7 +33256,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && 
canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33141,7 +33269,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33156,7 +33284,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33169,7 +33297,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { return true } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33184,7 +33312,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33204,7 +33332,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33219,7 +33347,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if 
!(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33232,7 +33360,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { return true } // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33247,7 +33375,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33260,7 +33388,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { return true } // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (ORWload [off] {sym} x ptr mem) for { t := v.Type @@ -33275,7 +33403,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XORWload) @@ -33327,7 +33455,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -33376,7 +33504,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := 
b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -33425,7 +33553,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -33474,7 +33602,7 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -33532,12 +33660,12 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -33596,12 +33724,12 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -33660,12 +33788,12 @@ func rewriteValueS390X_OpS390XORW_10(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, 
v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -33731,12 +33859,12 @@ func rewriteValueS390X_OpS390XORW_20(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16) v2.AuxInt = i0 v2.Aux = s v2.AddArg(p) @@ -35775,10 +35903,10 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -35826,10 +35954,10 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) + v0 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v1 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v1.AuxInt = i0 v1.Aux = s v1.AddArg(p) @@ -35885,7 +36013,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, typ.UInt32) + v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -35942,7 +36070,7 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, 
typ.UInt32) + v0 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32) v.reset(OpCopy) v.AddArg(v0) v0.AuxInt = i0 @@ -36000,13 +36128,13 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -36066,13 +36194,13 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -36132,13 +36260,13 @@ func rewriteValueS390X_OpS390XORW_50(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ 
-36205,13 +36333,13 @@ func rewriteValueS390X_OpS390XORW_60(v *Value) bool { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type) v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, typ.UInt16) + v2 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64) + v3 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16) v3.AuxInt = i0 v3.Aux = s v3.AddArg(p) @@ -38496,7 +38624,7 @@ func rewriteValueS390X_OpS390XORload_0(v *Value) bool { } v.reset(OpS390XOR) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true @@ -38556,6 +38684,44 @@ func rewriteValueS390X_OpS390XORload_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XRLL_0(v *Value) bool { + // match: (RLL x (MOVDconst [c])) + // cond: + // result: (RLLconst x [c&31]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XRLLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XRLLG_0(v *Value) bool { + // match: (RLLG x (MOVDconst [c])) + // cond: + // result: (RLLGconst x [c&63]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpS390XRLLGconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + return false +} func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { b := v.Block _ = b @@ -40043,7 +40209,7 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { return true } // match: (SUB x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && 
is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBload [off] {sym} x ptr mem) for { t := v.Type @@ -40058,7 +40224,7 @@ func rewriteValueS390X_OpS390XSUB_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBload) @@ -40123,7 +40289,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { return true } // match: (SUBW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -40138,7 +40304,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) @@ -40151,7 +40317,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { return true } // match: (SUBW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (SUBWload [off] {sym} x ptr mem) for { t := v.Type @@ -40166,7 +40332,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XSUBWload) @@ -40357,7 +40523,7 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { } v.reset(OpS390XSUB) v.AddArg(x) - v0 := b.NewValue0(v.Pos, 
OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true @@ -40417,6 +40583,67 @@ func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XSumBytes2_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SumBytes2 x) + // cond: + // result: (ADDW (SRWconst x [8]) x) + for { + x := v.Args[0] + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8) + v0.AuxInt = 8 + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueS390X_OpS390XSumBytes4_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SumBytes4 x) + // cond: + // result: (SumBytes2 (ADDW (SRWconst x [16]) x)) + for { + x := v.Args[0] + v.reset(OpS390XSumBytes2) + v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16) + v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16) + v1.AuxInt = 16 + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueS390X_OpS390XSumBytes8_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SumBytes8 x) + // cond: + // result: (SumBytes4 (ADDW (SRDconst x [32]) x)) + for { + x := v.Args[0] + v.reset(OpS390XSumBytes4) + v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32) + v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32) + v1.AuxInt = 32 + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) @@ -40562,7 +40789,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := 
v.Type @@ -40577,7 +40804,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40590,7 +40817,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40605,7 +40832,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40618,7 +40845,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { return true } // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40633,7 +40860,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40649,7 +40876,7 @@ func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { } func rewriteValueS390X_OpS390XXOR_10(v *Value) bool { // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // 
cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORload [off] {sym} x ptr mem) for { t := v.Type @@ -40664,7 +40891,7 @@ func rewriteValueS390X_OpS390XXOR_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORload) @@ -40779,7 +41006,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40794,7 +41021,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40807,7 +41034,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40822,7 +41049,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40835,7 +41062,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW g:(MOVWload [off] 
{sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40850,7 +41077,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40863,7 +41090,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40878,7 +41105,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40891,7 +41118,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { return true } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40906,7 +41133,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40922,7 +41149,7 
@@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { } func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40937,7 +41164,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40950,7 +41177,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { return true } // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40965,7 +41192,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { ptr := g.Args[0] mem := g.Args[1] x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -40978,7 +41205,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { return true } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) for { t := v.Type @@ -40993,7 +41220,7 @@ func rewriteValueS390X_OpS390XXORW_10(v *Value) bool { _ = g.Args[1] ptr := g.Args[0] mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) 
&& clobber(g)) { + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoadClobber(v, g, x) && clobber(g)) { break } v.reset(OpS390XXORWload) @@ -41155,7 +41382,7 @@ func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { } v.reset(OpS390XXOR) v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t) v0.AddArg(y) v.AddArg(v0) return true diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index c07651ef0ecb3..c17ed54b3c952 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -5071,7 +5073,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool { _ = typ // match: (F64Add (F64Const [x]) (F64Const [y])) // cond: - // result: (F64Const [f2i(i2f(x) + i2f(y))]) + // result: (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -5085,7 +5087,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool { } y := v_1.AuxInt v.reset(OpWasmF64Const) - v.AuxInt = f2i(i2f(x) + i2f(y)) + v.AuxInt = auxFrom64F(auxTo64F(x) + auxTo64F(y)) return true } // match: (F64Add (F64Const [x]) y) @@ -5115,7 +5117,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool { _ = typ // match: (F64Mul (F64Const [x]) (F64Const [y])) // cond: - // result: (F64Const [f2i(i2f(x) * i2f(y))]) + // result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -5129,7 +5131,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool { } y := v_1.AuxInt v.reset(OpWasmF64Const) - v.AuxInt = f2i(i2f(x) * i2f(y)) + v.AuxInt = auxFrom64F(auxTo64F(x) * 
auxTo64F(y)) return true } // match: (F64Mul (F64Const [x]) y) diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 8ca737bed1f88..e980520376f34 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 500e274206bbc..f88fce8076fb6 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go new file mode 100644 index 0000000000000..6b823252ea9e9 --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewritedecArgs.go @@ -0,0 +1,288 @@ +// Code generated from gen/decArgs.rules; DO NOT EDIT. 
+// generated with: cd gen; go run *.go + +package ssa + +import "fmt" +import "math" +import "cmd/internal/obj" +import "cmd/internal/objabi" +import "cmd/compile/internal/types" + +var _ = fmt.Println // in case not otherwise used +var _ = math.MinInt8 // in case not otherwise used +var _ = obj.ANOP // in case not otherwise used +var _ = objabi.GOROOT // in case not otherwise used +var _ = types.TypeMem // in case not otherwise used + +func rewriteValuedecArgs(v *Value) bool { + switch v.Op { + case OpArg: + return rewriteValuedecArgs_OpArg_0(v) || rewriteValuedecArgs_OpArg_10(v) + } + return false +} +func rewriteValuedecArgs_OpArg_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + fe := b.Func.fe + _ = fe + typ := &b.Func.Config.Types + _ = typ + // match: (Arg {n} [off]) + // cond: v.Type.IsString() + // result: (StringMake (Arg {n} [off]) (Arg {n} [off+config.PtrSize])) + for { + off := v.AuxInt + n := v.Aux + if !(v.Type.IsString()) { + break + } + v.reset(OpStringMake) + v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, typ.Int) + v1.AuxInt = off + config.PtrSize + v1.Aux = n + v.AddArg(v1) + return true + } + // match: (Arg {n} [off]) + // cond: v.Type.IsSlice() + // result: (SliceMake (Arg {n} [off]) (Arg {n} [off+config.PtrSize]) (Arg {n} [off+2*config.PtrSize])) + for { + off := v.AuxInt + n := v.Aux + if !(v.Type.IsSlice()) { + break + } + v.reset(OpSliceMake) + v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo()) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, typ.Int) + v1.AuxInt = off + config.PtrSize + v1.Aux = n + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpArg, typ.Int) + v2.AuxInt = off + 2*config.PtrSize + v2.Aux = n + v.AddArg(v2) + return true + } + // match: (Arg {n} [off]) + // cond: v.Type.IsInterface() + // result: (IMake (Arg {n} [off]) (Arg {n} [off+config.PtrSize])) + for { + off := v.AuxInt + n := 
v.Aux + if !(v.Type.IsInterface()) { + break + } + v.reset(OpIMake) + v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) + v1.AuxInt = off + config.PtrSize + v1.Aux = n + v.AddArg(v1) + return true + } + // match: (Arg {n} [off]) + // cond: v.Type.IsComplex() && v.Type.Size() == 16 + // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+8])) + for { + off := v.AuxInt + n := v.Aux + if !(v.Type.IsComplex() && v.Type.Size() == 16) { + break + } + v.reset(OpComplexMake) + v0 := b.NewValue0(v.Pos, OpArg, typ.Float64) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, typ.Float64) + v1.AuxInt = off + 8 + v1.Aux = n + v.AddArg(v1) + return true + } + // match: (Arg {n} [off]) + // cond: v.Type.IsComplex() && v.Type.Size() == 8 + // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+4])) + for { + off := v.AuxInt + n := v.Aux + if !(v.Type.IsComplex() && v.Type.Size() == 8) { + break + } + v.reset(OpComplexMake) + v0 := b.NewValue0(v.Pos, OpArg, typ.Float32) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, typ.Float32) + v1.AuxInt = off + 4 + v1.Aux = n + v.AddArg(v1) + return true + } + // match: (Arg ) + // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) + // result: (StructMake0) + for { + t := v.Type + if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake0) + return true + } + // match: (Arg {n} [off]) + // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) + // result: (StructMake1 (Arg {n} [off+t.FieldOff(0)])) + for { + t := v.Type + off := v.AuxInt + n := v.Aux + if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake1) + v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) + v0.AuxInt = off + t.FieldOff(0) + v0.Aux = n + v.AddArg(v0) + return true + } + // match: (Arg {n} [off]) + // cond: t.IsStruct() && t.NumFields() == 2 && 
fe.CanSSA(t) + // result: (StructMake2 (Arg {n} [off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)])) + for { + t := v.Type + off := v.AuxInt + n := v.Aux + if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake2) + v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) + v0.AuxInt = off + t.FieldOff(0) + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) + v1.AuxInt = off + t.FieldOff(1) + v1.Aux = n + v.AddArg(v1) + return true + } + // match: (Arg {n} [off]) + // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) + // result: (StructMake3 (Arg {n} [off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)]) (Arg {n} [off+t.FieldOff(2)])) + for { + t := v.Type + off := v.AuxInt + n := v.Aux + if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake3) + v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) + v0.AuxInt = off + t.FieldOff(0) + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) + v1.AuxInt = off + t.FieldOff(1) + v1.Aux = n + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) + v2.AuxInt = off + t.FieldOff(2) + v2.Aux = n + v.AddArg(v2) + return true + } + // match: (Arg {n} [off]) + // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) + // result: (StructMake4 (Arg {n} [off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)]) (Arg {n} [off+t.FieldOff(2)]) (Arg {n} [off+t.FieldOff(3)])) + for { + t := v.Type + off := v.AuxInt + n := v.Aux + if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { + break + } + v.reset(OpStructMake4) + v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) + v0.AuxInt = off + t.FieldOff(0) + v0.Aux = n + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) + v1.AuxInt = off + t.FieldOff(1) + v1.Aux = n + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) + v2.AuxInt = off + t.FieldOff(2) + v2.Aux = n + v.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3)) + v3.AuxInt = off + 
t.FieldOff(3) + v3.Aux = n + v.AddArg(v3) + return true + } + return false +} +func rewriteValuedecArgs_OpArg_10(v *Value) bool { + b := v.Block + _ = b + fe := b.Func.fe + _ = fe + // match: (Arg ) + // cond: t.IsArray() && t.NumElem() == 0 + // result: (ArrayMake0) + for { + t := v.Type + if !(t.IsArray() && t.NumElem() == 0) { + break + } + v.reset(OpArrayMake0) + return true + } + // match: (Arg {n} [off]) + // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) + // result: (ArrayMake1 (Arg {n} [off])) + for { + t := v.Type + off := v.AuxInt + n := v.Aux + if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { + break + } + v.reset(OpArrayMake1) + v0 := b.NewValue0(v.Pos, OpArg, t.Elem()) + v0.AuxInt = off + v0.Aux = n + v.AddArg(v0) + return true + } + return false +} +func rewriteBlockdecArgs(b *Block) bool { + config := b.Func.Config + _ = config + fe := b.Func.fe + _ = fe + typ := &config.Types + _ = typ + switch b.Kind { + } + return false +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index a1c83ea37834e..f2c7529e024f7 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -3,11 +3,13 @@ package ssa +import "fmt" import "math" import "cmd/internal/obj" import "cmd/internal/objabi" import "cmd/compile/internal/types" +var _ = fmt.Println // in case not otherwise used var _ = math.MinInt8 // in case not otherwise used var _ = obj.ANOP // in case not otherwise used var _ = objabi.GOROOT // in case not otherwise used @@ -37,8 +39,6 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpAnd64_0(v) || rewriteValuegeneric_OpAnd64_10(v) || rewriteValuegeneric_OpAnd64_20(v) case OpAnd8: return rewriteValuegeneric_OpAnd8_0(v) || rewriteValuegeneric_OpAnd8_10(v) || rewriteValuegeneric_OpAnd8_20(v) - case OpArg: - return rewriteValuegeneric_OpArg_0(v) || rewriteValuegeneric_OpArg_10(v) case OpArraySelect: return 
rewriteValuegeneric_OpArraySelect_0(v) case OpCom16: @@ -2409,7 +2409,7 @@ func rewriteValuegeneric_OpAdd32_30(v *Value) bool { func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { // match: (Add32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -2423,12 +2423,12 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) + i2f32(d))) + v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d)) return true } // match: (Add32F (Const32F [d]) (Const32F [c])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -2442,43 +2442,7 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) + i2f32(d))) - return true - } - // match: (Add32F x (Const32F [0])) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add32F (Const32F [0]) x) - // cond: - // result: x - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d)) return true } return false @@ -3454,7 +3418,7 @@ func rewriteValuegeneric_OpAdd64_30(v *Value) bool { func rewriteValuegeneric_OpAdd64F_0(v *Value) bool { // match: (Add64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Const64F [f2i(i2f(c) + i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -3468,12 +3432,12 @@ func 
rewriteValuegeneric_OpAdd64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) + i2f(d)) + v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d)) return true } // match: (Add64F (Const64F [d]) (Const64F [c])) // cond: - // result: (Const64F [f2i(i2f(c) + i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -3487,43 +3451,7 @@ func rewriteValuegeneric_OpAdd64F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) + i2f(d)) - return true - } - // match: (Add64F x (Const64F [0])) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add64F (Const64F [0]) x) - // cond: - // result: x - for { - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d)) return true } return false @@ -6887,259 +6815,6 @@ func rewriteValuegeneric_OpAnd8_20(v *Value) bool { } return false } -func rewriteValuegeneric_OpArg_0(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - fe := b.Func.fe - _ = fe - typ := &b.Func.Config.Types - _ = typ - // match: (Arg {n} [off]) - // cond: v.Type.IsString() - // result: (StringMake (Arg {n} [off]) (Arg {n} [off+config.PtrSize])) - for { - off := v.AuxInt - n := v.Aux - if !(v.Type.IsString()) { - break - } - v.reset(OpStringMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, typ.Int) - v1.AuxInt = off + config.PtrSize - v1.Aux = n - v.AddArg(v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsSlice() - // result: (SliceMake (Arg {n} [off]) (Arg {n} 
[off+config.PtrSize]) (Arg {n} [off+2*config.PtrSize])) - for { - off := v.AuxInt - n := v.Aux - if !(v.Type.IsSlice()) { - break - } - v.reset(OpSliceMake) - v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo()) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, typ.Int) - v1.AuxInt = off + config.PtrSize - v1.Aux = n - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpArg, typ.Int) - v2.AuxInt = off + 2*config.PtrSize - v2.Aux = n - v.AddArg(v2) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsInterface() - // result: (IMake (Arg {n} [off]) (Arg {n} [off+config.PtrSize])) - for { - off := v.AuxInt - n := v.Aux - if !(v.Type.IsInterface()) { - break - } - v.reset(OpIMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr) - v1.AuxInt = off + config.PtrSize - v1.Aux = n - v.AddArg(v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsComplex() && v.Type.Size() == 16 - // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+8])) - for { - off := v.AuxInt - n := v.Aux - if !(v.Type.IsComplex() && v.Type.Size() == 16) { - break - } - v.reset(OpComplexMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Float64) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, typ.Float64) - v1.AuxInt = off + 8 - v1.Aux = n - v.AddArg(v1) - return true - } - // match: (Arg {n} [off]) - // cond: v.Type.IsComplex() && v.Type.Size() == 8 - // result: (ComplexMake (Arg {n} [off]) (Arg {n} [off+4])) - for { - off := v.AuxInt - n := v.Aux - if !(v.Type.IsComplex() && v.Type.Size() == 8) { - break - } - v.reset(OpComplexMake) - v0 := b.NewValue0(v.Pos, OpArg, typ.Float32) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, typ.Float32) - v1.AuxInt = off + 4 - v1.Aux = n - v.AddArg(v1) - return true - } - // match: (Arg ) - // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) - // result: 
(StructMake0) - for { - t := v.Type - if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) - // result: (StructMake1 (Arg {n} [off+t.FieldOff(0)])) - for { - t := v.Type - off := v.AuxInt - n := v.Aux - if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake1) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = off + t.FieldOff(0) - v0.Aux = n - v.AddArg(v0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) - // result: (StructMake2 (Arg {n} [off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)])) - for { - t := v.Type - off := v.AuxInt - n := v.Aux - if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake2) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = off + t.FieldOff(0) - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = off + t.FieldOff(1) - v1.Aux = n - v.AddArg(v1) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) - // result: (StructMake3 (Arg {n} [off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)]) (Arg {n} [off+t.FieldOff(2)])) - for { - t := v.Type - off := v.AuxInt - n := v.Aux - if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake3) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = off + t.FieldOff(0) - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = off + t.FieldOff(1) - v1.Aux = n - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) - v2.AuxInt = off + t.FieldOff(2) - v2.Aux = n - v.AddArg(v2) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) - // result: (StructMake4 (Arg {n} 
[off+t.FieldOff(0)]) (Arg {n} [off+t.FieldOff(1)]) (Arg {n} [off+t.FieldOff(2)]) (Arg {n} [off+t.FieldOff(3)])) - for { - t := v.Type - off := v.AuxInt - n := v.Aux - if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake4) - v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0)) - v0.AuxInt = off + t.FieldOff(0) - v0.Aux = n - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1)) - v1.AuxInt = off + t.FieldOff(1) - v1.Aux = n - v.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2)) - v2.AuxInt = off + t.FieldOff(2) - v2.Aux = n - v.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3)) - v3.AuxInt = off + t.FieldOff(3) - v3.Aux = n - v.AddArg(v3) - return true - } - return false -} -func rewriteValuegeneric_OpArg_10(v *Value) bool { - b := v.Block - _ = b - fe := b.Func.fe - _ = fe - // match: (Arg ) - // cond: t.IsArray() && t.NumElem() == 0 - // result: (ArrayMake0) - for { - t := v.Type - if !(t.IsArray() && t.NumElem() == 0) { - break - } - v.reset(OpArrayMake0) - return true - } - // match: (Arg {n} [off]) - // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) - // result: (ArrayMake1 (Arg {n} [off])) - for { - t := v.Type - off := v.AuxInt - n := v.Aux - if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) { - break - } - v.reset(OpArrayMake1) - v0 := b.NewValue0(v.Pos, OpArg, t.Elem()) - v0.AuxInt = off - v0.Aux = n - v.AddArg(v0) - return true - } - return false -} func rewriteValuegeneric_OpArraySelect_0(v *Value) bool { // match: (ArraySelect (ArrayMake1 x)) // cond: @@ -7566,7 +7241,7 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool { // match: (Cvt32Fto32 (Const32F [c])) // cond: - // result: (Const32 [int64(int32(i2f(c)))]) + // result: (Const32 [int64(int32(auxTo32F(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32F { @@ -7574,7 +7249,7 @@ func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool { } c := v_0.AuxInt 
v.reset(OpConst32) - v.AuxInt = int64(int32(i2f(c))) + v.AuxInt = int64(int32(auxTo32F(c))) return true } return false @@ -7582,7 +7257,7 @@ func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool { func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool { // match: (Cvt32Fto64 (Const32F [c])) // cond: - // result: (Const64 [int64(i2f(c))]) + // result: (Const64 [int64(auxTo32F(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32F { @@ -7590,7 +7265,7 @@ func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst64) - v.AuxInt = int64(i2f(c)) + v.AuxInt = int64(auxTo32F(c)) return true } return false @@ -7614,7 +7289,7 @@ func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool { func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool { // match: (Cvt32to32F (Const32 [c])) // cond: - // result: (Const32F [f2i(float64(float32(int32(c))))]) + // result: (Const32F [auxFrom32F(float32(int32(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -7622,7 +7297,7 @@ func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(float32(int32(c)))) + v.AuxInt = auxFrom32F(float32(int32(c))) return true } return false @@ -7630,7 +7305,7 @@ func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool { func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool { // match: (Cvt32to64F (Const32 [c])) // cond: - // result: (Const64F [f2i(float64(int32(c)))]) + // result: (Const64F [auxFrom64F(float64(int32(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -7638,7 +7313,7 @@ func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(float64(int32(c))) + v.AuxInt = auxFrom64F(float64(int32(c))) return true } return false @@ -7646,7 +7321,7 @@ func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool { // match: (Cvt64Fto32 (Const64F [c])) // cond: - // result: (Const32 [int64(int32(i2f(c)))]) 
+ // result: (Const32 [int64(int32(auxTo64F(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64F { @@ -7654,7 +7329,7 @@ func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst32) - v.AuxInt = int64(int32(i2f(c))) + v.AuxInt = int64(int32(auxTo64F(c))) return true } return false @@ -7662,7 +7337,7 @@ func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool { // match: (Cvt64Fto32F (Const64F [c])) // cond: - // result: (Const32F [f2i(float64(i2f32(c)))]) + // result: (Const32F [auxFrom32F(float32(auxTo64F(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64F { @@ -7670,7 +7345,7 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c))) + v.AuxInt = auxFrom32F(float32(auxTo64F(c))) return true } return false @@ -7678,7 +7353,7 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool { func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool { // match: (Cvt64Fto64 (Const64F [c])) // cond: - // result: (Const64 [int64(i2f(c))]) + // result: (Const64 [int64(auxTo64F(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64F { @@ -7686,7 +7361,7 @@ func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst64) - v.AuxInt = int64(i2f(c)) + v.AuxInt = int64(auxTo64F(c)) return true } return false @@ -7694,7 +7369,7 @@ func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool { func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool { // match: (Cvt64to32F (Const64 [c])) // cond: - // result: (Const32F [f2i(float64(float32(c)))]) + // result: (Const32F [auxFrom32F(float32(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -7702,7 +7377,7 @@ func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(float32(c))) + v.AuxInt = auxFrom32F(float32(c)) return true } return false @@ -7710,7 +7385,7 @@ func 
rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool { func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool { // match: (Cvt64to64F (Const64 [c])) // cond: - // result: (Const64F [f2i(float64(c))]) + // result: (Const64F [auxFrom64F(float64(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -7718,7 +7393,7 @@ func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(float64(c)) + v.AuxInt = auxFrom64F(float64(c)) return true } return false @@ -8342,7 +8017,7 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { _ = b // match: (Div32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) / i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -8356,12 +8031,12 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) / i2f32(d))) + v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d)) return true } // match: (Div32F x (Const32F [c])) - // cond: reciprocalExact32(float32(i2f(c))) - // result: (Mul32F x (Const32F [f2i(1/i2f(c))])) + // cond: reciprocalExact32(auxTo32F(c)) + // result: (Mul32F x (Const32F [auxFrom32F(1/auxTo32F(c))])) for { _ = v.Args[1] x := v.Args[0] @@ -8371,13 +8046,13 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool { } t := v_1.Type c := v_1.AuxInt - if !(reciprocalExact32(float32(i2f(c)))) { + if !(reciprocalExact32(auxTo32F(c))) { break } v.reset(OpMul32F) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst32F, t) - v0.AuxInt = f2i(1 / i2f(c)) + v0.AuxInt = auxFrom32F(1 / auxTo32F(c)) v.AddArg(v0) return true } @@ -8866,7 +8541,7 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool { _ = b // match: (Div64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Const64F [f2i(i2f(c) / i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -8880,12 +8555,12 @@ func 
rewriteValuegeneric_OpDiv64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) / i2f(d)) + v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d)) return true } // match: (Div64F x (Const64F [c])) - // cond: reciprocalExact64(i2f(c)) - // result: (Mul64F x (Const64F [f2i(1/i2f(c))])) + // cond: reciprocalExact64(auxTo64F(c)) + // result: (Mul64F x (Const64F [auxFrom64F(1/auxTo64F(c))])) for { _ = v.Args[1] x := v.Args[0] @@ -8895,13 +8570,13 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool { } t := v_1.Type c := v_1.AuxInt - if !(reciprocalExact64(i2f(c))) { + if !(reciprocalExact64(auxTo64F(c))) { break } v.reset(OpMul64F) v.AddArg(x) v0 := b.NewValue0(v.Pos, OpConst64F, t) - v0.AuxInt = f2i(1 / i2f(c)) + v0.AuxInt = auxFrom64F(1 / auxTo64F(c)) v.AddArg(v0) return true } @@ -9802,7 +9477,7 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { func rewriteValuegeneric_OpEq32F_0(v *Value) bool { // match: (Eq32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -9816,12 +9491,12 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) == i2f(d)) + v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d)) return true } // match: (Eq32F (Const32F [d]) (Const32F [c])) // cond: - // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -9835,7 +9510,7 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) == i2f(d)) + v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d)) return true } return false @@ -10081,7 +9756,7 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { func rewriteValuegeneric_OpEq64F_0(v *Value) bool { // match: (Eq64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) == 
i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -10095,12 +9770,12 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) == i2f(d)) + v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d)) return true } // match: (Eq64F (Const64F [d]) (Const64F [c])) // cond: - // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -10114,7 +9789,7 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) == i2f(d)) + v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d)) return true } return false @@ -11077,7 +10752,7 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool { func rewriteValuegeneric_OpGeq32F_0(v *Value) bool { // match: (Geq32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) >= i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) >= auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -11091,7 +10766,7 @@ func rewriteValuegeneric_OpGeq32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) >= i2f(d)) + v.AuxInt = b2i(auxTo32F(c) >= auxTo32F(d)) return true } return false @@ -11143,7 +10818,7 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { func rewriteValuegeneric_OpGeq64F_0(v *Value) bool { // match: (Geq64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) >= i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) >= auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -11157,7 +10832,7 @@ func rewriteValuegeneric_OpGeq64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) >= i2f(d)) + v.AuxInt = b2i(auxTo64F(c) >= auxTo64F(d)) return true } return false @@ -11297,7 +10972,7 @@ func rewriteValuegeneric_OpGreater32_0(v *Value) bool { func rewriteValuegeneric_OpGreater32F_0(v *Value) bool { // match: 
(Greater32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) > i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) > auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -11311,7 +10986,7 @@ func rewriteValuegeneric_OpGreater32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) > i2f(d)) + v.AuxInt = b2i(auxTo32F(c) > auxTo32F(d)) return true } return false @@ -11363,7 +11038,7 @@ func rewriteValuegeneric_OpGreater64_0(v *Value) bool { func rewriteValuegeneric_OpGreater64F_0(v *Value) bool { // match: (Greater64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) > i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) > auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -11377,7 +11052,7 @@ func rewriteValuegeneric_OpGreater64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) > i2f(d)) + v.AuxInt = b2i(auxTo64F(c) > auxTo64F(d)) return true } return false @@ -12945,7 +12620,7 @@ func rewriteValuegeneric_OpLeq32_0(v *Value) bool { func rewriteValuegeneric_OpLeq32F_0(v *Value) bool { // match: (Leq32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) <= i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) <= auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -12959,7 +12634,7 @@ func rewriteValuegeneric_OpLeq32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) <= i2f(d)) + v.AuxInt = b2i(auxTo32F(c) <= auxTo32F(d)) return true } return false @@ -13011,7 +12686,7 @@ func rewriteValuegeneric_OpLeq64_0(v *Value) bool { func rewriteValuegeneric_OpLeq64F_0(v *Value) bool { // match: (Leq64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) <= i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) <= auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -13025,7 +12700,7 @@ func rewriteValuegeneric_OpLeq64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - 
v.AuxInt = b2i(i2f(c) <= i2f(d)) + v.AuxInt = b2i(auxTo64F(c) <= auxTo64F(d)) return true } return false @@ -13165,7 +12840,7 @@ func rewriteValuegeneric_OpLess32_0(v *Value) bool { func rewriteValuegeneric_OpLess32F_0(v *Value) bool { // match: (Less32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) < i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) < auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -13179,7 +12854,7 @@ func rewriteValuegeneric_OpLess32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) < i2f(d)) + v.AuxInt = b2i(auxTo32F(c) < auxTo32F(d)) return true } return false @@ -13231,7 +12906,7 @@ func rewriteValuegeneric_OpLess64_0(v *Value) bool { func rewriteValuegeneric_OpLess64F_0(v *Value) bool { // match: (Less64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) < i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) < auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -13245,7 +12920,7 @@ func rewriteValuegeneric_OpLess64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) < i2f(d)) + v.AuxInt = b2i(auxTo64F(c) < auxTo64F(d)) return true } return false @@ -13483,7 +13158,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { } // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) - // result: (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) + // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))]) for { t1 := v.Type _ = v.Args[1] @@ -13504,7 +13179,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break } v.reset(OpConst32F) - v.AuxInt = f2i(float64(math.Float32frombits(uint32(x)))) + v.AuxInt = auxFrom32F(math.Float32frombits(uint32(x))) return true } // match: (Load p1 (Store {t2} p2 (Const64F [x]) _)) @@ -13535,7 +13210,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { } // match: (Load p1 (Store {t2} p2 (Const32F [x]) _)) 
// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) - // result: (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) + // result: (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))]) for { t1 := v.Type _ = v.Args[1] @@ -13556,7 +13231,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { break } v.reset(OpConst32) - v.AuxInt = int64(int32(math.Float32bits(float32(i2f(x))))) + v.AuxInt = int64(int32(math.Float32bits(auxTo32F(x)))) return true } // match: (Load op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _))) @@ -16223,9 +15898,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { v.AddArg(v0) return true } - // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) for { n := v.AuxInt t1 := v.Aux @@ -16242,6 +15917,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op2.Op != OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem.Args[1] @@ -16255,6 +15931,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op3.Op != OpOffPtr { break } + tt3 := op3.Type if op3.AuxInt != 0 { break } @@ -16265,14 +15942,14 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 
:= b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) v1.AddArg(v2) @@ -16281,9 +15958,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { v.AddArg(v1) return true } - // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) for { n := v.AuxInt t1 := v.Aux @@ -16300,6 +15977,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op2.Op != OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem.Args[1] @@ -16313,6 +15991,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op3.Op != OpOffPtr { break } + tt3 := op3.Type o3 := op3.AuxInt p3 := op3.Args[0] d2 := mem_2.Args[1] @@ -16326,6 +16005,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op4.Op != OpOffPtr { break } + tt4 := op4.Type if op4.AuxInt != 0 { break } @@ -16336,21 +16016,21 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 := b.NewValue0(v.Pos, OpOffPtr, 
tt3) v2.AuxInt = o3 v2.AddArg(dst) v1.AddArg(v2) v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 - v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) v3.AddArg(v4) @@ -16360,9 +16040,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { v.AddArg(v1) return true } - // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) + // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) for { n := v.AuxInt t1 := v.Aux @@ -16379,6 +16059,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op2.Op != OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem.Args[1] @@ -16392,6 +16073,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op3.Op != OpOffPtr { break } + tt3 := op3.Type o3 := op3.AuxInt p3 := op3.Args[0] d2 := mem_2.Args[1] @@ -16405,6 +16087,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op4.Op != OpOffPtr { break } + tt4 := op4.Type o4 
:= op4.AuxInt p4 := op4.Args[0] d3 := mem_2_2.Args[1] @@ -16418,6 +16101,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { if op5.Op != OpOffPtr { break } + tt5 := op5.Type if op5.AuxInt != 0 { break } @@ -16428,28 +16112,28 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) v1.AddArg(v2) v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 - v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) v3.AddArg(v4) v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 - v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type)) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) v5.AddArg(v6) @@ -16465,9 +16149,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool { func rewriteValuegeneric_OpMove_10(v *Value) bool { b := v.Block _ = b - // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [0] dst) d2 mem)) for { n := v.AuxInt t1 := v.Aux @@ -16488,6 +16172,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op2.Op != 
OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem_0.Args[1] @@ -16501,6 +16186,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op3.Op != OpOffPtr { break } + tt3 := op3.Type if op3.AuxInt != 0 { break } @@ -16511,14 +16197,14 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = 0 v2.AddArg(dst) v1.AddArg(v2) @@ -16527,9 +16213,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { v.AddArg(v1) return true } - // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [0] dst) d3 mem))) for { n := v.AuxInt t1 := v.Aux @@ -16550,6 +16236,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op2.Op != OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem_0.Args[1] @@ -16563,6 +16250,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op3.Op != 
OpOffPtr { break } + tt3 := op3.Type o3 := op3.AuxInt p3 := op3.Args[0] d2 := mem_0_2.Args[1] @@ -16576,6 +16264,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op4.Op != OpOffPtr { break } + tt4 := op4.Type if op4.AuxInt != 0 { break } @@ -16586,21 +16275,21 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) v1.AddArg(v2) v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 - v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = 0 v4.AddArg(dst) v3.AddArg(v4) @@ -16610,9 +16299,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { v.AddArg(v1) return true } - // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) + // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5) - // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) 
d4 mem)))) + // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [0] dst) d4 mem)))) for { n := v.AuxInt t1 := v.Aux @@ -16633,6 +16322,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op2.Op != OpOffPtr { break } + tt2 := op2.Type o2 := op2.AuxInt p2 := op2.Args[0] d1 := mem_0.Args[1] @@ -16646,6 +16336,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op3.Op != OpOffPtr { break } + tt3 := op3.Type o3 := op3.AuxInt p3 := op3.Args[0] d2 := mem_0_2.Args[1] @@ -16659,6 +16350,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op4.Op != OpOffPtr { break } + tt4 := op4.Type o4 := op4.AuxInt p4 := op4.Args[0] d3 := mem_0_2_2.Args[1] @@ -16672,6 +16364,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { if op5.Op != OpOffPtr { break } + tt5 := op5.Type if op5.AuxInt != 0 { break } @@ -16682,28 +16375,28 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { } v.reset(OpStore) v.Aux = t2 - v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type)) + v0 := b.NewValue0(v.Pos, OpOffPtr, tt2) v0.AuxInt = o2 v0.AddArg(dst) v.AddArg(v0) v.AddArg(d1) v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v1.Aux = t3 - v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type)) + v2 := b.NewValue0(v.Pos, OpOffPtr, tt3) v2.AuxInt = o3 v2.AddArg(dst) v1.AddArg(v2) v1.AddArg(d2) v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v3.Aux = t4 - v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type)) + v4 := b.NewValue0(v.Pos, OpOffPtr, tt4) v4.AuxInt = o4 v4.AddArg(dst) v3.AddArg(v4) v3.AddArg(d3) v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem) v5.Aux = t5 - v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type)) + v6 := b.NewValue0(v.Pos, OpOffPtr, tt5) v6.AuxInt = 0 v6.AddArg(dst) v5.AddArg(v6) @@ -17286,6 +16979,8 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool { func rewriteValuegeneric_OpMove_20(v *Value) bool { b := v.Block _ = b + config := b.Func.Config + _ = config // match: (Move {t1} 
[n] dst p1 mem:(VarDef (Store {t2} (OffPtr [o2] p2) d1 (Store {t3} (OffPtr [o3] p3) d2 (Store {t4} (OffPtr [o4] p4) d3 (Store {t5} (OffPtr [o5] p5) d4 (Zero {t6} [n] p6 _))))))) // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && alignof(t6) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + sizeof(t2) && n >= o3 + sizeof(t3) && n >= o4 + sizeof(t4) && n >= o5 + sizeof(t5) // result: (Store {t2} (OffPtr [o2] dst) d1 (Store {t3} (OffPtr [o3] dst) d2 (Store {t4} (OffPtr [o4] dst) d3 (Store {t5} (OffPtr [o5] dst) d4 (Zero {t1} [n] dst mem))))) @@ -17407,6 +17102,88 @@ func rewriteValuegeneric_OpMove_20(v *Value) bool { v.AddArg(v1) return true } + // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) + // cond: t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + // result: (Move {t1} [s] dst src midmem) + for { + s := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + tmp1 := v.Args[1] + midmem := v.Args[2] + if midmem.Op != OpMove { + break + } + if midmem.AuxInt != s { + break + } + t2 := midmem.Aux + _ = midmem.Args[2] + tmp2 := midmem.Args[0] + src := midmem.Args[1] + if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { + break + } + v.reset(OpMove) + v.AuxInt = s + v.Aux = t1 + v.AddArg(dst) + v.AddArg(src) + v.AddArg(midmem) + return true + } + // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) + // cond: t1.(*types.Type).Compare(t2.(*types.Type)) == 
types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + // result: (Move {t1} [s] dst src midmem) + for { + s := v.AuxInt + t1 := v.Aux + _ = v.Args[2] + dst := v.Args[0] + tmp1 := v.Args[1] + midmem := v.Args[2] + if midmem.Op != OpVarDef { + break + } + midmem_0 := midmem.Args[0] + if midmem_0.Op != OpMove { + break + } + if midmem_0.AuxInt != s { + break + } + t2 := midmem_0.Aux + _ = midmem_0.Args[2] + tmp2 := midmem_0.Args[0] + src := midmem_0.Args[1] + if !(t1.(*types.Type).Compare(t2.(*types.Type)) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) { + break + } + v.reset(OpMove) + v.AuxInt = s + v.Aux = t1 + v.AddArg(dst) + v.AddArg(src) + v.AddArg(midmem) + return true + } + // match: (Move dst src mem) + // cond: isSamePtr(dst, src) + // result: mem + for { + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(isSamePtr(dst, src)) { + break + } + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } return false } func rewriteValuegeneric_OpMul16_0(v *Value) bool { @@ -18320,7 +18097,7 @@ func rewriteValuegeneric_OpMul32_10(v *Value) bool { func rewriteValuegeneric_OpMul32F_0(v *Value) bool { // match: (Mul32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -18334,12 +18111,12 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) * i2f32(d))) + v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) return true } // match: (Mul32F (Const32F [d]) (Const32F [c])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) * 
auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -18353,10 +18130,10 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) * i2f32(d))) + v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d)) return true } - // match: (Mul32F x (Const32F [f2i(1)])) + // match: (Mul32F x (Const32F [auxFrom64F(1)])) // cond: // result: x for { @@ -18366,7 +18143,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_1.Op != OpConst32F { break } - if v_1.AuxInt != f2i(1) { + if v_1.AuxInt != auxFrom64F(1) { break } v.reset(OpCopy) @@ -18374,7 +18151,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32F (Const32F [f2i(1)]) x) + // match: (Mul32F (Const32F [auxFrom64F(1)]) x) // cond: // result: x for { @@ -18383,7 +18160,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_0.Op != OpConst32F { break } - if v_0.AuxInt != f2i(1) { + if v_0.AuxInt != auxFrom64F(1) { break } x := v.Args[1] @@ -18392,7 +18169,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32F x (Const32F [f2i(-1)])) + // match: (Mul32F x (Const32F [auxFrom32F(-1)])) // cond: // result: (Neg32F x) for { @@ -18402,14 +18179,14 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_1.Op != OpConst32F { break } - if v_1.AuxInt != f2i(-1) { + if v_1.AuxInt != auxFrom32F(-1) { break } v.reset(OpNeg32F) v.AddArg(x) return true } - // match: (Mul32F (Const32F [f2i(-1)]) x) + // match: (Mul32F (Const32F [auxFrom32F(-1)]) x) // cond: // result: (Neg32F x) for { @@ -18418,7 +18195,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_0.Op != OpConst32F { break } - if v_0.AuxInt != f2i(-1) { + if v_0.AuxInt != auxFrom32F(-1) { break } x := v.Args[1] @@ -18426,7 +18203,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32F x (Const32F [f2i(2)])) + // match: (Mul32F x (Const32F 
[auxFrom32F(2)])) // cond: // result: (Add32F x x) for { @@ -18436,7 +18213,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_1.Op != OpConst32F { break } - if v_1.AuxInt != f2i(2) { + if v_1.AuxInt != auxFrom32F(2) { break } v.reset(OpAdd32F) @@ -18444,7 +18221,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32F (Const32F [f2i(2)]) x) + // match: (Mul32F (Const32F [auxFrom32F(2)]) x) // cond: // result: (Add32F x x) for { @@ -18453,7 +18230,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool { if v_0.Op != OpConst32F { break } - if v_0.AuxInt != f2i(2) { + if v_0.AuxInt != auxFrom32F(2) { break } x := v.Args[1] @@ -19001,7 +18778,7 @@ func rewriteValuegeneric_OpMul64_10(v *Value) bool { func rewriteValuegeneric_OpMul64F_0(v *Value) bool { // match: (Mul64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Const64F [f2i(i2f(c) * i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -19015,12 +18792,12 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) * i2f(d)) + v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) return true } // match: (Mul64F (Const64F [d]) (Const64F [c])) // cond: - // result: (Const64F [f2i(i2f(c) * i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -19034,10 +18811,10 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) * i2f(d)) + v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d)) return true } - // match: (Mul64F x (Const64F [f2i(1)])) + // match: (Mul64F x (Const64F [auxFrom64F(1)])) // cond: // result: x for { @@ -19047,7 +18824,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_1.Op != OpConst64F { break } - if v_1.AuxInt != f2i(1) { + if v_1.AuxInt != auxFrom64F(1) { break } v.reset(OpCopy) @@ -19055,7 
+18832,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64F (Const64F [f2i(1)]) x) + // match: (Mul64F (Const64F [auxFrom64F(1)]) x) // cond: // result: x for { @@ -19064,7 +18841,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_0.Op != OpConst64F { break } - if v_0.AuxInt != f2i(1) { + if v_0.AuxInt != auxFrom64F(1) { break } x := v.Args[1] @@ -19073,7 +18850,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64F x (Const64F [f2i(-1)])) + // match: (Mul64F x (Const64F [auxFrom64F(-1)])) // cond: // result: (Neg64F x) for { @@ -19083,14 +18860,14 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_1.Op != OpConst64F { break } - if v_1.AuxInt != f2i(-1) { + if v_1.AuxInt != auxFrom64F(-1) { break } v.reset(OpNeg64F) v.AddArg(x) return true } - // match: (Mul64F (Const64F [f2i(-1)]) x) + // match: (Mul64F (Const64F [auxFrom64F(-1)]) x) // cond: // result: (Neg64F x) for { @@ -19099,7 +18876,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_0.Op != OpConst64F { break } - if v_0.AuxInt != f2i(-1) { + if v_0.AuxInt != auxFrom64F(-1) { break } x := v.Args[1] @@ -19107,7 +18884,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64F x (Const64F [f2i(2)])) + // match: (Mul64F x (Const64F [auxFrom64F(2)])) // cond: // result: (Add64F x x) for { @@ -19117,7 +18894,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_1.Op != OpConst64F { break } - if v_1.AuxInt != f2i(2) { + if v_1.AuxInt != auxFrom64F(2) { break } v.reset(OpAdd64F) @@ -19125,7 +18902,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64F (Const64F [f2i(2)]) x) + // match: (Mul64F (Const64F [auxFrom64F(2)]) x) // cond: // result: (Add64F x x) for { @@ -19134,7 +18911,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool { if v_0.Op != OpConst64F { break } - if 
v_0.AuxInt != f2i(2) { + if v_0.AuxInt != auxFrom64F(2) { break } x := v.Args[1] @@ -19585,19 +19362,19 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool { } func rewriteValuegeneric_OpNeg32F_0(v *Value) bool { // match: (Neg32F (Const32F [c])) - // cond: i2f(c) != 0 - // result: (Const32F [f2i(-i2f(c))]) + // cond: auxTo32F(c) != 0 + // result: (Const32F [auxFrom32F(-auxTo32F(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32F { break } c := v_0.AuxInt - if !(i2f(c) != 0) { + if !(auxTo32F(c) != 0) { break } v.reset(OpConst32F) - v.AuxInt = f2i(-i2f(c)) + v.AuxInt = auxFrom32F(-auxTo32F(c)) return true } return false @@ -19636,19 +19413,19 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool { } func rewriteValuegeneric_OpNeg64F_0(v *Value) bool { // match: (Neg64F (Const64F [c])) - // cond: i2f(c) != 0 - // result: (Const64F [f2i(-i2f(c))]) + // cond: auxTo64F(c) != 0 + // result: (Const64F [auxFrom64F(-auxTo64F(c))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64F { break } c := v_0.AuxInt - if !(i2f(c) != 0) { + if !(auxTo64F(c) != 0) { break } v.reset(OpConst64F) - v.AuxInt = f2i(-i2f(c)) + v.AuxInt = auxFrom64F(-auxTo64F(c)) return true } return false @@ -20164,7 +19941,7 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { // match: (Neq32F (Const32F [c]) (Const32F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -20178,12 +19955,12 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) != i2f(d)) + v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d)) return true } // match: (Neq32F (Const32F [d]) (Const32F [c])) // cond: - // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -20197,7 +19974,7 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) 
bool { } c := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) != i2f(d)) + v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d)) return true } return false @@ -20443,7 +20220,7 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { // match: (Neq64F (Const64F [c]) (Const64F [d])) // cond: - // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -20457,12 +20234,12 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) != i2f(d)) + v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d)) return true } // match: (Neq64F (Const64F [d]) (Const64F [c])) // cond: - // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -20476,7 +20253,7 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(i2f(c) != i2f(d)) + v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d)) return true } return false @@ -21412,7 +21189,7 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { return true } // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) - // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") + // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { _ = v.Args[1] @@ -21435,14 +21212,14 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { break } sym := v_0_1.Aux - if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { + if !(isSameSym(sym, "runtime.newobject") && c == 
config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.reset(OpInvalid) return true } // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) - // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") + // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { _ = v.Args[1] @@ -21469,7 +21246,7 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { break } sym := v_0_0_1.Aux - if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { + if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.reset(OpInvalid) @@ -24905,6 +24682,32 @@ func rewriteValuegeneric_OpRsh16Ux64_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) + // cond: + // result: (Rsh16Ux64 x (Const64 [15])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRsh16x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if v_1.AuxInt != 15 { + break + } + v.reset(OpRsh16Ux64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 15 + v.AddArg(v0) + return true + } // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh16Ux64 x (Const64 [c1-c2+c3])) @@ -25449,6 +25252,32 @@ func rewriteValuegeneric_OpRsh32Ux64_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh32Ux64 (Rsh32x64 
x _) (Const64 [31])) + // cond: + // result: (Rsh32Ux64 x (Const64 [31])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRsh32x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if v_1.AuxInt != 31 { + break + } + v.reset(OpRsh32Ux64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 31 + v.AddArg(v0) + return true + } // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh32Ux64 x (Const64 [c1-c2+c3])) @@ -26055,6 +25884,32 @@ func rewriteValuegeneric_OpRsh64Ux64_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) + // cond: + // result: (Rsh64Ux64 x (Const64 [63])) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRsh64x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if v_1.AuxInt != 63 { + break + } + v.reset(OpRsh64Ux64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 63 + v.AddArg(v0) + return true + } // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh64Ux64 x (Const64 [c1-c2+c3])) @@ -26723,6 +26578,32 @@ func rewriteValuegeneric_OpRsh8Ux64_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 [7])) + // cond: + // result: (Rsh8Ux64 x (Const64 [7] )) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRsh8x64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if v_1.AuxInt != 7 { + break + } + v.reset(OpRsh8Ux64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = 7 + v.AddArg(v0) + 
return true + } // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) // result: (Rsh8Ux64 x (Const64 [c1-c2+c3])) @@ -27497,7 +27378,7 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool { func rewriteValuegeneric_OpSqrt_0(v *Value) bool { // match: (Sqrt (Const64F [c])) // cond: - // result: (Const64F [f2i(math.Sqrt(i2f(c)))]) + // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64F { @@ -27505,7 +27386,7 @@ func rewriteValuegeneric_OpSqrt_0(v *Value) bool { } c := v_0.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(math.Sqrt(i2f(c))) + v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c))) return true } return false @@ -27516,7 +27397,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { config := b.Func.Config _ = config // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst,src,sz,config) && clobber(s1) && clobber(s2) && clobber(s3) + // cond: isSameSym(sym,"runtime.memmove") && t.(*types.Type).IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst,src,sz,config) && clobber(s1) && clobber(s2) && clobber(s3) // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) for { sym := v.Aux @@ -27544,7 +27425,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { _ = s3.Args[2] dst := s3.Args[1] mem := s3.Args[2] - if !(isSameSym(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1) && clobber(s2) && clobber(s3)) { + if !(isSameSym(sym, "runtime.memmove") && t.(*types.Type).IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1) && clobber(s2) && clobber(s3)) { break } 
v.reset(OpMove) @@ -27556,7 +27437,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { return true } // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst,src,sz,config) && clobber(s1) && clobber(s2) && clobber(s3) + // cond: isSameSym(sym,"runtime.memmove") && t.(*types.Type).IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst,src,sz,config) && clobber(s1) && clobber(s2) && clobber(s3) // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) for { sym := v.Aux @@ -27584,7 +27465,7 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { _ = s3.Args[2] dst := s3.Args[1] mem := s3.Args[2] - if !(isSameSym(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1) && clobber(s2) && clobber(s3)) { + if !(isSameSym(sym, "runtime.memmove") && t.(*types.Type).IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1) && clobber(s2) && clobber(s3)) { break } v.reset(OpMove) @@ -27595,6 +27476,20 @@ func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { v.AddArg(mem) return true } + // match: (StaticCall {sym} x) + // cond: needRaceCleanup(sym,v) + // result: x + for { + sym := v.Aux + x := v.Args[0] + if !(needRaceCleanup(sym, v)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } return false } func rewriteValuegeneric_OpStore_0(v *Value) bool { @@ -28684,9 +28579,9 @@ func rewriteValuegeneric_OpStringLen_0(v *Value) bool { return false } func rewriteValuegeneric_OpStringPtr_0(v *Value) bool { - // match: (StringPtr (StringMake (Const64 [c]) _)) + // match: (StringPtr (StringMake (Addr {s} base) _)) // cond: - // result: (Const64 [c]) + // result: (Addr {s} base) for { v_0 := v.Args[0] if v_0.Op != 
OpStringMake { @@ -28694,14 +28589,16 @@ func rewriteValuegeneric_OpStringPtr_0(v *Value) bool { } _ = v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + if v_0_0.Op != OpAddr { break } t := v_0_0.Type - c := v_0_0.AuxInt - v.reset(OpConst64) + s := v_0_0.Aux + base := v_0_0.Args[0] + v.reset(OpAddr) v.Type = t - v.AuxInt = c + v.Aux = s + v.AddArg(base) return true } return false @@ -29706,7 +29603,7 @@ func rewriteValuegeneric_OpSub32_10(v *Value) bool { func rewriteValuegeneric_OpSub32F_0(v *Value) bool { // match: (Sub32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Const32F [f2i(float64(i2f32(c) - i2f32(d)))]) + // result: (Const32F [auxFrom32F(auxTo32F(c) - auxTo32F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -29720,25 +29617,7 @@ func rewriteValuegeneric_OpSub32F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) - i2f32(d))) - return true - } - // match: (Sub32F x (Const32F [0])) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.AuxInt = auxFrom32F(auxTo32F(c) - auxTo32F(d)) return true } return false @@ -30130,7 +30009,7 @@ func rewriteValuegeneric_OpSub64_10(v *Value) bool { func rewriteValuegeneric_OpSub64F_0(v *Value) bool { // match: (Sub64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Const64F [f2i(i2f(c) - i2f(d))]) + // result: (Const64F [auxFrom64F(auxTo64F(c) - auxTo64F(d))]) for { _ = v.Args[1] v_0 := v.Args[0] @@ -30144,25 +30023,7 @@ func rewriteValuegeneric_OpSub64F_0(v *Value) bool { } d := v_1.AuxInt v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) - i2f(d)) - return true - } - // match: (Sub64F x (Const64F [0])) - // cond: - // result: x - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - 
v.AddArg(x) + v.AuxInt = auxFrom64F(auxTo64F(c) - auxTo64F(d)) return true } return false diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go index f1783a9532efe..c5b4c538434f9 100644 --- a/src/cmd/compile/internal/ssa/schedule.go +++ b/src/cmd/compile/internal/ssa/schedule.go @@ -4,14 +4,19 @@ package ssa -import "container/heap" +import ( + "container/heap" + "sort" +) const ( ScorePhi = iota // towards top of block + ScoreArg ScoreNilCheck ScoreReadTuple ScoreVarDef ScoreMemory + ScoreReadFlags ScoreDefault ScoreFlags ScoreControl // towards bottom of block @@ -57,6 +62,16 @@ func (h ValHeap) Less(i, j int) bool { return x.ID > y.ID } +func (op Op) isLoweredGetClosurePtr() bool { + switch op { + case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr, + Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr, + OpWasmLoweredGetClosurePtr: + return true + } + return false +} + // Schedule the Values in each Block. After this phase returns, the // order of b.Values matters and is the order in which those values // will appear in the assembly output. For now it generates a @@ -87,11 +102,7 @@ func schedule(f *Func) { // Compute score. Larger numbers are scheduled closer to the end of the block. for _, v := range b.Values { switch { - case v.Op == OpAMD64LoweredGetClosurePtr || v.Op == OpPPC64LoweredGetClosurePtr || - v.Op == OpARMLoweredGetClosurePtr || v.Op == OpARM64LoweredGetClosurePtr || - v.Op == Op386LoweredGetClosurePtr || v.Op == OpMIPS64LoweredGetClosurePtr || - v.Op == OpS390XLoweredGetClosurePtr || v.Op == OpMIPSLoweredGetClosurePtr || - v.Op == OpWasmLoweredGetClosurePtr: + case v.Op.isLoweredGetClosurePtr(): // We also score GetLoweredClosurePtr as early as possible to ensure that the // context register is not stomped. 
GetLoweredClosurePtr should only appear // in the entry block where there are no phi functions, so there is no @@ -113,6 +124,9 @@ func schedule(f *Func) { case v.Op == OpVarDef: // We want all the vardefs next. score[v.ID] = ScoreVarDef + case v.Op == OpArg: + // We want all the args as early as possible, for better debugging. + score[v.ID] = ScoreArg case v.Type.IsMemory(): // Schedule stores as early as possible. This tends to // reduce register pressure. It also helps make sure @@ -125,13 +139,19 @@ func schedule(f *Func) { // false dependency on the other part of the tuple. // Also ensures tuple is never spilled. score[v.ID] = ScoreReadTuple - case v.Type.IsFlags() || v.Type.IsTuple(): + case v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags(): // Schedule flag register generation as late as possible. // This makes sure that we only have one live flags // value at a time. score[v.ID] = ScoreFlags default: score[v.ID] = ScoreDefault + // If we're reading flags, schedule earlier to keep flag lifetime short. + for _, a := range v.Args { + if a.Type.IsFlags() { + score[v.ID] = ScoreReadFlags + } + } } } } @@ -175,9 +195,11 @@ func schedule(f *Func) { } } - if b.Control != nil && b.Control.Op != OpPhi { + if b.Control != nil && b.Control.Op != OpPhi && b.Control.Op != OpArg { // Force the control value to be scheduled at the end, // unless it is a phi value (which must be first). + // OpArg also goes first -- if it is stack it register allocates + // to a LoadReg, if it is register it is from the beginning anyway. score[b.Control.ID] = ScoreControl // Schedule values dependent on the control value at the end. @@ -436,5 +458,33 @@ func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value count[s-1]++ } + // Order nil checks in source order. We want the first in source order to trigger. + // If two are on the same line, we don't really care which happens first. + // See issue 18169. 
+ if hasNilCheck { + start := -1 + for i, v := range order { + if v.Op == OpNilCheck { + if start == -1 { + start = i + } + } else { + if start != -1 { + sort.Sort(bySourcePos(order[start:i])) + start = -1 + } + } + } + if start != -1 { + sort.Sort(bySourcePos(order[start:])) + } + } + return order } + +type bySourcePos []*Value + +func (s bySourcePos) Len() int { return len(s) } +func (s bySourcePos) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s bySourcePos) Less(i, j int) bool { return s[i].Pos.Before(s[j].Pos) } diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go index 39829b046c5dc..4b578b133b567 100644 --- a/src/cmd/compile/internal/ssa/softfloat.go +++ b/src/cmd/compile/internal/ssa/softfloat.go @@ -4,7 +4,10 @@ package ssa -import "math" +import ( + "cmd/compile/internal/types" + "math" +) func softfloat(f *Func) { if !f.Config.SoftFloat { @@ -25,7 +28,7 @@ func softfloat(f *Func) { case OpConst32F: v.Op = OpConst32 v.Type = f.Config.Types.UInt32 - v.AuxInt = int64(int32(math.Float32bits(i2f32(v.AuxInt)))) + v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt)))) case OpConst64F: v.Op = OpConst64 v.Type = f.Config.Types.UInt64 @@ -53,6 +56,15 @@ func softfloat(f *Func) { v.Type = f.Config.Types.UInt64 } newInt64 = newInt64 || v.Type.Size() == 8 + } else if (v.Op == OpStore || v.Op == OpZero || v.Op == OpMove) && v.Aux.(*types.Type).IsFloat() { + switch size := v.Aux.(*types.Type).Size(); size { + case 4: + v.Aux = f.Config.Types.UInt32 + case 8: + v.Aux = f.Config.Types.UInt64 + default: + v.Fatalf("bad float type with size %d", size) + } } } } diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go index f7af85446ba70..546da8348d6a4 100644 --- a/src/cmd/compile/internal/ssa/sparsetree.go +++ b/src/cmd/compile/internal/ssa/sparsetree.go @@ -98,9 +98,9 @@ func (t SparseTree) treestructure1(b *Block, i int) string { s := "\n" + strings.Repeat("\t", i) + 
b.String() + "->[" for i, e := range b.Succs { if i > 0 { - s = s + "," + s += "," } - s = s + e.b.String() + s += e.b.String() } s += "]" if c0 := t[b.ID].child; c0 != nil { diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 8d0ab93c64f7a..76125851365ae 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -212,7 +212,7 @@ func (s *stackAllocState) stackalloc() { h := f.getHome(id) if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off { // A variable can interfere with itself. - // It is rare, but but it can happen. + // It is rare, but it can happen. s.nSelfInterfere++ goto noname } diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go index 1081f83f6d278..c71f8befd9d29 100644 --- a/src/cmd/compile/internal/ssa/stmtlines_test.go +++ b/src/cmd/compile/internal/ssa/stmtlines_test.go @@ -7,6 +7,7 @@ import ( "debug/pe" "fmt" "internal/testenv" + "internal/xcoff" "io" "runtime" "testing" @@ -25,6 +26,10 @@ func open(path string) (*dwarf.Data, error) { return fh.DWARF() } + if fh, err := xcoff.Open(path); err == nil { + return fh.DWARF() + } + return nil, fmt.Errorf("unrecognized executable format") } @@ -62,6 +67,9 @@ func TestStmtLines(t *testing.T) { if pkgname == "runtime" { continue } + if e.Val(dwarf.AttrStmtList) == nil { + continue + } lrdr, err := dw.LineReader(e) must(err) diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts index 7eb1d3a35b6ed..1e4d35051b6b3 100644 --- a/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts +++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts @@ -8,47 +8,57 @@ 63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' 64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // 
TODO cannedInput/A is missing if this code is in 'test' instead of 'main' 65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 76: i, err 
:= strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) 81: hist = ensure(int(i), hist) @@ -58,24 +68,32 @@ 87: if a == 0 { //gdb-opt=(a,n,t) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts index ad2719185e501..65c5d0a2cee41 100644 --- a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts +++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts @@ -19,7 +19,7 @@ dy = 65: if 
len(os.Args) > 1 { 73: scanner := bufio.NewScanner(reader) 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -29,7 +29,7 @@ i = 1 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -39,7 +39,7 @@ i = 1 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -49,7 +49,7 @@ i = 1 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -59,7 +59,7 @@ i = 2 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -69,7 +69,7 @@ i = 2 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -79,7 +79,7 @@ i = 2 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner 
*) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -89,7 +89,7 @@ i = 4 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -99,7 +99,7 @@ i = 4 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 75: s := scanner.Text() 76: i, err := strconv.ParseInt(s, 10, 64) 77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) @@ -109,7 +109,7 @@ i = 5 81: hist = ensure(int(i), hist) 82: hist[int(i)]++ 74: for scanner.Scan() { //gdb-opt=(scanner/A) -scanner = (struct bufio.Scanner *) +scanner = (bufio.Scanner *) 86: for i, a := range hist { 87: if a == 0 { //gdb-opt=(a,n,t) a = 0 @@ -120,7 +120,9 @@ t = 0 a = 3 n = 0 t = 0 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { @@ -128,7 +130,9 @@ t = 0 a = 3 n = 3 t = 3 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { @@ -141,7 +145,9 @@ t = 9 a = 2 n = 6 t = 9 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: 
fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { @@ -149,7 +155,9 @@ t = 9 a = 1 n = 8 t = 17 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 90: t += i * a 92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) 86: for i, a := range hist { diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts index 3cc2ec5121d49..b5e41aa906e8c 100644 --- a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts +++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts @@ -15,6 +15,7 @@ 26: for i := x; i < 3; i++ { 31: fmt.Println(x, y) 30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) +31: fmt.Println(x, y) 33: for x := 0; x <= 1; x++ { // From delve scopetest.go 35: f1(a) 38: f2(b) @@ -42,5 +43,4 @@ 58: if i == f { 59: fmt.Println("foo") 64: helloworld() -66: } 15: } diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts index a66eab83cd388..5a186b5440b51 100644 --- a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts +++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts @@ -23,6 +23,7 @@ y = 1 30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) x = 0 y = 5 +31: fmt.Println(x, y) 0: 5 35: f1(a) 38: f2(b) diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go index 3713269376977..580a06dfde588 100644 --- a/src/cmd/compile/internal/ssa/tighten.go +++ b/src/cmd/compile/internal/ssa/tighten.go @@ -13,15 +13,14 @@ func tighten(f *Func) { canMove := make([]bool, f.NumValues()) for _, b := range f.Blocks { for _, v := range b.Values { + if v.Op.isLoweredGetClosurePtr() { + // Must stay in the entry block. 
+ continue + } switch v.Op { - case OpPhi, OpArg, OpSelect0, OpSelect1, - OpAMD64LoweredGetClosurePtr, Op386LoweredGetClosurePtr, - OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr, - OpMIPSLoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, - OpS390XLoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, - OpWasmLoweredGetClosurePtr: + case OpPhi, OpArg, OpSelect0, OpSelect1: // Phis need to stay in their block. - // GetClosurePtr & Arg must stay in the entry block. + // Arg must stay in the entry block. // Tuple selectors must stay with the tuple generator. continue } diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go index d97c6baaa1bff..1293548aad098 100644 --- a/src/cmd/compile/internal/ssa/trim.go +++ b/src/cmd/compile/internal/ssa/trim.go @@ -94,7 +94,7 @@ func trim(f *Func) { } } -// emptyBlock returns true if the block does not contain actual +// emptyBlock reports whether the block does not contain actual // instructions func emptyBlock(b *Block) bool { for _, v := range b.Values { @@ -105,7 +105,7 @@ func emptyBlock(b *Block) bool { return true } -// trimmableBlock returns true if the block can be trimmed from the CFG, +// trimmableBlock reports whether the block can be trimmed from the CFG, // subject to the following criteria: // - it should not be the first block // - it should be BlockPlain diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 7e869f29c9211..6e35a3c7773d6 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -300,7 +300,7 @@ func (v *Value) Fatalf(msg string, args ...interface{}) { v.Block.Func.fe.Fatalf(v.Pos, msg, args...) } -// isGenericIntConst returns whether v is a generic integer constant. +// isGenericIntConst reports whether v is a generic integer constant. 
func (v *Value) isGenericIntConst() bool { return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8) } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 2366e0bfbf358..1024ab25abf8a 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -11,7 +11,7 @@ import ( "strings" ) -// needwb returns whether we need write barrier for store op v. +// needwb reports whether we need write barrier for store op v. // v must be Store/Move/Zero. func needwb(v *Value) bool { t, ok := v.Aux.(*types.Type) @@ -24,6 +24,14 @@ func needwb(v *Value) bool { if IsStackAddr(v.Args[0]) { return false // write on stack doesn't need write barrier } + if v.Op == OpStore && IsGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) { + // Storing pointers to non-heap locations into a fresh object doesn't need a write barrier. + return false + } + if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) && IsNewObject(v.Args[0], v.MemoryArg()) { + // Copying data from readonly memory into a fresh object doesn't need a write barrier. + return false + } return true } @@ -353,7 +361,7 @@ func round(o int64, r int64) int64 { return (o + r - 1) &^ (r - 1) } -// IsStackAddr returns whether v is known to be an address of a stack slot +// IsStackAddr reports whether v is known to be an address of a stack slot. func IsStackAddr(v *Value) bool { for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { v = v.Args[0] @@ -365,6 +373,51 @@ func IsStackAddr(v *Value) bool { return false } +// IsGlobalAddr reports whether v is known to be an address of a global. +func IsGlobalAddr(v *Value) bool { + return v.Op == OpAddr && v.Args[0].Op == OpSB +} + +// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global. 
+func IsReadOnlyGlobalAddr(v *Value) bool { + if !IsGlobalAddr(v) { + return false + } + // See TODO in OpAddr case in IsSanitizerSafeAddr below. + return strings.HasPrefix(v.Aux.(*obj.LSym).Name, `"".statictmp_`) +} + +// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem. +// TODO: Be more precise. We really want "IsNilPointer" for the particular field in question. +// Right now, we can only detect a new object before any writes have been done to it. +// We could ignore non-pointer writes, writes to offsets which +// are known not to overlap the write in question, etc. +func IsNewObject(v *Value, mem *Value) bool { + if v.Op != OpLoad { + return false + } + if v.MemoryArg() != mem { + return false + } + if mem.Op != OpStaticCall { + return false + } + if !isSameSym(mem.Aux, "runtime.newobject") { + return false + } + if v.Args[0].Op != OpOffPtr { + return false + } + if v.Args[0].Args[0].Op != OpSP { + return false + } + c := v.Block.Func.Config + if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value + return false + } + return true +} + // IsSanitizerSafeAddr reports whether v is known to be an address // that doesn't need instrumentation. func IsSanitizerSafeAddr(v *Value) bool { @@ -393,7 +446,7 @@ func IsSanitizerSafeAddr(v *Value) bool { return false } -// isVolatile returns whether v is a pointer to argument region on stack which +// isVolatile reports whether v is a pointer to argument region on stack which // will be clobbered by a function call. 
func isVolatile(v *Value) bool { for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy { diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go index a03e2734d26a3..56e97c71d8ace 100644 --- a/src/cmd/compile/internal/syntax/branches.go +++ b/src/cmd/compile/internal/syntax/branches.go @@ -77,7 +77,7 @@ func (ls *labelScope) declare(b *block, s *LabeledStmt) *label { labels = make(map[string]*label) ls.labels = labels } else if alt := labels[name]; alt != nil { - ls.err(s.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String()) + ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String()) return alt } l := &label{b, s, false} diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go index 62eb0fdc30140..c6168b8594f74 100644 --- a/src/cmd/compile/internal/syntax/source.go +++ b/src/cmd/compile/internal/syntax/source.go @@ -33,7 +33,6 @@ type source struct { // source buffer buf [4 << 10]byte - offs int // source offset of buf r0, r, w int // previous/current read and write buf positions, excluding sentinel line0, line uint // previous/current line col0, col uint // previous/current column (byte offsets from line start) @@ -51,7 +50,6 @@ func (s *source) init(src io.Reader, errh func(line, pos uint, msg string)) { s.errh = errh s.buf[0] = utf8.RuneSelf // terminate with sentinel - s.offs = 0 s.r0, s.r, s.w = 0, 0, 0 s.line0, s.line = 0, linebase s.col0, s.col = 0, colbase @@ -68,7 +66,8 @@ func (s *source) ungetr() { // ungetr2 is like ungetr but enables a 2nd ungetr. // It must not be called if one of the runes seen -// was a newline. +// was a newline or had a UTF-8 encoding longer than +// 1 byte. 
func (s *source) ungetr2() { s.ungetr() // line must not have changed @@ -167,7 +166,6 @@ func (s *source) fill() { } n := s.r0 - 1 copy(s.buf[:], s.buf[n:s.w]) - s.offs += n s.r0 = 1 // eqv: s.r0 -= n s.r -= n s.w -= n @@ -189,6 +187,7 @@ func (s *source) fill() { } } + s.buf[s.w] = utf8.RuneSelf // sentinel s.ioerr = io.ErrNoProgress } diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go index 503a30d0b44e5..f234a31fd0269 100644 --- a/src/cmd/compile/internal/types/etype_string.go +++ b/src/cmd/compile/internal/types/etype_string.go @@ -4,9 +4,9 @@ package types import "strconv" -const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTR32PTR64FUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSDDDFIELDSSATUPLENTYPE" +const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSDDDFIELDSSATUPLENTYPE" -var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 101, 106, 110, 115, 120, 126, 130, 133, 138, 142, 145, 151, 160, 165, 168, 173, 181, 189, 197, 200, 205, 210} +var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 190, 193, 198, 203} func (i EType) String() string { if i >= EType(len(_EType_index)-1) { diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go new file mode 100644 index 0000000000000..7c14a03ba16dd --- /dev/null +++ b/src/cmd/compile/internal/types/identity.go @@ -0,0 +1,119 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types + +// Identical reports whether t1 and t2 are identical types, following +// the spec rules. Receiver parameter types are ignored. +func Identical(t1, t2 *Type) bool { + return identical(t1, t2, true, nil) +} + +// IdenticalIgnoreTags is like Identical, but it ignores struct tags +// for struct identity. +func IdenticalIgnoreTags(t1, t2 *Type) bool { + return identical(t1, t2, false, nil) +} + +type typePair struct { + t1 *Type + t2 *Type +} + +func identical(t1, t2 *Type, cmpTags bool, assumedEqual map[typePair]struct{}) bool { + if t1 == t2 { + return true + } + if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() { + return false + } + if t1.Sym != nil || t2.Sym != nil { + // Special case: we keep byte/uint8 and rune/int32 + // separate for error messages. Treat them as equal. + switch t1.Etype { + case TUINT8: + return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype) + case TINT32: + return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype) + default: + return false + } + } + + // Any cyclic type must go through a named type, and if one is + // named, it is only identical to the other if they are the + // same pointer (t1 == t2), so there's no chance of chasing + // cycles ad infinitum, so no need for a depth counter. 
+ if assumedEqual == nil { + assumedEqual = make(map[typePair]struct{}) + } else if _, ok := assumedEqual[typePair{t1, t2}]; ok { + return true + } + assumedEqual[typePair{t1, t2}] = struct{}{} + + switch t1.Etype { + case TINTER: + if t1.NumFields() != t2.NumFields() { + return false + } + for i, f1 := range t1.FieldSlice() { + f2 := t2.Field(i) + if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) { + return false + } + } + return true + + case TSTRUCT: + if t1.NumFields() != t2.NumFields() { + return false + } + for i, f1 := range t1.FieldSlice() { + f2 := t2.Field(i) + if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) { + return false + } + if cmpTags && f1.Note != f2.Note { + return false + } + } + return true + + case TFUNC: + // Check parameters and result parameters for type equality. + // We intentionally ignore receiver parameters for type + // equality, because they're never relevant. + for _, f := range ParamsResults { + // Loop over fields in structs, ignoring argument names. 
+ fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice() + if len(fs1) != len(fs2) { + return false + } + for i, f1 := range fs1 { + f2 := fs2[i] + if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, cmpTags, assumedEqual) { + return false + } + } + } + return true + + case TARRAY: + if t1.NumElem() != t2.NumElem() { + return false + } + + case TCHAN: + if t1.ChanDir() != t2.ChanDir() { + return false + } + + case TMAP: + if !identical(t1.Key(), t2.Key(), cmpTags, assumedEqual) { + return false + } + } + + return identical(t1.Elem(), t2.Elem(), cmpTags, assumedEqual) +} diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go index e27c1fdba32a4..e502b986aefb1 100644 --- a/src/cmd/compile/internal/types/pkg.go +++ b/src/cmd/compile/internal/types/pkg.go @@ -135,7 +135,7 @@ func InternString(b []byte) string { return s } -// CleanroomDo invokes f in an environment with with no preexisting packages. +// CleanroomDo invokes f in an environment with no preexisting packages. // For testing of import/export only. 
func CleanroomDo(f func()) { saved := pkgMap diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index 49233ad386e69..13761c7615b40 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -39,9 +39,10 @@ type Sym struct { const ( symOnExportList = 1 << iota // added to exportlist (no need to add again) symUniq - symSiggen - symAsm - symAlgGen + symSiggen // type symbol has been generated + symAsm // on asmlist, for writing to -asmhdr + symAlgGen // algorithm table has been generated + symFunc // function symbol; uses internal ABI ) func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 } @@ -49,12 +50,14 @@ func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 } func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 } func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 } func (sym *Sym) AlgGen() bool { return sym.flags&symAlgGen != 0 } +func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 } func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) } func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) } func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) } func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) } func (sym *Sym) SetAlgGen(b bool) { sym.flags.set(symAlgGen, b) } +func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) } func (sym *Sym) IsBlank() bool { return sym != nil && sym.Name == "_" @@ -74,6 +77,10 @@ func (sym *Sym) Linksym() *obj.LSym { if sym == nil { return nil } + if sym.Func() { + // This is a function symbol. Mark it as "internal ABI". 
+ return Ctxt.LookupABI(sym.LinksymName(), obj.ABIInternal) + } return Ctxt.Lookup(sym.LinksymName()) } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index d367cd19440e6..3e5f5cbf4949f 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -43,9 +43,7 @@ const ( TBOOL - TPTR32 - TPTR64 - + TPTR TFUNC TSLICE TARRAY @@ -137,13 +135,13 @@ type Type struct { // TFUNCARGS: FuncArgs // TCHANARGS: ChanArgs // TCHAN: *Chan - // TPTR32, TPTR64: Ptr + // TPTR: Ptr // TARRAY: *Array // TSLICE: Slice Extra interface{} // Width is the width of this Type in bytes. - Width int64 + Width int64 // valid if Align > 0 methods Fields allMethods Fields @@ -151,23 +149,26 @@ type Type struct { Nod *Node // canonical OTYPE node Orig *Type // original type (type literal or predefined type) - SliceOf *Type - PtrBase *Type + // Cache of composite types, with this type being the element type. + Cache struct { + ptr *Type // *T, or nil + slice *Type // []T, or nil + } Sym *Sym // symbol containing name, for named types Vargen int32 // unique name for OTYPE/ONAME Etype EType // kind of type - Align uint8 // the required alignment of this type, in bytes + Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed) flags bitset8 } const ( - typeNotInHeap = 1 << iota // type cannot be heap allocated - typeBroke // broken type definition - typeNoalg // suppress hash and eq algorithm generation - typeDeferwidth + typeNotInHeap = 1 << iota // type cannot be heap allocated + typeBroke // broken type definition + typeNoalg // suppress hash and eq algorithm generation + typeDeferwidth // width computation has been deferred and type is on deferredTypeStack typeRecur ) @@ -373,16 +374,16 @@ type Field struct { } const ( - fieldIsddd = 1 << iota // field is ... argument + fieldIsDDD = 1 << iota // field is ... 
argument fieldBroke // broken field definition fieldNointerface ) -func (f *Field) Isddd() bool { return f.flags&fieldIsddd != 0 } +func (f *Field) IsDDD() bool { return f.flags&fieldIsDDD != 0 } func (f *Field) Broke() bool { return f.flags&fieldBroke != 0 } func (f *Field) Nointerface() bool { return f.flags&fieldNointerface != 0 } -func (f *Field) SetIsddd(b bool) { f.flags.set(fieldIsddd, b) } +func (f *Field) SetIsDDD(b bool) { f.flags.set(fieldIsDDD, b) } func (f *Field) SetBroke(b bool) { f.flags.set(fieldBroke, b) } func (f *Field) SetNointerface(b bool) { f.flags.set(fieldNointerface, b) } @@ -461,7 +462,7 @@ func New(et EType) *Type { t.Extra = new(Struct) case TINTER: t.Extra = new(Interface) - case TPTR32, TPTR64: + case TPTR: t.Extra = Ptr{} case TCHANARGS: t.Extra = ChanArgs{} @@ -490,7 +491,7 @@ func NewArray(elem *Type, bound int64) *Type { // NewSlice returns the slice Type with element type elem. func NewSlice(elem *Type) *Type { - if t := elem.SliceOf; t != nil { + if t := elem.Cache.slice; t != nil { if t.Elem() != elem { Fatalf("elem mismatch") } @@ -499,7 +500,7 @@ func NewSlice(elem *Type) *Type { t := New(TSLICE) t.Extra = Slice{Elem: elem} - elem.SliceOf = t + elem.Cache.slice = t return t } @@ -553,23 +554,19 @@ func NewPtr(elem *Type) *Type { Fatalf("NewPtr: pointer to elem Type is nil") } - if t := elem.PtrBase; t != nil { + if t := elem.Cache.ptr; t != nil { if t.Elem() != elem { Fatalf("NewPtr: elem mismatch") } return t } - if Tptr == 0 { - Fatalf("NewPtr: Tptr not initialized") - } - - t := New(Tptr) + t := New(TPTR) t.Extra = Ptr{Elem: elem} t.Width = int64(Widthptr) t.Align = uint8(Widthptr) if NewPtrCacheEnabled { - elem.PtrBase = t + elem.Cache.ptr = t } return t } @@ -619,7 +616,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { t = (*types)[0] *types = (*types)[1:] - case TPTR32, TPTR64: + case TPTR: elem := SubstAny(t.Elem(), types) if elem != t.Elem() { t = t.copy() @@ -668,23 +665,18 @@ func SubstAny(t *Type, types 
*[]*Type) *Type { } case TSTRUCT: + // Make a copy of all fields, including ones whose type does not change. + // This prevents aliasing across functions, which can lead to later + // fields getting their Offset incorrectly overwritten. fields := t.FieldSlice() - var nfs []*Field + nfs := make([]*Field, len(fields)) for i, f := range fields { nft := SubstAny(f.Type, types) - if nft == f.Type { - continue - } - if nfs == nil { - nfs = append([]*Field(nil), fields...) - } nfs[i] = f.Copy() nfs[i].Type = nft } - if nfs != nil { - t = t.copy() - t.SetFields(nfs) - } + t = t.copy() + t.SetFields(nfs) } return t @@ -751,7 +743,7 @@ func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() } // IsVariadic reports whether function type t is variadic. func (t *Type) IsVariadic() bool { n := t.NumParams() - return n > 0 && t.Params().Field(n-1).Isddd() + return n > 0 && t.Params().Field(n-1).IsDDD() } // Recv returns the receiver of function type t, if any. @@ -790,7 +782,7 @@ func (t *Type) Key() *Type { // Usable with pointers, channels, arrays, slices, and maps. func (t *Type) Elem() *Type { switch t.Etype { - case TPTR32, TPTR64: + case TPTR: return t.Extra.(Ptr).Elem case TARRAY: return t.Extra.(*Array).Elem @@ -817,7 +809,7 @@ func (t *Type) ChanArgs() *Type { return t.Extra.(ChanArgs).T } -// FuncArgs returns the channel type for TFUNCARGS type t. +// FuncArgs returns the func type for TFUNCARGS type t. func (t *Type) FuncArgs() *Type { t.wantEtype(TFUNCARGS) return t.Extra.(FuncArgs).T @@ -1101,7 +1093,7 @@ func (t *Type) cmp(x *Type) Cmp { } return t.Elem().cmp(x.Elem()) - case TPTR32, TPTR64, TSLICE: + case TPTR, TSLICE: // No special cases for these, they are handled // by the general code after the switch. 
@@ -1171,8 +1163,8 @@ func (t *Type) cmp(x *Type) Cmp { for i := 0; i < len(tfs) && i < len(xfs); i++ { ta := tfs[i] tb := xfs[i] - if ta.Isddd() != tb.Isddd() { - return cmpForNe(!ta.Isddd()) + if ta.IsDDD() != tb.IsDDD() { + return cmpForNe(!ta.IsDDD()) } if c := ta.Type.cmp(tb.Type); c != CMPeq { return c @@ -1199,7 +1191,7 @@ func (t *Type) cmp(x *Type) Cmp { panic(e) } - // Common element type comparison for TARRAY, TCHAN, TPTR32, TPTR64, and TSLICE. + // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE. return t.Elem().cmp(x.Elem()) } @@ -1261,7 +1253,12 @@ func (t *Type) IsComplex() bool { // IsPtr reports whether t is a regular Go pointer type. // This does not include unsafe.Pointer. func (t *Type) IsPtr() bool { - return t.Etype == TPTR32 || t.Etype == TPTR64 + return t.Etype == TPTR +} + +// IsPtrElem reports whether t is the element of a pointer (to t). +func (t *Type) IsPtrElem() bool { + return t.Cache.ptr != nil } // IsUnsafePtr reports whether t is an unsafe pointer. @@ -1275,7 +1272,7 @@ func (t *Type) IsUnsafePtr() bool { // that consist of a single pointer shaped type. // TODO(mdempsky): Should it? See golang.org/issue/15028. func (t *Type) IsPtrShaped() bool { - return t.Etype == TPTR32 || t.Etype == TPTR64 || t.Etype == TUNSAFEPTR || + return t.Etype == TPTR || t.Etype == TUNSAFEPTR || t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC } @@ -1449,7 +1446,7 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool { } return false - case TPTR32, TPTR64, TSLICE: + case TPTR, TSLICE: return !(ignoreNotInHeap && t.Elem().NotInHeap()) case TTUPLE: @@ -1460,7 +1457,7 @@ func Haspointers1(t *Type, ignoreNotInHeap bool) bool { return true } -// HasHeapPointer returns whether t contains a heap pointer. +// HasHeapPointer reports whether t contains a heap pointer. // This is used for write barrier insertion, so it ignores // pointers to go:notinheap types. 
func (t *Type) HasHeapPointer() bool { diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index 0eac402f8e0d7..caaeb889fbcd7 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -11,9 +11,6 @@ import ( const BADWIDTH = -1000000000 -// Initialized by frontend. Exists only here. -var Tptr EType // either TPTR32 or TPTR64 - // The following variables must be initialized early by the frontend. // They are here to break import cycles. // TODO(gri) eliminate these dependencies. diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index d82b1f7953dde..6e6dc557b4937 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -58,8 +58,8 @@ func zeroAuto(pp *gc.Progs, n *gc.Node) { } } -func ginsnop(pp *gc.Progs) { - pp.Prog(wasm.ANop) +func ginsnop(pp *gc.Progs) *obj.Prog { + return pp.Prog(wasm.ANop) } func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { @@ -134,10 +134,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if sym, ok := v.Aux.(*obj.LSym); ok { p := s.Prog(obj.ACALL) p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym} + p.Pos = v.Pos } else { getValue64(s, v.Args[0]) p := s.Prog(obj.ACALL) p.To = obj.Addr{Type: obj.TYPE_NONE} + p.Pos = v.Pos } case ssa.OpWasmLoweredMove: diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go index 7a3622405ce8d..18838fb4ca16d 100644 --- a/src/cmd/compile/internal/x86/387.go +++ b/src/cmd/compile/internal/x86/387.go @@ -22,11 +22,29 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { switch v.Op { case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: - p := s.Prog(loadPush(v.Type)) - p.From.Type = obj.TYPE_FCONST - p.From.Val = math.Float64frombits(uint64(v.AuxInt)) - p.To.Type = obj.TYPE_REG - p.To.Reg = x86.REG_F0 + iv := uint64(v.AuxInt) + if iv == 0x0000000000000000 { // +0.0 + s.Prog(x86.AFLDZ) + } else if 
iv == 0x3ff0000000000000 { // +1.0 + s.Prog(x86.AFLD1) + } else if iv == 0x8000000000000000 { // -0.0 + s.Prog(x86.AFLDZ) + s.Prog(x86.AFCHS) + } else if iv == 0xbff0000000000000 { // -1.0 + s.Prog(x86.AFLD1) + s.Prog(x86.AFCHS) + } else if iv == 0x400921fb54442d18 { // +pi + s.Prog(x86.AFLDPI) + } else if iv == 0xc00921fb54442d18 { // -pi + s.Prog(x86.AFLDPI) + s.Prog(x86.AFCHS) + } else { // others + p := s.Prog(loadPush(v.Type)) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(iv) + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_F0 + } popAndSave(s, v) case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2: diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go index ef380bd74067b..1851af57c4bd7 100644 --- a/src/cmd/compile/internal/x86/ggen.go +++ b/src/cmd/compile/internal/x86/ggen.go @@ -53,10 +53,11 @@ func zeroAuto(pp *gc.Progs, n *gc.Node) { } } -func ginsnop(pp *gc.Progs) { +func ginsnop(pp *gc.Progs) *obj.Prog { p := pp.Prog(x86.AXCHGL) p.From.Type = obj.TYPE_REG p.From.Reg = x86.REG_AX p.To.Type = obj.TYPE_REG p.To.Reg = x86.REG_AX + return p } diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index b781d957258a7..24ba9649beff0 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -198,24 +198,31 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW || v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW { - var c *obj.Prog + if ssa.NeedsFixUp(v) { + var c *obj.Prog + switch v.Op { + case ssa.Op386DIVL, ssa.Op386MODL: + c = s.Prog(x86.ACMPL) + j = s.Prog(x86.AJEQ) + + case ssa.Op386DIVW, ssa.Op386MODW: + c = s.Prog(x86.ACMPW) + j = s.Prog(x86.AJEQ) + } + c.From.Type = obj.TYPE_REG + c.From.Reg = x + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + + j.To.Type = obj.TYPE_BRANCH + } + // sign extend the dividend switch v.Op { case ssa.Op386DIVL, ssa.Op386MODL: - c = s.Prog(x86.ACMPL) - j = 
s.Prog(x86.AJEQ) - s.Prog(x86.ACDQ) //TODO: fix - + s.Prog(x86.ACDQ) case ssa.Op386DIVW, ssa.Op386MODW: - c = s.Prog(x86.ACMPW) - j = s.Prog(x86.AJEQ) s.Prog(x86.ACWD) } - c.From.Type = obj.TYPE_REG - c.From.Reg = x - c.To.Type = obj.TYPE_CONST - c.To.Offset = -1 - - j.To.Type = obj.TYPE_BRANCH } // for unsigned ints, we sign extend by setting DX = 0 @@ -278,6 +285,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { m.To.Reg = x86.REG_DX } + case ssa.Op386MULLU: + // Arg[0] is already in AX as it's the only register we allow + // results lo in AX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + case ssa.Op386MULLQU: // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]). p := s.Prog(v.Op.Asm()) @@ -417,6 +431,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Args[0].Reg() + case ssa.Op386CMPLload, ssa.Op386CMPWload, ssa.Op386CMPBload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[1].Reg() + case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload: + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + gc.AddAux2(&p.From, v, sc.Off()) + p.To.Type = obj.TYPE_CONST + p.To.Offset = sc.Val() case ssa.Op386MOVLconst: x := v.Reg() @@ -469,49 +498,47 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.Op386MOVSDloadidx8: + case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, + ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2: + r := v.Args[0].Reg() + i := v.Args[1].Reg() p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() + switch 
v.Op { + case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.From.Scale = 1 + case ssa.Op386MOVSDloadidx8: + p.From.Scale = 8 + case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4: + p.From.Scale = 4 + case ssa.Op386MOVWloadidx2: + p.From.Scale = 2 + } + p.From.Reg = r + p.From.Index = i gc.AddAux(&p.From, v) - p.From.Scale = 8 - p.From.Index = v.Args[1].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4: + case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4, + ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() - gc.AddAux(&p.From, v) + p.From.Reg = v.Args[1].Reg() + p.From.Index = v.Args[2].Reg() p.From.Scale = 4 - p.From.Index = v.Args[1].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.Op386MOVWloadidx2: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_MEM - p.From.Reg = v.Args[0].Reg() gc.AddAux(&p.From, v) - p.From.Scale = 2 - p.From.Index = v.Args[1].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1: - r := v.Args[0].Reg() - i := v.Args[1].Reg() - if i == x86.REG_SP { - r, i = i, r + if v.Reg() != v.Args[0].Reg() { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_MEM - p.From.Reg = r - p.From.Scale = 1 - p.From.Index = i - gc.AddAux(&p.From, v) - p.To.Type = obj.TYPE_REG - p.To.Reg = v.Reg() - case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload, - ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload, ssa.Op386MULSDload, ssa.Op386MULSSload: + case ssa.Op386ADDLload, ssa.Op386SUBLload, 
ssa.Op386MULLload, + ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload, + ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload, + ssa.Op386MULSDload, ssa.Op386MULSSload, ssa.Op386DIVSSload, ssa.Op386DIVSDload: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[1].Reg() @@ -529,45 +556,57 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) - case ssa.Op386MOVSDstoreidx8: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[2].Reg() - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - p.To.Scale = 8 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4: - p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[2].Reg() - p.To.Type = obj.TYPE_MEM - p.To.Reg = v.Args[0].Reg() - p.To.Scale = 4 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.Op386MOVWstoreidx2: + case ssa.Op386ADDLconstmodify: + sc := v.AuxValAndOff() + val := sc.Val() + if val == 1 || val == -1 { + var p *obj.Prog + if val == 1 { + p = s.Prog(x86.AINCL) + } else { + p = s.Prog(x86.ADECL) + } + off := sc.Off() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux2(&p.To, v, off) + break + } + fallthrough + case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify: + sc := v.AuxValAndOff() + off := sc.Off() + val := sc.Val() p := s.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = v.Args[2].Reg() + p.From.Type = obj.TYPE_CONST + p.From.Offset = val p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() - p.To.Scale = 2 - p.To.Index = v.Args[1].Reg() - gc.AddAux(&p.To, v) - case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1: + gc.AddAux2(&p.To, v, off) + case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, 
ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, + ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2, + ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4: r := v.Args[0].Reg() i := v.Args[1].Reg() - if i == x86.REG_SP { - r, i = i, r - } p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() p.To.Type = obj.TYPE_MEM + switch v.Op { + case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.To.Scale = 1 + case ssa.Op386MOVSDstoreidx8: + p.To.Scale = 8 + case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, + ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4: + p.To.Scale = 4 + case ssa.Op386MOVWstoreidx2: + p.To.Scale = 2 + } p.To.Reg = r - p.To.Scale = 1 p.To.Index = i gc.AddAux(&p.To, v) case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst: @@ -578,7 +617,27 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux2(&p.To, v, sc.Off()) - case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1: + case ssa.Op386ADDLconstmodifyidx4: + sc := v.AuxValAndOff() + val := sc.Val() + if val == 1 || val == -1 { + var p *obj.Prog + if val == 1 { + p = s.Prog(x86.AINCL) + } else { + p = s.Prog(x86.ADECL) + } + off := sc.Off() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Scale = 4 + p.To.Index = v.Args[1].Reg() + gc.AddAux2(&p.To, v, off) + break + } + fallthrough + case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1, + ssa.Op386ANDLconstmodifyidx4, 
ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST sc := v.AuxValAndOff() @@ -593,7 +652,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } case ssa.Op386MOVWstoreconstidx2: p.To.Scale = 2 - case ssa.Op386MOVLstoreconstidx4: + case ssa.Op386MOVLstoreconstidx4, + ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4: p.To.Scale = 4 } p.To.Type = obj.TYPE_MEM @@ -724,7 +784,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ssa.Op386SETGF, ssa.Op386SETGEF, ssa.Op386SETB, ssa.Op386SETBE, ssa.Op386SETORD, ssa.Op386SETNAN, - ssa.Op386SETA, ssa.Op386SETAE: + ssa.Op386SETA, ssa.Op386SETAE, + ssa.Op386SETO: p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -796,6 +857,8 @@ var blockJump = [...]struct { ssa.Block386GE: {x86.AJGE, x86.AJLT}, ssa.Block386LE: {x86.AJLE, x86.AJGT}, ssa.Block386GT: {x86.AJGT, x86.AJLE}, + ssa.Block386OS: {x86.AJOS, x86.AJOC}, + ssa.Block386OC: {x86.AJOC, x86.AJOS}, ssa.Block386ULT: {x86.AJCS, x86.AJCC}, ssa.Block386UGE: {x86.AJCC, x86.AJCS}, ssa.Block386UGT: {x86.AJHI, x86.AJLS}, @@ -857,6 +920,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { case ssa.Block386EQ, ssa.Block386NE, ssa.Block386LT, ssa.Block386GE, ssa.Block386LE, ssa.Block386GT, + ssa.Block386OS, ssa.Block386OC, ssa.Block386ULT, ssa.Block386UGT, ssa.Block386ULE, ssa.Block386UGE: jmp := blockJump[b.Kind] diff --git a/src/cmd/cover/cover.go b/src/cmd/cover/cover.go index f496f4cff612c..0348849578bb1 100644 --- a/src/cmd/cover/cover.go +++ b/src/cmd/cover/cover.go @@ -16,7 +16,7 @@ import ( "log" "os" "sort" - "strconv" + "unicode" "cmd/internal/edit" "cmd/internal/objabi" @@ -117,6 +117,10 @@ func parseFlags() error { return fmt.Errorf("too many options") } + if *varVar != "" && !isValidIdentifier(*varVar) { + return fmt.Errorf("-var: %q is not a valid identifier", *varVar) + } + if *mode != "" { switch *mode { 
case "set": @@ -294,17 +298,6 @@ func (f *File) Visit(node ast.Node) ast.Visitor { return f } -// unquote returns the unquoted string. -func unquote(s string) string { - t, err := strconv.Unquote(s) - if err != nil { - log.Fatalf("cover: improperly quoted string %q\n", s) - } - return t -} - -var slashslash = []byte("//") - func annotate(name string) { fset := token.NewFileSet() content, err := ioutil.ReadFile(name) @@ -653,9 +646,21 @@ func (f *File) addVariables(w io.Writer) { // - 32-bit starting line number // - 32-bit ending line number // - (16 bit ending column number << 16) | (16-bit starting column number). + var lastStart, lastEnd token.Position for i, block := range f.blocks { start := f.fset.Position(block.startByte) end := f.fset.Position(block.endByte) + + // It is possible for positions to repeat when there is a + // line directive that does not specify column information + // and the input has not been passed through gofmt. + // See issue #27350 and TestHtmlUnformatted. + if samePos(start, lastStart) && samePos(end, lastEnd) { + end.Column++ + } + lastStart = start + lastEnd = end + fmt.Fprintf(w, "\t\t%d, %d, %#x, // [%d]\n", start.Line, end.Line, (end.Column&0xFFFF)<<16|(start.Column&0xFFFF), i) } @@ -688,3 +693,27 @@ func (f *File) addVariables(w io.Writer) { fmt.Fprintf(w, "var _ = %s.LoadUint32\n", atomicPackageName) } } + +func isValidIdentifier(ident string) bool { + if len(ident) == 0 { + return false + } + for i, c := range ident { + if i > 0 && unicode.IsDigit(c) { + continue + } + if c == '_' || unicode.IsLetter(c) { + continue + } + return false + } + return true +} + +// samePos returns whether two positions have the same file/line/column. +// We don't use p1 == p2 because token.Position also has an Offset field, +// and when the input uses //line directives two Positions can have different +// Offset values while having the same file/line/column. 
+func samePos(p1, p2 token.Position) bool { + return p1.Filename == p2.Filename && p1.Line == p2.Line && p1.Column == p2.Column +} diff --git a/src/cmd/cover/cover_test.go b/src/cmd/cover/cover_test.go index c818819c39277..3de9b0c12d7ea 100644 --- a/src/cmd/cover/cover_test.go +++ b/src/cmd/cover/cover_test.go @@ -19,43 +19,149 @@ import ( "path/filepath" "regexp" "strings" + "sync" "testing" ) const ( // Data directory, also the package directory for the test. testdata = "testdata" - - // Binaries we compile. - testcover = "./testcover.exe" ) var ( - // Files we use. - testMain = filepath.Join(testdata, "main.go") - testTest = filepath.Join(testdata, "test.go") - coverInput = filepath.Join(testdata, "test_line.go") - coverOutput = filepath.Join(testdata, "test_cover.go") - coverProfile = filepath.Join(testdata, "profile.cov") + // Input files. + testMain = filepath.Join(testdata, "main.go") + testTest = filepath.Join(testdata, "test.go") + coverProfile = filepath.Join(testdata, "profile.cov") + toolexecSource = filepath.Join(testdata, "toolexec.go") // The HTML test files are in a separate directory // so they are a complete package. - htmlProfile = filepath.Join(testdata, "html", "html.cov") - htmlHTML = filepath.Join(testdata, "html", "html.html") - htmlGolden = filepath.Join(testdata, "html", "html.golden") + htmlGolden = filepath.Join(testdata, "html", "html.golden") + + // Temporary files. + tmpTestMain string + coverInput string + coverOutput string + htmlProfile string + htmlHTML string + htmlUDir string + htmlU string + htmlUTest string + htmlUProfile string + htmlUHTML string +) + +var ( + // testTempDir is a temporary directory created in TestMain. + testTempDir string + + // testcover is a newly built version of the cover program. + testcover string + + // toolexec is a program to use as the go tool's -toolexec argument. + toolexec string + + // testcoverErr records an error building testcover or toolexec. 
+ testcoverErr error + + // testcoverOnce is used to build testcover once. + testcoverOnce sync.Once + + // toolexecArg is the argument to pass to the go tool. + toolexecArg string ) var debug = flag.Bool("debug", false, "keep rewritten files for debugging") +// We use TestMain to set up a temporary directory and remove it when +// the tests are done. +func TestMain(m *testing.M) { + dir, err := ioutil.TempDir("", "gotestcover") + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + testTempDir = dir + + tmpTestMain = filepath.Join(dir, "main.go") + coverInput = filepath.Join(dir, "test_line.go") + coverOutput = filepath.Join(dir, "test_cover.go") + htmlProfile = filepath.Join(dir, "html.cov") + htmlHTML = filepath.Join(dir, "html.html") + htmlUDir = filepath.Join(dir, "htmlunformatted") + htmlU = filepath.Join(htmlUDir, "htmlunformatted.go") + htmlUTest = filepath.Join(htmlUDir, "htmlunformatted_test.go") + htmlUProfile = filepath.Join(htmlUDir, "htmlunformatted.cov") + htmlUHTML = filepath.Join(htmlUDir, "htmlunformatted.html") + + status := m.Run() + + if !*debug { + os.RemoveAll(dir) + } + + os.Exit(status) +} + +// buildCover builds a version of the cover program for testing. +// This ensures that "go test cmd/cover" tests the current cmd/cover. 
+func buildCover(t *testing.T) { + t.Helper() + testenv.MustHaveGoBuild(t) + testcoverOnce.Do(func() { + var wg sync.WaitGroup + wg.Add(2) + + var err1, err2 error + go func() { + defer wg.Done() + testcover = filepath.Join(testTempDir, "cover.exe") + t.Logf("running [go build -o %s]", testcover) + out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", testcover).CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + err1 = err + }() + + go func() { + defer wg.Done() + toolexec = filepath.Join(testTempDir, "toolexec.exe") + t.Logf("running [go -build -o %s %s]", toolexec, toolexecSource) + out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", toolexec, toolexecSource).CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } + err2 = err + }() + + wg.Wait() + + testcoverErr = err1 + if err2 != nil && err1 == nil { + testcoverErr = err2 + } + + toolexecArg = "-toolexec=" + toolexec + " " + testcover + }) + if testcoverErr != nil { + t.Fatal("failed to build testcover or toolexec program:", testcoverErr) + } +} + // Run this shell script, but do it in Go so it can be run by "go test". // // replace the word LINE with the line number < testdata/test.go > testdata/test_line.go -// go build -o ./testcover -// ./testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go +// go build -o testcover +// testcover -mode=count -var=CoverTest -o ./testdata/test_cover.go testdata/test_line.go // go run ./testdata/main.go ./testdata/test.go // func TestCover(t *testing.T) { - testenv.MustHaveGoBuild(t) + t.Parallel() + testenv.MustHaveGoRun(t) + buildCover(t) // Read in the test file (testTest) and write it, with LINEs specified, to coverInput. 
file, err := ioutil.ReadFile(testTest) @@ -81,29 +187,28 @@ func TestCover(t *testing.T) { t.Fatal(err) } - // defer removal of test_line.go - if !*debug { - defer os.Remove(coverInput) - } - - // go build -o testcover - cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", testcover) + // testcover -mode=count -var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest -o ./testdata/test_cover.go testdata/test_line.go + cmd := exec.Command(testcover, "-mode=count", "-var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest", "-o", coverOutput, coverInput) run(cmd, t) - // defer removal of testcover - defer os.Remove(testcover) - - // ./testcover -mode=count -var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest -o ./testdata/test_cover.go testdata/test_line.go - cmd = exec.Command(testcover, "-mode=count", "-var=thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest", "-o", coverOutput, coverInput) - run(cmd, t) + cmd = exec.Command(testcover, "-mode=set", "-var=Not_an-identifier", "-o", coverOutput, coverInput) + err = cmd.Run() + if err == nil { + t.Error("Expected cover to fail with an error") + } - // defer removal of ./testdata/test_cover.go - if !*debug { - defer os.Remove(coverOutput) + // Copy testmain to testTempDir, so that it is in the same directory + // as coverOutput. + b, err := ioutil.ReadFile(testMain) + if err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(tmpTestMain, b, 0444); err != nil { + t.Fatal(err) } // go run ./testdata/main.go ./testdata/test.go - cmd = exec.Command(testenv.GoToolPath(t), "run", testMain, coverOutput) + cmd = exec.Command(testenv.GoToolPath(t), "run", tmpTestMain, coverOutput) run(cmd, t) file, err = ioutil.ReadFile(coverOutput) @@ -131,6 +236,9 @@ func TestCover(t *testing.T) { // above those declarations, even if they are not part of the block of // documentation comments. 
func TestDirectives(t *testing.T) { + t.Parallel() + buildCover(t) + // Read the source file and find all the directives. We'll keep // track of whether each one has been seen in the output. testDirectives := filepath.Join(testdata, "directives.go") @@ -140,8 +248,8 @@ func TestDirectives(t *testing.T) { } sourceDirectives := findDirectives(source) - // go tool cover -mode=atomic ./testdata/directives.go - cmd := exec.Command(testenv.GoToolPath(t), "tool", "cover", "-mode=atomic", testDirectives) + // testcover -mode=atomic ./testdata/directives.go + cmd := exec.Command(testcover, "-mode=atomic", testDirectives) cmd.Stderr = os.Stderr output, err := cmd.Output() if err != nil { @@ -247,8 +355,10 @@ func findDirectives(source []byte) []directiveInfo { // Makes sure that `cover -func=profile.cov` reports accurate coverage. // Issue #20515. func TestCoverFunc(t *testing.T) { - // go tool cover -func ./testdata/profile.cov - cmd := exec.Command(testenv.GoToolPath(t), "tool", "cover", "-func", coverProfile) + t.Parallel() + buildCover(t) + // testcover -func ./testdata/profile.cov + cmd := exec.Command(testcover, "-func", coverProfile) out, err := cmd.Output() if err != nil { if ee, ok := err.(*exec.ExitError); ok { @@ -266,19 +376,14 @@ func TestCoverFunc(t *testing.T) { // Check that cover produces correct HTML. // Issue #25767. 
func TestCoverHTML(t *testing.T) { - testenv.MustHaveGoBuild(t) - if !*debug { - defer os.Remove(testcover) - defer os.Remove(htmlProfile) - defer os.Remove(htmlHTML) - } - // go build -o testcover - cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", testcover) - run(cmd, t) + t.Parallel() + testenv.MustHaveGoRun(t) + buildCover(t) + // go test -coverprofile testdata/html/html.cov cmd/cover/testdata/html - cmd = exec.Command(testenv.GoToolPath(t), "test", "-coverprofile", htmlProfile, "cmd/cover/testdata/html") + cmd := exec.Command(testenv.GoToolPath(t), "test", toolexecArg, "-coverprofile", htmlProfile, "cmd/cover/testdata/html") run(cmd, t) - // ./testcover -html testdata/html/html.cov -o testdata/html/html.html + // testcover -html testdata/html/html.cov -o testdata/html/html.html cmd = exec.Command(testcover, "-html", htmlProfile, "-o", htmlHTML) run(cmd, t) @@ -303,6 +408,9 @@ func TestCoverHTML(t *testing.T) { in = false } } + if scan.Err() != nil { + t.Error(scan.Err()) + } golden, err := ioutil.ReadFile(htmlGolden) if err != nil { t.Fatalf("reading golden file: %v", err) @@ -314,7 +422,7 @@ func TestCoverHTML(t *testing.T) { // Compare at the line level, stopping at first different line so // we don't generate tons of output if there's an inserted or deleted line. for i, goldenLine := range goldenLines { - if i > len(outLines) { + if i >= len(outLines) { t.Fatalf("output shorter than golden; stops before line %d: %s\n", i+1, goldenLine) } // Convert all white space to simple spaces, for easy comparison. @@ -329,11 +437,54 @@ func TestCoverHTML(t *testing.T) { } } +// Test HTML processing with a source file not run through gofmt. +// Issue #27350. 
+func TestHtmlUnformatted(t *testing.T) { + t.Parallel() + testenv.MustHaveGoRun(t) + buildCover(t) + + if err := os.Mkdir(htmlUDir, 0777); err != nil { + t.Fatal(err) + } + + const htmlUContents = ` +package htmlunformatted + +var g int + +func F() { +//line x.go:1 + { { F(); goto lab } } +lab: +}` + + const htmlUTestContents = `package htmlunformatted` + + if err := ioutil.WriteFile(htmlU, []byte(htmlUContents), 0444); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(htmlUTest, []byte(htmlUTestContents), 0444); err != nil { + t.Fatal(err) + } + + // go test -covermode=count -coverprofile TMPDIR/htmlunformatted.cov + cmd := exec.Command(testenv.GoToolPath(t), "test", toolexecArg, "-covermode=count", "-coverprofile", htmlUProfile) + cmd.Dir = htmlUDir + run(cmd, t) + + // testcover -html TMPDIR/htmlunformatted.cov -o unformatted.html + cmd = exec.Command(testcover, "-html", htmlUProfile, "-o", htmlUHTML) + run(cmd, t) +} + func run(c *exec.Cmd, t *testing.T) { t.Helper() - c.Stdout = os.Stdout - c.Stderr = os.Stderr - err := c.Run() + t.Log("running", c.Args) + out, err := c.CombinedOutput() + if len(out) > 0 { + t.Logf("%s", out) + } if err != nil { t.Fatal(err) } diff --git a/src/cmd/cover/testdata/toolexec.go b/src/cmd/cover/testdata/toolexec.go new file mode 100644 index 0000000000000..1769efedbeb67 --- /dev/null +++ b/src/cmd/cover/testdata/toolexec.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The toolexec program is a helper program for cmd/cover tests. +// It is used so that the go tool will call the newly built version +// of the cover program, rather than the installed one. 
+// +// The tests arrange to run the go tool with the argument +// -toolexec="/path/to/toolexec /path/to/testcover" +// The go tool will invoke this program (compiled into /path/to/toolexec) +// with the arguments shown above followed by the command to run. +// This program will check whether it is expected to run the cover +// program, and if so replace it with /path/to/testcover. +package main + +import ( + "os" + "os/exec" + "strings" +) + +func main() { + if strings.HasSuffix(strings.TrimSuffix(os.Args[2], ".exe"), "cover") { + os.Args[2] = os.Args[1] + } + cmd := exec.Command(os.Args[2], os.Args[3:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + os.Exit(1) + } +} diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index eed9866ce478e..ad2c96436a069 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -9,6 +9,7 @@ import ( "encoding/json" "flag" "fmt" + "io/ioutil" "log" "os" "os/exec" @@ -69,6 +70,7 @@ var okgoarch = []string{ "ppc64le", "riscv64", "s390x", + "sparc64", "wasm", } @@ -86,6 +88,7 @@ var okgoos = []string{ "openbsd", "plan9", "windows", + "aix", } // find reports the first index of p in l[0:n], or else -1. @@ -680,7 +683,7 @@ func runInstall(dir string, ch chan struct{}) { } // Is the target up-to-date? 
- var gofiles, missing []string + var gofiles, sfiles, missing []string stale := rebuildall files = filter(files, func(p string) bool { for _, suf := range depsuffix { @@ -696,6 +699,8 @@ func runInstall(dir string, ch chan struct{}) { } if strings.HasSuffix(p, ".go") { gofiles = append(gofiles, p) + } else if strings.HasSuffix(p, ".s") { + sfiles = append(sfiles, p) } if t.After(ttarg) { stale = true @@ -776,10 +781,42 @@ func runInstall(dir string, ch chan struct{}) { return } + asmArgs := []string{ + pathf("%s/asm", tooldir), + "-I", workdir, + "-I", pathf("%s/pkg/include", goroot), + "-D", "GOOS_" + goos, + "-D", "GOARCH_" + goarch, + "-D", "GOOS_GOARCH_" + goos + "_" + goarch, + } + if goarch == "mips" || goarch == "mipsle" { + // Define GOMIPS_value from gomips. + asmArgs = append(asmArgs, "-D", "GOMIPS_"+gomips) + } + if goarch == "mips64" || goarch == "mipsle64" { + // Define GOMIPS64_value from gomips64. + asmArgs = append(asmArgs, "-D", "GOMIPS64_"+gomips64) + } + goasmh := pathf("%s/go_asm.h", workdir) + + // Collect symabis from assembly code. + var symabis string + if len(sfiles) > 0 { + symabis = pathf("%s/symabis", workdir) + var wg sync.WaitGroup + asmabis := append(asmArgs[:len(asmArgs):len(asmArgs)], "-gensymabis", "-o", symabis) + asmabis = append(asmabis, sfiles...) + if err := ioutil.WriteFile(goasmh, nil, 0666); err != nil { + fatalf("cannot write empty go_asm.h: %s", err) + } + bgrun(&wg, path, asmabis...) + bgwait(&wg) + } + var archive string // The next loop will compile individual non-Go files. // Hand the Go files to the compiler en masse. - // For package runtime, this writes go_asm.h, which + // For packages containing assembly, this writes go_asm.h, which // the assembly files will need. pkg := dir if strings.HasPrefix(dir, "cmd/") && strings.Count(dir, "/") == 1 { @@ -792,48 +829,41 @@ func runInstall(dir string, ch chan struct{}) { } else { archive = b } + + // Compile Go code. 
compile := []string{pathf("%s/compile", tooldir), "-std", "-pack", "-o", b, "-p", pkg} if gogcflags != "" { compile = append(compile, strings.Fields(gogcflags)...) } if dir == "runtime" { - compile = append(compile, "-+", "-asmhdr", pathf("%s/go_asm.h", workdir)) + compile = append(compile, "-+") } - if dir == "internal/bytealg" { - // TODO: why don't we generate go_asm.h for all packages - // that have any assembly? - compile = append(compile, "-asmhdr", pathf("%s/go_asm.h", workdir)) + if len(sfiles) > 0 { + compile = append(compile, "-asmhdr", goasmh) + } + if symabis != "" { + compile = append(compile, "-symabis", symabis) + } + if dir == "runtime" || dir == "runtime/internal/atomic" { + // These packages define symbols referenced by + // assembly in other packages. In cmd/go, we work out + // the exact details. For bootstrapping, just tell the + // compiler to generate ABI wrappers for everything. + compile = append(compile, "-allabis") } - compile = append(compile, gofiles...) - run(path, CheckExit|ShowOutput, compile...) - // Compile the files. + compile = append(compile, gofiles...) var wg sync.WaitGroup - for _, p := range files { - if !strings.HasSuffix(p, ".s") { - continue - } + // We use bgrun and immediately wait for it instead of calling run() synchronously. + // This executes all jobs through the bgwork channel and allows the process + // to exit cleanly in case an error occurs. + bgrun(&wg, path, compile...) + bgwait(&wg) - var compile []string + // Compile the files. + for _, p := range sfiles { // Assembly file for a Go package. - compile = []string{ - pathf("%s/asm", tooldir), - "-I", workdir, - "-I", pathf("%s/pkg/include", goroot), - "-D", "GOOS_" + goos, - "-D", "GOARCH_" + goarch, - "-D", "GOOS_GOARCH_" + goos + "_" + goarch, - } - - if goarch == "mips" || goarch == "mipsle" { - // Define GOMIPS_value from gomips. 
- compile = append(compile, "-D", "GOMIPS_"+gomips) - } - - if goarch == "mips64" || goarch == "mipsle64" { - // Define GOMIPS64_value from gomips64. - compile = append(compile, "-D", "GOMIPS64_"+gomips64) - } + compile := asmArgs[:len(asmArgs):len(asmArgs)] doclean := true b := pathf("%s/%s", workdir, filepath.Base(p)) @@ -858,7 +888,8 @@ func runInstall(dir string, ch chan struct{}) { // Remove target before writing it. xremove(link[targ]) - run("", CheckExit|ShowOutput, link...) + bgrun(&wg, "", link...) + bgwait(&wg) } // matchfield reports whether the field (x,y,z) matches this build. @@ -1382,6 +1413,7 @@ func checkNotStale(goBinary string, targets ...string) { // single point of truth for supported platforms. This list is used // by 'go tool dist list'. var cgoEnabled = map[string]bool{ + "aix/ppc64": false, "darwin/386": true, "darwin/amd64": true, "darwin/arm": true, @@ -1402,6 +1434,7 @@ var cgoEnabled = map[string]bool{ "linux/mips64le": true, "linux/riscv64": true, "linux/s390x": true, + "linux/sparc64": true, "android/386": true, "android/amd64": true, "android/arm": true, @@ -1415,13 +1448,21 @@ var cgoEnabled = map[string]bool{ "netbsd/arm": true, "openbsd/386": true, "openbsd/amd64": true, - "openbsd/arm": false, + "openbsd/arm": true, "plan9/386": false, "plan9/amd64": false, "plan9/arm": false, "solaris/amd64": true, "windows/386": true, "windows/amd64": true, + "windows/arm": false, +} + +// List of platforms which are supported but not complete yet. These get +// filtered out of cgoEnabled for 'dist list'. 
See golang.org/issue/28944 +var incomplete = map[string]bool{ + "linux/riscv64": true, + "linux/sparc64": true, } func needCC() bool { @@ -1542,6 +1583,9 @@ func cmdlist() { var plats []string for p := range cgoEnabled { + if incomplete[p] { + continue + } plats = append(plats, p) } sort.Strings(plats) diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go index acf2230cb4927..5aadc8da674ff 100644 --- a/src/cmd/dist/buildruntime.go +++ b/src/cmd/dist/buildruntime.go @@ -31,7 +31,7 @@ func mkzversion(dir, file string) { fmt.Fprintln(&buf) fmt.Fprintf(&buf, "const TheVersion = `%s`\n", findgoversion()) fmt.Fprintf(&buf, "const Goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT")) - fmt.Fprintf(&buf, "const StackGuardMultiplier = %d\n", stackGuardMultiplier()) + fmt.Fprintf(&buf, "const StackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault()) writefile(buf.String(), file, writeSkipSame) } @@ -49,7 +49,7 @@ func mkzversion(dir, file string) { // const defaultGOARCH = runtime.GOARCH // const defaultGO_EXTLINK_ENABLED = // const version = -// const stackGuardMultiplier = +// const stackGuardMultiplierDefault = // const goexperiment = // // The use of runtime.GOOS and runtime.GOARCH makes sure that @@ -77,16 +77,16 @@ func mkzbootstrap(file string) { fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n") fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled) fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion()) - fmt.Fprintf(&buf, "const stackGuardMultiplier = %d\n", stackGuardMultiplier()) + fmt.Fprintf(&buf, "const stackGuardMultiplierDefault = %d\n", stackGuardMultiplierDefault()) fmt.Fprintf(&buf, "const goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT")) writefile(buf.String(), file, writeSkipSame) } -// stackGuardMultiplier returns a multiplier to apply to the default +// stackGuardMultiplierDefault returns a multiplier to apply to the default // stack guard size. 
Larger multipliers are used for non-optimized // builds that have larger stack frames. -func stackGuardMultiplier() int { +func stackGuardMultiplierDefault() int { for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") { if s == "-N" { return 2 diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 94b75870262fe..71ed4ba8bc7c1 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -73,6 +73,7 @@ var bootstrapDirs = []string{ "cmd/link/internal/loadelf", "cmd/link/internal/loadmacho", "cmd/link/internal/loadpe", + "cmd/link/internal/loadxcoff", "cmd/link/internal/mips", "cmd/link/internal/mips64", "cmd/link/internal/objfile", @@ -88,6 +89,7 @@ var bootstrapDirs = []string{ "debug/elf", "debug/macho", "debug/pe", + "internal/xcoff", "math/big", "math/bits", "sort", diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go index 37e37e2733566..bab8ab781ab67 100644 --- a/src/cmd/dist/main.go +++ b/src/cmd/dist/main.go @@ -65,6 +65,13 @@ func main() { case "freebsd": // Since FreeBSD 10 gcc is no longer part of the base system. defaultclang = true + case "openbsd": + // The gcc available on OpenBSD armv7 is old/inadequate (for example, lacks + // __sync_fetch_and_*/__sync_*_and_fetch) and will likely be removed in the + // not-too-distant future - use clang instead. + if runtime.GOARCH == "arm" { + defaultclang = true + } case "solaris": // Even on 64-bit platform, solaris uname -m prints i86pc. 
out := run("", CheckExit, "isainfo", "-n") @@ -81,6 +88,9 @@ func main() { } case "windows": exe = ".exe" + case "aix": + // uname -m doesn't work under AIX + gohostarch = "ppc64" } sysinit() diff --git a/src/cmd/dist/sys_windows.go b/src/cmd/dist/sys_windows.go index 216dc017982fe..2f6a1b0dceb6f 100644 --- a/src/cmd/dist/sys_windows.go +++ b/src/cmd/dist/sys_windows.go @@ -32,6 +32,7 @@ type systeminfo struct { const ( PROCESSOR_ARCHITECTURE_AMD64 = 9 PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_ARM = 5 ) var sysinfo systeminfo @@ -43,6 +44,8 @@ func sysinit() { gohostarch = "amd64" case PROCESSOR_ARCHITECTURE_INTEL: gohostarch = "386" + case PROCESSOR_ARCHITECTURE_ARM: + gohostarch = "arm" default: fatalf("unknown processor architecture") } diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 448c7867a143c..74cee8f42169b 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -212,6 +212,9 @@ func (t *tester) run() { if t.failed { fmt.Println("\nFAILED") os.Exit(1) + } else if incomplete[goos+"/"+goarch] { + fmt.Println("\nFAILED (incomplete port)") + os.Exit(1) } else if t.partial { fmt.Println("\nALL TESTS PASSED (some were excluded)") } else { @@ -519,7 +522,6 @@ func (t *tester) registerTests() { } // Run `go test fmt` in the moved GOROOT. - // Disable GOCACHE because it points back at the old GOROOT. 
cmd := exec.Command(filepath.Join(moved, "bin", "go"), "test", "fmt") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -529,7 +531,6 @@ func (t *tester) registerTests() { cmd.Env = append(cmd.Env, e) } } - cmd.Env = append(cmd.Env, "GOCACHE=off") err := cmd.Run() if rerr := os.Rename(moved, goroot); rerr != nil { @@ -705,7 +706,7 @@ func (t *tester) registerTests() { if gohostos == "linux" && goarch == "amd64" { t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go") } - if goos == "linux" && (goarch == "amd64" || goarch == "arm64") { + if mSanSupported(goos, goarch) { t.registerHostTest("testsanitizers/msan", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".") } if t.hasBash() && goos != "android" && !t.iOS() && gohostos != "windows" { @@ -1038,7 +1039,10 @@ func (t *tester) cgoTest(dt *distTest) error { "linux-386", "linux-amd64", "linux-arm", "linux-ppc64le", "linux-s390x", "netbsd-386", "netbsd-amd64": - t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") + cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") + // A -g argument in CGO_CFLAGS should not affect how the test runs. 
+ cmd.Env = append(os.Environ(), "CGO_CFLAGS=-g0") + t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto") t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external") @@ -1329,13 +1333,26 @@ func (t *tester) hasSwig() bool { } func (t *tester) raceDetectorSupported() bool { - switch gohostos { - case "linux", "darwin", "freebsd", "windows": - // The race detector doesn't work on Alpine Linux: - // golang.org/issue/14481 - return t.cgoEnabled && (goarch == "amd64" || goarch == "ppc64le") && gohostos == goos && !isAlpineLinux() + if gohostos != goos { + return false } - return false + if !t.cgoEnabled { + return false + } + if !raceDetectorSupported(goos, goarch) { + return false + } + // The race detector doesn't work on Alpine Linux: + // golang.org/issue/14481 + if isAlpineLinux() { + return false + } + // NetBSD support is unfinished. + // golang.org/issue/26403 + if goos == "netbsd" { + return false + } + return true } func isAlpineLinux() bool { @@ -1450,3 +1467,28 @@ func (t *tester) packageHasBenchmarks(pkg string) bool { } return false } + +// raceDetectorSupported is a copy of the function +// cmd/internal/sys.RaceDetectorSupported, which can't be used here +// because cmd/dist has to be buildable by Go 1.4. +func raceDetectorSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" + case "darwin", "freebsd", "netbsd", "windows": + return goarch == "amd64" + default: + return false + } +} + +// mSanSupported is a copy of the function cmd/internal/sys.MSanSupported, +// which can't be used here because cmd/dist has to be buildable by Go 1.4. 
+func mSanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "arm64" + default: + return false + } +} diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go index 808a60a28e2c1..996e058b31822 100644 --- a/src/cmd/dist/util.go +++ b/src/cmd/dist/util.go @@ -397,9 +397,8 @@ func xgetgoarm() string { // Conservative default for cross-compilation. return "5" } - if goos == "freebsd" || goos == "openbsd" { + if goos == "freebsd" { // FreeBSD has broken VFP support. - // OpenBSD currently only supports softfloat. return "5" } diff --git a/src/cmd/doc/dirs.go b/src/cmd/doc/dirs.go index 24bd797eb51f9..c6f5cd9af8516 100644 --- a/src/cmd/doc/dirs.go +++ b/src/cmd/doc/dirs.go @@ -162,7 +162,7 @@ func findCodeRoots() []Dir { // Check for use of modules by 'go env GOMOD', // which reports a go.mod file path if modules are enabled. stdout, _ := exec.Command("go", "env", "GOMOD").Output() - usingModules = bytes.Contains(stdout, []byte("go.mod")) + usingModules = len(bytes.TrimSpace(stdout)) > 0 } if !usingModules { diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index 6010f04b56081..0761c6ddb3b0f 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -127,24 +127,87 @@ var tests = []test{ `type T1 = T2`, // Type alias }, []string{ - `const internalConstant = 2`, // No internal constants. - `var internalVariable = 2`, // No internal variables. - `func internalFunc(a int) bool`, // No internal functions. - `Comment about exported constant`, // No comment for single constant. - `Comment about exported variable`, // No comment for single variable. - `Comment about block of constants.`, // No comment for constant block. - `Comment about block of variables.`, // No comment for variable block. - `Comment before ConstOne`, // No comment for first entry in constant block. - `Comment before VarOne`, // No comment for first entry in variable block. 
- `ConstTwo = 2`, // No second entry in constant block. - `VarTwo = 2`, // No second entry in variable block. - `VarFive = 5`, // From block starting with unexported variable. - `type unexportedType`, // No unexported type. - `unexportedTypedConstant`, // No unexported typed constant. - `\bField`, // No fields. - `Method`, // No methods. - `someArgument[5-8]`, // No truncated arguments. - `type T1 T2`, // Type alias does not display as type declaration. + `const internalConstant = 2`, // No internal constants. + `var internalVariable = 2`, // No internal variables. + `func internalFunc(a int) bool`, // No internal functions. + `Comment about exported constant`, // No comment for single constant. + `Comment about exported variable`, // No comment for single variable. + `Comment about block of constants`, // No comment for constant block. + `Comment about block of variables`, // No comment for variable block. + `Comment before ConstOne`, // No comment for first entry in constant block. + `Comment before VarOne`, // No comment for first entry in variable block. + `ConstTwo = 2`, // No second entry in constant block. + `VarTwo = 2`, // No second entry in variable block. + `VarFive = 5`, // From block starting with unexported variable. + `type unexportedType`, // No unexported type. + `unexportedTypedConstant`, // No unexported typed constant. + `\bField`, // No fields. + `Method`, // No methods. + `someArgument[5-8]`, // No truncated arguments. + `type T1 T2`, // Type alias does not display as type declaration. 
+ }, + }, + // Package dump -all + { + "full package", + []string{"-all", p}, + []string{ + `package pkg .*import`, + `Package comment`, + `CONSTANTS`, + `Comment before ConstOne`, + `ConstOne = 1`, + `ConstTwo = 2 // Comment on line with ConstTwo`, + `ConstFive`, + `ConstSix`, + `Const block where first entry is unexported`, + `ConstLeft2, constRight2 uint64`, + `constLeft3, ConstRight3`, + `ConstLeft4, ConstRight4`, + `Duplicate = iota`, + `const CaseMatch = 1`, + `const Casematch = 2`, + `const ExportedConstant = 1`, + `const MultiLineConst = `, + `MultiLineString1`, + `VARIABLES`, + `Comment before VarOne`, + `VarOne = 1`, + `Comment about block of variables`, + `VarFive = 5`, + `var ExportedVariable = 1`, + `var LongLine = newLongLine\(`, + `var MultiLineVar = map\[struct {`, + `FUNCTIONS`, + `func ExportedFunc\(a int\) bool`, + `Comment about exported function`, + `func MultiLineFunc\(x interface`, + `func ReturnUnexported\(\) unexportedType`, + `TYPES`, + `type ExportedInterface interface`, + `type ExportedStructOneField struct`, + `type ExportedType struct`, + `Comment about exported type`, + `const ConstGroup4 ExportedType = ExportedType`, + `ExportedTypedConstant ExportedType = iota`, + `Constants tied to ExportedType`, + `func ExportedTypeConstructor\(\) \*ExportedType`, + `Comment about constructor for exported type`, + `func ReturnExported\(\) ExportedType`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + `type T1 = T2`, + `type T2 int`, + }, + []string{ + `constThree`, + `_, _ uint64 = 2 \* iota, 1 << iota`, + `constLeft1, constRight1`, + `duplicate`, + `varFour`, + `func internalFunc`, + `unexportedField`, + `func \(unexportedType\)`, }, }, // Package dump -u @@ -164,6 +227,58 @@ var tests = []test{ `MultiLine(String|Method|Field)`, // No data from multi line portions. 
}, }, + // Package dump -u -all + { + "full package", + []string{"-u", "-all", p}, + []string{ + `package pkg .*import`, + `Package comment`, + `CONSTANTS`, + `Comment before ConstOne`, + `ConstOne += 1`, + `ConstTwo += 2 // Comment on line with ConstTwo`, + `constThree = 3 // Comment on line with constThree`, + `ConstFive`, + `const internalConstant += 2`, + `Comment about internal constant`, + `VARIABLES`, + `Comment before VarOne`, + `VarOne += 1`, + `Comment about block of variables`, + `varFour += 4`, + `VarFive += 5`, + `varSix += 6`, + `var ExportedVariable = 1`, + `var LongLine = newLongLine\(`, + `var MultiLineVar = map\[struct {`, + `var internalVariable = 2`, + `Comment about internal variable`, + `FUNCTIONS`, + `func ExportedFunc\(a int\) bool`, + `Comment about exported function`, + `func MultiLineFunc\(x interface`, + `func internalFunc\(a int\) bool`, + `Comment about internal function`, + `func newLongLine\(ss .*string\)`, + `TYPES`, + `type ExportedType struct`, + `type T1 = T2`, + `type T2 int`, + `type unexportedType int`, + `Comment about unexported type`, + `ConstGroup1 unexportedType = iota`, + `ConstGroup2`, + `ConstGroup3`, + `ExportedTypedConstant_unexported unexportedType = iota`, + `Constants tied to unexportedType`, + `const unexportedTypedConstant unexportedType = 1`, + `func ReturnUnexported\(\) unexportedType`, + `func \(unexportedType\) ExportedMethod\(\) bool`, + `func \(unexportedType\) unexportedMethod\(\) bool`, + }, + nil, + }, // Single constant. { @@ -207,6 +322,18 @@ var tests = []test{ }, nil, }, + // Block of constants -src. + { + "block of constants with -src", + []string{"-src", p, `ConstTwo`}, + []string{ + `Comment about block of constants`, // Top comment. + `ConstOne.*=.*1`, // Each constant seen. + `ConstTwo.*=.*2.*Comment on line with ConstTwo`, + `constThree`, // Even unexported constants. + }, + nil, + }, // Block of constants with carryover type from unexported field. 
{ "block of constants with carryover type", @@ -295,6 +422,17 @@ var tests = []test{ }, nil, }, + // Function with -src. + { + "function with -src", + []string{"-src", p, `ExportedFunc`}, + []string{ + `Comment about exported function`, // Include comment. + `func ExportedFunc\(a int\) bool`, + `return true != false`, // Include body. + }, + nil, + }, // Type. { @@ -304,21 +442,63 @@ var tests = []test{ `Comment about exported type`, // Include comment. `type ExportedType struct`, // Type definition. `Comment before exported field.*\n.*ExportedField +int` + - `.*Comment on line with exported field.`, - `ExportedEmbeddedType.*Comment on line with exported embedded field.`, + `.*Comment on line with exported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, `Has unexported fields`, `func \(ExportedType\) ExportedMethod\(a int\) bool`, `const ExportedTypedConstant ExportedType = iota`, // Must include associated constant. `func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor. - `io.Reader.*Comment on line with embedded Reader.`, + `io.Reader.*Comment on line with embedded Reader`, }, []string{ - `unexportedField`, // No unexported field. - `int.*embedded`, // No unexported embedded field. - `Comment about exported method.`, // No comment about exported method. - `unexportedMethod`, // No unexported method. - `unexportedTypedConstant`, // No unexported constant. - `error`, // No embedded error. + `unexportedField`, // No unexported field. + `int.*embedded`, // No unexported embedded field. + `Comment about exported method`, // No comment about exported method. + `unexportedMethod`, // No unexported method. + `unexportedTypedConstant`, // No unexported constant. + `error`, // No embedded error. + }, + }, + // Type with -src. Will see unexported fields. + { + "type", + []string{"-src", p, `ExportedType`}, + []string{ + `Comment about exported type`, // Include comment. 
+ `type ExportedType struct`, // Type definition. + `Comment before exported field`, + `ExportedField.*Comment on line with exported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, + `unexportedType.*Comment on line with unexported embedded field`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `const ExportedTypedConstant ExportedType = iota`, // Must include associated constant. + `func ExportedTypeConstructor\(\) \*ExportedType`, // Must include constructor. + `io.Reader.*Comment on line with embedded Reader`, + }, + []string{ + `Comment about exported method`, // No comment about exported method. + `unexportedMethod`, // No unexported method. + `unexportedTypedConstant`, // No unexported constant. + }, + }, + // Type -all. + { + "type", + []string{"-all", p, `ExportedType`}, + []string{ + `type ExportedType struct {`, // Type definition as source. + `Comment about exported type`, // Include comment afterwards. + `const ConstGroup4 ExportedType = ExportedType\{\}`, // Related constants. + `ExportedTypedConstant ExportedType = iota`, + `Constants tied to ExportedType`, + `func ExportedTypeConstructor\(\) \*ExportedType`, + `Comment about constructor for exported type.`, + `func ReturnExported\(\) ExportedType`, + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method.`, + }, + []string{ + `unexportedType`, }, }, // Type T1 dump (alias). @@ -341,14 +521,14 @@ var tests = []test{ `Comment about exported type`, // Include comment. `type ExportedType struct`, // Type definition. 
`Comment before exported field.*\n.*ExportedField +int`, - `unexportedField.*int.*Comment on line with unexported field.`, - `ExportedEmbeddedType.*Comment on line with exported embedded field.`, - `\*ExportedEmbeddedType.*Comment on line with exported embedded \*field.`, - `\*qualified.ExportedEmbeddedType.*Comment on line with exported embedded \*selector.field.`, - `unexportedType.*Comment on line with unexported embedded field.`, - `\*unexportedType.*Comment on line with unexported embedded \*field.`, - `io.Reader.*Comment on line with embedded Reader.`, - `error.*Comment on line with embedded error.`, + `unexportedField.*int.*Comment on line with unexported field`, + `ExportedEmbeddedType.*Comment on line with exported embedded field`, + `\*ExportedEmbeddedType.*Comment on line with exported embedded \*field`, + `\*qualified.ExportedEmbeddedType.*Comment on line with exported embedded \*selector.field`, + `unexportedType.*Comment on line with unexported embedded field`, + `\*unexportedType.*Comment on line with unexported embedded \*field`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, `func \(ExportedType\) unexportedMethod\(a int\) bool`, `unexportedTypedConstant`, }, @@ -380,8 +560,8 @@ var tests = []test{ `type ExportedInterface interface`, // Interface definition. `Comment before exported method.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, - `io.Reader.*Comment on line with embedded Reader.`, - `error.*Comment on line with embedded error.`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, `Has unexported methods`, }, []string{ @@ -400,9 +580,9 @@ var tests = []test{ `type ExportedInterface interface`, // Interface definition. 
`Comment before exported method.*\n.*ExportedMethod\(\)` + `.*Comment on line with exported method`, - `unexportedMethod\(\).*Comment on line with unexported method.`, - `io.Reader.*Comment on line with embedded Reader.`, - `error.*Comment on line with embedded error.`, + `unexportedMethod\(\).*Comment on line with unexported method`, + `io.Reader.*Comment on line with embedded Reader`, + `error.*Comment on line with embedded error`, }, []string{ `Has unexported methods`, @@ -418,7 +598,7 @@ var tests = []test{ `.*Comment on line with exported method`, }, []string{ - `Comment about exported interface.`, + `Comment about exported interface`, }, }, @@ -428,7 +608,7 @@ var tests = []test{ []string{p, `ExportedType.ExportedMethod`}, []string{ `func \(ExportedType\) ExportedMethod\(a int\) bool`, - `Comment about exported method.`, + `Comment about exported method`, }, nil, }, @@ -438,7 +618,18 @@ var tests = []test{ []string{"-u", p, `ExportedType.unexportedMethod`}, []string{ `func \(ExportedType\) unexportedMethod\(a int\) bool`, - `Comment about unexported method.`, + `Comment about unexported method`, + }, + nil, + }, + // Method with -src. 
+ { + "method with -src", + []string{"-src", p, `ExportedType.ExportedMethod`}, + []string{ + `func \(ExportedType\) ExportedMethod\(a int\) bool`, + `Comment about exported method`, + `return true != true`, }, nil, }, @@ -450,8 +641,8 @@ var tests = []test{ []string{ `type ExportedType struct`, `ExportedField int`, - `Comment before exported field.`, - `Comment on line with exported field.`, + `Comment before exported field`, + `Comment on line with exported field`, `other fields elided`, }, nil, @@ -463,7 +654,7 @@ var tests = []test{ []string{"-u", p, `ExportedType.unexportedField`}, []string{ `unexportedField int`, - `Comment on line with unexported field.`, + `Comment on line with unexported field`, }, nil, }, @@ -562,6 +753,9 @@ func TestDoc(t *testing.T) { failed = true } } + if bytes.Count(output, []byte("TYPES\n")) > 1 { + t.Fatalf("%s: repeating headers", test.name) + } if failed { t.Logf("\n%s", output) } diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go index 982c8e054acac..ec15ec5826149 100644 --- a/src/cmd/doc/main.go +++ b/src/cmd/doc/main.go @@ -28,6 +28,12 @@ // For commands, unless the -cmd flag is present "go doc command" // shows only the package-level docs for the package. // +// The -src flag causes doc to print the full source code for the symbol, such +// as the body of a struct, function or method. +// +// The -all flag causes doc to print all documentation for the package and +// all its visible symbols. The argument must identify a package. +// // For complete documentation, run "go help doc". package main @@ -49,7 +55,9 @@ import ( var ( unexported bool // -u flag matchCase bool // -c flag + showAll bool // -all flag showCmd bool // -cmd flag + showSrc bool // -src flag ) // usage is a replacement usage function for the flags package. 
@@ -84,7 +92,9 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) { matchCase = false flagSet.BoolVar(&unexported, "u", false, "show unexported symbols as well as exported") flagSet.BoolVar(&matchCase, "c", false, "symbol matching honors case (paths not affected)") + flagSet.BoolVar(&showAll, "all", false, "show all documentation for package") flagSet.BoolVar(&showCmd, "cmd", false, "show symbols with package docs even if package is a command") + flagSet.BoolVar(&showSrc, "src", false, "show source code for symbol") flagSet.Parse(args) var paths []string var symbol, method string @@ -122,6 +132,12 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) { unexported = true } + // We have a package. + if showAll && symbol == "" { + pkg.allDoc() + return + } + switch { case symbol == "": pkg.packageDoc() // The package exists, so we got some output. diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go index 14e41b9106ce1..7c4e00767dcab 100644 --- a/src/cmd/doc/pkg.go +++ b/src/cmd/doc/pkg.go @@ -12,6 +12,7 @@ import ( "go/doc" "go/format" "go/parser" + "go/printer" "go/token" "io" "log" @@ -29,15 +30,18 @@ const ( ) type Package struct { - writer io.Writer // Destination for output. - name string // Package name, json for encoding/json. - userPath string // String the user used to find this package. - pkg *ast.Package // Parsed package. - file *ast.File // Merged from all files in the package - doc *doc.Package - build *build.Package - fs *token.FileSet // Needed for printing. - buf bytes.Buffer + writer io.Writer // Destination for output. + name string // Package name, json for encoding/json. + userPath string // String the user used to find this package. + pkg *ast.Package // Parsed package. + file *ast.File // Merged from all files in the package + doc *doc.Package + build *build.Package + typedValue map[*doc.Value]bool // Consts and vars related to types. + constructor map[*doc.Func]bool // Constructors. 
+ packageClausePrinted bool // Prevent repeated package clauses. + fs *token.FileSet // Needed for printing. + buf bytes.Buffer } type PackageError string // type returned by pkg.Fatalf. @@ -137,22 +141,43 @@ func parsePackage(writer io.Writer, pkg *build.Package, userPath string) *Packag // from finding the symbol. Work around this for now, but we // should fix it in go/doc. // A similar story applies to factory functions. - docPkg := doc.New(astPkg, pkg.ImportPath, doc.AllDecls) + mode := doc.AllDecls + if showSrc { + mode |= doc.PreserveAST // See comment for Package.emit. + } + docPkg := doc.New(astPkg, pkg.ImportPath, mode) + typedValue := make(map[*doc.Value]bool) + constructor := make(map[*doc.Func]bool) for _, typ := range docPkg.Types { docPkg.Consts = append(docPkg.Consts, typ.Consts...) + for _, value := range typ.Consts { + typedValue[value] = true + } docPkg.Vars = append(docPkg.Vars, typ.Vars...) + for _, value := range typ.Vars { + typedValue[value] = true + } docPkg.Funcs = append(docPkg.Funcs, typ.Funcs...) + for _, fun := range typ.Funcs { + // We don't count it as a constructor bound to the type + // if the type itself is not exported. + if isExported(typ.Name) { + constructor[fun] = true + } + } } return &Package{ - writer: writer, - name: pkg.Name, - userPath: userPath, - pkg: astPkg, - file: ast.MergePackageFiles(astPkg, 0), - doc: docPkg, - build: pkg, - fs: fs, + writer: writer, + name: pkg.Name, + userPath: userPath, + pkg: astPkg, + file: ast.MergePackageFiles(astPkg, 0), + doc: docPkg, + typedValue: typedValue, + constructor: constructor, + build: pkg, + fs: fs, } } @@ -177,14 +202,24 @@ func (pkg *Package) newlines(n int) { } } -// emit prints the node. +// emit prints the node. If showSrc is true, it ignores the provided comment, +// assuming the comment is in the node itself. Otherwise, the go/doc package +// clears the stuff we don't want to print anyway. It's a bit of a magic trick. 
func (pkg *Package) emit(comment string, node ast.Node) { if node != nil { - err := format.Node(&pkg.buf, pkg.fs, node) + var arg interface{} = node + if showSrc { + // Need an extra little dance to get internal comments to appear. + arg = &printer.CommentedNode{ + Node: node, + Comments: pkg.file.Comments, + } + } + err := format.Node(&pkg.buf, pkg.fs, arg) if err != nil { log.Fatal(err) } - if comment != "" { + if comment != "" && !showSrc { pkg.newlines(1) doc.ToText(&pkg.buf, comment, " ", indent, indentedWidth) pkg.newlines(2) // Blank line after comment to separate from next item. @@ -384,6 +419,69 @@ func joinStrings(ss []string) string { return strings.Join(ss, ", ") } +// allDoc prints all the docs for the package. +func (pkg *Package) allDoc() { + defer pkg.flush() + if pkg.showInternals() { + pkg.packageClause(false) + } + + doc.ToText(&pkg.buf, pkg.doc.Doc, "", indent, indentedWidth) + pkg.newlines(1) + + printed := make(map[*ast.GenDecl]bool) + + hdr := "" + printHdr := func(s string) { + if hdr != s { + pkg.Printf("\n%s\n\n", s) + hdr = s + } + } + + // Constants. + for _, value := range pkg.doc.Consts { + // Constants and variables come in groups, and valueDoc prints + // all the items in the group. We only need to find one exported symbol. + for _, name := range value.Names { + if isExported(name) && !pkg.typedValue[value] { + printHdr("CONSTANTS") + pkg.valueDoc(value, printed) + break + } + } + } + + // Variables. + for _, value := range pkg.doc.Vars { + // Constants and variables come in groups, and valueDoc prints + // all the items in the group. We only need to find one exported symbol. + for _, name := range value.Names { + if isExported(name) && !pkg.typedValue[value] { + printHdr("VARIABLES") + pkg.valueDoc(value, printed) + break + } + } + } + + // Functions. + for _, fun := range pkg.doc.Funcs { + if isExported(fun.Name) && !pkg.constructor[fun] { + printHdr("FUNCTIONS") + pkg.emit(fun.Doc, fun.Decl) + } + } + + // Types. 
+ for _, typ := range pkg.doc.Types { + if isExported(typ.Name) { + printHdr("TYPES") + pkg.typeDoc(typ) + } + } +} + // packageDoc prints the docs for the package (package doc plus one-liners of the rest). func (pkg *Package) packageDoc() { defer pkg.flush() @@ -420,6 +518,10 @@ func (pkg *Package) showInternals() bool { // user's argument is identical to the actual package path or // is empty, meaning it's the current directory. func (pkg *Package) packageClause(checkUserPath bool) { + if pkg.packageClausePrinted { + return + } + if checkUserPath { if pkg.userPath == "" || pkg.userPath == pkg.build.ImportPath { return @@ -457,6 +559,7 @@ func (pkg *Package) packageClause(checkUserPath bool) { if !usingModules && importPath != pkg.build.ImportPath { pkg.Printf("WARNING: package source is installed in %q\n", pkg.build.ImportPath) } + pkg.packageClausePrinted = true } // valueSummary prints a one-line summary for each set of values and constants. @@ -491,22 +594,10 @@ func (pkg *Package) valueSummary(values []*doc.Value, showGrouped bool) { // funcSummary prints a one-line summary for each function. Constructors // are printed by typeSummary, below, and so can be suppressed here. func (pkg *Package) funcSummary(funcs []*doc.Func, showConstructors bool) { - // First, identify the constructors. Don't bother figuring out if they're exported. - var isConstructor map[*doc.Func]bool - if !showConstructors { - isConstructor = make(map[*doc.Func]bool) - for _, typ := range pkg.doc.Types { - if isExported(typ.Name) { - for _, f := range typ.Funcs { - isConstructor[f] = true - } - } - } - } for _, fun := range funcs { // Exported functions only. The go/doc package does not include methods here. if isExported(fun.Name) { - if !isConstructor[fun] { + if showConstructors || !pkg.constructor[fun] { pkg.Printf("%s\n", pkg.oneLineNode(fun.Decl)) } } @@ -611,7 +702,6 @@ func (pkg *Package) symbolDoc(symbol string) bool { } // Symbol is a function. 
decl := fun.Decl - decl.Body = nil pkg.emit(fun.Doc, decl) found = true } @@ -624,86 +714,115 @@ func (pkg *Package) symbolDoc(symbol string) bool { // So we remember which declarations we've printed to avoid duplication. printed := make(map[*ast.GenDecl]bool) for _, value := range values { - // Print each spec only if there is at least one exported symbol in it. - // (See issue 11008.) - // TODO: Should we elide unexported symbols from a single spec? - // It's an unlikely scenario, probably not worth the trouble. - // TODO: Would be nice if go/doc did this for us. - specs := make([]ast.Spec, 0, len(value.Decl.Specs)) - var typ ast.Expr - for _, spec := range value.Decl.Specs { - vspec := spec.(*ast.ValueSpec) + pkg.valueDoc(value, printed) + found = true + } + // Types. + for _, typ := range pkg.findTypes(symbol) { + pkg.typeDoc(typ) + found = true + } + if !found { + // See if there are methods. + if !pkg.printMethodDoc("", symbol) { + return false + } + } + return true +} - // The type name may carry over from a previous specification in the - // case of constants and iota. - if vspec.Type != nil { - typ = vspec.Type - } +// valueDoc prints the docs for a constant or variable. +func (pkg *Package) valueDoc(value *doc.Value, printed map[*ast.GenDecl]bool) { + if printed[value.Decl] { + return + } + // Print each spec only if there is at least one exported symbol in it. + // (See issue 11008.) + // TODO: Should we elide unexported symbols from a single spec? + // It's an unlikely scenario, probably not worth the trouble. + // TODO: Would be nice if go/doc did this for us. + specs := make([]ast.Spec, 0, len(value.Decl.Specs)) + var typ ast.Expr + for _, spec := range value.Decl.Specs { + vspec := spec.(*ast.ValueSpec) + + // The type name may carry over from a previous specification in the + // case of constants and iota. 
+ if vspec.Type != nil {
+ typ = vspec.Type
+ }
-
- for _, ident := range vspec.Names {
- if isExported(ident.Name) {
- if vspec.Type == nil && vspec.Values == nil && typ != nil {
- // This a standalone identifier, as in the case of iota usage.
- // Thus, assume the type comes from the previous type.
- vspec.Type = &ast.Ident{
- Name: pkg.oneLineNode(typ),
- NamePos: vspec.End() - 1,
- }

+ for _, ident := range vspec.Names {
+ if showSrc || isExported(ident.Name) {
+ if vspec.Type == nil && vspec.Values == nil && typ != nil {
+ // This is a standalone identifier, as in the case of iota usage.
+ // Thus, assume the type comes from the previous type.
+ vspec.Type = &ast.Ident{
+ Name: pkg.oneLineNode(typ),
+ NamePos: vspec.End() - 1,
}

- specs = append(specs, vspec)
- typ = nil // Only inject type on first exported identifier
- break
+ }
+
+ specs = append(specs, vspec)
+ typ = nil // Only inject type on first exported identifier
+ break
}
}
- if len(specs) == 0 || printed[value.Decl] {
- continue
- }
- value.Decl.Specs = specs
- if !found {
- pkg.packageClause(true)
- }
- pkg.emit(value.Doc, value.Decl)
- printed[value.Decl] = true
- found = true
}
- // Types.
- for _, typ := range pkg.findTypes(symbol) {
- if !found {
- pkg.packageClause(true)
- }
- decl := typ.Decl
- spec := pkg.findTypeSpec(decl, typ.Name)
- trimUnexportedElems(spec)
- // If there are multiple types defined, reduce to just this one.
- if len(decl.Specs) > 1 {
- decl.Specs = []ast.Spec{spec}
+ if len(specs) == 0 {
+ return
+ }
+ value.Decl.Specs = specs
+ pkg.emit(value.Doc, value.Decl)
+ printed[value.Decl] = true
+}
+
+// typeDoc prints the docs for a type, including constructors and other items
+// related to it.
+func (pkg *Package) typeDoc(typ *doc.Type) {
+ decl := typ.Decl
+ spec := pkg.findTypeSpec(decl, typ.Name)
+ trimUnexportedElems(spec)
+ // If there are multiple types defined, reduce to just this one. 
+ if len(decl.Specs) > 1 { + decl.Specs = []ast.Spec{spec} + } + pkg.emit(typ.Doc, decl) + pkg.newlines(2) + // Show associated methods, constants, etc. + if showAll { + printed := make(map[*ast.GenDecl]bool) + // We can use append here to print consts, then vars. Ditto for funcs and methods. + values := typ.Consts + values = append(values, typ.Vars...) + for _, value := range values { + for _, name := range value.Names { + if isExported(name) { + pkg.valueDoc(value, printed) + break + } + } } - pkg.emit(typ.Doc, decl) - // Show associated methods, constants, etc. - if len(typ.Consts) > 0 || len(typ.Vars) > 0 || len(typ.Funcs) > 0 || len(typ.Methods) > 0 { - pkg.Printf("\n") + funcs := typ.Funcs + funcs = append(funcs, typ.Methods...) + for _, fun := range funcs { + if isExported(fun.Name) { + pkg.emit(fun.Doc, fun.Decl) + } } + } else { pkg.valueSummary(typ.Consts, true) pkg.valueSummary(typ.Vars, true) pkg.funcSummary(typ.Funcs, true) pkg.funcSummary(typ.Methods, true) - found = true } - if !found { - // See if there are methods. - if !pkg.printMethodDoc("", symbol) { - return false - } - } - return true } // trimUnexportedElems modifies spec in place to elide unexported fields from -// structs and methods from interfaces (unless the unexported flag is set). +// structs and methods from interfaces (unless the unexported flag is set or we +// are asked to show the original source). func trimUnexportedElems(spec *ast.TypeSpec) { - if unexported { + if unexported || showSrc { return } switch typ := spec.Type.(type) { @@ -808,7 +927,6 @@ func (pkg *Package) printMethodDoc(symbol, method string) bool { for _, meth := range typ.Methods { if match(method, meth.Name) { decl := meth.Decl - decl.Body = nil pkg.emit(meth.Doc, decl) found = true } diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go index bc069939f8183..50105b5fccce8 100644 --- a/src/cmd/doc/testdata/pkg.go +++ b/src/cmd/doc/testdata/pkg.go @@ -5,6 +5,8 @@ // Package comment. 
package pkg +import "io" + // Constants // Comment about exported constant. @@ -52,7 +54,9 @@ var ( ) // Comment about exported function. -func ExportedFunc(a int) bool +func ExportedFunc(a int) bool { + return true != false +} // Comment about internal function. func internalFunc(a int) bool @@ -73,7 +77,7 @@ type ExportedType struct { // Comment about exported method. func (ExportedType) ExportedMethod(a int) bool { - return true + return true != true } // Comment about unexported method. diff --git a/src/cmd/fix/egltype.go b/src/cmd/fix/egltype.go new file mode 100644 index 0000000000000..c8c4f03e97f2b --- /dev/null +++ b/src/cmd/fix/egltype.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" +) + +func init() { + register(eglFix) +} + +var eglFix = fix{ + name: "egl", + date: "2018-12-15", + f: eglfix, + desc: `Fixes initializers of EGLDisplay`, + disabled: false, +} + +// Old state: +// type EGLDisplay unsafe.Pointer +// New state: +// type EGLDisplay uintptr +// This fix finds nils initializing these types and replaces the nils with 0s. +func eglfix(f *ast.File) bool { + return typefix(f, func(s string) bool { + return s == "C.EGLDisplay" + }) +} diff --git a/src/cmd/fix/egltype_test.go b/src/cmd/fix/egltype_test.go new file mode 100644 index 0000000000000..35ffe925958a8 --- /dev/null +++ b/src/cmd/fix/egltype_test.go @@ -0,0 +1,185 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(eglTests, eglfix) +} + +var eglTests = []testCase{ + { + Name: "egl.localVariable", + In: `package main + +import "C" + +func f() { + var x C.EGLDisplay = nil + x = nil + x, x = nil, nil +} +`, + Out: `package main + +import "C" + +func f() { + var x C.EGLDisplay = 0 + x = 0 + x, x = 0, 0 +} +`, + }, + { + Name: "egl.globalVariable", + In: `package main + +import "C" + +var x C.EGLDisplay = nil + +func f() { + x = nil +} +`, + Out: `package main + +import "C" + +var x C.EGLDisplay = 0 + +func f() { + x = 0 +} +`, + }, + { + Name: "egl.EqualArgument", + In: `package main + +import "C" + +var x C.EGLDisplay +var y = x == nil +var z = x != nil +`, + Out: `package main + +import "C" + +var x C.EGLDisplay +var y = x == 0 +var z = x != 0 +`, + }, + { + Name: "egl.StructField", + In: `package main + +import "C" + +type T struct { + x C.EGLDisplay +} + +var t = T{x: nil} +`, + Out: `package main + +import "C" + +type T struct { + x C.EGLDisplay +} + +var t = T{x: 0} +`, + }, + { + Name: "egl.FunctionArgument", + In: `package main + +import "C" + +func f(x C.EGLDisplay) { +} + +func g() { + f(nil) +} +`, + Out: `package main + +import "C" + +func f(x C.EGLDisplay) { +} + +func g() { + f(0) +} +`, + }, + { + Name: "egl.ArrayElement", + In: `package main + +import "C" + +var x = [3]C.EGLDisplay{nil, nil, nil} +`, + Out: `package main + +import "C" + +var x = [3]C.EGLDisplay{0, 0, 0} +`, + }, + { + Name: "egl.SliceElement", + In: `package main + +import "C" + +var x = []C.EGLDisplay{nil, nil, nil} +`, + Out: `package main + +import "C" + +var x = []C.EGLDisplay{0, 0, 0} +`, + }, + { + Name: "egl.MapKey", + In: `package main + +import "C" + +var x = map[C.EGLDisplay]int{nil: 0} +`, + Out: `package main + +import "C" + +var x = map[C.EGLDisplay]int{0: 0} +`, + }, + { + Name: "egl.MapValue", + In: `package main + +import "C" + +var x = map[int]C.EGLDisplay{0: nil} +`, + Out: `package main + +import "C" + +var x = 
map[int]C.EGLDisplay{0: 0} +`, + }, +} diff --git a/src/cmd/fix/fix.go b/src/cmd/fix/fix.go index 03c828a581649..2c64e9b414df4 100644 --- a/src/cmd/fix/fix.go +++ b/src/cmd/fix/fix.go @@ -478,7 +478,7 @@ func newPkgDot(pos token.Pos, pkg, name string) ast.Expr { } // renameTop renames all references to the top-level name old. -// It returns true if it makes any changes. +// It reports whether it makes any changes. func renameTop(f *ast.File, old, new string) bool { var fixed bool diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index f06abae171b0e..f54a5e0d963bf 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -52,7 +52,7 @@ func usage() { fmt.Fprintf(os.Stderr, "\n%s\n", f.name) } desc := strings.TrimSpace(f.desc) - desc = strings.Replace(desc, "\n", "\n\t", -1) + desc = strings.ReplaceAll(desc, "\n", "\n\t") fmt.Fprintf(os.Stderr, "\t%s\n", desc) } os.Exit(2) diff --git a/src/cmd/fix/typecheck.go b/src/cmd/fix/typecheck.go index eafb626c74768..66e0cdcec056d 100644 --- a/src/cmd/fix/typecheck.go +++ b/src/cmd/fix/typecheck.go @@ -193,12 +193,12 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass var params, results []string for _, p := range fn.Type.Params.List { t := gofmt(p.Type) - t = strings.Replace(t, "_Ctype_", "C.", -1) + t = strings.ReplaceAll(t, "_Ctype_", "C.") params = append(params, t) } for _, r := range fn.Type.Results.List { t := gofmt(r.Type) - t = strings.Replace(t, "_Ctype_", "C.", -1) + t = strings.ReplaceAll(t, "_Ctype_", "C.") results = append(results, t) } cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index c67e3f5a1c3bc..186f42156a2ef 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -144,7 +144,7 @@ // link against shared libraries previously created with // -buildmode=shared. // -mod mode -// module download mode to use: readonly, release, or vendor. 
+// module download mode to use: readonly or vendor. // See 'go help modules' for more. // -pkgdir dir // install and load all packages from dir instead of the usual locations. @@ -342,12 +342,21 @@ // cd go/src/encoding/json; go doc decode // // Flags: +// -all +// Show all the documentation for the package. // -c // Respect case when matching symbols. // -cmd // Treat a command (package main) like a regular package. // Otherwise package main's exported symbols are hidden // when showing the package's top-level documentation. +// -src +// Show the full source code for the symbol. This will +// display the full Go source of its declaration and +// definition, such as a function definition (including +// the body), type declaration or enclosing const +// block. The output may therefore include unexported +// details. // -u // Show documentation for unexported as well as exported // symbols, methods, and fields. @@ -433,11 +442,14 @@ // command alias, described below. // // To convey to humans and machine tools that code is generated, -// generated source should have a line early in the file that -// matches the following regular expression (in Go syntax): +// generated source should have a line that matches the following +// regular expression (in Go syntax): // // ^// Code generated .* DO NOT EDIT\.$ // +// The line may appear anywhere in the file, but is typically +// placed near the beginning so it is easy to find. +// // Note that go generate does not parse the file, so lines that look // like directives in comments or multiline strings will be treated // as directives. @@ -889,7 +901,7 @@ // // Usage: // -// go mod download [-dir] [-json] [modules] +// go mod download [-json] [modules] // // Download downloads the named modules, which can be module patterns selecting // dependencies of the main module or module queries of the form path@version. 
@@ -905,13 +917,15 @@ // corresponding to this Go struct: // // type Module struct { -// Path string // module path -// Version string // module version -// Error string // error loading module -// Info string // absolute path to cached .info file -// GoMod string // absolute path to cached .mod file -// Zip string // absolute path to cached .zip file -// Dir string // absolute path to cached source root directory +// Path string // module path +// Version string // module version +// Error string // error loading module +// Info string // absolute path to cached .info file +// GoMod string // absolute path to cached .mod file +// Zip string // absolute path to cached .zip file +// Dir string // absolute path to cached source root directory +// Sum string // checksum for path, version (as in go.sum) +// GoModSum string // checksum for go.mod (as in go.sum) // } // // See 'go help modules' for more about module queries. @@ -961,6 +975,8 @@ // and -dropreplace editing flags may be repeated, and the changes // are applied in the order given. // +// The -go=version flag sets the expected Go language version. +// // The -print flag prints the final go.mod in its text format instead of // writing it back to go.mod. // @@ -973,7 +989,8 @@ // } // // type GoMod struct { -// Module Module +// Module Module +// Go string // Require []Require // Exclude []Module // Replace []Replace @@ -1285,16 +1302,25 @@ // // Usage: // -// go vet [-n] [-x] [build flags] [vet flags] [packages] +// go vet [-n] [-x] [-vettool prog] [build flags] [vet flags] [packages] // // Vet runs the Go vet command on the packages named by the import paths. // // For more about vet and its flags, see 'go doc cmd/vet'. // For more about specifying packages, see 'go help packages'. +// For a list of checkers and their flags, see 'go tool vet help'. +// For details of a specific checker such as 'printf', see 'go tool vet help printf'. // // The -n flag prints commands that would be executed. 
// The -x flag prints commands as they are executed. // +// The -vettool=prog flag selects a different analysis tool with alternative +// or additional checks. +// For example, the 'shadow' analyzer can be built and run using these commands: +// +// go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow +// go vet -vettool=$(which shadow) +// // The build flags supported by go vet are those that control package resolution // and execution, such as -n, -x, -v, -tags, and -toolexec. // For more about these flags, see 'go help build'. @@ -1374,7 +1400,6 @@ // in the standard user cache directory for the current operating system. // Setting the GOCACHE environment variable overrides this default, // and running 'go env GOCACHE' prints the current cache directory. -// You can set the variable to 'off' to disable the cache. // // The go command periodically deletes cached data that has not been // used recently. Running 'go clean -cache' deletes all cached data. @@ -1447,6 +1472,10 @@ // The directory where the go command will write // temporary source files, packages, and binaries. // +// Each entry in the GOFLAGS list must be a standalone flag. +// Because the entries are space-separated, flag values must +// not contain spaces. +// // Environment variables for use with cgo: // // CC @@ -1480,6 +1509,10 @@ // The command to use to compile C++ code. // PKG_CONFIG // Path to pkg-config tool. +// AR +// The command to use to manipulate library archives when +// building with the gccgo compiler. +// The default is 'ar'. // // Architecture-specific environment variables: // @@ -1565,14 +1598,14 @@ // line comment. See the go/build package documentation for // more details. // -// Non-test Go source files can also include a //go:binary-only-package -// comment, indicating that the package sources are included -// for documentation only and must not be used to build the -// package binary. This enables distribution of Go packages in -// their compiled form alone. 
Even binary-only packages require -// accurate import blocks listing required dependencies, so that -// those dependencies can be supplied when linking the resulting -// command. +// Through the Go 1.12 release, non-test Go source files can also include +// a //go:binary-only-package comment, indicating that the package +// sources are included for documentation only and must not be used to +// build the package binary. This enables distribution of Go packages in +// their compiled form alone. Even binary-only packages require accurate +// import blocks listing required dependencies, so that those +// dependencies can be supplied when linking the resulting command. +// Note that this feature is scheduled to be removed after the Go 1.12 release. // // // The go.mod file @@ -1587,17 +1620,20 @@ // verb followed by arguments. For example: // // module my/thing +// go 1.12 // require other/thing v1.0.2 -// require new/thing v2.3.4 +// require new/thing/v2 v2.3.4 // exclude old/thing v1.2.3 // replace bad/thing v1.4.5 => good/thing v1.4.5 // -// The verbs are module, to define the module path; require, to require -// a particular module at a given version or later; exclude, to exclude -// a particular module version from use; and replace, to replace a module -// version with a different module version. Exclude and replace apply only -// in the main module's go.mod and are ignored in dependencies. -// See https://research.swtch.com/vgo-mvs for details. +// The verbs are +// module, to define the module path; +// go, to set the expected language version; +// require, to require a particular module at a given version or later; +// exclude, to exclude a particular module version from use; and +// replace, to replace a module version with a different module version. +// Exclude and replace apply only in the main module's go.mod and are ignored +// in dependencies. See https://research.swtch.com/vgo-mvs for details. 
// // The leading verb can be factored out of adjacent lines to create a block, // like in Go imports: @@ -1614,7 +1650,19 @@ // // The go command automatically updates go.mod each time it uses the // module graph, to make sure go.mod always accurately reflects reality -// and is properly formatted. +// and is properly formatted. For example, consider this go.mod file: +// +// module M +// +// require ( +// A v1 +// B v1.0.0 +// C v1.0.0 +// D v1.2.3 +// E dev +// ) +// +// exclude D v1.2.3 // // The update rewrites non-canonical version identifiers to semver form, // so A's v1 becomes v1.0.0 and E's dev becomes the pseudo-version for the @@ -2008,7 +2056,7 @@ // (See 'go help gopath-get' and 'go help gopath'.) // // When using modules, downloaded packages are stored in the module cache. -// (See 'go help modules-get' and 'go help goproxy'.) +// (See 'go help module-get' and 'go help goproxy'.) // // When using modules, an additional variant of the go-import meta tag is // recognized and is preferred over those listing version control systems. @@ -2448,7 +2496,8 @@ // development module, then get will update the required version. // Specifying a version earlier than the current required version is valid and // downgrades the dependency. The version suffix @none indicates that the -// dependency should be removed entirely. +// dependency should be removed entirely, downgrading or removing modules +// depending on it as needed. // // Although get defaults to using the latest version of the module containing // a named package, it does not use the latest version of that module's @@ -2470,7 +2519,7 @@ // In general, adding a new dependency may require upgrading // existing dependencies to keep a working build, and 'go get' does // this automatically. Similarly, downgrading one dependency may -// require downgrading other dependenceis, and 'go get' does +// require downgrading other dependencies, and 'go get' does // this automatically as well. 
// // The -m flag instructs get to stop here, after resolving, upgrading, @@ -2632,6 +2681,8 @@ // Run enough iterations of each benchmark to take t, specified // as a time.Duration (for example, -benchtime 1h30s). // The default is 1 second (1s). +// The special syntax Nx means to run the benchmark N times +// (for example, -benchtime 100x). // // -count n // Run each test and benchmark n times (default 1). diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 85cae90f87905..c58bc7408d009 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -6,6 +6,8 @@ package main_test import ( "bytes" + "cmd/internal/sys" + "context" "debug/elf" "debug/macho" "flag" @@ -107,6 +109,12 @@ var testGo string var testTmpDir string var testBin string +// testCtx is canceled when the test binary is about to time out. +// +// If https://golang.org/issue/28135 is accepted, uses of this variable in test +// functions should be replaced by t.Context(). +var testCtx = context.Background() + // The TestMain function creates a go command for testing purposes and // deletes it after the tests have been run. func TestMain(m *testing.M) { @@ -119,6 +127,20 @@ func TestMain(m *testing.M) { os.Unsetenv("GOROOT_FINAL") flag.Parse() + + timeoutFlag := flag.Lookup("test.timeout") + if timeoutFlag != nil { + // TODO(golang.org/issue/28147): The go command does not pass the + // test.timeout flag unless either -timeout or -test.timeout is explicitly + // set on the command line. + if d := timeoutFlag.Value.(flag.Getter).Get().(time.Duration); d != 0 { + aBitShorter := d * 95 / 100 + var cancel context.CancelFunc + testCtx, cancel = context.WithTimeout(testCtx, aBitShorter) + defer cancel() + } + } + if *proxyAddr != "" { StartProxy() select {} @@ -209,15 +231,13 @@ func TestMain(m *testing.M) { } testGOCACHE = strings.TrimSpace(string(out)) - // As of Sept 2017, MSan is only supported on linux/amd64. 
- // https://github.com/google/sanitizers/wiki/MemorySanitizer#getting-memorysanitizer - canMSan = canCgo && runtime.GOOS == "linux" && runtime.GOARCH == "amd64" - - switch runtime.GOOS { - case "linux", "darwin", "freebsd", "windows": - // The race detector doesn't work on Alpine Linux: - // golang.org/issue/14481 - canRace = canCgo && runtime.GOARCH == "amd64" && !isAlpineLinux() && runtime.Compiler != "gccgo" + canMSan = canCgo && sys.MSanSupported(runtime.GOOS, runtime.GOARCH) + canRace = canCgo && sys.RaceDetectorSupported(runtime.GOOS, runtime.GOARCH) + // The race detector doesn't work on Alpine Linux: + // golang.org/issue/14481 + // gccgo does not support the race detector. + if isAlpineLinux() || runtime.Compiler == "gccgo" { + canRace = false } } // Don't let these environment variables confuse the test. @@ -846,12 +866,54 @@ func (tg *testgoData) failSSH() { func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { if testing.Short() { - t.Skip("don't rebuild the standard library in short mode") + t.Skip("skipping lengthy test in short mode") } tg := testgo(t) defer tg.cleanup() + // Copy the runtime packages into a temporary GOROOT + // so that we can change files. 
+ for _, copydir := range []string{ + "src/runtime", + "src/internal/bytealg", + "src/internal/cpu", + "src/unsafe", + filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH), + filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH), + "pkg/include", + } { + srcdir := filepath.Join(testGOROOT, copydir) + tg.tempDir(filepath.Join("goroot", copydir)) + err := filepath.Walk(srcdir, + func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + srcrel, err := filepath.Rel(srcdir, path) + if err != nil { + return err + } + dest := filepath.Join("goroot", copydir, srcrel) + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + tg.tempFile(dest, string(data)) + if err := os.Chmod(tg.path(dest), info.Mode()); err != nil { + return err + } + return nil + }) + if err != nil { + t.Fatal(err) + } + } + tg.setenv("GOROOT", tg.path("goroot")) + addVar := func(name string, idx int) (restore func()) { data, err := ioutil.ReadFile(name) if err != nil { @@ -880,7 +942,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { // Changing mtime of runtime/internal/sys/sys.go // should have no effect: only the content matters. // In fact this should be true even outside a release branch. 
- sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go" + sys := tg.path("goroot/src/runtime/internal/sys/sys.go") tg.sleep() restore := addVar(sys, 0) restore() @@ -895,7 +957,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { restore() tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release") addVar(sys, 2) - tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again") + tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again") tg.run("install", "-i", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release") @@ -904,9 +966,6 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go") tg.run("install", "-i", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release") - - // Everything is out of date. Rebuild to leave things in a better state. 
- tg.run("install", "std") } func testLocalRun(tg *testgoData, exepath, local, match string) { @@ -1075,6 +1134,8 @@ func testMove(t *testing.T, vcs, url, base, config string) { defer tg.cleanup() tg.parallel() tg.tempDir("src") + tg.must(os.Mkdir(tg.path(".hg"), 0700)) + tg.must(ioutil.WriteFile(filepath.Join(tg.path(".hg"), "hgrc"), nil, 0600)) tg.setenv("GOPATH", tg.path(".")) tg.run("get", "-d", url) tg.run("get", "-d", "-u", url) @@ -1089,7 +1150,7 @@ func testMove(t *testing.T, vcs, url, base, config string) { path := tg.path(filepath.Join("src", config)) data, err := ioutil.ReadFile(path) tg.must(err) - data = bytes.Replace(data, []byte(base), []byte(base+"XXX"), -1) + data = bytes.ReplaceAll(data, []byte(base), []byte(base+"XXX")) tg.must(ioutil.WriteFile(path, data, 0644)) } if vcs == "git" { @@ -1185,21 +1246,6 @@ func TestImportCycle(t *testing.T) { tg.run("list", "-e", "-json", "selfimport") } -func TestListImportMap(t *testing.T) { - tg := testgo(t) - defer tg.cleanup() - tg.parallel() - tg.run("list", "-f", "{{.ImportPath}}: {{.ImportMap}}", "net", "fmt") - tg.grepStdout(`^net: map\[(.* )?golang_org/x/net/dns/dnsmessage:vendor/golang_org/x/net/dns/dnsmessage.*\]`, "net/http should have rewritten dnsmessage import") - tg.grepStdout(`^fmt: map\[\]`, "fmt should have no rewritten imports") - tg.run("list", "-deps", "-test", "-f", "{{.ImportPath}} MAP: {{.ImportMap}}\n{{.ImportPath}} IMPORT: {{.Imports}}", "fmt") - tg.grepStdout(`^flag \[fmt\.test\] MAP: map\[fmt:fmt \[fmt\.test\]\]`, "flag [fmt.test] should import fmt [fmt.test] as fmt") - tg.grepStdout(`^fmt\.test MAP: map\[(.* )?testing:testing \[fmt\.test\]`, "fmt.test should import testing [fmt.test] as testing") - tg.grepStdout(`^fmt\.test MAP: map\[(.* )?testing:testing \[fmt\.test\]`, "fmt.test should import testing [fmt.test] as testing") - tg.grepStdoutNot(`^fmt\.test MAP: map\[(.* )?os:`, "fmt.test should not import a modified os") - tg.grepStdout(`^fmt\.test IMPORT: \[fmt \[fmt\.test\] 
fmt_test \[fmt\.test\] os testing \[fmt\.test\] testing/internal/testdeps \[fmt\.test\]\]`, "wrong imports for fmt.test") -} - // cmd/go: custom import path checking should not apply to Go packages without import comment. func TestIssue10952(t *testing.T) { testenv.MustHaveExternalNetwork(t) @@ -1419,6 +1465,7 @@ func TestRelativeGOBINFail(t *testing.T) { defer tg.cleanup() tg.tempFile("triv.go", `package main; func main() {}`) tg.setenv("GOBIN", ".") + tg.cd(tg.path(".")) tg.runFail("install") tg.grepStderr("cannot install, GOBIN must be an absolute path", "go install must fail if $GOBIN is a relative path") } @@ -1438,8 +1485,38 @@ func TestInstallIntoGOPATH(t *testing.T) { func TestBuildOutputToDevNull(t *testing.T) { tg := testgo(t) defer tg.cleanup() + fi1, err1 := os.Lstat(os.DevNull) tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.run("build", "-o", os.DevNull, "go-cmd-test") + fi2, err2 := os.Lstat(os.DevNull) + if err1 == nil { + if err2 != nil { + t.Errorf("second stat of /dev/null failed: %v", err2) + } else if !os.SameFile(fi1, fi2) { + t.Errorf("/dev/null changed: now %v was %v", fi1, fi2) + } + } +} + +// Issue 28549. 
+func TestTestOutputToDevNull(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + fi1, err1 := os.Lstat(os.DevNull) + tg.makeTempdir() + tg.setenv("GOPATH", tg.path(".")) + tg.tempFile("src/p/p.go", "package p\n") + tg.tempFile("src/p/p_test.go", "package p\nimport \"testing\"\nfunc TestX(t *testing.T) {}\n") + tg.run("test", "-o", os.DevNull, "-c", "p") + tg.mustNotExist("p.test") + fi2, err2 := os.Lstat(os.DevNull) + if err1 == nil { + if err2 != nil { + t.Errorf("second stat of /dev/null failed: %v", err2) + } else if !os.SameFile(fi1, fi2) { + t.Errorf("/dev/null changed: now %v was %v", fi1, fi2) + } + } } func TestPackageMainTestImportsArchiveNotBinary(t *testing.T) { @@ -1730,20 +1807,23 @@ func TestGoListDeps(t *testing.T) { tg.run("list", "-deps", "p1") tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4") - // Check the list is in dependency order. - tg.run("list", "-deps", "math") - want := "internal/cpu\nunsafe\nmath\n" - out := tg.stdout.String() - if !strings.Contains(out, "internal/cpu") { - // Some systems don't use internal/cpu. - want = "unsafe\nmath\n" - } - if tg.stdout.String() != want { - t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want) + if runtime.Compiler != "gccgo" { + // Check the list is in dependency order. + tg.run("list", "-deps", "math") + want := "internal/cpu\nunsafe\nmath/bits\nmath\n" + out := tg.stdout.String() + if !strings.Contains(out, "internal/cpu") { + // Some systems don't use internal/cpu. 
+ want = "unsafe\nmath/bits\nmath\n" + } + if tg.stdout.String() != want { + t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want) + } } } func TestGoListTest(t *testing.T) { + skipIfGccgo(t, "gccgo does not have standard packages") tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -1816,6 +1896,7 @@ func TestGoListCompiledCgo(t *testing.T) { } func TestGoListExport(t *testing.T) { + skipIfGccgo(t, "gccgo does not have standard packages") tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -2052,6 +2133,7 @@ func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) { } func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2108,6 +2190,7 @@ func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) { } func TestGoTestDashOWritesBinary(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2351,14 +2434,14 @@ func TestShadowingLogic(t *testing.T) { // The math in root1 is not "math" because the standard math is. tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/math") - pwdForwardSlash := strings.Replace(pwd, string(os.PathSeparator), "/", -1) + pwdForwardSlash := strings.ReplaceAll(pwd, string(os.PathSeparator), "/") if !strings.HasPrefix(pwdForwardSlash, "/") { pwdForwardSlash = "/" + pwdForwardSlash } // The output will have makeImportValid applies, but we only // bother to deal with characters we might reasonably see. 
for _, r := range " :" { - pwdForwardSlash = strings.Replace(pwdForwardSlash, string(r), "_", -1) + pwdForwardSlash = strings.ReplaceAll(pwdForwardSlash, string(r), "_") } want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")" if strings.TrimSpace(tg.getStdout()) != want { @@ -2403,6 +2486,7 @@ func checkCoverage(tg *testgoData, data string) { } func TestCoverageRuns(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2414,6 +2498,7 @@ func TestCoverageRuns(t *testing.T) { } func TestCoverageDotImport(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -2426,6 +2511,7 @@ func TestCoverageDotImport(t *testing.T) { // Check that coverage analysis uses set mode. // Also check that coverage profiles merge correctly. func TestCoverageUsesSetMode(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2456,6 +2542,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) { if !canRace { t.Skip("skipping because race detector not supported") } + skipIfGccgo(t, "gccgo has no cover tool") tg := testgo(t) defer tg.cleanup() @@ -2473,6 +2560,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) { } func TestCoverageSyncAtomicImport(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2494,6 +2582,7 @@ func TestCoverageDepLoop(t *testing.T) { } func TestCoverageImportMainLoop(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tg := testgo(t) defer tg.cleanup() tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) @@ -2504,6 +2593,7 @@ func TestCoverageImportMainLoop(t *testing.T) { } func TestCoveragePattern(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2519,6 +2609,7 @@ func TestCoveragePattern(t 
*testing.T) { } func TestCoverageErrorLine(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2540,7 +2631,7 @@ func TestCoverageErrorLine(t *testing.T) { // It's OK that stderr2 drops the character position in the error, // because of the //line directive (see golang.org/issue/22662). - stderr = strings.Replace(stderr, "p.go:4:2:", "p.go:4:", -1) + stderr = strings.ReplaceAll(stderr, "p.go:4:2:", "p.go:4:") if stderr != stderr2 { t.Logf("test -cover changed error messages:\nbefore:\n%s\n\nafter:\n%s", stderr, stderr2) t.Skip("golang.org/issue/22660") @@ -2562,6 +2653,7 @@ func TestTestBuildFailureOutput(t *testing.T) { } func TestCoverageFunc(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) tg := testgo(t) defer tg.cleanup() @@ -2577,6 +2669,7 @@ func TestCoverageFunc(t *testing.T) { // Issue 24588. func TestCoverageDashC(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -2685,6 +2778,7 @@ func main() { } func TestCoverageWithCgo(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tooSlow(t) if !canCgo { t.Skip("skipping because cgo not enabled") @@ -4008,8 +4102,6 @@ func TestCgoConsistentResults(t *testing.T) { t.Skip("skipping because cgo not enabled") } switch runtime.GOOS { - case "freebsd": - testenv.SkipFlaky(t, 15405) case "solaris": testenv.SkipFlaky(t, 13247) } @@ -4958,7 +5050,8 @@ func TestExecBuildX(t *testing.T) { tg := testgo(t) defer tg.cleanup() - tg.setenv("GOCACHE", "off") + tg.tempDir("cache") + tg.setenv("GOCACHE", tg.path("cache")) tg.tempFile("main.go", `package main; import "C"; func main() { print("hello") }`) src := tg.path("main.go") @@ -5159,6 +5252,7 @@ func TestCacheCoverage(t *testing.T) { } func TestCacheVet(t *testing.T) { + skipIfGccgo(t, "gccgo has no standard packages") tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -5488,30 +5582,6 @@ func TestTestCacheInputs(t *testing.T) { 
} } -func TestNoCache(t *testing.T) { - switch runtime.GOOS { - case "windows": - t.Skipf("no unwritable directories on %s", runtime.GOOS) - } - if os.Getuid() == 0 { - t.Skip("skipping test because running as root") - } - - tg := testgo(t) - defer tg.cleanup() - tg.parallel() - tg.tempFile("triv.go", `package main; func main() {}`) - tg.must(os.MkdirAll(tg.path("unwritable"), 0555)) - home := "HOME" - if runtime.GOOS == "plan9" { - home = "home" - } - tg.setenv(home, tg.path(filepath.Join("unwritable", "home"))) - tg.unsetenv("GOCACHE") - tg.run("build", "-o", tg.path("triv"), tg.path("triv.go")) - tg.grepStderr("disabling cache", "did not disable cache") -} - func TestTestVet(t *testing.T) { tooSlow(t) tg := testgo(t) @@ -5661,17 +5731,6 @@ func TestFmtLoadErrors(t *testing.T) { tg.run("fmt", "-n", "exclude") } -func TestRelativePkgdir(t *testing.T) { - tooSlow(t) - tg := testgo(t) - defer tg.cleanup() - tg.makeTempdir() - tg.setenv("GOCACHE", "off") - tg.cd(tg.tempdir) - - tg.run("build", "-i", "-pkgdir=.", "runtime") -} - func TestGoTestMinusN(t *testing.T) { // Intent here is to verify that 'go test -n' works without crashing. // This reuses flag_test.go, but really any test would do. @@ -6053,30 +6112,9 @@ func TestDontReportRemoveOfEmptyDir(t *testing.T) { } } -// Issue 23264. -func TestNoRelativeTmpdir(t *testing.T) { - tg := testgo(t) - defer tg.cleanup() - - tg.tempFile("src/a/a.go", `package a`) - tg.cd(tg.path(".")) - tg.must(os.Mkdir("tmp", 0777)) - - tg.setenv("GOCACHE", "off") - tg.setenv("GOPATH", tg.path(".")) - tg.setenv("GOTMPDIR", "tmp") - tg.run("build", "-work", "a") - tg.grepStderr("WORK=[^t]", "work should be absolute path") - - tg.unsetenv("GOTMPDIR") - tg.setenv("TMP", "tmp") // windows - tg.setenv("TMPDIR", "tmp") // unix - tg.run("build", "-work", "a") - tg.grepStderr("WORK=[^t]", "work should be absolute path") -} - // Issue 24704. 
func TestLinkerTmpDirIsDeleted(t *testing.T) { + skipIfGccgo(t, "gccgo does not use cmd/link") if !canCgo { t.Skip("skipping because cgo not enabled") } @@ -6124,6 +6162,7 @@ func TestLinkerTmpDirIsDeleted(t *testing.T) { } func testCDAndGOPATHAreDifferent(tg *testgoData, cd, gopath string) { + skipIfGccgo(tg.t, "gccgo does not support -ldflags -X") tg.setenv("GOPATH", gopath) tg.tempDir("dir") @@ -6150,7 +6189,7 @@ func TestCDAndGOPATHAreDifferent(t *testing.T) { testCDAndGOPATHAreDifferent(tg, cd, gopath) if runtime.GOOS == "windows" { - testCDAndGOPATHAreDifferent(tg, cd, strings.Replace(gopath, `\`, `/`, -1)) + testCDAndGOPATHAreDifferent(tg, cd, strings.ReplaceAll(gopath, `\`, `/`)) testCDAndGOPATHAreDifferent(tg, cd, strings.ToUpper(gopath)) testCDAndGOPATHAreDifferent(tg, cd, strings.ToLower(gopath)) } @@ -6179,6 +6218,7 @@ func TestGoBuildDashODevNull(t *testing.T) { // Issue 25093. func TestCoverpkgTestOnly(t *testing.T) { + skipIfGccgo(t, "gccgo has no cover tool") tg := testgo(t) defer tg.cleanup() tg.parallel() diff --git a/src/cmd/go/help_test.go b/src/cmd/go/help_test.go new file mode 100644 index 0000000000000..ec6a9d11cbe2e --- /dev/null +++ b/src/cmd/go/help_test.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl + +package main_test + +import ( + "bytes" + "io/ioutil" + "testing" + + "cmd/go/internal/help" +) + +func TestDocsUpToDate(t *testing.T) { + buf := new(bytes.Buffer) + // Match the command in mkalldocs.sh that generates alldocs.go. 
+ help.Help(buf, []string{"documentation"}) + data, err := ioutil.ReadFile("alldocs.go") + if err != nil { + t.Fatalf("error reading alldocs.go: %v", err) + } + if !bytes.Equal(data, buf.Bytes()) { + t.Errorf("alldocs.go is not up to date; run mkalldocs.sh to regenerate it") + } +} diff --git a/src/cmd/go/internal/base/signal_unix.go b/src/cmd/go/internal/base/signal_unix.go index 38490b571bc23..c109eecadb332 100644 --- a/src/cmd/go/internal/base/signal_unix.go +++ b/src/cmd/go/internal/base/signal_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js linux nacl netbsd openbsd solaris package base diff --git a/src/cmd/go/internal/cache/cache.go b/src/cmd/go/internal/cache/cache.go index 0cf01550ff90d..ab84cf6302c0d 100644 --- a/src/cmd/go/internal/cache/cache.go +++ b/src/cmd/go/internal/cache/cache.go @@ -18,6 +18,8 @@ import ( "strconv" "strings" "time" + + "cmd/go/internal/renameio" ) // An ActionID is a cache action key, the hash of a complete description of a @@ -283,7 +285,9 @@ func (c *Cache) Trim() { c.trimSubdir(subdir, cutoff) } - ioutil.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) + // Ignore errors from here: if we don't write the complete timestamp, the + // cache will appear older than it is, and we'll trim it again next time. + renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix()))) } // trimSubdir trims a single cache subdirectory. @@ -338,6 +342,8 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify } file := c.fileName(id, "a") if err := ioutil.WriteFile(file, entry, 0666); err != nil { + // TODO(bcmills): This Remove potentially races with another go command writing to file. + // Can we eliminate it? 
os.Remove(file) return err } diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go index 02fc1e896f74d..f545c147009f2 100644 --- a/src/cmd/go/internal/cache/default.go +++ b/src/cmd/go/internal/cache/default.go @@ -9,8 +9,9 @@ import ( "io/ioutil" "os" "path/filepath" - "runtime" "sync" + + "cmd/go/internal/base" ) // Default returns the default cache to use, or nil if no cache should be used. @@ -35,15 +36,15 @@ See golang.org to learn more about Go. // initDefaultCache does the work of finding the default cache // the first time Default is called. func initDefaultCache() { - dir, showWarnings := defaultDir() - if dir == "off" { - return + dir := DefaultDir() + if dir == "off" || dir == "" { + if defaultDirErr != nil { + base.Fatalf("build cache is required, but could not be located: %v", defaultDirErr) + } + base.Fatalf("build cache is disabled by GOCACHE=off, but required as of Go 1.12") } if err := os.MkdirAll(dir, 0777); err != nil { - if showWarnings { - fmt.Fprintf(os.Stderr, "go: disabling cache (%s) due to initialization failure: %s\n", dir, err) - } - return + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. @@ -52,78 +53,40 @@ func initDefaultCache() { c, err := Open(dir) if err != nil { - if showWarnings { - fmt.Fprintf(os.Stderr, "go: disabling cache (%s) due to initialization failure: %s\n", dir, err) - } - return + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) } defaultCache = c } +var ( + defaultDirOnce sync.Once + defaultDir string + defaultDirErr error +) + // DefaultDir returns the effective GOCACHE setting. // It returns "off" if the cache is disabled. func DefaultDir() string { - dir, _ := defaultDir() - return dir -} + // Save the result of the first call to DefaultDir for later use in + // initDefaultCache. 
cmd/go/main.go explicitly sets GOCACHE so that + // subprocesses will inherit it, but that means initDefaultCache can't + // otherwise distinguish between an explicit "off" and a UserCacheDir error. -// defaultDir returns the effective GOCACHE setting. -// It returns "off" if the cache is disabled. -// The second return value reports whether warnings should -// be shown if the cache fails to initialize. -func defaultDir() (string, bool) { - dir := os.Getenv("GOCACHE") - if dir != "" { - return dir, true - } - - // Compute default location. - // TODO(rsc): This code belongs somewhere else, - // like maybe ioutil.CacheDir or os.CacheDir. - showWarnings := true - switch runtime.GOOS { - case "windows": - dir = os.Getenv("LocalAppData") - if dir == "" { - // Fall back to %AppData%, the old name of - // %LocalAppData% on Windows XP. - dir = os.Getenv("AppData") - } - if dir == "" { - return "off", true + defaultDirOnce.Do(func() { + defaultDir = os.Getenv("GOCACHE") + if defaultDir != "" { + return } - case "darwin": - dir = os.Getenv("HOME") - if dir == "" { - return "off", true + // Compute default location. + dir, err := os.UserCacheDir() + if err != nil { + defaultDir = "off" + defaultDirErr = fmt.Errorf("GOCACHE is not defined and %v", err) + return } - dir += "/Library/Caches" + defaultDir = filepath.Join(dir, "go-build") + }) - case "plan9": - dir = os.Getenv("home") - if dir == "" { - return "off", true - } - // Plan 9 has no established per-user cache directory, - // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. 
- dir += "/lib/cache" - - default: // Unix - // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - dir = os.Getenv("XDG_CACHE_HOME") - if dir == "" { - dir = os.Getenv("HOME") - if dir == "" { - return "off", true - } - if dir == "/" { - // probably docker run with -u flag - // https://golang.org/issue/26280 - showWarnings = false - } - dir += "/.cache" - } - } - return filepath.Join(dir, "go-build"), showWarnings + return defaultDir } diff --git a/src/cmd/go/internal/cache/default_unix_test.go b/src/cmd/go/internal/cache/default_unix_test.go deleted file mode 100644 index a207497a42cc0..0000000000000 --- a/src/cmd/go/internal/cache/default_unix_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !windows,!darwin,!plan9 - -package cache - -import ( - "os" - "strings" - "testing" -) - -func TestDefaultDir(t *testing.T) { - goCacheDir := "/tmp/test-go-cache" - xdgCacheDir := "/tmp/test-xdg-cache" - homeDir := "/tmp/test-home" - - // undo env changes when finished - defer func(GOCACHE, XDG_CACHE_HOME, HOME string) { - os.Setenv("GOCACHE", GOCACHE) - os.Setenv("XDG_CACHE_HOME", XDG_CACHE_HOME) - os.Setenv("HOME", HOME) - }(os.Getenv("GOCACHE"), os.Getenv("XDG_CACHE_HOME"), os.Getenv("HOME")) - - os.Setenv("GOCACHE", goCacheDir) - os.Setenv("XDG_CACHE_HOME", xdgCacheDir) - os.Setenv("HOME", homeDir) - - dir, showWarnings := defaultDir() - if dir != goCacheDir { - t.Errorf("Cache DefaultDir %q should be $GOCACHE %q", dir, goCacheDir) - } - if !showWarnings { - t.Error("Warnings should be shown when $GOCACHE is set") - } - - os.Unsetenv("GOCACHE") - dir, showWarnings = defaultDir() - if !strings.HasPrefix(dir, xdgCacheDir+"/") { - t.Errorf("Cache DefaultDir %q should be under $XDG_CACHE_HOME %q when $GOCACHE is unset", dir, xdgCacheDir) - } - if !showWarnings { - t.Error("Warnings should be 
shown when $XDG_CACHE_HOME is set") - } - - os.Unsetenv("XDG_CACHE_HOME") - dir, showWarnings = defaultDir() - if !strings.HasPrefix(dir, homeDir+"/.cache/") { - t.Errorf("Cache DefaultDir %q should be under $HOME/.cache %q when $GOCACHE and $XDG_CACHE_HOME are unset", dir, homeDir+"/.cache") - } - if !showWarnings { - t.Error("Warnings should be shown when $HOME is not /") - } - - os.Unsetenv("HOME") - if dir, _ := defaultDir(); dir != "off" { - t.Error("Cache not disabled when $GOCACHE, $XDG_CACHE_HOME, and $HOME are unset") - } - - os.Setenv("HOME", "/") - if _, showWarnings := defaultDir(); showWarnings { - // https://golang.org/issue/26280 - t.Error("Cache initalization warnings should be squelched when $GOCACHE and $XDG_CACHE_HOME are unset and $HOME is /") - } -} diff --git a/src/cmd/go/internal/cache/hash.go b/src/cmd/go/internal/cache/hash.go index 0e45e7db5478b..e4bb2a34bb4a1 100644 --- a/src/cmd/go/internal/cache/hash.go +++ b/src/cmd/go/internal/cache/hash.go @@ -123,7 +123,7 @@ var hashFileCache struct { m map[string][HashSize]byte } -// HashFile returns the hash of the named file. +// FileHash returns the hash of the named file. // It caches repeated lookups for a given file, // and the cache entry for a file can be initialized // using SetFileHash. 
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index d023592eedcea..27121ed2ae679 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -10,6 +10,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" "strings" "time" @@ -17,6 +18,7 @@ import ( "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch" "cmd/go/internal/modload" "cmd/go/internal/work" @@ -103,18 +105,16 @@ func init() { } func runClean(cmd *base.Command, args []string) { - if len(args) == 0 && modload.Failed() { - // Don't try to clean current directory, - // which will cause modload to base.Fatalf. - } else { + if len(args) > 0 || !modload.Enabled() || modload.HasModRoot() { for _, pkg := range load.PackagesAndErrors(args) { clean(pkg) } } + var b work.Builder + b.Print = fmt.Print + if cleanCache { - var b work.Builder - b.Print = fmt.Print dir := cache.DefaultDir() if dir != "off" { // Remove the cache subdirectories but not the top cache directory. @@ -145,7 +145,22 @@ func runClean(cmd *base.Command, args []string) { // right now are to be ignored. 
dir := cache.DefaultDir() if dir != "off" { - err := ioutil.WriteFile(filepath.Join(dir, "testexpire.txt"), []byte(fmt.Sprintf("%d\n", time.Now().UnixNano())), 0666) + f, err := lockedfile.Edit(filepath.Join(dir, "testexpire.txt")) + if err == nil { + now := time.Now().UnixNano() + buf, _ := ioutil.ReadAll(f) + prev, _ := strconv.ParseInt(strings.TrimSpace(string(buf)), 10, 64) + if now > prev { + if err = f.Truncate(0); err == nil { + if _, err = f.Seek(0, 0); err == nil { + _, err = fmt.Fprintf(f, "%d\n", now) + } + } + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + } if err != nil { base.Errorf("go clean -testcache: %v", err) } @@ -156,24 +171,15 @@ func runClean(cmd *base.Command, args []string) { if modfetch.PkgMod == "" { base.Fatalf("go clean -modcache: no module cache") } - if err := removeAll(modfetch.PkgMod); err != nil { - base.Errorf("go clean -modcache: %v", err) - } - } -} - -func removeAll(dir string) error { - // Module cache has 0555 directories; make them writable in order to remove content. - filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return nil // ignore errors walking in file system + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "rm -rf %s", modfetch.PkgMod) } - if info.IsDir() { - os.Chmod(path, 0777) + if !cfg.BuildN { + if err := modfetch.RemoveAll(modfetch.PkgMod); err != nil { + base.Errorf("go clean -modcache: %v", err) + } } - return nil - }) - return os.RemoveAll(dir) + } } var cleaned = map[*load.Package]bool{} diff --git a/src/cmd/go/internal/cmdflag/flag.go b/src/cmd/go/internal/cmdflag/flag.go index b2a67e6f74a82..7f2c53def8fa2 100644 --- a/src/cmd/go/internal/cmdflag/flag.go +++ b/src/cmd/go/internal/cmdflag/flag.go @@ -79,15 +79,15 @@ func AddKnownFlags(cmd string, defns []*Defn) { // Parse sees if argument i is present in the definitions and if so, // returns its definition, value, and whether it consumed an extra word. 
-// If the flag begins (cmd+".") it is ignored for the purpose of this function. -func Parse(cmd string, defns []*Defn, args []string, i int) (f *Defn, value string, extra bool) { +// If the flag begins (cmd.Name()+".") it is ignored for the purpose of this function. +func Parse(cmd string, usage func(), defns []*Defn, args []string, i int) (f *Defn, value string, extra bool) { arg := args[i] if strings.HasPrefix(arg, "--") { // reduce two minuses to one arg = arg[1:] } switch arg { case "-?", "-h", "-help": - base.Usage() + usage() } if arg == "" || arg[0] != '-' { return diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go index 4e7dca082d742..bad05ff9128cb 100644 --- a/src/cmd/go/internal/doc/doc.go +++ b/src/cmd/go/internal/doc/doc.go @@ -106,12 +106,21 @@ Examples: cd go/src/encoding/json; go doc decode Flags: + -all + Show all the documentation for the package. -c Respect case when matching symbols. -cmd Treat a command (package main) like a regular package. Otherwise package main's exported symbols are hidden when showing the package's top-level documentation. + -src + Show the full source code for the symbol. This will + display the full Go source of its declaration and + definition, such as a function definition (including + the body), type declaration or enclosing const + block. The output may therefore include unexported + details. -u Show documentation for unexported as well as exported symbols, methods, and fields. diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index afadbade38ef7..ae98d3999a1fa 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -115,8 +115,10 @@ func findEnv(env []cfg.EnvVar, name string) string { // ExtraEnvVars returns environment variables that should not leak into child processes. 
func ExtraEnvVars() []cfg.EnvVar { gomod := "" - if modload.Init(); modload.ModRoot != "" { - gomod = filepath.Join(modload.ModRoot, "go.mod") + if modload.HasModRoot() { + gomod = filepath.Join(modload.ModRoot(), "go.mod") + } else if modload.Enabled() { + gomod = os.DevNull } return []cfg.EnvVar{ {Name: "GOMOD", Value: gomod}, @@ -203,7 +205,7 @@ func runEnv(cmd *base.Command, args []string) { fmt.Printf("%s=\"%s\"\n", e.Name, e.Value) case "plan9": if strings.IndexByte(e.Value, '\x00') < 0 { - fmt.Printf("%s='%s'\n", e.Name, strings.Replace(e.Value, "'", "''", -1)) + fmt.Printf("%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''")) } else { v := strings.Split(e.Value, "\x00") fmt.Printf("%s=(", e.Name) diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go index aab164148ff88..4d741df2b4f1a 100644 --- a/src/cmd/go/internal/fix/fix.go +++ b/src/cmd/go/internal/fix/fix.go @@ -34,7 +34,7 @@ See also: go fmt, go vet. func runFix(cmd *base.Command, args []string) { printed := false for _, pkg := range load.Packages(args) { - if modload.Enabled() && !pkg.Module.Main { + if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index 9482be98aefb5..124dbc05f5d8f 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -49,11 +49,14 @@ that can be run locally. It must either be in the shell path command alias, described below. 
To convey to humans and machine tools that code is generated, -generated source should have a line early in the file that -matches the following regular expression (in Go syntax): +generated source should have a line that matches the following +regular expression (in Go syntax): ^// Code generated .* DO NOT EDIT\.$ +The line may appear anywhere in the file, but is typically +placed near the beginning so it is easy to find. + Note that go generate does not parse the file, so lines that look like directives in comments or multiline strings will be treated as directives. @@ -161,7 +164,7 @@ func runGenerate(cmd *base.Command, args []string) { // Even if the arguments are .go files, this loop suffices. printed := false for _, pkg := range load.Packages(args) { - if modload.Enabled() && !pkg.Module.Main { + if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index e4148bceb0484..a314c57160b39 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -232,7 +232,7 @@ var downloadCache = map[string]bool{} var downloadRootCache = map[string]bool{} // download runs the download half of the get command -// for the package named by the argument. +// for the package or pattern named by the argument. func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) { if mode&load.ResolveImport != 0 { // Caller is responsible for expanding vendor paths. @@ -402,6 +402,23 @@ func downloadPackage(p *load.Package) error { security = web.Insecure } + // p can be either a real package, or a pseudo-package whose “import path” is + // actually a wildcard pattern. + // Trim the path at the element containing the first wildcard, + // and hope that it applies to the wildcarded parts too. + // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH. 
+ importPrefix := p.ImportPath + if i := strings.Index(importPrefix, "..."); i >= 0 { + slash := strings.LastIndexByte(importPrefix[:i], '/') + if slash < 0 { + return fmt.Errorf("cannot expand ... in %q", p.ImportPath) + } + importPrefix = importPrefix[:slash] + } + if err := CheckImportPath(importPrefix); err != nil { + return fmt.Errorf("%s: invalid import path: %v", p.ImportPath, err) + } + if p.Internal.Build.SrcRoot != "" { // Directory exists. Look for checkout along path to src. vcs, rootPath, err = vcsFromDir(p.Dir, p.Internal.Build.SrcRoot) @@ -421,7 +438,7 @@ func downloadPackage(p *load.Package) error { } repo = remote if !*getF && err == nil { - if rr, err := RepoRootForImportPath(p.ImportPath, IgnoreMod, security); err == nil { + if rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security); err == nil { repo := rr.Repo if rr.vcs.resolveRepo != nil { resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo) @@ -438,7 +455,7 @@ func downloadPackage(p *load.Package) error { } else { // Analyze the import path to determine the version control system, // repository, and the import path for the root of the repository. - rr, err := RepoRootForImportPath(p.ImportPath, IgnoreMod, security) + rr, err := RepoRootForImportPath(importPrefix, IgnoreMod, security) if err != nil { return err } diff --git a/src/cmd/go/internal/get/path.go b/src/cmd/go/internal/get/path.go new file mode 100644 index 0000000000000..d443bd28a9656 --- /dev/null +++ b/src/cmd/go/internal/get/path.go @@ -0,0 +1,192 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package get + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// The following functions are copied verbatim from cmd/go/internal/module/module.go, +// with a change to additionally reject Windows short-names, +// and one to accept arbitrary letters (golang.org/issue/29101). 
+// +// TODO(bcmills): After the call site for this function is backported, +// consolidate this back down to a single copy. +// +// NOTE: DO NOT MERGE THESE UNTIL WE DECIDE ABOUT ARBITRARY LETTERS IN MODULE MODE. + +// CheckImportPath checks that an import path is valid. +func CheckImportPath(path string) error { + if err := checkPath(path, false); err != nil { + return fmt.Errorf("malformed import path %q: %v", path, err) + } + return nil +} + +// checkPath checks that a general path is valid. +// It returns an error describing why but not mentioning path. +// Because these checks apply to both module paths and import paths, +// the caller is expected to add the "malformed ___ path %q: " prefix. +// fileName indicates whether the final element of the path is a file name +// (as opposed to a directory name). +func checkPath(path string, fileName bool) error { + if !utf8.ValidString(path) { + return fmt.Errorf("invalid UTF-8") + } + if path == "" { + return fmt.Errorf("empty string") + } + if strings.Contains(path, "..") { + return fmt.Errorf("double dot") + } + if strings.Contains(path, "//") { + return fmt.Errorf("double slash") + } + if path[len(path)-1] == '/' { + return fmt.Errorf("trailing slash") + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if err := checkElem(path[elemStart:i], fileName); err != nil { + return err + } + elemStart = i + 1 + } + } + if err := checkElem(path[elemStart:], fileName); err != nil { + return err + } + return nil +} + +// checkElem checks whether an individual path element is valid. +// fileName indicates whether the element is a file name (not a directory name). +func checkElem(elem string, fileName bool) error { + if elem == "" { + return fmt.Errorf("empty path element") + } + if strings.Count(elem, ".") == len(elem) { + return fmt.Errorf("invalid path element %q", elem) + } + if elem[0] == '.' && !fileName { + return fmt.Errorf("leading dot in path element") + } + if elem[len(elem)-1] == '.' 
{ + return fmt.Errorf("trailing dot in path element") + } + + charOK := pathOK + if fileName { + charOK = fileNameOK + } + for _, r := range elem { + if !charOK(r) { + return fmt.Errorf("invalid char %q", r) + } + } + + // Windows disallows a bunch of path elements, sadly. + // See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file + short := elem + if i := strings.Index(short, "."); i >= 0 { + short = short[:i] + } + for _, bad := range badWindowsNames { + if strings.EqualFold(bad, short) { + return fmt.Errorf("disallowed path element %q", elem) + } + } + + // Reject path components that look like Windows short-names. + // Those usually end in a tilde followed by one or more ASCII digits. + if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 { + suffix := short[tilde+1:] + suffixIsDigits := true + for _, r := range suffix { + if r < '0' || r > '9' { + suffixIsDigits = false + break + } + } + if suffixIsDigits { + return fmt.Errorf("trailing tilde and digits in path element") + } + } + + return nil +} + +// pathOK reports whether r can appear in an import path element. +// +// NOTE: This function DIVERGES from module mode pathOK by accepting Unicode letters. +func pathOK(r rune) bool { + if r < utf8.RuneSelf { + return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || + '0' <= r && r <= '9' || + 'A' <= r && r <= 'Z' || + 'a' <= r && r <= 'z' + } + return unicode.IsLetter(r) +} + +// fileNameOK reports whether r can appear in a file name. +// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. +// If we expand the set of allowed characters here, we have to +// work harder at detecting potential case-folding and normalization collisions. +// See note about "safe encoding" below. +func fileNameOK(r rune) bool { + if r < utf8.RuneSelf { + // Entire set of ASCII punctuation, from which we remove characters: + // ! " # $ % & ' ( ) * + , - . / : ; < = > ? 
@ [ \ ] ^ _ ` { | } ~ + // We disallow some shell special characters: " ' * < > ? ` | + // (Note that some of those are disallowed by the Windows file system as well.) + // We also disallow path separators / : and \ (fileNameOK is only called on path element characters). + // We allow spaces (U+0020) in file names. + const allowed = "!#$%&()+,-.=@[]^_{}~ " + if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' { + return true + } + for i := 0; i < len(allowed); i++ { + if rune(allowed[i]) == r { + return true + } + } + return false + } + // It may be OK to add more ASCII punctuation here, but only carefully. + // For example Windows disallows < > \, and macOS disallows :, so we must not allow those. + return unicode.IsLetter(r) +} + +// badWindowsNames are the reserved file path elements on Windows. +// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +var badWindowsNames = []string{ + "CON", + "PRN", + "AUX", + "NUL", + "COM1", + "COM2", + "COM3", + "COM4", + "COM5", + "COM6", + "COM7", + "COM8", + "COM9", + "LPT1", + "LPT2", + "LPT3", + "LPT4", + "LPT5", + "LPT6", + "LPT7", + "LPT8", + "LPT9", +} diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index 5cd164f2ff3a1..a7a2ba32cc33c 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -647,14 +647,7 @@ const ( func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { rr, err := repoRootFromVCSPaths(importPath, "", security, vcsPaths) if err == errUnknownSite { - // If there are wildcards, look up the thing before the wildcard, - // hoping it applies to the wildcarded parts too. - // This makes 'go get rsc.io/pdf/...' work in a fresh GOPATH. 
- lookup := strings.TrimSuffix(importPath, "/...") - if i := strings.Index(lookup, "/.../"); i >= 0 { - lookup = lookup[:i] - } - rr, err = repoRootForImportDynamic(lookup, mod, security) + rr, err = repoRootForImportDynamic(importPath, mod, security) if err != nil { err = fmt.Errorf("unrecognized import path %q (%v)", importPath, err) } @@ -667,6 +660,7 @@ func RepoRootForImportPath(importPath string, mod ModuleMode, security web.Secur } } + // Should have been taken care of above, but make sure. if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") { // Do not allow wildcards in the repo root. rr = nil @@ -903,16 +897,16 @@ type metaImport struct { Prefix, VCS, RepoRoot string } -func splitPathHasPrefix(path, prefix []string) bool { - if len(path) < len(prefix) { +// pathPrefix reports whether sub is a prefix of s, +// only considering entire path components. +func pathPrefix(s, sub string) bool { + // strings.HasPrefix is necessary but not sufficient. + if !strings.HasPrefix(s, sub) { return false } - for i, p := range prefix { - if path[i] != p { - return false - } - } - return true + // The remainder after the prefix must either be empty or start with a slash. + rem := s[len(sub):] + return rem == "" || rem[0] == '/' } // A ImportMismatchError is returned where metaImport/s are present @@ -935,13 +929,10 @@ func (m ImportMismatchError) Error() string { // errNoMatch is returned if none match. 
func matchGoImport(imports []metaImport, importPath string) (metaImport, error) { match := -1 - imp := strings.Split(importPath, "/") errImportMismatch := ImportMismatchError{importPath: importPath} for i, im := range imports { - pre := strings.Split(im.Prefix, "/") - - if !splitPathHasPrefix(imp, pre) { + if !pathPrefix(importPath, im.Prefix) { errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix) continue } @@ -966,10 +957,14 @@ func matchGoImport(imports []metaImport, importPath string) (metaImport, error) // expand rewrites s to replace {k} with match[k] for each key k in match. func expand(match map[string]string, s string) string { + // We want to replace each match exactly once, and the result of expansion + // must not depend on the iteration order through the map. + // A strings.Replacer has exactly the properties we're looking for. + oldNew := make([]string, 0, 2*len(match)) for k, v := range match { - s = strings.Replace(s, "{"+k+"}", v, -1) + oldNew = append(oldNew, "{"+k+"}", v) } - return s + return strings.NewReplacer(oldNew...).Replace(s) } // vcsPaths defines the meaning of import paths referring to diff --git a/src/cmd/go/internal/help/help.go b/src/cmd/go/internal/help/help.go index a80afe36c412c..312a29590f43f 100644 --- a/src/cmd/go/internal/help/help.go +++ b/src/cmd/go/internal/help/help.go @@ -20,16 +20,16 @@ import ( ) // Help implements the 'help' command. -func Help(args []string) { +func Help(w io.Writer, args []string) { // 'go help documentation' generates doc.go. if len(args) == 1 && args[0] == "documentation" { - fmt.Println("// Copyright 2011 The Go Authors. 
All rights reserved.") - fmt.Println("// Use of this source code is governed by a BSD-style") - fmt.Println("// license that can be found in the LICENSE file.") - fmt.Println() - fmt.Println("// Code generated by mkalldocs.sh; DO NOT EDIT.") - fmt.Println("// Edit the documentation in other files and rerun mkalldocs.sh to generate this one.") - fmt.Println() + fmt.Fprintln(w, "// Copyright 2011 The Go Authors. All rights reserved.") + fmt.Fprintln(w, "// Use of this source code is governed by a BSD-style") + fmt.Fprintln(w, "// license that can be found in the LICENSE file.") + fmt.Fprintln(w) + fmt.Fprintln(w, "// Code generated by mkalldocs.sh; DO NOT EDIT.") + fmt.Fprintln(w, "// Edit the documentation in other files and rerun mkalldocs.sh to generate this one.") + fmt.Fprintln(w) buf := new(bytes.Buffer) PrintUsage(buf, base.Go) usage := &base.Command{Long: buf.String()} @@ -42,8 +42,8 @@ func Help(args []string) { cmds = append(cmds, cmd) cmds = append(cmds, cmd.Commands...) } - tmpl(&commentWriter{W: os.Stdout}, documentationTemplate, cmds) - fmt.Println("package main") + tmpl(&commentWriter{W: w}, documentationTemplate, cmds) + fmt.Fprintln(w, "package main") return } diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index aff4ce12f6c9a..c219a45d74948 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -266,7 +266,7 @@ listed in the GOPATH environment variable. (See 'go help gopath-get' and 'go help gopath'.) When using modules, downloaded packages are stored in the module cache. -(See 'go help modules-get' and 'go help goproxy'.) +(See 'go help module-get' and 'go help goproxy'.) When using modules, an additional variant of the go-import meta tag is recognized and is preferred over those listing version control systems. @@ -507,6 +507,10 @@ General-purpose environment variables: The directory where the go command will write temporary source files, packages, and binaries. 
+Each entry in the GOFLAGS list must be a standalone flag. +Because the entries are space-separated, flag values must +not contain spaces. + Environment variables for use with cgo: CC @@ -540,6 +544,10 @@ Environment variables for use with cgo: The command to use to compile C++ code. PKG_CONFIG Path to pkg-config tool. + AR + The command to use to manipulate library archives when + building with the gccgo compiler. + The default is 'ar'. Architecture-specific environment variables: @@ -628,14 +636,14 @@ at the first item in the file that is not a blank line or //-style line comment. See the go/build package documentation for more details. -Non-test Go source files can also include a //go:binary-only-package -comment, indicating that the package sources are included -for documentation only and must not be used to build the -package binary. This enables distribution of Go packages in -their compiled form alone. Even binary-only packages require -accurate import blocks listing required dependencies, so that -those dependencies can be supplied when linking the resulting -command. +Through the Go 1.12 release, non-test Go source files can also include +a //go:binary-only-package comment, indicating that the package +sources are included for documentation only and must not be used to +build the package binary. This enables distribution of Go packages in +their compiled form alone. Even binary-only packages require accurate +import blocks listing required dependencies, so that those +dependencies can be supplied when linking the resulting command. +Note that this feature is scheduled to be removed after the Go 1.12 release. `, } @@ -697,7 +705,6 @@ The default location for cache data is a subdirectory named go-build in the standard user cache directory for the current operating system. Setting the GOCACHE environment variable overrides this default, and running 'go env GOCACHE' prints the current cache directory. -You can set the variable to 'off' to disable the cache. 
The go command periodically deletes cached data that has not been used recently. Running 'go clean -cache' deletes all cached data. diff --git a/src/cmd/go/internal/imports/build.go b/src/cmd/go/internal/imports/build.go index d1adf9440cb44..ddf425b0204dc 100644 --- a/src/cmd/go/internal/imports/build.go +++ b/src/cmd/go/internal/imports/build.go @@ -207,5 +207,5 @@ func init() { } } -const goosList = "android darwin dragonfly freebsd js linux nacl netbsd openbsd plan9 solaris windows zos " +const goosList = "aix android darwin dragonfly freebsd hurd js linux nacl netbsd openbsd plan9 solaris windows zos " const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm " diff --git a/src/cmd/go/internal/imports/scan.go b/src/cmd/go/internal/imports/scan.go index d944e95724e54..966a38cfef3ad 100644 --- a/src/cmd/go/internal/imports/scan.go +++ b/src/cmd/go/internal/imports/scan.go @@ -22,6 +22,16 @@ func ScanDir(dir string, tags map[string]bool) ([]string, []string, error) { var files []string for _, info := range infos { name := info.Name() + + // If the directory entry is a symlink, stat it to obtain the info for the + // link target instead of the link itself. + if info.Mode()&os.ModeSymlink != 0 { + info, err = os.Stat(name) + if err != nil { + continue // Ignore broken symlinks. + } + } + if info.Mode().IsRegular() && !strings.HasPrefix(name, "_") && strings.HasSuffix(name, ".go") && MatchFile(name, tags) { files = append(files, filepath.Join(dir, name)) } diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index 186b006c12acd..f3cb4e47eccc2 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -510,7 +510,9 @@ func runList(cmd *base.Command, args []string) { a := &work.Action{} // TODO: Use pkgsFilter? 
for _, p := range pkgs { - a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) + if len(p.GoFiles)+len(p.CgoFiles) > 0 { + a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) + } } b.Do(a) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index ec2fa730c69bc..228be07f2492a 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -440,6 +440,10 @@ const ( // this package, as part of a bigger load operation, and by GOPATH-based "go get". // TODO(rsc): When GOPATH-based "go get" is removed, unexport this function. func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) *Package { + if path == "" { + panic("LoadImport called with empty package path") + } + stk.Push(path) defer stk.Pop() @@ -993,10 +997,12 @@ func disallowInternal(srcDir string, importer *Package, importerPath string, p * } else { // p is in a module, so make it available based on the importer's import path instead // of the file path (https://golang.org/issue/23970). - if importerPath == "." { + if importer.Internal.CmdlineFiles { // The importer is a list of command-line files. // Pretend that the import path is the import path of the // directory containing them. + // If the directory is outside the main module, this will resolve to ".", + // which is not a prefix of any valid module. importerPath = ModDirImportPath(importer.Dir) } parentOfInternal := p.ImportPath[:i] @@ -1047,20 +1053,6 @@ func disallowVendor(srcDir string, importer *Package, importerPath, path string, return p } - // Modules must not import vendor packages in the standard library, - // but the usual vendor visibility check will not catch them - // because the module loader presents them with an ImportPath starting - // with "golang_org/" instead of "vendor/". 
- if p.Standard && !importer.Standard && strings.HasPrefix(p.ImportPath, "golang_org") { - perr := *p - perr.Error = &PackageError{ - ImportStack: stk.Copy(), - Err: "use of vendored package " + path + " not allowed", - } - perr.Incomplete = true - return &perr - } - if perr := disallowVendorVisibility(srcDir, p, stk); perr != p { return perr } @@ -1339,6 +1331,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { // SWIG adds imports of some standard packages. if p.UsesSwig() { + addImport("unsafe", true) if cfg.BuildContext.Compiler != "gccgo" { addImport("runtime/cgo", true) } @@ -1524,9 +1517,13 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { } if cfg.ModulesEnabled { - p.Module = ModPackageModuleInfo(p.ImportPath) + mainPath := p.ImportPath + if p.Internal.CmdlineFiles { + mainPath = "command-line-arguments" + } + p.Module = ModPackageModuleInfo(mainPath) if p.Name == "main" { - p.Internal.BuildInfo = ModPackageBuildInfo(p.ImportPath, p.Deps) + p.Internal.BuildInfo = ModPackageBuildInfo(mainPath, p.Deps) } } } @@ -1750,6 +1747,9 @@ func LoadPackageNoFlags(arg string, stk *ImportStack) *Package { // loadPackage accepts pseudo-paths beginning with cmd/ to denote commands // in the Go command directory, as well as paths to those directories. 
func loadPackage(arg string, stk *ImportStack) *Package { + if arg == "" { + panic("loadPackage called with empty package path") + } if build.IsLocalImport(arg) { dir := arg if !filepath.IsAbs(dir) { @@ -1773,9 +1773,6 @@ func loadPackage(arg string, stk *ImportStack) *Package { bp.ImportPath = arg bp.Goroot = true bp.BinDir = cfg.GOROOTbin - if cfg.GOROOTbin != "" { - bp.BinDir = cfg.GOROOTbin - } bp.Root = cfg.GOROOT bp.SrcRoot = cfg.GOROOTsrc p := new(Package) @@ -1848,6 +1845,9 @@ func PackagesAndErrors(patterns []string) []*Package { for _, m := range matches { for _, pkg := range m.Pkgs { + if pkg == "" { + panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern)) + } p := loadPackage(pkg, &stk) p.Match = append(p.Match, m.Pattern) p.Internal.CmdlinePkg = true @@ -1990,11 +1990,6 @@ func GoFilesPackage(gofiles []string) *Package { } bp, err := ctxt.ImportDir(dir, 0) - if ModDirImportPath != nil { - // Use the effective import path of the directory - // for deciding visibility during pkg.load. - bp.ImportPath = ModDirImportPath(dir) - } pkg := new(Package) pkg.Internal.Local = true pkg.Internal.CmdlineFiles = true diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index bb9568d07e32d..bd6f00bb669fb 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -227,6 +227,12 @@ func TestPackagesFor(p *Package, cover *TestCover) (pmain, ptest, pxtest *Packag } } + allTestImports := make([]*Package, 0, len(pmain.Internal.Imports)+len(imports)+len(ximports)) + allTestImports = append(allTestImports, pmain.Internal.Imports...) + allTestImports = append(allTestImports, imports...) + allTestImports = append(allTestImports, ximports...) + setToolFlags(allTestImports...) + // Do initial scan for metadata needed for writing _testmain.go // Use that metadata to update the list of imports for package main. 
// The list of imports is used by recompileForTest and by the loop diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go new file mode 100644 index 0000000000000..aba3eed7767db --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "os" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. + Stat() (os.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. 
+// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. +func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). +func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by ErrNotSupported as well as some syscall errors. +func IsNotSupported(err error) bool { + return isNotSupported(underlyingError(err)) +} + +var ErrNotSupported = errors.New("operation not supported") + +// underlyingError returns the underlying error for known os error types. +func underlyingError(err error) error { + switch err := err.(type) { + case *os.PathError: + return err.Err + case *os.LinkError: + return err.Err + case *os.SyscallError: + return err.Err + } + return err +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go new file mode 100644 index 0000000000000..2831975c0c69e --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -0,0 +1,159 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix solaris + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// Most platforms provide some alternative API, such as an 'flock' system call +// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and +// does not require per-inode bookkeeping in the application. +// +// TODO(bcmills): If we add a build tag for Illumos (see golang.org/issue/20603) +// then Illumos should use F_OFD_SETLK, and the resulting code would be as +// simple as filelock_unix.go. We will still need the code in this file for AIX +// or as long as Oracle Solaris provides only F_SETLK. + +package filelock + +import ( + "errors" + "io" + "os" + "sync" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.F_RDLCK + writeLock lockType = syscall.F_WRLCK +) + +type inode = uint64 // type of syscall.Stat_t.Ino + +type inodeLock struct { + owner File + queue []<-chan File +} + +type token struct{} + +var ( + mu sync.Mutex + inodes = map[File]inode{} + locks = map[inode]inodeLock{} +) + +func lock(f File, lt lockType) (err error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. 
+ fi, err := f.Stat() + if err != nil { + return err + } + ino := fi.Sys().(*syscall.Stat_t).Ino + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + inodes[f] = ino + + var wait chan File + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else { + // Already owned: add a channel to wait on. + wait = make(chan File) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + err = setlkw(f.Fd(), lt) + + if err != nil { + unlock(f) + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + + return nil +} + +func unlock(f File) error { + var owner File + + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner != f { + panic("unlock called on a file that is not locked") + } + + err := setlkw(f.Fd(), syscall.F_UNLCK) + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. + l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. +func setlkw(fd uintptr, lt lockType) error { + for { + err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. 
+ }) + if err != syscall.EINTR { + return err + } + } +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go new file mode 100644 index 0000000000000..107611e1ce85f --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go @@ -0,0 +1,36 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!plan9,!solaris,!windows + +package filelock + +import "os" + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go new file mode 100644 index 0000000000000..afdffe323fcde --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_plan9.go @@ -0,0 +1,38 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build plan9 + +package filelock + +import ( + "os" +) + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func unlock(f File) error { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: ErrNotSupported, + } +} + +func isNotSupported(err error) bool { + return err == ErrNotSupported +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go new file mode 100644 index 0000000000000..aa67093a48aad --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js,!nacl,!plan9 + +package filelock_test + +import ( + "fmt" + "internal/testenv" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "time" + + "cmd/go/internal/lockedfile/internal/filelock" +) + +func lock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Lock(f) + t.Logf("Lock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func rLock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.RLock(f) + t.Logf("RLock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func unlock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Unlock(f) + t.Logf("Unlock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func mustTempFile(t *testing.T) (f *os.File, remove func()) { + t.Helper() + + base := filepath.Base(t.Name()) + f, err := ioutil.TempFile("", base) + if err != nil { + t.Fatalf(`ioutil.TempFile("", %q) = %v`, base, err) + } + t.Logf("fd %d = %s", f.Fd(), f.Name()) + + return f, func() { + f.Close() + os.Remove(f.Name()) + } +} + +func mustOpen(t 
*testing.T, name string) *os.File { + t.Helper() + + f, err := os.OpenFile(name, os.O_RDWR, 0) + if err != nil { + t.Fatalf("os.Open(%q) = %v", name, err) + } + + t.Logf("fd %d = os.Open(%q)", f.Fd(), name) + return f +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) { + t.Helper() + + desc := fmt.Sprintf("%s(fd %d)", op, f.Fd()) + + done := make(chan struct{}) + go func() { + t.Helper() + switch op { + case "Lock": + lock(t, f) + case "RLock": + rLock(t, f) + default: + panic("invalid op: " + op) + } + close(done) + }() + + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + return nil + + case <-time.After(quiescent): + t.Logf("%s is blocked (as expected)", desc) + return func(t *testing.T) { + t.Helper() + select { + case <-time.After(probablyStillBlocked): + t.Fatalf("%s is unexpectedly still blocked", desc) + case <-done: + } + } + } +} + +func TestLockExcludesLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} + +func TestLockExcludesRLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + rLockOther := mustBlock(t, "RLock", other) + unlock(t, f) + rLockOther(t) + unlock(t, other) +} + +func TestRLockExcludesOnlyLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + rLock(t, f) + + f2 := mustOpen(t, f.Name()) + defer f2.Close() + + if runtime.GOOS == "solaris" || runtime.GOOS == "aix" { + // When using POSIX locks (as on Solaris), we can't safely read-lock the + // same inode through two different descriptors at the same time: when the + // first descriptor is closed, the second 
descriptor would still be open but + // silently unlocked. So a second RLock must block instead of proceeding. + lockF2 := mustBlock(t, "RLock", f2) + unlock(t, f) + lockF2(t) + } else { + rLock(t, f2) + } + + other := mustOpen(t, f.Name()) + defer other.Close() + lockOther := mustBlock(t, "Lock", other) + + unlock(t, f2) + if runtime.GOOS != "solaris" && runtime.GOOS != "aix" { + unlock(t, f) + } + lockOther(t) + unlock(t, other) +} + +func TestLockNotDroppedByExecCommand(t *testing.T) { + testenv.MustHaveExec(t) + + f, remove := mustTempFile(t) + defer remove() + + lock(t, f) + + other := mustOpen(t, f.Name()) + defer other.Close() + + // Some kinds of file locks are dropped when a duplicated or forked file + // descriptor is unlocked. Double-check that the approach used by os/exec does + // not accidentally drop locks. + cmd := exec.Command(os.Args[0], "-test.run=^$") + if err := cmd.Run(); err != nil { + t.Fatalf("exec failed: %v", err) + } + + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go new file mode 100644 index 0000000000000..00c4262832214 --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd + +package filelock + +import ( + "os" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} + +func isNotSupported(err error) bool { + return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported +} diff --git a/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go new file mode 100644 index 0000000000000..43e85e450ec01 --- /dev/null +++ b/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go @@ -0,0 +1,66 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package filelock + +import ( + "internal/syscall/windows" + "os" + "syscall" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +func lock(f File, lt lockType) error { + // Per https://golang.org/issue/19098, “Programs currently expect the Fd + // method to return a handle that uses ordinary synchronous I/O.” + // However, LockFileEx still requires an OVERLAPPED structure, + // which contains the file offset of the beginning of the lock range. + // We want to lock the entire file, so we leave the offset as zero. 
+ ol := new(syscall.Overlapped) + + err := windows.LockFileEx(syscall.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) + if err != nil { + return &os.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + ol := new(syscall.Overlapped) + err := windows.UnlockFileEx(syscall.Handle(f.Fd()), reserved, allBytes, allBytes, ol) + if err != nil { + return &os.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: err, + } + } + return nil +} + +func isNotSupported(err error) bool { + switch err { + case windows.ERROR_NOT_SUPPORTED, windows.ERROR_CALL_NOT_IMPLEMENTED, ErrNotSupported: + return true + default: + return false + } +} diff --git a/src/cmd/go/internal/lockedfile/lockedfile.go b/src/cmd/go/internal/lockedfile/lockedfile.go new file mode 100644 index 0000000000000..bb184b1085e4e --- /dev/null +++ b/src/cmd/go/internal/lockedfile/lockedfile.go @@ -0,0 +1,122 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. +package lockedfile + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. +// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. 
+func OpenFile(name string, flag int, perm os.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. +func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. +// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &os.PathError{ + Op: "close", + Path: f.Name(), + Err: os.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. 
+func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return ioutil.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm os.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/src/cmd/go/internal/lockedfile/lockedfile_filelock.go b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go new file mode 100644 index 0000000000000..f63dd8664b0ab --- /dev/null +++ b/src/cmd/go/internal/lockedfile/lockedfile_filelock.go @@ -0,0 +1,64 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package lockedfile + +import ( + "os" + + "cmd/go/internal/lockedfile/internal/filelock" +) + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. 
+ + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says “if possible, truncate file when + // opened”, but doesn't define “possible” (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as “possible” + // and ignore errors for the rest. + if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. + err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go new file mode 100644 index 0000000000000..4a52c94976381 --- /dev/null +++ b/src/cmd/go/internal/lockedfile/lockedfile_plan9.go @@ -0,0 +1,93 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package lockedfile + +import ( + "math/rand" + "os" + "strings" + "time" +) + +// Opening an exclusive-use file returns an error. 
+// The expected error strings are: +// +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) +var lockedErrStrings = [...]string{ + "file is locked", + "exclusive lock", + "exclusive use file already open", +} + +// Even though plan9 doesn't support the Lock/RLock/Unlock functions to +// manipulate already-open files, IsLocked is still meaningful: os.OpenFile +// itself may return errors that indicate that a file with the ModeExclusive bit +// set is already open. +func isLocked(err error) bool { + s := err.Error() + + for _, frag := range lockedErrStrings { + if strings.Contains(s, frag) { + return true + } + } + + return false +} + +func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { + // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. + // + // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open + // for I/O by only one fid at a time across all clients of the server. If a + // second open is attempted, it draws an error.” + // + // So we can try to open a locked file, but if it fails we're on our own to + // figure out when it becomes available. We'll use exponential backoff with + // some jitter and an arbitrary limit of 500ms. + + // If the file was unpacked or created by some other program, it might not + // have the ModeExclusive bit set. Set it before we call OpenFile, so that we + // can be confident that a successful OpenFile implies exclusive use. 
+ if fi, err := os.Stat(name); err == nil { + if fi.Mode()&os.ModeExclusive == 0 { + if err := os.Chmod(name, fi.Mode()|os.ModeExclusive); err != nil { + return nil, err + } + } + } else if !os.IsNotExist(err) { + return nil, err + } + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + f, err := os.OpenFile(name, flag, perm|os.ModeExclusive) + if err == nil { + return f, nil + } + + if !isLocked(err) { + return nil, err + } + + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } +} + +func closeFile(f *os.File) error { + return f.Close() +} diff --git a/src/cmd/go/internal/lockedfile/lockedfile_test.go b/src/cmd/go/internal/lockedfile/lockedfile_test.go new file mode 100644 index 0000000000000..6d5819efdb0e9 --- /dev/null +++ b/src/cmd/go/internal/lockedfile/lockedfile_test.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// js and nacl do not support inter-process file locking. 
+// +build !js,!nacl + +package lockedfile_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "cmd/go/internal/lockedfile" +) + +func mustTempDir(t *testing.T) (dir string, remove func()) { + t.Helper() + + dir, err := ioutil.TempDir("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + return dir, func() { os.RemoveAll(dir) } +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) { + t.Helper() + + done := make(chan struct{}) + go func() { + f() + close(done) + }() + + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + return nil + + case <-time.After(quiescent): + return func(t *testing.T) { + t.Helper() + select { + case <-time.After(probablyStillBlocked): + t.Fatalf("%s is unexpectedly still blocked after %v", desc, probablyStillBlocked) + case <-done: + } + } + } +} + +func TestMutexExcludes(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "lock") + + mu := lockedfile.MutexAt(path) + t.Logf("mu := MutexAt(_)") + + unlock, err := mu.Lock() + if err != nil { + t.Fatalf("mu.Lock: %v", err) + } + t.Logf("unlock, _ := mu.Lock()") + + mu2 := lockedfile.MutexAt(mu.Path) + t.Logf("mu2 := MutexAt(mu.Path)") + + wait := mustBlock(t, "mu2.Lock()", func() { + unlock2, err := mu2.Lock() + if err != nil { + t.Errorf("mu2.Lock: %v", err) + return + } + t.Logf("unlock2, _ := mu2.Lock()") + t.Logf("unlock2()") + unlock2() + }) + + t.Logf("unlock()") + unlock() + wait(t) +} + +func TestReadWaitsForLock(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "timestamp.txt") + + f, err := lockedfile.Create(path) + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + + const ( + part1 = "part 1\n" + part2 = "part 2\n" + ) + _, err = f.WriteString(part1) + if 
err != nil { + t.Fatalf("WriteString: %v", err) + } + t.Logf("WriteString(%q) = ", part1) + + wait := mustBlock(t, "Read", func() { + b, err := lockedfile.Read(path) + if err != nil { + t.Errorf("Read: %v", err) + return + } + + const want = part1 + part2 + got := string(b) + if got == want { + t.Logf("Read(_) = %q", got) + } else { + t.Errorf("Read(_) = %q, _; want %q", got, want) + } + }) + + _, err = f.WriteString(part2) + if err != nil { + t.Errorf("WriteString: %v", err) + } else { + t.Logf("WriteString(%q) = ", part2) + } + f.Close() + + wait(t) +} + +func TestCanLockExistingFile(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + path := filepath.Join(dir, "existing.txt") + + if err := ioutil.WriteFile(path, []byte("ok"), 0777); err != nil { + t.Fatalf("ioutil.WriteFile: %v", err) + } + + f, err := lockedfile.Edit(path) + if err != nil { + t.Fatalf("first Edit: %v", err) + } + + wait := mustBlock(t, "Edit", func() { + other, err := lockedfile.Edit(path) + if err != nil { + t.Errorf("second Edit: %v", err) + } + other.Close() + }) + + f.Close() + wait(t) +} diff --git a/src/cmd/go/internal/lockedfile/mutex.go b/src/cmd/go/internal/lockedfile/mutex.go new file mode 100644 index 0000000000000..17f3751c37169 --- /dev/null +++ b/src/cmd/go/internal/lockedfile/mutex.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lockedfile + +import ( + "fmt" + "os" +) + +// A Mutex provides mutual exclusion within and across processes by locking a +// well-known file. Such a file generally guards some other part of the +// filesystem: for example, a Mutex file in a directory might guard access to +// the entire tree rooted in that directory. +// +// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex +// can fail to lock (e.g. if there is a permission error in the filesystem). 
+//
+// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but
+// must not be copied after first use. The Path field must be set before first
+// use and must not be changed thereafter.
+type Mutex struct {
+	Path string // The path to the well-known lock file. Must be non-empty.
+}
+
+// MutexAt returns a new Mutex with Path set to the given non-empty path.
+func MutexAt(path string) *Mutex {
+	if path == "" {
+		panic("lockedfile.MutexAt: path must be non-empty")
+	}
+	return &Mutex{Path: path}
+}
+
+func (mu *Mutex) String() string {
+	return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path)
+}
+
+// Lock attempts to lock the Mutex.
+//
+// If successful, Lock returns a non-nil unlock function: it is provided as a
+// return-value instead of a separate method to remind the caller to check the
+// accompanying error. (See https://golang.org/issue/20803.)
+func (mu *Mutex) Lock() (unlock func(), err error) {
+	if mu.Path == "" {
+		panic("lockedfile.Mutex: missing Path during Lock")
+	}
+
+	// We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the
+	// file at mu.Path is write-only, the call to OpenFile will fail with a
+	// permission error. That's actually what we want: if we add an RLock method
+	// in the future, it should call OpenFile with O_RDONLY and will require the
+	// files must be readable, so we should not let the caller make any
+	// assumptions about Mutex working with write-only files.
+ f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + return func() { f.Close() }, nil +} diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 2f072d73cf306..bbaba444f507f 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -15,7 +15,7 @@ import ( ) var cmdDownload = &base.Command{ - UsageLine: "go mod download [-dir] [-json] [modules]", + UsageLine: "go mod download [-json] [modules]", Short: "download modules to local cache", Long: ` Download downloads the named modules, which can be module patterns selecting @@ -32,13 +32,15 @@ to standard output, describing each downloaded module (or failure), corresponding to this Go struct: type Module struct { - Path string // module path - Version string // module version - Error string // error loading module - Info string // absolute path to cached .info file - GoMod string // absolute path to cached .mod file - Zip string // absolute path to cached .zip file - Dir string // absolute path to cached source root directory + Path string // module path + Version string // module version + Error string // error loading module + Info string // absolute path to cached .info file + GoMod string // absolute path to cached .mod file + Zip string // absolute path to cached .zip file + Dir string // absolute path to cached source root directory + Sum string // checksum for path, version (as in go.sum) + GoModSum string // checksum for go.mod (as in go.sum) } See 'go help modules' for more about module queries. 
@@ -52,13 +54,15 @@ func init() { } type moduleJSON struct { - Path string `json:",omitempty"` - Version string `json:",omitempty"` - Error string `json:",omitempty"` - Info string `json:",omitempty"` - GoMod string `json:",omitempty"` - Zip string `json:",omitempty"` - Dir string `json:",omitempty"` + Path string `json:",omitempty"` + Version string `json:",omitempty"` + Error string `json:",omitempty"` + Info string `json:",omitempty"` + GoMod string `json:",omitempty"` + Zip string `json:",omitempty"` + Dir string `json:",omitempty"` + Sum string `json:",omitempty"` + GoModSum string `json:",omitempty"` } func runDownload(cmd *base.Command, args []string) { @@ -98,12 +102,18 @@ func runDownload(cmd *base.Command, args []string) { m.Error = err.Error() return } + m.GoModSum, err = modfetch.GoModSum(m.Path, m.Version) + if err != nil { + m.Error = err.Error() + return + } mod := module.Version{Path: m.Path, Version: m.Version} m.Zip, err = modfetch.DownloadZip(mod) if err != nil { m.Error = err.Error() return } + m.Sum = modfetch.Sum(mod) m.Dir, err = modfetch.Download(mod) if err != nil { m.Error = err.Error() @@ -118,6 +128,16 @@ func runDownload(cmd *base.Command, args []string) { base.Fatalf("%v", err) } os.Stdout.Write(append(b, '\n')) + if m.Error != "" { + base.SetExitStatus(1) + } + } + } else { + for _, m := range mods { + if m.Error != "" { + base.Errorf("%s@%s: %s\n", m.Path, m.Version, m.Error) + } } + base.ExitIfErrors() } } diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index 5fea3e48e0816..5066e4ddf75f9 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -7,6 +7,7 @@ package modcmd import ( + "bytes" "encoding/json" "fmt" "io/ioutil" @@ -15,6 +16,7 @@ import ( "strings" "cmd/go/internal/base" + "cmd/go/internal/modfetch" "cmd/go/internal/modfile" "cmd/go/internal/modload" "cmd/go/internal/module" @@ -62,6 +64,8 @@ The -require, -droprequire, -exclude, -dropexclude, 
-replace, and -dropreplace editing flags may be repeated, and the changes are applied in the order given. +The -go=version flag sets the expected Go language version. + The -print flag prints the final go.mod in its text format instead of writing it back to go.mod. @@ -74,7 +78,8 @@ writing it back to go.mod. The JSON output corresponds to these Go types: } type GoMod struct { - Module Module + Module Module + Go string Require []Require Exclude []Module Replace []Replace @@ -102,8 +107,8 @@ by invoking 'go mod edit' with -require, -exclude, and so on. } var ( - editFmt = cmdEdit.Flag.Bool("fmt", false, "") - // editGo = cmdEdit.Flag.String("go", "", "") + editFmt = cmdEdit.Flag.Bool("fmt", false, "") + editGo = cmdEdit.Flag.String("go", "", "") editJSON = cmdEdit.Flag.Bool("json", false, "") editPrint = cmdEdit.Flag.Bool("print", false, "") editModule = cmdEdit.Flag.String("module", "", "") @@ -131,6 +136,7 @@ func init() { func runEdit(cmd *base.Command, args []string) { anyFlags := *editModule != "" || + *editGo != "" || *editJSON || *editPrint || *editFmt || @@ -151,8 +157,7 @@ func runEdit(cmd *base.Command, args []string) { if len(args) == 1 { gomod = args[0] } else { - modload.MustInit() - gomod = filepath.Join(modload.ModRoot, "go.mod") + gomod = filepath.Join(modload.ModRoot(), "go.mod") } if *editModule != "" { @@ -161,7 +166,11 @@ func runEdit(cmd *base.Command, args []string) { } } - // TODO(rsc): Implement -go= once we start advertising it. 
+ if *editGo != "" { + if !modfile.GoVersionRE.MatchString(*editGo) { + base.Fatalf(`go mod: invalid -go option; expecting something like "-go 1.12"`) + } + } data, err := ioutil.ReadFile(gomod) if err != nil { @@ -174,7 +183,13 @@ func runEdit(cmd *base.Command, args []string) { } if *editModule != "" { - modFile.AddModuleStmt(modload.CmdModModule) + modFile.AddModuleStmt(*editModule) + } + + if *editGo != "" { + if err := modFile.AddGoStmt(*editGo); err != nil { + base.Fatalf("go: internal error: %v", err) + } } if len(edits) > 0 { @@ -190,17 +205,23 @@ func runEdit(cmd *base.Command, args []string) { return } - data, err = modFile.Format() + out, err := modFile.Format() if err != nil { base.Fatalf("go: %v", err) } if *editPrint { - os.Stdout.Write(data) + os.Stdout.Write(out) return } - if err := ioutil.WriteFile(gomod, data, 0666); err != nil { + unlock := modfetch.SideLock() + defer unlock() + lockedData, err := ioutil.ReadFile(gomod) + if err == nil && !bytes.Equal(lockedData, data) { + base.Fatalf("go: go.mod changed during editing; not overwriting") + } + if err := ioutil.WriteFile(gomod, out, 0666); err != nil { base.Fatalf("go: %v", err) } } @@ -344,6 +365,7 @@ func flagDropReplace(arg string) { // fileJSON is the -json output data structure. 
type fileJSON struct { Module module.Version + Go string `json:",omitempty"` Require []requireJSON Exclude []module.Version Replace []replaceJSON @@ -364,6 +386,9 @@ type replaceJSON struct { func editPrintJSON(modFile *modfile.File) { var f fileJSON f.Module = modFile.Module.Mod + if modFile.Go != nil { + f.Go = modFile.Go.Version + } for _, r := range modFile.Require { f.Require = append(f.Require, requireJSON{Path: r.Mod.Path, Version: r.Mod.Version, Indirect: r.Indirect}) } diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index f510a46262b4e..0f7421e5849f2 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -10,6 +10,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/modload" "os" + "strings" ) var cmdInit = &base.Command{ @@ -37,5 +38,8 @@ func runInit(cmd *base.Command, args []string) { if _, err := os.Stat("go.mod"); err == nil { base.Fatalf("go mod init: go.mod already exists") } + if strings.Contains(modload.CmdModModule, "@") { + base.Fatalf("go mod init: module path must not contain '@'") + } modload.InitMod() // does all the hard work } diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go index f2063a9ea601c..789e93660854f 100644 --- a/src/cmd/go/internal/modcmd/tidy.go +++ b/src/cmd/go/internal/modcmd/tidy.go @@ -75,12 +75,24 @@ func modTidyGoSum() { // we only have to tell modfetch what needs keeping. reqs := modload.Reqs() keep := make(map[module.Version]bool) + replaced := make(map[module.Version]bool) var walk func(module.Version) walk = func(m module.Version) { - keep[m] = true + // If we build using a replacement module, keep the sum for the replacement, + // since that's the code we'll actually use during a build. + // + // TODO(golang.org/issue/29182): Perhaps we should keep both sums, and the + // sums for both sets of transitive requirements. 
+ r := modload.Replacement(m) + if r.Path == "" { + keep[m] = true + } else { + keep[r] = true + replaced[m] = true + } list, _ := reqs.Required(m) for _, r := range list { - if !keep[r] { + if !keep[r] && !replaced[r] { walk(r) } } diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index 62e74585359e7..b70f25cec3952 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -43,9 +43,9 @@ func runVendor(cmd *base.Command, args []string) { } pkgs := modload.LoadVendor() - vdir := filepath.Join(modload.ModRoot, "vendor") + vdir := filepath.Join(modload.ModRoot(), "vendor") if err := os.RemoveAll(vdir); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } modpkgs := make(map[module.Version][]string) @@ -85,7 +85,7 @@ func runVendor(cmd *base.Command, args []string) { return } if err := ioutil.WriteFile(filepath.Join(vdir, "modules.txt"), buf.Bytes(), 0666); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } } @@ -172,10 +172,10 @@ func matchNonTest(info os.FileInfo) bool { func copyDir(dst, src string, match func(os.FileInfo) bool) { files, err := ioutil.ReadDir(src) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } if err := os.MkdirAll(dst, 0777); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } for _, file := range files { if file.IsDir() || !file.Mode().IsRegular() || !match(file) { @@ -183,18 +183,18 @@ func copyDir(dst, src string, match func(os.FileInfo) bool) { } r, err := os.Open(filepath.Join(src, file.Name())) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } w, err := os.Create(filepath.Join(dst, file.Name())) if err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } if _, err := io.Copy(w, r); err != nil { - base.Fatalf("go vendor: %v", err) 
+ base.Fatalf("go mod vendor: %v", err) } r.Close() if err := w.Close(); err != nil { - base.Fatalf("go vendor: %v", err) + base.Fatalf("go mod vendor: %v", err) } } } diff --git a/src/cmd/go/internal/modconv/convert_test.go b/src/cmd/go/internal/modconv/convert_test.go index ad27abb8ef7e1..4d55d73f21423 100644 --- a/src/cmd/go/internal/modconv/convert_test.go +++ b/src/cmd/go/internal/modconv/convert_test.go @@ -146,7 +146,7 @@ func TestConvertLegacyConfig(t *testing.T) { } for _, tt := range tests { - t.Run(strings.Replace(tt.path, "/", "_", -1)+"_"+tt.vers, func(t *testing.T) { + t.Run(strings.ReplaceAll(tt.path, "/", "_")+"_"+tt.vers, func(t *testing.T) { f, err := modfile.Parse("golden", []byte(tt.gomod), nil) if err != nil { t.Fatal(err) diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go index efcd4854e82fa..1ccd43dc2ae84 100644 --- a/src/cmd/go/internal/modfetch/cache.go +++ b/src/cmd/go/internal/modfetch/cache.go @@ -8,15 +8,18 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" "strings" "cmd/go/internal/base" + "cmd/go/internal/lockedfile" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/module" "cmd/go/internal/par" + "cmd/go/internal/renameio" "cmd/go/internal/semver" ) @@ -53,6 +56,8 @@ func CachePath(m module.Version, suffix string) (string, error) { return filepath.Join(dir, encVer+"."+suffix), nil } +// DownloadDir returns the directory to which m should be downloaded. +// Note that the directory may not yet exist. func DownloadDir(m module.Version) (string, error) { if PkgMod == "" { return "", fmt.Errorf("internal error: modfetch.PkgMod not set") @@ -74,6 +79,37 @@ func DownloadDir(m module.Version) (string, error) { return filepath.Join(PkgMod, enc+"@"+encVer), nil } +// lockVersion locks a file within the module cache that guards the downloading +// and extraction of the zipfile for the given module version. 
+func lockVersion(mod module.Version) (unlock func(), err error) {
+	path, err := CachePath(mod, "lock")
+	if err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+		return nil, err
+	}
+	return lockedfile.MutexAt(path).Lock()
+}
+
+// SideLock locks a file within the module cache that guards edits to files
+// outside the cache, such as go.sum and go.mod files in the user's working
+// directory. It returns a function that must be called to unlock the file.
+func SideLock() (unlock func()) {
+	if PkgMod == "" {
+		base.Fatalf("go: internal error: modfetch.PkgMod not set")
+	}
+	path := filepath.Join(PkgMod, "cache", "lock")
+	if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
+		base.Fatalf("go: failed to create cache directory %s: %v", filepath.Dir(path), err)
+	}
+	unlock, err := lockedfile.MutexAt(path).Lock()
+	if err != nil {
+		base.Fatalf("go: failed to lock file at %v", path)
+	}
+	return unlock
+}
+
 // A cachingRepo is a cache around an underlying Repo,
 // avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not Zip).
 // It is also safe for simultaneous use by multiple goroutines
@@ -129,16 +165,18 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
 	}
 	info, err = r.r.Stat(rev)
 	if err == nil {
-		if err := writeDiskStat(file, info); err != nil {
-			fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err)
-		}
 		// If we resolved, say, 1234abcde to v0.0.0-20180604122334-1234abcdef78,
 		// then save the information under the proper version, for future use.
if info.Version != rev { + file, _ = CachePath(module.Version{Path: r.path, Version: info.Version}, "info") r.cache.Do("stat:"+info.Version, func() interface{} { return cachedInfo{info, err} }) } + + if err := writeDiskStat(file, info); err != nil { + fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err) + } } return cachedInfo{info, err} }).(cachedInfo) @@ -213,8 +251,8 @@ func (r *cachingRepo) GoMod(rev string) ([]byte, error) { return append([]byte(nil), c.text...), nil } -func (r *cachingRepo) Zip(version, tmpdir string) (string, error) { - return r.r.Zip(version, tmpdir) +func (r *cachingRepo) Zip(dst io.Writer, version string) error { + return r.r.Zip(dst, version) } // Stat is like Lookup(path).Stat(rev) but avoids the @@ -290,6 +328,23 @@ func GoModFile(path, version string) (string, error) { return file, nil } +// GoModSum returns the go.sum entry for the module version's go.mod file. +// (That is, it returns the entry listed in go.sum as "path version/go.mod".) +func GoModSum(path, version string) (string, error) { + if !semver.IsValid(version) { + return "", fmt.Errorf("invalid version %q", version) + } + data, err := GoMod(path, version) + if err != nil { + return "", err + } + sum, err := goModSum(data) + if err != nil { + return "", err + } + return sum, nil +} + var errNotCached = fmt.Errorf("not in cache") // readDiskStat reads a cached stat result from disk, @@ -366,7 +421,7 @@ func readDiskStatByHash(path, rev string) (file string, info *RevInfo, err error // and should ignore it. var oldVgoPrefix = []byte("//vgo 0.0.") -// readDiskGoMod reads a cached stat result from disk, +// readDiskGoMod reads a cached go.mod file from disk, // returning the name of the cache file and the result. // If the read fails, the caller can use // writeDiskGoMod(file, data) to write a new cache entry. 
@@ -432,22 +487,8 @@ func writeDiskCache(file string, data []byte) error { if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil { return err } - // Write data to temp file next to target file. - f, err := ioutil.TempFile(filepath.Dir(file), filepath.Base(file)+".tmp-") - if err != nil { - return err - } - defer os.Remove(f.Name()) - defer f.Close() - if _, err := f.Write(data); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - // Rename temp file onto cache file, - // so that the cache file is always a complete file. - if err := os.Rename(f.Name(), file); err != nil { + + if err := renameio.WriteFile(file, data); err != nil { return err } @@ -464,8 +505,18 @@ func rewriteVersionList(dir string) { base.Fatalf("go: internal error: misuse of rewriteVersionList") } - // TODO(rsc): We should do some kind of directory locking here, - // to avoid lost updates. + listFile := filepath.Join(dir, "list") + + // We use a separate lockfile here instead of locking listFile itself because + // we want to use Rename to write the file atomically. The list may be read by + // a GOPROXY HTTP server, and if we crash midway through a rewrite (or if the + // HTTP server ignores our locking and serves the file midway through a + // rewrite) it's better to serve a stale list than a truncated one. + unlock, err := lockedfile.MutexAt(listFile + ".lock").Lock() + if err != nil { + base.Fatalf("go: can't lock version list lockfile: %v", err) + } + defer unlock() infos, err := ioutil.ReadDir(dir) if err != nil { @@ -494,12 +545,12 @@ func rewriteVersionList(dir string) { buf.WriteString(v) buf.WriteString("\n") } - listFile := filepath.Join(dir, "list") old, _ := ioutil.ReadFile(listFile) if bytes.Equal(buf.Bytes(), old) { return } - // TODO: Use rename to install file, - // so that readers never see an incomplete file. 
- ioutil.WriteFile(listFile, buf.Bytes(), 0666) + + if err := renameio.WriteFile(listFile, buf.Bytes()); err != nil { + base.Fatalf("go: failed to write version list: %v", err) + } } diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go index 4103ddc717f49..6c17f7886f19e 100644 --- a/src/cmd/go/internal/modfetch/codehost/codehost.go +++ b/src/cmd/go/internal/modfetch/codehost/codehost.go @@ -20,6 +20,7 @@ import ( "time" "cmd/go/internal/cfg" + "cmd/go/internal/lockedfile" "cmd/go/internal/str" ) @@ -131,9 +132,9 @@ var WorkRoot string // WorkDir returns the name of the cached work directory to use for the // given repository type and name. -func WorkDir(typ, name string) (string, error) { +func WorkDir(typ, name string) (dir, lockfile string, err error) { if WorkRoot == "" { - return "", fmt.Errorf("codehost.WorkRoot not set") + return "", "", fmt.Errorf("codehost.WorkRoot not set") } // We name the work directory for the SHA256 hash of the type and name. @@ -142,22 +143,41 @@ func WorkDir(typ, name string) (string, error) { // that one checkout is never nested inside another. That nesting has // led to security problems in the past. 
if strings.Contains(typ, ":") { - return "", fmt.Errorf("codehost.WorkDir: type cannot contain colon") + return "", "", fmt.Errorf("codehost.WorkDir: type cannot contain colon") } key := typ + ":" + name - dir := filepath.Join(WorkRoot, fmt.Sprintf("%x", sha256.Sum256([]byte(key)))) + dir = filepath.Join(WorkRoot, fmt.Sprintf("%x", sha256.Sum256([]byte(key)))) + + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "mkdir -p %s # %s %s\n", filepath.Dir(dir), typ, name) + } + if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil { + return "", "", err + } + + lockfile = dir + ".lock" + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "# lock %s", lockfile) + } + + unlock, err := lockedfile.MutexAt(lockfile).Lock() + if err != nil { + return "", "", fmt.Errorf("codehost.WorkDir: can't find or create lock file: %v", err) + } + defer unlock() + data, err := ioutil.ReadFile(dir + ".info") info, err2 := os.Stat(dir) if err == nil && err2 == nil && info.IsDir() { // Info file and directory both already exist: reuse. have := strings.TrimSuffix(string(data), "\n") if have != key { - return "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key) + return "", "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key) } if cfg.BuildX { fmt.Fprintf(os.Stderr, "# %s for %s %s\n", dir, typ, name) } - return dir, nil + return dir, lockfile, nil } // Info file or directory missing. Start from scratch. 
@@ -166,26 +186,30 @@ func WorkDir(typ, name string) (string, error) { } os.RemoveAll(dir) if err := os.MkdirAll(dir, 0777); err != nil { - return "", err + return "", "", err } if err := ioutil.WriteFile(dir+".info", []byte(key), 0666); err != nil { os.RemoveAll(dir) - return "", err + return "", "", err } - return dir, nil + return dir, lockfile, nil } type RunError struct { - Cmd string - Err error - Stderr []byte + Cmd string + Err error + Stderr []byte + HelpText string } func (e *RunError) Error() string { text := e.Cmd + ": " + e.Err.Error() stderr := bytes.TrimRight(e.Stderr, "\n") if len(stderr) > 0 { - text += ":\n\t" + strings.Replace(string(stderr), "\n", "\n\t", -1) + text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t") + } + if len(e.HelpText) > 0 { + text += "\n" + e.HelpText } return text } diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go index 87940a8f02c9c..588e7496cc503 100644 --- a/src/cmd/go/internal/modfetch/codehost/git.go +++ b/src/cmd/go/internal/modfetch/codehost/git.go @@ -17,6 +17,7 @@ import ( "sync" "time" + "cmd/go/internal/lockedfile" "cmd/go/internal/par" ) @@ -57,22 +58,29 @@ func newGitRepo(remote string, localOK bool) (Repo, error) { r := &gitRepo{remote: remote} if strings.Contains(remote, "://") { // This is a remote path. 
- dir, err := WorkDir(gitWorkDirType, r.remote) + var err error + r.dir, r.mu.Path, err = WorkDir(gitWorkDirType, r.remote) if err != nil { return nil, err } - r.dir = dir - if _, err := os.Stat(filepath.Join(dir, "objects")); err != nil { - if _, err := Run(dir, "git", "init", "--bare"); err != nil { - os.RemoveAll(dir) + + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + if _, err := os.Stat(filepath.Join(r.dir, "objects")); err != nil { + if _, err := Run(r.dir, "git", "init", "--bare"); err != nil { + os.RemoveAll(r.dir) return nil, err } // We could just say git fetch https://whatever later, // but this lets us say git fetch origin instead, which // is a little nicer. More importantly, using a named remote // avoids a problem with Git LFS. See golang.org/issue/25605. - if _, err := Run(dir, "git", "remote", "add", "origin", r.remote); err != nil { - os.RemoveAll(dir) + if _, err := Run(r.dir, "git", "remote", "add", "origin", r.remote); err != nil { + os.RemoveAll(r.dir) return nil, err } r.remote = "origin" @@ -97,6 +105,7 @@ func newGitRepo(remote string, localOK bool) (Repo, error) { return nil, fmt.Errorf("%s exists but is not a directory", remote) } r.dir = remote + r.mu.Path = r.dir + ".lock" } return r, nil } @@ -106,7 +115,8 @@ type gitRepo struct { local bool dir string - mu sync.Mutex // protects fetchLevel, some git repo state + mu lockedfile.Mutex // protects fetchLevel and git repo state + fetchLevel int statCache par.Cache @@ -154,6 +164,11 @@ func (r *gitRepo) loadRefs() { // Most of the time we only care about tags but sometimes we care about heads too. out, err := Run(r.dir, "git", "ls-remote", "-q", r.remote) if err != nil { + if rerr, ok := err.(*RunError); ok { + if bytes.Contains(rerr.Stderr, []byte("fatal: could not read Username")) { + rerr.HelpText = "If this is a private repository, see https://golang.org/doc/faq#git_https for additional information." 
+ } + } r.refsErr = err return } @@ -304,11 +319,11 @@ func (r *gitRepo) stat(rev string) (*RevInfo, error) { } // Protect r.fetchLevel and the "fetch more and more" sequence. - // TODO(rsc): Add LockDir and use it for protecting that - // sequence, so that multiple processes don't collide in their - // git commands. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() // Perhaps r.localTags did not have the ref when we loaded local tags, // but we've since done fetches that pulled down the hash we need @@ -495,8 +510,11 @@ func (r *gitRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[s // Protect r.fetchLevel and the "fetch more and more" sequence. // See stat method above. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() var refs []string var protoFlag []string @@ -658,8 +676,11 @@ func (r *gitRepo) RecentTag(rev, prefix string) (tag string, err error) { // There are plausible tags, but we don't know if rev is a descendent of any of them. // Fetch the history to find out. - r.mu.Lock() - defer r.mu.Unlock() + unlock, err := r.mu.Lock() + if err != nil { + return "", err + } + defer unlock() if r.fetchLevel < fetchAll { // Fetch all heads and tags and see if that gives us enough history. @@ -678,7 +699,7 @@ func (r *gitRepo) RecentTag(rev, prefix string) (tag string, err error) { // unreachable for a reason). // // Try one last time in case some other goroutine fetched rev while we were - // waiting on r.mu. + // waiting on the lock. 
describe() return tag, err } @@ -694,6 +715,16 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, return nil, "", err } + unlock, err := r.mu.Lock() + if err != nil { + return nil, "", err + } + defer unlock() + + if err := ensureGitAttributes(r.dir); err != nil { + return nil, "", err + } + // Incredibly, git produces different archives depending on whether // it is running on a Windows system or not, in an attempt to normalize // text file line endings. Setting -c core.autocrlf=input means only @@ -709,3 +740,43 @@ func (r *gitRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, return ioutil.NopCloser(bytes.NewReader(archive)), "", nil } + +// ensureGitAttributes makes sure export-subst and export-ignore features are +// disabled for this repo. This is intended to be run prior to running git +// archive so that zip files are generated that produce consistent ziphashes +// for a given revision, independent of variables such as git version and the +// size of the repo. 
+// +// See: https://github.com/golang/go/issues/27153 +func ensureGitAttributes(repoDir string) (err error) { + const attr = "\n* -export-subst -export-ignore\n" + + d := repoDir + "/info" + p := d + "/attributes" + + if err := os.MkdirAll(d, 0755); err != nil { + return err + } + + f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) + if err != nil { + return err + } + defer func() { + closeErr := f.Close() + if closeErr != nil { + err = closeErr + } + }() + + b, err := ioutil.ReadAll(f) + if err != nil { + return err + } + if !bytes.HasSuffix(b, []byte(attr)) { + _, err := f.WriteString(attr) + return err + } + + return nil +} diff --git a/src/cmd/go/internal/modfetch/codehost/vcs.go b/src/cmd/go/internal/modfetch/codehost/vcs.go index 9e862a0ef8c0d..59c2b15d19a06 100644 --- a/src/cmd/go/internal/modfetch/codehost/vcs.go +++ b/src/cmd/go/internal/modfetch/codehost/vcs.go @@ -18,6 +18,7 @@ import ( "sync" "time" + "cmd/go/internal/lockedfile" "cmd/go/internal/par" "cmd/go/internal/str" ) @@ -27,12 +28,19 @@ import ( // to get the code, but we can't access it due to the error. // The caller should report this error instead of continuing to probe // other possible module paths. +// +// TODO(bcmills): See if we can invert this. (Return a distinguished error for +// “repo not found” and treat everything else as terminal.) 
type VCSError struct { Err error } func (e *VCSError) Error() string { return e.Err.Error() } +func vcsErrorf(format string, a ...interface{}) error { + return &VCSError{Err: fmt.Errorf(format, a...)} +} + func NewRepo(vcs, remote string) (Repo, error) { type key struct { vcs string @@ -56,6 +64,8 @@ func NewRepo(vcs, remote string) (Repo, error) { var vcsRepoCache par.Cache type vcsRepo struct { + mu lockedfile.Mutex // protects all commands, so we don't have to decide which are safe on a per-VCS basis + remote string cmd *vcsCmd dir string @@ -81,18 +91,27 @@ func newVCSRepo(vcs, remote string) (Repo, error) { if !strings.Contains(remote, "://") { return nil, fmt.Errorf("invalid vcs remote: %s %s", vcs, remote) } + r := &vcsRepo{remote: remote, cmd: cmd} + var err error + r.dir, r.mu.Path, err = WorkDir(vcsWorkDirType+vcs, r.remote) + if err != nil { + return nil, err + } + if cmd.init == nil { return r, nil } - dir, err := WorkDir(vcsWorkDirType+vcs, r.remote) + + unlock, err := r.mu.Lock() if err != nil { return nil, err } - r.dir = dir - if _, err := os.Stat(filepath.Join(dir, "."+vcs)); err != nil { - if _, err := Run(dir, cmd.init(r.remote)); err != nil { - os.RemoveAll(dir) + defer unlock() + + if _, err := os.Stat(filepath.Join(r.dir, "."+vcs)); err != nil { + if _, err := Run(r.dir, cmd.init(r.remote)); err != nil { + os.RemoveAll(r.dir) return nil, err } } @@ -270,6 +289,12 @@ func (r *vcsRepo) loadBranches() { } func (r *vcsRepo) Tags(prefix string) ([]string, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + r.tagsOnce.Do(r.loadTags) tags := []string{} @@ -283,6 +308,12 @@ func (r *vcsRepo) Tags(prefix string) ([]string, error) { } func (r *vcsRepo) Stat(rev string) (*RevInfo, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + if rev == "latest" { rev = r.cmd.latest } @@ -315,7 +346,7 @@ func (r *vcsRepo) fetch() { func (r *vcsRepo) statLocal(rev string) 
(*RevInfo, error) { out, err := Run(r.dir, r.cmd.statLocal(rev, r.remote)) if err != nil { - return nil, fmt.Errorf("unknown revision %s", rev) + return nil, vcsErrorf("unknown revision %s", rev) } return r.cmd.parseStat(rev, string(out)) } @@ -332,6 +363,14 @@ func (r *vcsRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) { if err != nil { return nil, err } + + // r.Stat acquires r.mu, so lock after that. + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + out, err := Run(r.dir, r.cmd.readFile(rev, file, r.remote)) if err != nil { return nil, os.ErrNotExist @@ -340,14 +379,42 @@ func (r *vcsRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) { } func (r *vcsRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[string]*FileRev, error) { - return nil, fmt.Errorf("ReadFileRevs not implemented") + // We don't technically need to lock here since we're returning an error + unconditionally, but doing so anyway will help to avoid baking in + lock-inversion bugs. + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + return nil, vcsErrorf("ReadFileRevs not implemented") } func (r *vcsRepo) RecentTag(rev, prefix string) (tag string, err error) { - return "", fmt.Errorf("RecentTags not implemented") + // We don't technically need to lock here since we're returning an error + unconditionally, but doing so anyway will help to avoid baking in + lock-inversion bugs. 
+ unlock, err := r.mu.Lock() + if err != nil { + return "", err + } + defer unlock() + + return "", vcsErrorf("RecentTag not implemented") } func (r *vcsRepo) ReadZip(rev, subdir string, maxSize int64) (zip io.ReadCloser, actualSubdir string, err error) { + if r.cmd.readZip == nil { + return nil, "", vcsErrorf("ReadZip not implemented for %s", r.cmd.vcs) + } + + unlock, err := r.mu.Lock() + if err != nil { + return nil, "", err + } + defer unlock() + if rev == "latest" { rev = r.cmd.latest } @@ -392,7 +459,7 @@ func (d *deleteCloser) Close() error { func hgParseStat(rev, out string) (*RevInfo, error) { f := strings.Fields(string(out)) if len(f) < 3 { - return nil, fmt.Errorf("unexpected response from hg log: %q", out) + return nil, vcsErrorf("unexpected response from hg log: %q", out) } hash := f[0] version := rev @@ -401,7 +468,7 @@ func hgParseStat(rev, out string) (*RevInfo, error) { } t, err := strconv.ParseInt(f[1], 10, 64) if err != nil { - return nil, fmt.Errorf("invalid time from hg log: %q", out) + return nil, vcsErrorf("invalid time from hg log: %q", out) } var tags []string @@ -430,12 +497,12 @@ func svnParseStat(rev, out string) (*RevInfo, error) { } `xml:"logentry"` } if err := xml.Unmarshal([]byte(out), &log); err != nil { - return nil, fmt.Errorf("unexpected response from svn log --xml: %v\n%s", err, out) + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) } t, err := time.Parse(time.RFC3339, log.Logentry.Date) if err != nil { - return nil, fmt.Errorf("unexpected response from svn log --xml: %v\n%s", err, out) + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) } info := &RevInfo{ @@ -471,23 +538,23 @@ func bzrParseStat(rev, out string) (*RevInfo, error) { } i, err := strconv.ParseInt(val, 10, 64) if err != nil { - return nil, fmt.Errorf("unexpected revno from bzr log: %q", line) + return nil, vcsErrorf("unexpected revno from bzr log: %q", line) } revno = i case "timestamp": j := 
strings.Index(val, " ") if j < 0 { - return nil, fmt.Errorf("unexpected timestamp from bzr log: %q", line) + return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line) } t, err := time.Parse("2006-01-02 15:04:05 -0700", val[j+1:]) if err != nil { - return nil, fmt.Errorf("unexpected timestamp from bzr log: %q", line) + return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line) } tm = t.UTC() } } if revno == 0 || tm.IsZero() { - return nil, fmt.Errorf("unexpected response from bzr log: %q", out) + return nil, vcsErrorf("unexpected response from bzr log: %q", out) } info := &RevInfo{ @@ -504,11 +571,11 @@ func fossilParseStat(rev, out string) (*RevInfo, error) { if strings.HasPrefix(line, "uuid:") { f := strings.Fields(line) if len(f) != 5 || len(f[1]) != 40 || f[4] != "UTC" { - return nil, fmt.Errorf("unexpected response from fossil info: %q", line) + return nil, vcsErrorf("unexpected response from fossil info: %q", line) } t, err := time.Parse("2006-01-02 15:04:05", f[2]+" "+f[3]) if err != nil { - return nil, fmt.Errorf("unexpected response from fossil info: %q", line) + return nil, vcsErrorf("unexpected response from fossil info: %q", line) } hash := f[1] version := rev @@ -524,5 +591,5 @@ func fossilParseStat(rev, out string) (*RevInfo, error) { return info, nil } } - return nil, fmt.Errorf("unexpected response from fossil info: %q", out) + return nil, vcsErrorf("unexpected response from fossil info: %q", out) } diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go index 9cf0e911508c0..5018b6d8af7c7 100644 --- a/src/cmd/go/internal/modfetch/coderepo.go +++ b/src/cmd/go/internal/modfetch/coderepo.go @@ -407,25 +407,26 @@ func (r *codeRepo) modPrefix(rev string) string { return r.modPath + "@" + rev } -func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error) { +func (r *codeRepo) Zip(dst io.Writer, version string) error { rev, dir, _, err := r.findDir(version) if err != nil { - 
return "", err + return err } dl, actualDir, err := r.code.ReadZip(rev, dir, codehost.MaxZipFile) if err != nil { - return "", err + return err } + defer dl.Close() if actualDir != "" && !hasPathPrefix(dir, actualDir) { - return "", fmt.Errorf("internal error: downloading %v %v: dir=%q but actualDir=%q", r.path, rev, dir, actualDir) + return fmt.Errorf("internal error: downloading %v %v: dir=%q but actualDir=%q", r.path, rev, dir, actualDir) } subdir := strings.Trim(strings.TrimPrefix(dir, actualDir), "/") // Spool to local file. - f, err := ioutil.TempFile(tmpdir, "go-codehost-") + f, err := ioutil.TempFile("", "go-codehost-") if err != nil { dl.Close() - return "", err + return err } defer os.Remove(f.Name()) defer f.Close() @@ -433,35 +434,24 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error lr := &io.LimitedReader{R: dl, N: maxSize + 1} if _, err := io.Copy(f, lr); err != nil { dl.Close() - return "", err + return err } dl.Close() if lr.N <= 0 { - return "", fmt.Errorf("downloaded zip file too large") + return fmt.Errorf("downloaded zip file too large") } size := (maxSize + 1) - lr.N if _, err := f.Seek(0, 0); err != nil { - return "", err + return err } // Translate from zip file we have to zip file we want. 
zr, err := zip.NewReader(f, size) if err != nil { - return "", err - } - f2, err := ioutil.TempFile(tmpdir, "go-codezip-") - if err != nil { - return "", err + return err } - zw := zip.NewWriter(f2) - newName := f2.Name() - defer func() { - f2.Close() - if err != nil { - os.Remove(newName) - } - }() + zw := zip.NewWriter(dst) if subdir != "" { subdir += "/" } @@ -472,12 +462,12 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error if topPrefix == "" { i := strings.Index(zf.Name, "/") if i < 0 { - return "", fmt.Errorf("missing top-level directory prefix") + return fmt.Errorf("missing top-level directory prefix") } topPrefix = zf.Name[:i+1] } if !strings.HasPrefix(zf.Name, topPrefix) { - return "", fmt.Errorf("zip file contains more than one top-level directory") + return fmt.Errorf("zip file contains more than one top-level directory") } dir, file := path.Split(zf.Name) if file == "go.mod" { @@ -497,11 +487,17 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error name = dir[:len(dir)-1] } } + for _, zf := range zr.File { + if !zf.FileInfo().Mode().IsRegular() { + // Skip symlinks (golang.org/issue/27093). 
+ continue + } + if topPrefix == "" { i := strings.Index(zf.Name, "/") if i < 0 { - return "", fmt.Errorf("missing top-level directory prefix") + return fmt.Errorf("missing top-level directory prefix") } topPrefix = zf.Name[:i+1] } @@ -509,7 +505,7 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error continue } if !strings.HasPrefix(zf.Name, topPrefix) { - return "", fmt.Errorf("zip file contains more than one top-level directory") + return fmt.Errorf("zip file contains more than one top-level directory") } name := strings.TrimPrefix(zf.Name, topPrefix) if !strings.HasPrefix(name, subdir) { @@ -529,28 +525,28 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error } base := path.Base(name) if strings.ToLower(base) == "go.mod" && base != "go.mod" { - return "", fmt.Errorf("zip file contains %s, want all lower-case go.mod", zf.Name) + return fmt.Errorf("zip file contains %s, want all lower-case go.mod", zf.Name) } if name == "LICENSE" { haveLICENSE = true } - size := int64(zf.UncompressedSize) + size := int64(zf.UncompressedSize64) if size < 0 || maxSize < size { - return "", fmt.Errorf("module source tree too big") + return fmt.Errorf("module source tree too big") } maxSize -= size rc, err := zf.Open() if err != nil { - return "", err + return err } w, err := zw.Create(r.modPrefix(version) + "/" + name) lr := &io.LimitedReader{R: rc, N: size + 1} if _, err := io.Copy(w, lr); err != nil { - return "", err + return err } if lr.N <= 0 { - return "", fmt.Errorf("individual file too large") + return fmt.Errorf("individual file too large") } } @@ -559,21 +555,15 @@ func (r *codeRepo) Zip(version string, tmpdir string) (tmpfile string, err error if err == nil { w, err := zw.Create(r.modPrefix(version) + "/LICENSE") if err != nil { - return "", err + return err } if _, err := w.Write(data); err != nil { - return "", err + return err } } } - if err := zw.Close(); err != nil { - return "", err - } - if err := 
f2.Close(); err != nil { - return "", err - } - return f2.Name(), nil + return zw.Close() } // hasPathPrefix reports whether the path s begins with the diff --git a/src/cmd/go/internal/modfetch/coderepo_test.go b/src/cmd/go/internal/modfetch/coderepo_test.go index 79b82786cb965..c93d8dbe44284 100644 --- a/src/cmd/go/internal/modfetch/coderepo_test.go +++ b/src/cmd/go/internal/modfetch/coderepo_test.go @@ -284,10 +284,10 @@ var codeRepoTests = []struct { { path: "gopkg.in/yaml.v2", rev: "v2", - version: "v2.2.1", - name: "5420a8b6744d3b0345ab293f6fcba19c978f1183", - short: "5420a8b6744d", - time: time.Date(2018, 3, 28, 19, 50, 20, 0, time.UTC), + version: "v2.2.2", + name: "51d6538a90f86fe93ac480b35f37b2be17fef232", + short: "51d6538a90f8", + time: time.Date(2018, 11, 15, 11, 05, 04, 0, time.UTC), gomod: "module \"gopkg.in/yaml.v2\"\n\nrequire (\n\t\"gopkg.in/check.v1\" v0.0.0-20161208181325-20d25e280405\n)\n", }, { @@ -391,7 +391,13 @@ func TestCodeRepo(t *testing.T) { } } if tt.zip != nil || tt.ziperr != "" { - zipfile, err := repo.Zip(tt.version, tmpdir) + f, err := ioutil.TempFile(tmpdir, tt.version+".zip.") + if err != nil { + t.Fatalf("ioutil.TempFile: %v", err) + } + zipfile := f.Name() + err = repo.Zip(f, tt.version) + f.Close() if err != nil { if tt.ziperr != "" { if err.Error() == tt.ziperr { @@ -423,7 +429,7 @@ func TestCodeRepo(t *testing.T) { } } } - t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f) + t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f) if strings.HasPrefix(tt.path, vgotest1git) { for _, alt := range altVgotests { // Note: Communicating with f through tt; should be cleaned up. 
@@ -442,7 +448,7 @@ func TestCodeRepo(t *testing.T) { tt.rev = remap(tt.rev, m) tt.gomoderr = remap(tt.gomoderr, m) tt.ziperr = remap(tt.ziperr, m) - t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f) + t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f) tt = old } } @@ -473,9 +479,9 @@ func remap(name string, m map[string]string) string { } } for k, v := range m { - name = strings.Replace(name, k, v, -1) + name = strings.ReplaceAll(name, k, v) if codehost.AllHex(k) { - name = strings.Replace(name, k[:12], v[:12], -1) + name = strings.ReplaceAll(name, k[:12], v[:12]) } } return name @@ -505,11 +511,11 @@ var codeRepoVersionsTests = []struct { }, { path: "gopkg.in/russross/blackfriday.v2", - versions: []string{"v2.0.0"}, + versions: []string{"v2.0.0", "v2.0.1"}, }, { path: "gopkg.in/natefinch/lumberjack.v2", - versions: nil, + versions: []string{"v2.0.0"}, }, } @@ -522,7 +528,7 @@ func TestCodeRepoVersions(t *testing.T) { } defer os.RemoveAll(tmpdir) for _, tt := range codeRepoVersionsTests { - t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) { + t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { repo, err := Lookup(tt.path) if err != nil { t.Fatalf("Lookup(%q): %v", tt.path, err) @@ -570,7 +576,7 @@ func TestLatest(t *testing.T) { } defer os.RemoveAll(tmpdir) for _, tt := range latestTests { - name := strings.Replace(tt.path, "/", "_", -1) + name := strings.ReplaceAll(tt.path, "/", "_") t.Run(name, func(t *testing.T) { repo, err := Lookup(tt.path) if err != nil { diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index 480579156fefb..81a6c843abccf 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -21,6 +21,7 @@ import ( "cmd/go/internal/dirhash" "cmd/go/internal/module" "cmd/go/internal/par" + "cmd/go/internal/renameio" ) var downloadCache par.Cache @@ -34,9 +35,7 @@ func Download(mod module.Version) (dir string, err error) { return 
"", fmt.Errorf("missing modfetch.PkgMod") } - // The par.Cache here avoids duplicate work but also - // avoids conflicts from simultaneous calls by multiple goroutines - // for the same version. + // The par.Cache here avoids duplicate work. type cached struct { dir string err error @@ -46,16 +45,8 @@ func Download(mod module.Version) (dir string, err error) { if err != nil { return cached{"", err} } - if files, _ := ioutil.ReadDir(dir); len(files) == 0 { - zipfile, err := DownloadZip(mod) - if err != nil { - return cached{"", err} - } - modpath := mod.Path + "@" + mod.Version - if err := Unzip(dir, zipfile, modpath, 0); err != nil { - fmt.Fprintf(os.Stderr, "-> %s\n", err) - return cached{"", err} - } + if err := download(mod, dir); err != nil { + return cached{"", err} } checkSum(mod) return cached{dir, nil} @@ -63,14 +54,88 @@ func Download(mod module.Version) (dir string, err error) { return c.dir, c.err } +func download(mod module.Version, dir string) (err error) { + // If the directory exists, the module has already been extracted. + fi, err := os.Stat(dir) + if err == nil && fi.IsDir() { + return nil + } + + // To avoid cluttering the cache with extraneous files, + // DownloadZip uses the same lockfile as Download. + // Invoke DownloadZip before locking the file. + zipfile, err := DownloadZip(mod) + if err != nil { + return err + } + + if cfg.CmdName != "mod download" { + fmt.Fprintf(os.Stderr, "go: extracting %s %s\n", mod.Path, mod.Version) + } + + unlock, err := lockVersion(mod) + if err != nil { + return err + } + defer unlock() + + // Check whether the directory was populated while we were waiting on the lock. + fi, err = os.Stat(dir) + if err == nil && fi.IsDir() { + return nil + } + + // Clean up any remaining temporary directories from previous runs. + // This is only safe to do because the lock file ensures that their writers + // are no longer active. 
+ parentDir := filepath.Dir(dir) + tmpPrefix := filepath.Base(dir) + ".tmp-" + if old, err := filepath.Glob(filepath.Join(parentDir, tmpPrefix+"*")); err == nil { + for _, path := range old { + RemoveAll(path) // best effort + } + } + + // Extract the zip file to a temporary directory, then rename it to the + // final path. That way, we can use the existence of the source directory to + // signal that it has been extracted successfully, and if someone deletes + // the entire directory (e.g. as an attempt to prune out file corruption) + // the module cache will still be left in a recoverable state. + if err := os.MkdirAll(parentDir, 0777); err != nil { + return err + } + tmpDir, err := ioutil.TempDir(parentDir, tmpPrefix) + if err != nil { + return err + } + defer func() { + if err != nil { + RemoveAll(tmpDir) + } + }() + + modpath := mod.Path + "@" + mod.Version + if err := Unzip(tmpDir, zipfile, modpath, 0); err != nil { + fmt.Fprintf(os.Stderr, "-> %s\n", err) + return err + } + + if err := os.Rename(tmpDir, dir); err != nil { + return err + } + + // Make dir read-only only *after* renaming it. + // os.Rename was observed to fail for read-only directories on macOS. + makeDirsReadOnly(dir) + return nil +} + var downloadZipCache par.Cache // DownloadZip downloads the specific module version to the // local zip cache and returns the name of the zip file. func DownloadZip(mod module.Version) (zipfile string, err error) { - // The par.Cache here avoids duplicate work but also - // avoids conflicts from simultaneous calls by multiple goroutines - // for the same version. + // The par.Cache here avoids duplicate work. type cached struct { zipfile string err error @@ -80,83 +145,134 @@ func DownloadZip(mod module.Version) (zipfile string, err error) { if err != nil { return cached{"", err} } + + // Skip locking if the zipfile already exists. if _, err := os.Stat(zipfile); err == nil { - // Use it. 
- // This should only happen if the mod/cache directory is preinitialized - // or if pkg/mod/path was removed but not pkg/mod/cache/download. - if cfg.CmdName != "mod download" { - fmt.Fprintf(os.Stderr, "go: extracting %s %s\n", mod.Path, mod.Version) - } - } else { - if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { - return cached{"", err} - } - if cfg.CmdName != "mod download" { - fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, mod.Version) - } - if err := downloadZip(mod, zipfile); err != nil { - return cached{"", err} - } + return cached{zipfile, nil} + } + + // The zip file does not exist. Acquire the lock and create it. + if cfg.CmdName != "mod download" { + fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, mod.Version) + } + unlock, err := lockVersion(mod) + if err != nil { + return cached{"", err} + } + defer unlock() + + // Double-check that the zipfile was not created while we were waiting for + // the lock. + if _, err := os.Stat(zipfile); err == nil { + return cached{zipfile, nil} + } + if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { + return cached{"", err} + } + if err := downloadZip(mod, zipfile); err != nil { + return cached{"", err} } return cached{zipfile, nil} }).(cached) return c.zipfile, c.err } -func downloadZip(mod module.Version, target string) error { - repo, err := Lookup(mod.Path) +func downloadZip(mod module.Version, zipfile string) (err error) { + // Clean up any remaining tempfiles from previous runs. + // This is only safe to do because the lock file ensures that their + // writers are no longer active. 
+ for _, base := range []string{zipfile, zipfile + "hash"} { + if old, err := filepath.Glob(renameio.Pattern(base)); err == nil { + for _, path := range old { + os.Remove(path) // best effort + } + } + } + + // From here to the os.Rename call below is functionally almost equivalent to + // renameio.WriteToFile, with one key difference: we want to validate the + // contents of the file (by hashing it) before we commit it. Because the file + // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to + // validate it: we can't just tee the stream as we write it. + f, err := ioutil.TempFile(filepath.Dir(zipfile), filepath.Base(renameio.Pattern(zipfile))) if err != nil { return err } - tmpfile, err := repo.Zip(mod.Version, os.TempDir()) + defer func() { + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + repo, err := Lookup(mod.Path) if err != nil { return err } - defer os.Remove(tmpfile) + if err := repo.Zip(f, mod.Version); err != nil { + return err + } - // Double-check zip file looks OK. - z, err := zip.OpenReader(tmpfile) + // Double-check that the paths within the zip file are well-formed. + // + // TODO(bcmills): There is a similar check within the Unzip function. Can we eliminate one? + fi, err := f.Stat() + if err != nil { + return err + } + z, err := zip.NewReader(f, fi.Size()) if err != nil { return err } - prefix := mod.Path + "@" + mod.Version + prefix := mod.Path + "@" + mod.Version + "/" for _, f := range z.File { if !strings.HasPrefix(f.Name, prefix) { - z.Close() return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name) } } - z.Close() - hash, err := dirhash.HashZip(tmpfile, dirhash.DefaultHash) - if err != nil { + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. + // See https://golang.org/issue/22397#issuecomment-380831736. 
+ if err := f.Sync(); err != nil { return err } - checkOneSum(mod, hash) // check before installing the zip file - r, err := os.Open(tmpfile) - if err != nil { + if err := f.Close(); err != nil { return err } - defer r.Close() - w, err := os.Create(target) + + // Hash the zip file and check the sum before renaming to the final location. + hash, err := dirhash.HashZip(f.Name(), dirhash.DefaultHash) if err != nil { return err } - if _, err := io.Copy(w, r); err != nil { - w.Close() - return fmt.Errorf("copying: %v", err) + checkOneSum(mod, hash) + + if err := renameio.WriteFile(zipfile+"hash", []byte(hash)); err != nil { + return err } - if err := w.Close(); err != nil { + if err := os.Rename(f.Name(), zipfile); err != nil { return err } - return ioutil.WriteFile(target+"hash", []byte(hash), 0666) + + // TODO(bcmills): Should we make the .zip and .ziphash files read-only to discourage tampering? + + return nil } var GoSumFile string // path to go.sum; set by package modload +type modSum struct { + mod module.Version + sum string +} + var goSum struct { mu sync.Mutex m map[module.Version][]string // content of go.sum file (+ go.modverify if present) + checked map[modSum]bool // sums actually checked during execution + dirty bool // whether we added any new sums to m + overwrite bool // if true, overwrite go.sum without incorporating its contents enabled bool // whether to use go.sum at all modverify string // path to go.modverify, to be deleted } @@ -173,18 +289,25 @@ func initGoSum() bool { } goSum.m = make(map[module.Version][]string) + goSum.checked = make(map[modSum]bool) data, err := ioutil.ReadFile(GoSumFile) if err != nil && !os.IsNotExist(err) { base.Fatalf("go: %v", err) } goSum.enabled = true - readGoSum(GoSumFile, data) + readGoSum(goSum.m, GoSumFile, data) // Add old go.modverify file. // We'll delete go.modverify in WriteGoSum. 
alt := strings.TrimSuffix(GoSumFile, ".sum") + ".modverify" if data, err := ioutil.ReadFile(alt); err == nil { - readGoSum(alt, data) + migrate := make(map[module.Version][]string) + readGoSum(migrate, alt, data) + for mod, sums := range migrate { + for _, sum := range sums { + checkOneSumLocked(mod, sum) + } + } goSum.modverify = alt } return true @@ -197,7 +320,7 @@ const emptyGoModHash = "h1:G7mAYYxgmS0lVkHyy2hEOLQCFB0DlQFTMLWggykrydY=" // readGoSum parses data, which is the content of file, // and adds it to goSum.m. The goSum lock must be held. -func readGoSum(file string, data []byte) { +func readGoSum(dst map[module.Version][]string, file string, data []byte) { lineno := 0 for len(data) > 0 { var line []byte @@ -221,7 +344,7 @@ func readGoSum(file string, data []byte) { continue } mod := module.Version{Path: f[0], Version: f[1]} - goSum.m[mod] = append(goSum.m[mod], f[2]) + dst[mod] = append(dst[mod], f[2]) } } @@ -235,7 +358,7 @@ func checkSum(mod module.Version) { // Do the file I/O before acquiring the go.sum lock. ziphash, err := CachePath(mod, "ziphash") if err != nil { - base.Fatalf("go: verifying %s@%s: %v", mod.Path, mod.Version, err) + base.Fatalf("verifying %s@%s: %v", mod.Path, mod.Version, err) } data, err := ioutil.ReadFile(ziphash) if err != nil { @@ -243,24 +366,29 @@ func checkSum(mod module.Version) { // This can happen if someone does rm -rf GOPATH/src/cache/download. So it goes. return } - base.Fatalf("go: verifying %s@%s: %v", mod.Path, mod.Version, err) + base.Fatalf("verifying %s@%s: %v", mod.Path, mod.Version, err) } h := strings.TrimSpace(string(data)) if !strings.HasPrefix(h, "h1:") { - base.Fatalf("go: verifying %s@%s: unexpected ziphash: %q", mod.Path, mod.Version, h) + base.Fatalf("verifying %s@%s: unexpected ziphash: %q", mod.Path, mod.Version, h) } checkOneSum(mod, h) } +// goModSum returns the checksum for the go.mod contents. 
+func goModSum(data []byte) (string, error) { + return dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(data)), nil + }) +} + // checkGoMod checks the given module's go.mod checksum; // data is the go.mod content. func checkGoMod(path, version string, data []byte) { - h, err := dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(data)), nil - }) + h, err := goModSum(data) if err != nil { - base.Fatalf("go: verifying %s %s go.mod: %v", path, version, err) + base.Fatalf("verifying %s %s go.mod: %v", path, version, err) } checkOneSum(module.Version{Path: path, Version: version + "/go.mod"}, h) @@ -270,22 +398,27 @@ func checkGoMod(path, version string, data []byte) { func checkOneSum(mod module.Version, h string) { goSum.mu.Lock() defer goSum.mu.Unlock() - if !initGoSum() { - return + if initGoSum() { + checkOneSumLocked(mod, h) } +} + +func checkOneSumLocked(mod module.Version, h string) { + goSum.checked[modSum{mod, h}] = true for _, vh := range goSum.m[mod] { if h == vh { return } if strings.HasPrefix(vh, "h1:") { - base.Fatalf("go: verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\tgo.sum: %v", mod.Path, mod.Version, h, vh) + base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\tgo.sum: %v", mod.Path, mod.Version, h, vh) } } if len(goSum.m[mod]) > 0 { fmt.Fprintf(os.Stderr, "warning: verifying %s@%s: unknown hashes in go.sum: %v; adding %v", mod.Path, mod.Version, strings.Join(goSum.m[mod], ", "), h) } goSum.m[mod] = append(goSum.m[mod], h) + goSum.dirty = true } // Sum returns the checksum for the downloaded copy of the given module, @@ -311,9 +444,54 @@ func Sum(mod module.Version) string { func WriteGoSum() { goSum.mu.Lock() defer goSum.mu.Unlock() - if !initGoSum() { + + if !goSum.enabled { + // If we haven't read the go.sum file yet, don't bother writing it: at best, + // we could rename the go.modverify file if it 
isn't empty, but we haven't + // needed to touch it so far — how important could it be? return } + if !goSum.dirty { + // Don't bother opening the go.sum file if we don't have anything to add. + return + } + + // We want to avoid races between creating the lockfile and deleting it, but + // we also don't want to leave a permanent lockfile in the user's repository. + // + // On top of that, if we crash while writing go.sum, we don't want to lose the + // sums that were already present in the file, so it's important that we write + // the file by renaming rather than truncating — which means that we can't + // lock the go.sum file itself. + // + // Instead, we'll lock a distinguished file in the cache directory: that will + // only race if the user runs `go clean -modcache` concurrently with a command + // that updates go.sum, and that's already racy to begin with. + // + // We'll end up slightly over-synchronizing go.sum writes if the user runs a + // bunch of go commands that update sums in separate modules simultaneously, + // but that's unlikely to matter in practice. + + unlock := SideLock() + defer unlock() + + if !goSum.overwrite { + // Re-read the go.sum file to incorporate any sums added by other processes + // in the meantime. + data, err := ioutil.ReadFile(GoSumFile) + if err != nil && !os.IsNotExist(err) { + base.Fatalf("go: re-reading go.sum: %v", err) + } + + // Add only the sums that we actually checked: the user may have edited or + // truncated the file to remove erroneous hashes, and we shouldn't restore + // them without good reason. 
+ goSum.m = make(map[module.Version][]string, len(goSum.m)) + readGoSum(goSum.m, GoSumFile, data) + for ms := range goSum.checked { + checkOneSumLocked(ms.mod, ms.sum) + } + } var mods []module.Version for m := range goSum.m { @@ -329,15 +507,16 @@ func WriteGoSum() { } } - data, _ := ioutil.ReadFile(GoSumFile) - if !bytes.Equal(data, buf.Bytes()) { - if err := ioutil.WriteFile(GoSumFile, buf.Bytes(), 0666); err != nil { - base.Fatalf("go: writing go.sum: %v", err) - } + if err := renameio.WriteFile(GoSumFile, buf.Bytes()); err != nil { + base.Fatalf("go: writing go.sum: %v", err) } + goSum.checked = make(map[modSum]bool) + goSum.dirty = false + goSum.overwrite = false + if goSum.modverify != "" { - os.Remove(goSum.modverify) + os.Remove(goSum.modverify) // best effort } } @@ -355,6 +534,8 @@ func TrimGoSum(keep map[module.Version]bool) { noGoMod := module.Version{Path: m.Path, Version: strings.TrimSuffix(m.Version, "/go.mod")} if !keep[m] && !keep[noGoMod] { delete(goSum.m, m) + goSum.dirty = true + goSum.overwrite = true } } } diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go index 5f856b80d2e16..60ed2a3796606 100644 --- a/src/cmd/go/internal/modfetch/proxy.go +++ b/src/cmd/go/internal/modfetch/proxy.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/url" "os" "strings" @@ -209,44 +208,31 @@ func (p *proxyRepo) GoMod(version string) ([]byte, error) { return data, nil } -func (p *proxyRepo) Zip(version string, tmpdir string) (tmpfile string, err error) { +func (p *proxyRepo) Zip(dst io.Writer, version string) error { var body io.ReadCloser encVer, err := module.EncodeVersion(version) if err != nil { - return "", err + return err } err = webGetBody(p.url+"/@v/"+pathEscape(encVer)+".zip", &body) if err != nil { - return "", err + return err } defer body.Close() - // Spool to local file. 
- f, err := ioutil.TempFile(tmpdir, "go-proxy-download-") - if err != nil { - return "", err - } - defer f.Close() - maxSize := int64(codehost.MaxZipFile) - lr := &io.LimitedReader{R: body, N: maxSize + 1} - if _, err := io.Copy(f, lr); err != nil { - os.Remove(f.Name()) - return "", err + lr := &io.LimitedReader{R: body, N: codehost.MaxZipFile + 1} + if _, err := io.Copy(dst, lr); err != nil { + return err } if lr.N <= 0 { - os.Remove(f.Name()) - return "", fmt.Errorf("downloaded zip file too large") - } - if err := f.Close(); err != nil { - os.Remove(f.Name()) - return "", err + return fmt.Errorf("downloaded zip file too large") } - return f.Name(), nil + return nil } // pathEscape escapes s so it can be used in a path. // That is, it escapes things like ? and # (which really shouldn't appear anyway). // It does not escape / to %2F: our REST API is designed so that / can be left as is. func pathEscape(s string) string { - return strings.Replace(url.PathEscape(s), "%2F", "/", -1) + return strings.ReplaceAll(url.PathEscape(s), "%2F", "/") } diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go index 0ea8c1f0e35e8..c63f6b04221dd 100644 --- a/src/cmd/go/internal/modfetch/repo.go +++ b/src/cmd/go/internal/modfetch/repo.go @@ -6,8 +6,10 @@ package modfetch import ( "fmt" + "io" "os" "sort" + "strconv" "time" "cmd/go/internal/cfg" @@ -45,11 +47,8 @@ type Repo interface { // GoMod returns the go.mod file for the given version. GoMod(version string) (data []byte, err error) - // Zip downloads a zip file for the given version - // to a new file in a given temporary directory. - // It returns the name of the new file. - // The caller should remove the file when finished with it. - Zip(version, tmpdir string) (tmpfile string, err error) + // Zip writes a zip file for the given version to dst. + Zip(dst io.Writer, version string) error } // A Rev describes a single revision in a module repository. 
@@ -357,7 +356,11 @@ func (l *loggingRepo) GoMod(version string) ([]byte, error) { return l.r.GoMod(version) } -func (l *loggingRepo) Zip(version, tmpdir string) (string, error) { - defer logCall("Repo[%s]: Zip(%q, %q)", l.r.ModulePath(), version, tmpdir)() - return l.r.Zip(version, tmpdir) +func (l *loggingRepo) Zip(dst io.Writer, version string) error { + dstName := "_" + if dst, ok := dst.(interface{ Name() string }); ok { + dstName = strconv.Quote(dst.Name()) + } + defer logCall("Repo[%s]: Zip(%s, %q)", l.r.ModulePath(), dstName, version)() + return l.r.Zip(dst, version) } diff --git a/src/cmd/go/internal/modfetch/unzip.go b/src/cmd/go/internal/modfetch/unzip.go index a50431fd8629d..ac13ede257b61 100644 --- a/src/cmd/go/internal/modfetch/unzip.go +++ b/src/cmd/go/internal/modfetch/unzip.go @@ -12,7 +12,6 @@ import ( "os" "path" "path/filepath" - "sort" "strings" "cmd/go/internal/modfetch/codehost" @@ -21,12 +20,12 @@ import ( ) func Unzip(dir, zipfile, prefix string, maxSize int64) error { + // TODO(bcmills): The maxSize parameter is invariantly 0. Remove it. if maxSize == 0 { maxSize = codehost.MaxZipFile } // Directory can exist, but must be empty. - // except maybe files, _ := ioutil.ReadDir(dir) if len(files) > 0 { return fmt.Errorf("target directory %v exists and is not empty", dir) @@ -98,22 +97,16 @@ func Unzip(dir, zipfile, prefix string, maxSize int64) error { } // Unzip, enforcing sizes checked earlier. 
- dirs := map[string]bool{dir: true} for _, zf := range z.File { if zf.Name == prefix || strings.HasSuffix(zf.Name, "/") { continue } name := zf.Name[len(prefix):] dst := filepath.Join(dir, name) - parent := filepath.Dir(dst) - for parent != dir { - dirs[parent] = true - parent = filepath.Dir(parent) - } if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil { return err } - w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0444) + w, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0444) if err != nil { return fmt.Errorf("unzip %v: %v", zipfile, err) } @@ -137,17 +130,44 @@ func Unzip(dir, zipfile, prefix string, maxSize int64) error { } } - // Mark directories unwritable, best effort. - var dirlist []string - for dir := range dirs { - dirlist = append(dirlist, dir) + return nil +} + +// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir +// and its transitive contents. +func makeDirsReadOnly(dir string) { + type pathMode struct { + path string + mode os.FileMode } - sort.Strings(dirlist) + var dirs []pathMode // in lexical order + filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err == nil && info.Mode()&0222 != 0 { + if info.IsDir() { + dirs = append(dirs, pathMode{path, info.Mode()}) + } + } + return nil + }) // Run over list backward to chmod children before parents. - for i := len(dirlist) - 1; i >= 0; i-- { - os.Chmod(dirlist[i], 0555) + for i := len(dirs) - 1; i >= 0; i-- { + os.Chmod(dirs[i].path, dirs[i].mode&^0222) } +} - return nil +// RemoveAll removes a directory written by Download or Unzip, first applying +// any permission changes needed to do so. +func RemoveAll(dir string) error { + // Module cache has 0555 directories; make them writable in order to remove content. 
+ filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil // ignore errors walking in file system + } + if info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return os.RemoveAll(dir) } diff --git a/src/cmd/go/internal/modfile/rule.go b/src/cmd/go/internal/modfile/rule.go index e11f0a6e31e15..7f9a18c6c2a6c 100644 --- a/src/cmd/go/internal/modfile/rule.go +++ b/src/cmd/go/internal/modfile/rule.go @@ -154,7 +154,7 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (*File return f, nil } -var goVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) +var GoVersionRE = regexp.MustCompile(`([1-9][0-9]*)\.(0|[1-9][0-9]*)`) func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. @@ -181,7 +181,7 @@ func (f *File) add(errs *bytes.Buffer, line *Line, verb string, args []string, f fmt.Fprintf(errs, "%s:%d: repeated go statement\n", f.Syntax.Name, line.Start.Line) return } - if len(args) != 1 || !goVersionRE.MatchString(args[0]) { + if len(args) != 1 || !GoVersionRE.MatchString(args[0]) { fmt.Fprintf(errs, "%s:%d: usage: go 1.23\n", f.Syntax.Name, line.Start.Line) return } @@ -477,6 +477,22 @@ func (f *File) Cleanup() { f.Syntax.Cleanup() } +func (f *File) AddGoStmt(version string) error { + if !GoVersionRE.MatchString(version) { + return fmt.Errorf("invalid language version string %q", version) + } + if f.Go == nil { + f.Go = &Go{ + Version: version, + Syntax: f.Syntax.addLine(nil, "go", version), + } + } else { + f.Go.Version = version + f.Syntax.updateLine(f.Go.Syntax, "go", version) + } + return nil +} + func (f *File) AddRequire(path, vers string) error { need := true for _, r := range f.Require { diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 90a5bd81302c7..17a0ed45e21b4 100644 --- a/src/cmd/go/internal/modget/get.go +++ 
b/src/cmd/go/internal/modget/get.go @@ -56,7 +56,8 @@ If a module under consideration is already a dependency of the current development module, then get will update the required version. Specifying a version earlier than the current required version is valid and downgrades the dependency. The version suffix @none indicates that the -dependency should be removed entirely. +dependency should be removed entirely, downgrading or removing modules +depending on it as needed. Although get defaults to using the latest version of the module containing a named package, it does not use the latest version of that module's @@ -78,7 +79,7 @@ to use newer patch releases when available. Continuing the previous example, In general, adding a new dependency may require upgrading existing dependencies to keep a working build, and 'go get' does this automatically. Similarly, downgrading one dependency may -require downgrading other dependenceis, and 'go get' does +require downgrading other dependencies, and 'go get' does this automatically as well. The -m flag instructs get to stop here, after resolving, upgrading, @@ -247,7 +248,7 @@ func runGet(cmd *base.Command, args []string) { // Deciding which module to upgrade/downgrade for a particular argument is difficult. // Patterns only make it more difficult. // We impose restrictions to avoid needing to interlace pattern expansion, - // like in in modload.ImportPaths. + // like in modload.ImportPaths. // Specifically, these patterns are supported: // // - Relative paths like ../../foo or ../../foo... 
are restricted to matching directories @@ -281,8 +282,8 @@ func runGet(cmd *base.Command, args []string) { base.Errorf("go get %s: %v", arg, err) continue } - if !str.HasFilePathPrefix(abs, modload.ModRoot) { - base.Errorf("go get %s: directory %s is outside module root %s", arg, abs, modload.ModRoot) + if !str.HasFilePathPrefix(abs, modload.ModRoot()) { + base.Errorf("go get %s: directory %s is outside module root %s", arg, abs, modload.ModRoot()) continue } // TODO: Check if abs is inside a nested module. @@ -534,9 +535,11 @@ func runGet(cmd *base.Command, args []string) { // module root. continue } + base.Errorf("%s", p.Error) } todo = append(todo, p) } + base.ExitIfErrors() // If -d was specified, we're done after the download: no build. // (The load.PackagesAndErrors is what did the download diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 5893db14aa60c..2a8be90b78f54 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -14,8 +14,10 @@ import ( "cmd/go/internal/search" "encoding/hex" "fmt" + "internal/goroot" "os" "path/filepath" + "runtime/debug" "strings" ) @@ -25,15 +27,22 @@ var ( ) func isStandardImportPath(path string) bool { + return findStandardImportPath(path) != "" +} + +func findStandardImportPath(path string) string { + if path == "" { + panic("findStandardImportPath called with empty path") + } if search.IsStandardImportPath(path) { - if _, err := os.Stat(filepath.Join(cfg.GOROOT, "src", path)); err == nil { - return true + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + return filepath.Join(cfg.GOROOT, "src", path) } - if _, err := os.Stat(filepath.Join(cfg.GOROOT, "src/vendor", path)); err == nil { - return true + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, "vendor/"+path) { + return filepath.Join(cfg.GOROOT, "src/vendor", path) } } - return false + return "" } func PackageModuleInfo(pkgpath string) 
*modinfo.ModulePublic { @@ -90,11 +99,13 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { Path: m.Path, Version: m.Version, Main: true, - Dir: ModRoot, - GoMod: filepath.Join(ModRoot, "go.mod"), } - if modFile.Go != nil { - info.GoVersion = modFile.Go.Version + if HasModRoot() { + info.Dir = ModRoot() + info.GoMod = filepath.Join(info.Dir, "go.mod") + if modFile.Go != nil { + info.GoVersion = modFile.Go.Version + } } return info } @@ -109,7 +120,7 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { } if cfg.BuildMod == "vendor" { - info.Dir = filepath.Join(ModRoot, "vendor", m.Path) + info.Dir = filepath.Join(ModRoot(), "vendor", m.Path) return info } @@ -137,34 +148,38 @@ func moduleInfo(m module.Version, fromBuildList bool) *modinfo.ModulePublic { } } } - if cfg.BuildMod == "vendor" { - m.Dir = filepath.Join(ModRoot, "vendor", m.Path) - } } - complete(info) + if !fromBuildList { + complete(info) + return info + } - if fromBuildList { - if r := Replacement(m); r.Path != "" { - info.Replace = &modinfo.ModulePublic{ - Path: r.Path, - Version: r.Version, - GoVersion: info.GoVersion, - } - if r.Version == "" { - if filepath.IsAbs(r.Path) { - info.Replace.Dir = r.Path - } else { - info.Replace.Dir = filepath.Join(ModRoot, r.Path) - } - } - complete(info.Replace) - info.Dir = info.Replace.Dir - info.GoMod = filepath.Join(info.Dir, "go.mod") - info.Error = nil // ignore error loading original module version (it has been replaced) - } + r := Replacement(m) + if r.Path == "" { + complete(info) + return info } + // Don't hit the network to fill in extra data for replaced modules. + // The original resolved Version and Time don't matter enough to be + // worth the cost, and we're going to overwrite the GoMod and Dir from the + // replacement anyway. See https://golang.org/issue/27859. 
+ info.Replace = &modinfo.ModulePublic{ + Path: r.Path, + Version: r.Version, + GoVersion: info.GoVersion, + } + if r.Version == "" { + if filepath.IsAbs(r.Path) { + info.Replace.Dir = r.Path + } else { + info.Replace.Dir = filepath.Join(ModRoot(), r.Path) + } + } + complete(info.Replace) + info.Dir = info.Replace.Dir + info.GoMod = filepath.Join(info.Dir, "go.mod") return info } @@ -172,6 +187,7 @@ func PackageBuildInfo(path string, deps []string) string { if isStandardImportPath(path) || !Enabled() { return "" } + target := findModule(path, path) mdeps := make(map[module.Version]bool) for _, dep := range deps { @@ -211,28 +227,44 @@ func PackageBuildInfo(path string, deps []string) string { return buf.String() } +// findModule returns the module containing the package at path, +// needed to build the package at target. func findModule(target, path string) module.Version { - // TODO: This should use loaded. - if path == "." { - return buildList[0] - } - for _, mod := range buildList { - if maybeInModule(path, mod.Path) { - return mod + pkg, ok := loaded.pkgCache.Get(path).(*loadPkg) + if ok { + if pkg.err != nil { + base.Fatalf("build %v: cannot load %v: %v", target, path, pkg.err) } + return pkg.mod + } + + if path == "command-line-arguments" { + return Target + } + + if printStackInDie { + debug.PrintStack() } base.Fatalf("build %v: cannot find module for path %v", target, path) panic("unreachable") } func ModInfoProg(info string) []byte { - return []byte(fmt.Sprintf(` - package main - import _ "unsafe" - //go:linkname __debug_modinfo__ runtime/debug.modinfo - var __debug_modinfo__ string - func init() { - __debug_modinfo__ = %q - } + // Inject a variable with the debug information as runtime/debug.modinfo, + // but compile it in package main so that it is specific to the binary. + // + // The variable must be a literal so that it will have the correct value + // before the initializer for package main runs. 
+ // + // We also want the value to be present even if runtime/debug.modinfo is + // otherwise unused in the rest of the program. Reading it in an init function + // suffices for now. + + return []byte(fmt.Sprintf(`package main +import _ "unsafe" +//go:linkname __debug_modinfo__ runtime/debug.modinfo +var __debug_modinfo__ = %q +var keepalive_modinfo = __debug_modinfo__ +func init() { keepalive_modinfo = __debug_modinfo__ } `, string(infoStart)+info+string(infoEnd))) } diff --git a/src/cmd/go/internal/modload/help.go b/src/cmd/go/internal/modload/help.go index 9a12b24482070..d9c8ae40d88f3 100644 --- a/src/cmd/go/internal/modload/help.go +++ b/src/cmd/go/internal/modload/help.go @@ -393,17 +393,20 @@ no /* */ comments. Each line holds a single directive, made up of a verb followed by arguments. For example: module my/thing + go 1.12 require other/thing v1.0.2 - require new/thing v2.3.4 + require new/thing/v2 v2.3.4 exclude old/thing v1.2.3 replace bad/thing v1.4.5 => good/thing v1.4.5 -The verbs are module, to define the module path; require, to require -a particular module at a given version or later; exclude, to exclude -a particular module version from use; and replace, to replace a module -version with a different module version. Exclude and replace apply only -in the main module's go.mod and are ignored in dependencies. -See https://research.swtch.com/vgo-mvs for details. +The verbs are + module, to define the module path; + go, to set the expected language version; + require, to require a particular module at a given version or later; + exclude, to exclude a particular module version from use; and + replace, to replace a module version with a different module version. +Exclude and replace apply only in the main module's go.mod and are ignored +in dependencies. See https://research.swtch.com/vgo-mvs for details. The leading verb can be factored out of adjacent lines to create a block, like in Go imports: @@ -420,7 +423,19 @@ See 'go help mod edit'. 
The go command automatically updates go.mod each time it uses the module graph, to make sure go.mod always accurately reflects reality -and is properly formatted. +and is properly formatted. For example, consider this go.mod file: + + module M + + require ( + A v1 + B v1.0.0 + C v1.0.0 + D v1.2.3 + E dev + ) + + exclude D v1.2.3 The update rewrites non-canonical version identifiers to semver form, so A's v1 becomes v1.0.0 and E's dev becomes the pseudo-version for the diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index 78ae83e4bf3b5..3210e16c25b08 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -9,15 +9,20 @@ import ( "errors" "fmt" "go/build" + "internal/goroot" "os" "path/filepath" + "sort" "strings" + "time" "cmd/go/internal/cfg" + "cmd/go/internal/modfetch" "cmd/go/internal/modfetch/codehost" "cmd/go/internal/module" "cmd/go/internal/par" "cmd/go/internal/search" + "cmd/go/internal/semver" ) type ImportMissingError struct { @@ -57,11 +62,8 @@ func Import(path string) (m module.Version, dir string, err error) { // Is the package in the standard library? if search.IsStandardImportPath(path) { - if strings.HasPrefix(path, "golang_org/") { - return module.Version{}, filepath.Join(cfg.GOROOT, "src/vendor", path), nil - } - dir := filepath.Join(cfg.GOROOT, "src", path) - if _, err := os.Stat(dir); err == nil { + if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + dir := filepath.Join(cfg.GOROOT, "src", path) return module.Version{}, dir, nil } } @@ -69,8 +71,8 @@ func Import(path string) (m module.Version, dir string, err error) { // -mod=vendor is special. // Everything must be in the main module or the main module's vendor directory. 
if cfg.BuildMod == "vendor" { - mainDir, mainOK := dirInModule(path, Target.Path, ModRoot, true) - vendorDir, vendorOK := dirInModule(path, "", filepath.Join(ModRoot, "vendor"), false) + mainDir, mainOK := dirInModule(path, Target.Path, ModRoot(), true) + vendorDir, vendorOK := dirInModule(path, "", filepath.Join(ModRoot(), "vendor"), false) if mainOK && vendorOK { return module.Version{}, "", fmt.Errorf("ambiguous import: found %s in multiple directories:\n\t%s\n\t%s", path, mainDir, vendorDir) } @@ -124,14 +126,58 @@ func Import(path string) (m module.Version, dir string, err error) { return module.Version{}, "", errors.New(buf.String()) } - // Not on build list. - // Look up module containing the package, for addition to the build list. // Goal is to determine the module, download it to dir, and return m, dir, ErrMissing. if cfg.BuildMod == "readonly" { return module.Version{}, "", fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) } + // Not on build list. + // To avoid spurious remote fetches, next try the latest replacement for each module. + // (golang.org/issue/26241) + if modFile != nil { + latest := map[string]string{} // path -> version + for _, r := range modFile.Replace { + if maybeInModule(path, r.Old.Path) { + latest[r.Old.Path] = semver.Max(r.Old.Version, latest[r.Old.Path]) + } + } + + mods = make([]module.Version, 0, len(latest)) + for p, v := range latest { + // If the replacement didn't specify a version, synthesize a + // pseudo-version with an appropriate major version and a timestamp below + // any real timestamp. That way, if the main module is used from within + // some other module, the user will be able to upgrade the requirement to + // any real version they choose. 
+ if v == "" { + if _, pathMajor, ok := module.SplitPathVersion(p); ok && len(pathMajor) > 0 { + v = modfetch.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000") + } else { + v = modfetch.PseudoVersion("v0", "", time.Time{}, "000000000000") + } + } + mods = append(mods, module.Version{Path: p, Version: v}) + } + + // Every module path in mods is a prefix of the import path. + // As in QueryPackage, prefer the longest prefix that satisfies the import. + sort.Slice(mods, func(i, j int) bool { + return len(mods[i].Path) > len(mods[j].Path) + }) + for _, m := range mods { + root, isLocal, err := fetch(m) + if err != nil { + // Report fetch error as above. + return module.Version{}, "", err + } + _, ok := dirInModule(path, m.Path, root, isLocal) + if ok { + return m, "", &ImportMissingError{ImportPath: path, Module: m} + } + } + } + m, _, err = QueryPackage(path, "latest", Allowed) if err != nil { if _, ok := err.(*codehost.VCSError); ok { @@ -181,7 +227,7 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile // So we only check local module trees // (the main module, and any directory trees pointed at by replace directives). if isLocal { - for d := dir; d != mdir && len(d) > len(mdir); d = filepath.Dir(d) { + for d := dir; d != mdir && len(d) > len(mdir); { haveGoMod := haveGoModCache.Do(d, func() interface{} { _, err := os.Stat(filepath.Join(d, "go.mod")) return err == nil @@ -190,6 +236,13 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile if haveGoMod { return "", false } + parent := filepath.Dir(d) + if parent == d { + // Break the loop, as otherwise we'd loop + // forever if d=="." and mdir=="". 
+ break + } + d = parent } } diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index 8e01dc50916a3..9422a3d960c16 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -21,7 +21,7 @@ var importTests = []struct { }, { path: "golang.org/x/net", - err: "missing module for import: golang.org/x/net@.* provides golang.org/x/net", + err: "cannot find module providing package golang.org/x/net", }, { path: "golang.org/x/text", @@ -45,7 +45,7 @@ func TestImport(t *testing.T) { testenv.MustHaveExternalNetwork(t) for _, tt := range importTests { - t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) { + t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { // Note that there is no build list, so Import should always fail. m, dir, err := Import(tt.path) if err == nil { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index f995bad13b543..22d14ccce78b2 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -16,27 +16,31 @@ import ( "cmd/go/internal/modfile" "cmd/go/internal/module" "cmd/go/internal/mvs" + "cmd/go/internal/renameio" "cmd/go/internal/search" "encoding/json" "fmt" + "go/build" "io/ioutil" "os" "path" "path/filepath" "regexp" + "runtime/debug" "strconv" "strings" ) var ( - cwd string + cwd string // TODO(bcmills): Is this redundant with base.Cwd? MustUseModules = mustUseModules() initialized bool - ModRoot string - modFile *modfile.File - excluded map[module.Version]bool - Target module.Version + modRoot string + modFile *modfile.File + modFileData []byte + excluded map[module.Version]bool + Target module.Version gopath string @@ -53,11 +57,15 @@ var ( // To make permanent changes to the require statements // in go.mod, edit it before calling ImportPaths or LoadBuildList. 
func ModFile() *modfile.File { + Init() + if modFile == nil { + die() + } return modFile } func BinDir() string { - MustInit() + Init() return filepath.Join(gopath, "bin") } @@ -73,6 +81,10 @@ func mustUseModules() bool { var inGOPATH bool // running in GOPATH/src +// Init determines whether module mode is enabled, locates the root of the +// current module (if any), sets environment variables for Git subprocesses, and +// configures the cfg, codehost, load, modfetch, and search packages for use +// with modules. func Init() { if initialized { return @@ -138,6 +150,9 @@ func Init() { } if inGOPATH && !MustUseModules { + if CmdModInit { + die() // Don't init a module that we're just going to ignore. + } // No automatic enabling in GOPATH. if root, _ := FindModuleRoot(cwd, "", false); root != "" { cfg.GoModInGOPATH = filepath.Join(root, "go.mod") @@ -147,26 +162,54 @@ func Init() { if CmdModInit { // Running 'go mod init': go.mod will be created in current directory. - ModRoot = cwd + modRoot = cwd } else { - ModRoot, _ = FindModuleRoot(cwd, "", MustUseModules) - if !MustUseModules { - if ModRoot == "" { - return - } - if search.InDir(ModRoot, os.TempDir()) == "." { - // If you create /tmp/go.mod for experimenting, - // then any tests that create work directories under /tmp - // will find it and get modules when they're not expecting them. - // It's a bit of a peculiar thing to disallow but quite mysterious - // when it happens. See golang.org/issue/26708. - ModRoot = "" - fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) + modRoot, _ = FindModuleRoot(cwd, "", MustUseModules) + if modRoot == "" { + if !MustUseModules { + // GO111MODULE is 'auto' (or unset), and we can't find a module root. + // Stay in GOPATH mode. return } + } else if search.InDir(modRoot, os.TempDir()) == "." 
{ + // If you create /tmp/go.mod for experimenting, + // then any tests that create work directories under /tmp + // will find it and get modules when they're not expecting them. + // It's a bit of a peculiar thing to disallow but quite mysterious + // when it happens. See golang.org/issue/26708. + modRoot = "" + fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) } } + // We're in module mode. Install the hooks to make it work. + + if c := cache.Default(); c == nil { + // With modules, there are no install locations for packages + // other than the build cache. + base.Fatalf("go: cannot use modules with build cache disabled") + } + + list := filepath.SplitList(cfg.BuildContext.GOPATH) + if len(list) == 0 || list[0] == "" { + base.Fatalf("missing $GOPATH") + } + gopath = list[0] + if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil { + base.Fatalf("$GOPATH/go.mod exists but should not") + } + + oldSrcMod := filepath.Join(list[0], "src/mod") + pkgMod := filepath.Join(list[0], "pkg/mod") + infoOld, errOld := os.Stat(oldSrcMod) + _, errMod := os.Stat(pkgMod) + if errOld == nil && infoOld.IsDir() && errMod != nil && os.IsNotExist(errMod) { + os.Rename(oldSrcMod, pkgMod) + } + + modfetch.PkgMod = pkgMod + codehost.WorkRoot = filepath.Join(pkgMod, "cache/vcs") + cfg.ModulesEnabled = true load.ModBinDir = BinDir load.ModLookup = Lookup @@ -177,7 +220,35 @@ func Init() { load.ModImportFromFiles = ImportFromFiles load.ModDirImportPath = DirImportPath - search.SetModRoot(ModRoot) + if modRoot == "" { + // We're in module mode, but not inside a module. + // + // If the command is 'go get' or 'go list' and all of the args are in the + // same existing module, we could use that module's download directory in + // the module cache as the module root, applying any replacements and/or + // exclusions specified by that module. 
However, that would leave us in a + // strange state: we want 'go get' to be consistent with 'go list', and 'go + // list' should be able to operate on multiple modules. Moreover, the 'get' + // target might specify relative file paths (e.g. in the same repository) as + // replacements, and we would not be able to apply those anyway: we would + // need to either error out or ignore just those replacements, when a build + // from an empty module could proceed without error. + // + // Instead, we'll operate as though we're in some ephemeral external module, + // ignoring all replacements and exclusions uniformly. + + // Normally we check sums using the go.sum file from the main module, but + // without a main module we do not have an authoritative go.sum file. + // + // TODO(bcmills): In Go 1.13, check sums when outside the main module. + // + // One possible approach is to merge the go.sum files from all of the + // modules we download: that doesn't protect us against bad top-level + // modules, but it at least ensures consistency for transitive dependencies. + } else { + modfetch.GoSumFile = filepath.Join(modRoot, "go.sum") + search.SetModRoot(modRoot) + } } func init() { @@ -190,38 +261,41 @@ func init() { } // Enabled reports whether modules are (or must be) enabled. -// If modules must be enabled but are not, Enabled returns true +// If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die -// (usually through InitMod and MustInit). +// (usually through MustModRoot). func Enabled() bool { - if !initialized { - panic("go: Enabled called before Init") - } - return ModRoot != "" || MustUseModules + Init() + return modRoot != "" || MustUseModules } -// MustInit calls Init if needed and checks that -// modules are enabled and the main module has been found. -// If not, MustInit calls base.Fatalf with an appropriate message. 
-func MustInit() { - if Init(); ModRoot == "" { +// ModRoot returns the root of the main module. +// It calls base.Fatalf if there is no main module. +func ModRoot() string { + if !HasModRoot() { die() } - if c := cache.Default(); c == nil { - // With modules, there are no install locations for packages - // other than the build cache. - base.Fatalf("go: cannot use modules with build cache disabled") - } + return modRoot } -// Failed reports whether module loading failed. -// If Failed returns true, then any use of module information will call die. -func Failed() bool { +// HasModRoot reports whether a main module is present. +// HasModRoot may return false even if Enabled returns true: for example, 'get' +// does not require a main module. +func HasModRoot() bool { Init() - return cfg.ModulesEnabled && ModRoot == "" + return modRoot != "" } +// printStackInDie causes die to print a stack trace. +// +// It is enabled by the testgo tag, and helps to diagnose paths that +// unexpectedly require a main module. +var printStackInDie = false + func die() { + if printStackInDie { + debug.PrintStack() + } if os.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } @@ -231,33 +305,20 @@ func die() { base.Fatalf("go: cannot find main module; see 'go help modules'") } +// InitMod sets Target and, if there is a main module, parses the initial build +// list from its go.mod file, creating and populating that file if needed. 
func InitMod() { - MustInit() - if modFile != nil { + if len(buildList) > 0 { return } - list := filepath.SplitList(cfg.BuildContext.GOPATH) - if len(list) == 0 || list[0] == "" { - base.Fatalf("missing $GOPATH") - } - gopath = list[0] - if _, err := os.Stat(filepath.Join(gopath, "go.mod")); err == nil { - base.Fatalf("$GOPATH/go.mod exists but should not") - } - - oldSrcMod := filepath.Join(list[0], "src/mod") - pkgMod := filepath.Join(list[0], "pkg/mod") - infoOld, errOld := os.Stat(oldSrcMod) - _, errMod := os.Stat(pkgMod) - if errOld == nil && infoOld.IsDir() && errMod != nil && os.IsNotExist(errMod) { - os.Rename(oldSrcMod, pkgMod) + Init() + if modRoot == "" { + Target = module.Version{Path: "command-line-arguments"} + buildList = []module.Version{Target} + return } - modfetch.PkgMod = pkgMod - modfetch.GoSumFile = filepath.Join(ModRoot, "go.sum") - codehost.WorkRoot = filepath.Join(pkgMod, "cache/vcs") - if CmdModInit { // Running go mod init: do legacy module conversion legacyModInit() @@ -266,7 +327,7 @@ func InitMod() { return } - gomod := filepath.Join(ModRoot, "go.mod") + gomod := filepath.Join(modRoot, "go.mod") data, err := ioutil.ReadFile(gomod) if err != nil { if os.IsNotExist(err) { @@ -284,10 +345,11 @@ func InitMod() { base.Fatalf("go: errors parsing go.mod:\n%s\n", err) } modFile = f + modFileData = data if len(f.Syntax.Stmt) == 0 || f.Module == nil { // Empty mod file. Must add module path. 
- path, err := FindModulePath(ModRoot) + path, err := FindModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } @@ -325,7 +387,7 @@ func Allowed(m module.Version) bool { func legacyModInit() { if modFile == nil { - path, err := FindModulePath(ModRoot) + path, err := FindModulePath(modRoot) if err != nil { base.Fatalf("go: %v", err) } @@ -334,8 +396,10 @@ func legacyModInit() { modFile.AddModuleStmt(path) } + addGoStmt() + for _, name := range altConfigs { - cfg := filepath.Join(ModRoot, name) + cfg := filepath.Join(modRoot, name) data, err := ioutil.ReadFile(cfg) if err == nil { convert := modconv.Converters[name] @@ -356,6 +420,25 @@ func legacyModInit() { } } +// InitGoStmt adds a go statement, unless there already is one. +func InitGoStmt() { + if modFile.Go == nil { + addGoStmt() + } +} + +// addGoStmt adds a go statement referring to the current version. +func addGoStmt() { + tags := build.Default.ReleaseTags + version := tags[len(tags)-1] + if !strings.HasPrefix(version, "go") || !modfile.GoVersionRE.MatchString(version[2:]) { + base.Fatalf("go: unrecognized default version %q", version) + } + if err := modFile.AddGoStmt(version[2:]); err != nil { + base.Fatalf("go: internal error: %v", err) + } +} + var altConfigs = []string{ "Gopkg.lock", @@ -379,7 +462,7 @@ func FindModuleRoot(dir, limit string, legacyConfigOK bool) (root, file string) // Look for enclosing go.mod. 
for { - if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { return dir, "go.mod" } if dir == limit { @@ -397,7 +480,7 @@ func FindModuleRoot(dir, limit string, legacyConfigOK bool) (root, file string) dir = dir1 for { for _, name := range altConfigs { - if _, err := os.Stat(filepath.Join(dir, name)); err == nil { + if fi, err := os.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() { return dir, name } } @@ -541,6 +624,11 @@ func WriteGoMod() { return } + // If we aren't in a module, we don't have anywhere to write a go.mod file. + if modRoot == "" { + return + } + if loaded != nil { reqs := MinReqs() min, err := reqs.Required(Target) @@ -557,22 +645,53 @@ func WriteGoMod() { modFile.SetRequire(list) } - file := filepath.Join(ModRoot, "go.mod") - old, _ := ioutil.ReadFile(file) modFile.Cleanup() // clean file after edits new, err := modFile.Format() if err != nil { base.Fatalf("go: %v", err) } - if !bytes.Equal(old, new) { - if cfg.BuildMod == "readonly" { - base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") + + // Always update go.sum, even if we didn't change go.mod: we may have + // downloaded modules that we didn't have before. + modfetch.WriteGoSum() + + if bytes.Equal(new, modFileData) { + // We don't need to modify go.mod from what we read previously. + // Ignore any intervening edits. + return + } + if cfg.BuildMod == "readonly" { + base.Fatalf("go: updates to go.mod needed, disabled by -mod=readonly") + } + + unlock := modfetch.SideLock() + defer unlock() + + file := filepath.Join(modRoot, "go.mod") + old, err := ioutil.ReadFile(file) + if !bytes.Equal(old, modFileData) { + if bytes.Equal(old, new) { + // Some other process wrote the same go.mod file that we were about to write. 
+ modFileData = new + return } - if err := ioutil.WriteFile(file, new, 0666); err != nil { - base.Fatalf("go: %v", err) + if err != nil { + base.Fatalf("go: can't determine whether go.mod has changed: %v", err) } + // The contents of the go.mod file have changed. In theory we could add all + // of the new modules to the build list, recompute, and check whether any + // module in *our* build list got bumped to a different version, but that's + // a lot of work for marginal benefit. Instead, fail the command: if users + // want to run concurrent commands, they need to start with a complete, + // consistent module definition. + base.Fatalf("go: updates to go.mod needed, but contents have changed") + } - modfetch.WriteGoSum() + + if err := renameio.WriteFile(file, new); err != nil { + base.Fatalf("error writing go.mod: %v", err) + } + modFileData = new } func fixVersion(path, vers string) (string, error) { diff --git a/src/cmd/go/internal/modload/init_test.go b/src/cmd/go/internal/modload/init_test.go new file mode 100644 index 0000000000000..2df9d8af7df1b --- /dev/null +++ b/src/cmd/go/internal/modload/init_test.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestFindModuleRootIgnoreDir(t *testing.T) { + // In Plan 9, directories are automatically created in /n. + // For example, /n/go.mod always exist, but it's a directory. + // Test that we ignore directories when trying to find go.mod and other config files. 
+ + dir, err := ioutil.TempDir("", "gotest") + if err != nil { + t.Fatalf("failed to create temporary directory: %v", err) + } + defer os.RemoveAll(dir) + if err := os.Mkdir(filepath.Join(dir, "go.mod"), os.ModeDir|0755); err != nil { + t.Fatalf("Mkdir failed: %v", err) + } + for _, name := range altConfigs { + if err := os.MkdirAll(filepath.Join(dir, name), os.ModeDir|0755); err != nil { + t.Fatalf("MkdirAll failed: %v", err) + } + } + p := filepath.Join(dir, "example") + if err := os.Mkdir(p, os.ModeDir|0755); err != nil { + t.Fatalf("Mkdir failed: %v", err) + } + if root, _ := FindModuleRoot(p, "", false); root != "" { + t.Errorf("FindModuleRoot(%q, \"\", false): %q, want empty string", p, root) + } + if root, _ := FindModuleRoot(p, "", true); root != "" { + t.Errorf("FindModuleRoot(%q, \"\", true): %q, want empty string", p, root) + } +} diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 69a832de1df52..2f1a3c24d223c 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -17,7 +17,7 @@ import ( ) func ListModules(args []string, listU, listVersions bool) []*modinfo.ModulePublic { - mods := listModules(args) + mods := listModules(args, listVersions) if listU || listVersions { var work par.Work for _, m := range mods { @@ -39,7 +39,7 @@ func ListModules(args []string, listU, listVersions bool) []*modinfo.ModulePubli return mods } -func listModules(args []string) []*modinfo.ModulePublic { +func listModules(args []string, listVersions bool) []*modinfo.ModulePublic { LoadBuildList() if len(args) == 0 { return []*modinfo.ModulePublic{moduleInfo(buildList[0], true)} @@ -83,6 +83,10 @@ func listModules(args []string) []*modinfo.ModulePublic { } matched := false for i, m := range buildList { + if i == 0 && !HasModRoot() { + // The root module doesn't actually exist: omit it. 
+ continue + } if match(m.Path) { matched = true if !matchedBuildList[i] { @@ -93,6 +97,16 @@ func listModules(args []string) []*modinfo.ModulePublic { } if !matched { if literal { + if listVersions { + // Don't make the user provide an explicit '@latest' when they're + // explicitly asking what the available versions are. + // Instead, resolve the module, even if it isn't an existing dependency. + info, err := Query(arg, "latest", nil) + if err == nil { + mods = append(mods, moduleInfo(module.Version{Path: arg, Version: info.Version}, false)) + continue + } + } mods = append(mods, &modinfo.ModulePublic{ Path: arg, Error: &modinfo.ModuleError{ diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 285daa8f4fcb9..5bb943dd6decb 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -90,7 +90,7 @@ func ImportPaths(patterns []string) []*search.Match { // the exact version of a particular module increases during // the loader iterations. m.Pkgs = str.StringList(fsDirs[i]) - for i, pkg := range m.Pkgs { + for j, pkg := range m.Pkgs { dir := pkg if !filepath.IsAbs(dir) { dir = filepath.Join(cwd, pkg) @@ -101,10 +101,10 @@ func ImportPaths(patterns []string) []*search.Match { // Note: The checks for @ here are just to avoid misinterpreting // the module cache directories (formerly GOPATH/src/mod/foo@v1.5.2/bar). // It's not strictly necessary but helpful to keep the checks. 
- if dir == ModRoot { + if modRoot != "" && dir == modRoot { pkg = Target.Path - } else if strings.HasPrefix(dir, ModRoot+string(filepath.Separator)) && !strings.Contains(dir[len(ModRoot):], "@") { - suffix := filepath.ToSlash(dir[len(ModRoot):]) + } else if modRoot != "" && strings.HasPrefix(dir, modRoot+string(filepath.Separator)) && !strings.Contains(dir[len(modRoot):], "@") { + suffix := filepath.ToSlash(dir[len(modRoot):]) if strings.HasPrefix(suffix, "/vendor/") { // TODO getmode vendor check pkg = strings.TrimPrefix(suffix, "/vendor/") @@ -118,24 +118,21 @@ func ImportPaths(patterns []string) []*search.Match { } else { pkg = "" if !iterating { + ModRoot() base.Errorf("go: directory %s outside available modules", base.ShortPath(dir)) } } info, err := os.Stat(dir) if err != nil || !info.IsDir() { - // If the directory does not exist, - // don't turn it into an import path - // that will trigger a lookup. - pkg = "" - if !iterating { - if err != nil { - base.Errorf("go: no such directory %v", m.Pattern) - } else { - base.Errorf("go: %s is not a directory", m.Pattern) - } + // If the directory is local but does not exist, don't return it + // while loader is iterating, since this would trigger a fetch. + // After loader is done iterating, we still need to return the + // path, so that "go list -e" produces valid output. + if iterating { + pkg = "" } } - m.Pkgs[i] = pkg + m.Pkgs[j] = pkg } case strings.Contains(m.Pattern, "..."): @@ -251,17 +248,21 @@ func ImportFromFiles(gofiles []string) { // DirImportPath returns the effective import path for dir, // provided it is within the main module, or else returns ".". func DirImportPath(dir string) string { + if modRoot == "" { + return "." 
+ } + if !filepath.IsAbs(dir) { dir = filepath.Join(cwd, dir) } else { dir = filepath.Clean(dir) } - if dir == ModRoot { + if dir == modRoot { return Target.Path } - if strings.HasPrefix(dir, ModRoot+string(filepath.Separator)) { - suffix := filepath.ToSlash(dir[len(ModRoot):]) + if strings.HasPrefix(dir, modRoot+string(filepath.Separator)) { + suffix := filepath.ToSlash(dir[len(modRoot):]) if strings.HasPrefix(suffix, "/vendor/") { return strings.TrimPrefix(suffix, "/vendor/") } @@ -397,13 +398,22 @@ func ModuleUsedDirectly(path string) bool { // Lookup requires that one of the Load functions in this package has already // been called. func Lookup(path string) (dir, realPath string, err error) { + if path == "" { + panic("Lookup called with empty package path") + } pkg, ok := loaded.pkgCache.Get(path).(*loadPkg) if !ok { - if isStandardImportPath(path) { - dir := filepath.Join(cfg.GOROOT, "src", path) - if _, err := os.Stat(dir); err == nil { - return dir, path, nil - } + // The loader should have found all the relevant paths. + // There are a few exceptions, though: + // - during go list without -test, the p.Resolve calls to process p.TestImports and p.XTestImports + // end up here to canonicalize the import paths. + // - during any load, non-loaded packages like "unsafe" end up here. + // - during any load, build-injected dependencies like "runtime/cgo" end up here. + // - because we ignore appengine/* in the module loader, + // the dependencies of any actual appengine/* library end up here. + dir := findStandardImportPath(path) + if dir != "" { + return dir, path, nil } return "", "", errMissing } @@ -752,7 +762,7 @@ func (pkg *loadPkg) stackText() string { } // why returns the text to use in "go mod why" output about the given package. -// It is less ornate than the stackText but conatins the same information. +// It is less ornate than the stackText but contains the same information. 
func (pkg *loadPkg) why() string { var buf strings.Builder var stack []*loadPkg @@ -801,7 +811,7 @@ func WhyDepth(path string) int { // a module.Version with Path == "". func Replacement(mod module.Version) module.Version { if modFile == nil { - // Happens during testing. + // Happens during testing and if invoking 'go get' or 'go list' outside a module. return module.Version{} } @@ -878,7 +888,7 @@ func readVendorList() { vendorOnce.Do(func() { vendorList = nil vendorMap = make(map[string]module.Version) - data, _ := ioutil.ReadFile(filepath.Join(ModRoot, "vendor/modules.txt")) + data, _ := ioutil.ReadFile(filepath.Join(ModRoot(), "vendor/modules.txt")) var m module.Version for _, line := range strings.Split(string(data), "\n") { if strings.HasPrefix(line, "# ") { @@ -908,7 +918,7 @@ func (r *mvsReqs) modFileToList(f *modfile.File) []module.Version { func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { if mod == Target { - if modFile.Go != nil { + if modFile != nil && modFile.Go != nil { r.versions.LoadOrStore(mod, modFile.Go.Version) } var list []module.Version @@ -928,7 +938,7 @@ func (r *mvsReqs) required(mod module.Version) ([]module.Version, error) { // TODO: need to slip the new version into the tags list etc. 
dir := repl.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(ModRoot, dir) + dir = filepath.Join(ModRoot(), dir) } gomod := filepath.Join(dir, "go.mod") data, err := ioutil.ReadFile(gomod) @@ -1043,13 +1053,13 @@ func (*mvsReqs) next(m module.Version) (module.Version, error) { func fetch(mod module.Version) (dir string, isLocal bool, err error) { if mod == Target { - return ModRoot, true, nil + return ModRoot(), true, nil } if r := Replacement(mod); r.Path != "" { if r.Version == "" { dir = r.Path if !filepath.IsAbs(dir) { - dir = filepath.Join(ModRoot, dir) + dir = filepath.Join(ModRoot(), dir) } return dir, true, nil } diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index 3b550f1db7f74..0856486c212c2 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -207,21 +207,23 @@ func matchSemverPrefix(p, v string) bool { // If multiple modules with revisions matching the query provide the requested // package, QueryPackage picks the one with the longest module path. // -// If the path is in the the main module and the query is "latest", +// If the path is in the main module and the query is "latest", // QueryPackage returns Target as the version. 
func QueryPackage(path, query string, allowed func(module.Version) bool) (module.Version, *modfetch.RevInfo, error) { - if _, ok := dirInModule(path, Target.Path, ModRoot, true); ok { - if query != "latest" { - return module.Version{}, nil, fmt.Errorf("can't query specific version (%q) for package %s in the main module (%s)", query, path, Target.Path) - } - if !allowed(Target) { - return module.Version{}, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed", path, Target.Path) + if HasModRoot() { + if _, ok := dirInModule(path, Target.Path, modRoot, true); ok { + if query != "latest" { + return module.Version{}, nil, fmt.Errorf("can't query specific version (%q) for package %s in the main module (%s)", query, path, Target.Path) + } + if !allowed(Target) { + return module.Version{}, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed", path, Target.Path) + } + return Target, &modfetch.RevInfo{Version: Target.Version}, nil } - return Target, &modfetch.RevInfo{Version: Target.Version}, nil } finalErr := errMissing - for p := path; p != "."; p = pathpkg.Dir(p) { + for p := path; p != "." 
&& p != "/"; p = pathpkg.Dir(p) { info, err := Query(p, query, allowed) if err != nil { if _, ok := err.(*codehost.VCSError); ok { diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go index 7f3ffabef7420..9b07383217175 100644 --- a/src/cmd/go/internal/modload/query_test.go +++ b/src/cmd/go/internal/modload/query_test.go @@ -132,7 +132,7 @@ func TestQuery(t *testing.T) { ok, _ := path.Match(allow, m.Version) return ok } - t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.query+"/"+allow, func(t *testing.T) { + t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+allow, func(t *testing.T) { info, err := Query(tt.path, tt.query, allowed) if tt.err != "" { if err != nil && err.Error() == tt.err { diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 24825cc35d8a3..7d8852d01d7f7 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -118,7 +118,10 @@ func matchPackages(pattern string, tags map[string]bool, useStd bool, modules [] } var root string if mod.Version == "" { - root = ModRoot + if !HasModRoot() { + continue // If there is no main module, we can't search in it. + } + root = ModRoot() } else { var err error root, _, err = fetch(mod) diff --git a/src/net/http/race.go b/src/cmd/go/internal/modload/testgo.go similarity index 52% rename from src/net/http/race.go rename to src/cmd/go/internal/modload/testgo.go index 766503967c3c4..663b24a68d792 100644 --- a/src/net/http/race.go +++ b/src/cmd/go/internal/modload/testgo.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build race +//+build testgo -package http +package modload func init() { - raceEnabled = true + printStackInDie = true } diff --git a/src/cmd/go/internal/module/module.go b/src/cmd/go/internal/module/module.go index 1dbb0f5cb7996..481a90b1c46d3 100644 --- a/src/cmd/go/internal/module/module.go +++ b/src/cmd/go/internal/module/module.go @@ -226,7 +226,7 @@ func checkElem(elem string, fileName bool) error { } for _, bad := range badWindowsNames { if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) + return fmt.Errorf("%q disallowed as path element component on Windows", short) } } return nil @@ -284,7 +284,7 @@ func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { } i-- } - if i <= 1 || path[i-1] != 'v' || path[i-2] != '/' { + if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { return path, "", true } prefix, pathMajor = path[:i-2], path[i-2:] diff --git a/src/cmd/go/internal/module/module_test.go b/src/cmd/go/internal/module/module_test.go index f21d620d328f8..b40bd03dfa65b 100644 --- a/src/cmd/go/internal/module/module_test.go +++ b/src/cmd/go/internal/module/module_test.go @@ -214,6 +214,7 @@ var splitPathVersionTests = []struct { {"x.y/z", ""}, {"x.y/z", "/v2"}, {"x.y/z", "/v3"}, + {"x.y/v", ""}, {"gopkg.in/yaml", ".v0"}, {"gopkg.in/yaml", ".v1"}, {"gopkg.in/yaml", ".v2"}, diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go index 8ec9162dabcdc..aa109693f307e 100644 --- a/src/cmd/go/internal/mvs/mvs.go +++ b/src/cmd/go/internal/mvs/mvs.go @@ -68,6 +68,7 @@ func (e *MissingModuleError) Error() string { } // BuildList returns the build list for the target module. +// The first element is the target itself, with the remainder of the list sorted by path. 
func BuildList(target module.Version, reqs Reqs) ([]module.Version, error) { return buildList(target, reqs, nil) } diff --git a/src/cmd/go/internal/renameio/renameio.go b/src/cmd/go/internal/renameio/renameio.go new file mode 100644 index 0000000000000..8f59e1a577325 --- /dev/null +++ b/src/cmd/go/internal/renameio/renameio.go @@ -0,0 +1,63 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package renameio writes files atomically by renaming temporary files. +package renameio + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +const patternSuffix = "*.tmp" + +// Pattern returns a glob pattern that matches the unrenamed temporary files +// created when writing to filename. +func Pattern(filename string) string { + return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) +} + +// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// file in the same directory as filename, then renames it atomically to the +// final name. +// +// That ensures that the final location, if it exists, is always a complete file. +func WriteFile(filename string, data []byte) (err error) { + return WriteToFile(filename, bytes.NewReader(data)) +} + +// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader +// instead of a slice. +func WriteToFile(filename string, data io.Reader) (err error) { + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // that. 
+ if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := io.Copy(f, data); err != nil { + return err + } + // Sync the file before renaming it: otherwise, after a crash the reader may + // observe a 0-length file instead of the actual contents. + // See https://golang.org/issue/22397#issuecomment-380831736. + if err := f.Sync(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + return os.Rename(f.Name(), filename) +} diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index 303e6842e7be3..feccf23b2782a 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -78,6 +78,9 @@ func runRun(cmd *base.Command, args []string) { p = load.GoFilesPackage(files) } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { pkgs := load.PackagesAndErrors(args[:1]) + if len(pkgs) == 0 { + base.Fatalf("go run: no packages loaded from %s", args[0]) + } if len(pkgs) > 1 { var names []string for _, p := range pkgs { diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go index 60ae73696bb4c..0ca60e73497e2 100644 --- a/src/cmd/go/internal/search/search.go +++ b/src/cmd/go/internal/search/search.go @@ -275,7 +275,7 @@ func MatchPattern(pattern string) func(name string) bool { case strings.HasSuffix(re, `/\.\.\.`): re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` } - re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1) + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) reg := regexp.MustCompile(`^` + re + `$`) @@ -353,7 +353,7 @@ func CleanPatterns(patterns []string) []string { // as a courtesy to Windows developers, rewrite \ to / // in command-line arguments. Handles .\... and so on. if filepath.Separator == '\\' { - a = strings.Replace(a, `\`, `/`, -1) + a = strings.ReplaceAll(a, `\`, `/`) } // Put argument in canonical form, but preserve leading ./. 
diff --git a/src/cmd/go/internal/semver/semver.go b/src/cmd/go/internal/semver/semver.go index 4af7118e55d2e..122e612dd4b8f 100644 --- a/src/cmd/go/internal/semver/semver.go +++ b/src/cmd/go/internal/semver/semver.go @@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) { i := 1 start := 1 for i < len(v) { - if !isIdentChar(v[i]) { + if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' { diff --git a/src/cmd/go/internal/semver/semver_test.go b/src/cmd/go/internal/semver/semver_test.go index 96b64a5807549..77025a44abd60 100644 --- a/src/cmd/go/internal/semver/semver_test.go +++ b/src/cmd/go/internal/semver/semver_test.go @@ -44,6 +44,7 @@ var tests = []struct { {"v1.2.3", "v1.2.3"}, {"v1.2.3+meta", "v1.2.3"}, {"v1.2.3+meta-pre", "v1.2.3"}, + {"v1.2.3+meta-pre.sha.256a", "v1.2.3"}, } func TestIsValid(t *testing.T) { diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 3295e8ffe24ba..8dfb3df22d3b9 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -27,6 +27,7 @@ import ( "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/go/internal/lockedfile" "cmd/go/internal/modload" "cmd/go/internal/str" "cmd/go/internal/work" @@ -124,16 +125,6 @@ A cached test result is treated as executing in no time at all, so a successful package test result will be cached and reused regardless of -timeout setting. -` + strings.TrimSpace(testFlag1) + ` See 'go help testflag' for details. - -For more about build flags, see 'go help build'. -For more about specifying packages, see 'go help packages'. - -See also: go build, go vet. -`, -} - -const testFlag1 = ` In addition to the build flags, the flags handled by 'go test' itself are: -args @@ -164,15 +155,13 @@ In addition to the build flags, the flags handled by 'go test' itself are: The test still runs (unless -c or -i is specified). 
The test binary also accepts flags that control execution of the test; these -flags are also accessible by 'go test'. -` - -// Usage prints the usage message for 'go test -h' and exits. -func Usage() { - os.Stderr.WriteString("usage: " + testUsage + "\n\n" + - strings.TrimSpace(testFlag1) + "\n\n\t" + - strings.TrimSpace(testFlag2) + "\n") - os.Exit(2) +flags are also accessible by 'go test'. See 'go help testflag' for details. + +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. + +See also: go build, go vet. +`, } var HelpTestflag = &base.Command{ @@ -190,11 +179,6 @@ options of pprof control how the information is presented. The following flags are recognized by the 'go test' command and control the execution of any test: - ` + strings.TrimSpace(testFlag2) + ` -`, -} - -const testFlag2 = ` -bench regexp Run only those benchmarks matching a regular expression. By default, no benchmarks are run. @@ -212,6 +196,8 @@ const testFlag2 = ` Run enough iterations of each benchmark to take t, specified as a time.Duration (for example, -benchtime 1h30s). The default is 1 second (1s). + The special syntax Nx means to run the benchmark N times + (for example, -benchtime 100x). -count n Run each test and benchmark n times (default 1). @@ -412,7 +398,8 @@ In the first example, the -x and the second -v are passed through to the test binary unchanged and with no effect on the go command itself. In the second example, the argument math is passed through to the test binary, instead of being interpreted as the package list. 
-` +`, +} var HelpTestfunc = &base.Command{ UsageLine: "testfunc", @@ -530,7 +517,7 @@ var testVetFlags = []string{ func runTest(cmd *base.Command, args []string) { modload.LoadTests = true - pkgArgs, testArgs = testFlags(args) + pkgArgs, testArgs = testFlags(cmd.Usage, args) work.FindExecCmd() // initialize cached result @@ -580,7 +567,7 @@ func runTest(cmd *base.Command, args []string) { // (We implement go clean -testcache by writing an expiration date // instead of searching out and deleting test result cache entries.) if dir := cache.DefaultDir(); dir != "off" { - if data, _ := ioutil.ReadFile(filepath.Join(dir, "testexpire.txt")); len(data) > 0 && data[len(data)-1] == '\n' { + if data, _ := lockedfile.Read(filepath.Join(dir, "testexpire.txt")); len(data) > 0 && data[len(data)-1] == '\n' { if t, err := strconv.ParseInt(string(data[:len(data)-1]), 10, 64); err == nil { testCacheExpire = time.Unix(0, t) } @@ -885,15 +872,19 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin target = filepath.Join(base.Cwd, target) } } - pmain.Target = target - installAction = &work.Action{ - Mode: "test build", - Func: work.BuildInstallFunc, - Deps: []*work.Action{buildAction}, - Package: pmain, - Target: target, + if target == os.DevNull { + runAction = buildAction + } else { + pmain.Target = target + installAction = &work.Action{ + Mode: "test build", + Func: work.BuildInstallFunc, + Deps: []*work.Action{buildAction}, + Package: pmain, + Target: target, + } + runAction = installAction // make sure runAction != nil even if not running test } - runAction = installAction // make sure runAction != nil even if not running test } var vetRunAction *work.Action if testC { diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 73f8c69d9e171..ebcf49a4e9c92 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -87,7 +87,7 @@ func init() { // to allow both // go test fmt 
-custom-flag-for-fmt-test // go test -x math -func testFlags(args []string) (packageNames, passToTest []string) { +func testFlags(usage func(), args []string) (packageNames, passToTest []string) { args = str.StringList(cmdflag.FindGOFLAGS(testFlagDefn), args) inPkg := false var explicitArgs []string @@ -108,7 +108,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { inPkg = false } - f, value, extraWord := cmdflag.Parse(cmd, testFlagDefn, args, i) + f, value, extraWord := cmdflag.Parse(cmd, usage, testFlagDefn, args, i) if f == nil { // This is a flag we do not know; we must assume // that any args we see after this might be flag diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index b64bf3f8e8784..327b761c3cd6b 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -16,17 +16,26 @@ import ( var CmdVet = &base.Command{ Run: runVet, CustomFlags: true, - UsageLine: "go vet [-n] [-x] [build flags] [vet flags] [packages]", + UsageLine: "go vet [-n] [-x] [-vettool prog] [build flags] [vet flags] [packages]", Short: "report likely mistakes in packages", Long: ` Vet runs the Go vet command on the packages named by the import paths. For more about vet and its flags, see 'go doc cmd/vet'. For more about specifying packages, see 'go help packages'. +For a list of checkers and their flags, see 'go tool vet help'. +For details of a specific checker such as 'printf', see 'go tool vet help printf'. The -n flag prints commands that would be executed. The -x flag prints commands as they are executed. +The -vettool=prog flag selects a different analysis tool with alternative +or additional checks. 
+For example, the 'shadow' analyzer can be built and run using these commands: + + go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow + go vet -vettool=$(which shadow) + The build flags supported by go vet are those that control package resolution and execution, such as -n, -x, -v, -tags, and -toolexec. For more about these flags, see 'go help build'. @@ -38,7 +47,7 @@ See also: go fmt, go fix. func runVet(cmd *base.Command, args []string) { modload.LoadTests = true - vetFlags, pkgArgs := vetFlags(args) + vetFlags, pkgArgs := vetFlags(vetUsage, args) work.BuildInit() work.VetFlags = vetFlags diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go index 50eac425ec369..37342f4163342 100644 --- a/src/cmd/go/internal/vet/vetflag.go +++ b/src/cmd/go/internal/vet/vetflag.go @@ -5,9 +5,14 @@ package vet import ( + "bytes" + "encoding/json" "flag" "fmt" + "log" "os" + "os/exec" + "path/filepath" "strings" "cmd/go/internal/base" @@ -16,72 +21,116 @@ import ( "cmd/go/internal/work" ) -const cmd = "vet" - -// vetFlagDefn is the set of flags we process. -var vetFlagDefn = []*cmdflag.Defn{ - // Note: Some flags, in particular -tags and -v, are known to - // vet but also defined as build flags. This works fine, so we - // don't define them here but use AddBuildFlags to init them. - // However some, like -x, are known to the build but not - // to vet. We handle them in vetFlags. - - // local. 
- {Name: "all", BoolVar: new(bool), PassToTest: true}, - {Name: "asmdecl", BoolVar: new(bool), PassToTest: true}, - {Name: "assign", BoolVar: new(bool), PassToTest: true}, - {Name: "atomic", BoolVar: new(bool), PassToTest: true}, - {Name: "bool", BoolVar: new(bool), PassToTest: true}, - {Name: "buildtags", BoolVar: new(bool), PassToTest: true}, - {Name: "cgocall", BoolVar: new(bool), PassToTest: true}, - {Name: "composites", BoolVar: new(bool), PassToTest: true}, - {Name: "copylocks", BoolVar: new(bool), PassToTest: true}, - {Name: "httpresponse", BoolVar: new(bool), PassToTest: true}, - {Name: "lostcancel", BoolVar: new(bool), PassToTest: true}, - {Name: "methods", BoolVar: new(bool), PassToTest: true}, - {Name: "nilfunc", BoolVar: new(bool), PassToTest: true}, - {Name: "printf", BoolVar: new(bool), PassToTest: true}, - {Name: "printfuncs", PassToTest: true}, - {Name: "rangeloops", BoolVar: new(bool), PassToTest: true}, - {Name: "shadow", BoolVar: new(bool), PassToTest: true}, - {Name: "shadowstrict", BoolVar: new(bool), PassToTest: true}, - {Name: "shift", BoolVar: new(bool), PassToTest: true}, - {Name: "source", BoolVar: new(bool), PassToTest: true}, - {Name: "structtags", BoolVar: new(bool), PassToTest: true}, - {Name: "tests", BoolVar: new(bool), PassToTest: true}, - {Name: "unreachable", BoolVar: new(bool), PassToTest: true}, - {Name: "unsafeptr", BoolVar: new(bool), PassToTest: true}, - {Name: "unusedfuncs", PassToTest: true}, - {Name: "unusedresult", BoolVar: new(bool), PassToTest: true}, - {Name: "unusedstringmethods", PassToTest: true}, -} +// go vet flag processing +// +// We query the flags of the tool specified by -vettool and accept any +// of those flags plus any flag valid for 'go build'. The tool must +// support -flags, which prints a description of its flags in JSON to +// stdout. -var vetTool string +// vetTool specifies the vet command to run. 
+// Any tool that supports the (still unpublished) vet +// command-line protocol may be supplied; see +// golang.org/x/tools/go/analysis/unitchecker for one +// implementation. It is also used by tests. +// +// The default behavior (vetTool=="") runs 'go tool vet'. +// +var vetTool string // -vettool -// add build flags to vetFlagDefn. func init() { - cmdflag.AddKnownFlags("vet", vetFlagDefn) + // Extract -vettool by ad hoc flag processing: + // its value is needed even before we can declare + // the flags available during main flag processing. + for i, arg := range os.Args { + if arg == "-vettool" || arg == "--vettool" { + if i+1 >= len(os.Args) { + log.Fatalf("%s requires a filename", arg) + } + vetTool = os.Args[i+1] + break + } else if strings.HasPrefix(arg, "-vettool=") || + strings.HasPrefix(arg, "--vettool=") { + vetTool = arg[strings.IndexByte(arg, '=')+1:] + break + } + } +} + +// vetFlags processes the command line, splitting it at the first non-flag +// into the list of flags and list of packages. +func vetFlags(usage func(), args []string) (passToVet, packageNames []string) { + // Query the vet command for its flags. + tool := vetTool + if tool != "" { + var err error + tool, err = filepath.Abs(tool) + if err != nil { + log.Fatal(err) + } + } else { + tool = base.Tool("vet") + } + out := new(bytes.Buffer) + vetcmd := exec.Command(tool, "-flags") + vetcmd.Stdout = out + if err := vetcmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "go vet: can't execute %s -flags: %v\n", tool, err) + os.Exit(2) + } + var analysisFlags []struct { + Name string + Bool bool + Usage string + } + if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil { + fmt.Fprintf(os.Stderr, "go vet: can't unmarshal JSON from %s -flags: %v", tool, err) + os.Exit(2) + } + + // Add vet's flags to vetflagDefn. + // + // Some flags, in particular -tags and -v, are known to vet but + // also defined as build flags. 
This works fine, so we don't + // define them here but use AddBuildFlags to init them. + // However some, like -x, are known to the build but not to vet. + var vetFlagDefn []*cmdflag.Defn + for _, f := range analysisFlags { + switch f.Name { + case "tags", "v": + continue + } + defn := &cmdflag.Defn{ + Name: f.Name, + PassToTest: true, + } + if f.Bool { + defn.BoolVar = new(bool) + } + vetFlagDefn = append(vetFlagDefn, defn) + } + + // Add build flags to vetFlagDefn. var cmd base.Command work.AddBuildFlags(&cmd) - cmd.Flag.StringVar(&vetTool, "vettool", "", "path to vet tool binary") // for cmd/vet tests; undocumented for now + // This flag declaration is a placeholder: + // -vettool is actually parsed by the init function above. + cmd.Flag.StringVar(new(string), "vettool", "", "path to vet tool binary") cmd.Flag.VisitAll(func(f *flag.Flag) { vetFlagDefn = append(vetFlagDefn, &cmdflag.Defn{ Name: f.Name, Value: f.Value, }) }) -} -// vetFlags processes the command line, splitting it at the first non-flag -// into the list of flags and list of packages. -func vetFlags(args []string) (passToVet, packageNames []string) { + // Process args. 
args = str.StringList(cmdflag.FindGOFLAGS(vetFlagDefn), args) for i := 0; i < len(args); i++ { if !strings.HasPrefix(args[i], "-") { return args[:i], args[i:] } - f, value, extraWord := cmdflag.Parse(cmd, vetFlagDefn, args, i) + f, value, extraWord := cmdflag.Parse("vet", usage, vetFlagDefn, args, i) if f == nil { fmt.Fprintf(os.Stderr, "vet: flag %q not defined\n", args[i]) fmt.Fprintf(os.Stderr, "Run \"go help vet\" for more information\n") @@ -117,3 +166,21 @@ func vetFlags(args []string) (passToVet, packageNames []string) { } return args, nil } + +var vetUsage func() + +func init() { vetUsage = usage } // break initialization cycle + +func usage() { + fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName()) + + // This part is additional to what (*Command).Usage does: + cmd := "go tool vet" + if vetTool != "" { + cmd = vetTool + } + fmt.Fprintf(os.Stderr, "Run '%s -help' for the vet tool's flags.\n", cmd) + + os.Exit(2) +} diff --git a/src/cmd/go/internal/web2/web.go b/src/cmd/go/internal/web2/web.go index f3900379e1784..64934f1d506db 100644 --- a/src/cmd/go/internal/web2/web.go +++ b/src/cmd/go/internal/web2/web.go @@ -7,11 +7,13 @@ package web2 import ( "bytes" "cmd/go/internal/base" + "cmd/go/internal/cfg" "encoding/json" "flag" "fmt" "io" "io/ioutil" + "log" "net/http" "os" "path/filepath" @@ -187,10 +189,10 @@ func SetHTTPDoForTesting(do func(*http.Request) (*http.Response, error)) { } func Get(url string, options ...Option) error { - if TraceGET || webstack { - println("GET", url) + if TraceGET || webstack || cfg.BuildV { + log.Printf("Fetching %s", url) if webstack { - println(string(debug.Stack())) + log.Println(string(debug.Stack())) } } diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index ed41ce5d0730a..145b87513a916 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -99,7 +99,7 @@ and test commands: 
link against shared libraries previously created with -buildmode=shared. -mod mode - module download mode to use: readonly, release, or vendor. + module download mode to use: readonly or vendor. See 'go help modules' for more. -pkgdir dir install and load all packages from dir instead of the usual locations. @@ -398,10 +398,10 @@ func libname(args []string, pkgs []*load.Package) (string, error) { arg = bp.ImportPath } } - appendName(strings.Replace(arg, "/", "-", -1)) + appendName(strings.ReplaceAll(arg, "/", "-")) } else { for _, pkg := range pkgs { - appendName(strings.Replace(pkg.ImportPath, "/", "-", -1)) + appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-")) } } } else if haveNonMeta { // have both meta package and a non-meta one diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go index 010e17ee48057..ef95a408ca31b 100644 --- a/src/cmd/go/internal/work/build_test.go +++ b/src/cmd/go/internal/work/build_test.go @@ -227,6 +227,8 @@ func TestRespectSetgidDir(t *testing.T) { if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { t.Skip("can't set SetGID bit with chmod on iOS") } + case "windows", "plan9", "js": + t.Skip("chown/chmod setgid are not supported on Windows, Plan 9, or JS") } var b Builder @@ -245,11 +247,13 @@ func TestRespectSetgidDir(t *testing.T) { } defer os.RemoveAll(setgiddir) - if runtime.GOOS == "freebsd" { - err = os.Chown(setgiddir, os.Getuid(), os.Getgid()) - if err != nil { - t.Fatal(err) - } + // BSD mkdir(2) inherits the parent directory group, and other platforms + // can inherit the parent directory group via setgid. The test setup (chmod + // setgid) will fail if the process does not have the group permission to + // the new temporary directory. + err = os.Chown(setgiddir, os.Getuid(), os.Getgid()) + if err != nil { + t.Fatal(err) } // Change setgiddir's permissions to include the SetGID bit. 
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index f6b79711f9d8f..750bc3c6cdbe4 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -18,7 +18,6 @@ import ( "cmd/go/internal/load" "cmd/go/internal/str" "cmd/internal/buildid" - "cmd/internal/objabi" ) // Build IDs @@ -178,7 +177,8 @@ func (b *Builder) toolID(name string) string { path := base.Tool(name) desc := "go tool " + name - // Special case: undocumented -vettool overrides usual vet, for testing vet. + // Special case: undocumented -vettool overrides usual vet, + // for testing vet or supplying an alternative analysis tool. if name == "vet" && VetTool != "" { path = VetTool desc = VetTool @@ -207,11 +207,6 @@ func (b *Builder) toolID(name string) string { id = f[2] } - // For the compiler, add any experiments. - if name == "compile" { - id += " " + objabi.Expstring() - } - b.id.Lock() b.toolIDCache[name] = id b.id.Unlock() @@ -322,13 +317,16 @@ func assemblerIsGas() bool { } } -// gccgoBuildIDELFFile creates an assembler file that records the -// action's build ID in an SHF_EXCLUDE section. -func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) { +// gccgoBuildIDFile creates an assembler file that records the +// action's build ID in an SHF_EXCLUDE section for ELF files or +// in a CSECT in XCOFF files. 
+func (b *Builder) gccgoBuildIDFile(a *Action) (string, error) { sfile := a.Objdir + "_buildid.s" var buf bytes.Buffer - if cfg.Goos != "solaris" || assemblerIsGas() { + if cfg.Goos == "aix" { + fmt.Fprintf(&buf, "\t.csect .go.buildid[XO]\n") + } else if cfg.Goos != "solaris" || assemblerIsGas() { fmt.Fprintf(&buf, "\t"+`.section .go.buildid,"e"`+"\n") } else if cfg.Goarch == "sparc" || cfg.Goarch == "sparc64" { fmt.Fprintf(&buf, "\t"+`.section ".go.buildid",#exclude`+"\n") @@ -347,9 +345,13 @@ func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) { fmt.Fprintf(&buf, "%#02x", a.buildID[i]) } fmt.Fprintf(&buf, "\n") - if cfg.Goos != "solaris" { - fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",@progbits`+"\n") - fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",@progbits`+"\n") + if cfg.Goos != "solaris" && cfg.Goos != "aix" { + secType := "@progbits" + if cfg.Goarch == "arm" { + secType = "%progbits" + } + fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType) + fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType) } if cfg.BuildN || cfg.BuildX { diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index 42fa0e64ac007..baa587268744f 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -224,7 +224,9 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { if len(p.SFiles) > 0 { fmt.Fprintf(h, "asm %q %q %q\n", b.toolID("asm"), forcedAsmflags, p.Internal.Asmflags) } - fmt.Fprintf(h, "GO$GOARCH=%s\n", os.Getenv("GO"+strings.ToUpper(cfg.BuildContext.GOARCH))) // GO386, GOARM, etc + // GO386, GOARM, GOMIPS, etc. + baseArch := strings.TrimSuffix(cfg.BuildContext.GOARCH, "le") + fmt.Fprintf(h, "GO$GOARCH=%s\n", os.Getenv("GO"+strings.ToUpper(baseArch))) // TODO(rsc): Convince compiler team not to add more magic environment variables, // or perhaps restrict the environment variables passed to subprocesses. 
@@ -375,7 +377,7 @@ func (b *Builder) build(a *Action) (err error) { if b.NeedExport { p.Export = a.built } - if need&needCompiledGoFiles != 0 && b.loadCachedGoFiles(a) { + if need&needCompiledGoFiles != 0 && b.loadCachedSrcFiles(a) { need &^= needCompiledGoFiles } // Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr). @@ -384,6 +386,13 @@ func (b *Builder) build(a *Action) (err error) { cached = true a.output = []byte{} // start saving output in case we miss any cache results } + + // Source files might be cached, even if the full action is not + // (e.g., go list -compiled -find). + if !cached && need&needCompiledGoFiles != 0 && b.loadCachedSrcFiles(a) { + need &^= needCompiledGoFiles + } + if need == 0 { return nil } @@ -432,10 +441,6 @@ func (b *Builder) build(a *Action) (err error) { return fmt.Errorf("missing or invalid binary-only package; expected file %q", a.Package.Target) } - if p.Module != nil && !allowedVersion(p.Module.GoVersion) { - return fmt.Errorf("module requires Go %s", p.Module.GoVersion) - } - if err := b.Mkdir(a.Objdir); err != nil { return err } @@ -577,7 +582,13 @@ func (b *Builder) build(a *Action) (err error) { b.cacheCgoHdr(a) } } - b.cacheGofiles(a, gofiles) + + var srcfiles []string // .go and non-.go + srcfiles = append(srcfiles, gofiles...) + srcfiles = append(srcfiles, sfiles...) + srcfiles = append(srcfiles, cfiles...) + srcfiles = append(srcfiles, cxxfiles...) + b.cacheSrcFiles(a, srcfiles) // Running cgo generated the cgo header. need &^= needCgoHdr @@ -589,11 +600,11 @@ func (b *Builder) build(a *Action) (err error) { // Prepare Go vet config if needed. 
if need&needVet != 0 { - buildVetConfig(a, gofiles) + buildVetConfig(a, srcfiles) need &^= needVet } if need&needCompiledGoFiles != 0 { - if !b.loadCachedGoFiles(a) { + if !b.loadCachedSrcFiles(a) { return fmt.Errorf("failed to cache compiled Go files") } need &^= needCompiledGoFiles @@ -603,6 +614,12 @@ func (b *Builder) build(a *Action) (err error) { return nil } + // Collect symbol ABI requirements from assembly. + symabis, err := BuildToolchain.symabis(b, a, sfiles) + if err != nil { + return err + } + // Prepare Go import config. // We start it off with a comment so it can't be empty, so icfg.Bytes() below is never nil. // It should never be empty anyway, but there have been bugs in the past that resulted @@ -634,14 +651,21 @@ func (b *Builder) build(a *Action) (err error) { // Compile Go. objpkg := objdir + "_pkg_.a" - ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), len(sfiles) > 0, gofiles) + ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), symabis, len(sfiles) > 0, gofiles) if len(out) > 0 { - b.showOutput(a, a.Package.Dir, a.Package.Desc(), b.processOutput(out)) + output := b.processOutput(out) + if p.Module != nil && !allowedVersion(p.Module.GoVersion) { + output += "note: module requires Go " + p.Module.GoVersion + } + b.showOutput(a, a.Package.Dir, a.Package.Desc(), output) if err != nil { return errPrintedOutput } } if err != nil { + if p.Module != nil && !allowedVersion(p.Module.GoVersion) { + b.showOutput(a, a.Package.Dir, a.Package.Desc(), "note: module requires Go "+p.Module.GoVersion) + } return err } if ofile != objpkg { @@ -697,8 +721,8 @@ func (b *Builder) build(a *Action) (err error) { // This is read by readGccgoArchive in cmd/internal/buildid/buildid.go. 
if a.buildID != "" && cfg.BuildToolchainName == "gccgo" { switch cfg.Goos { - case "android", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - asmfile, err := b.gccgoBuildIDELFFile(a) + case "aix", "android", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + asmfile, err := b.gccgoBuildIDFile(a) if err != nil { return err } @@ -783,13 +807,13 @@ func (b *Builder) loadCachedCgoHdr(a *Action) bool { return err == nil } -func (b *Builder) cacheGofiles(a *Action, gofiles []string) { +func (b *Builder) cacheSrcFiles(a *Action, srcfiles []string) { c := cache.Default() if c == nil { return } var buf bytes.Buffer - for _, file := range gofiles { + for _, file := range srcfiles { if !strings.HasPrefix(file, a.Objdir) { // not generated buf.WriteString("./") @@ -804,7 +828,7 @@ func (b *Builder) cacheGofiles(a *Action, gofiles []string) { return } } - c.PutBytes(cache.Subkey(a.actionID, "gofiles"), buf.Bytes()) + c.PutBytes(cache.Subkey(a.actionID, "srcfiles"), buf.Bytes()) } func (b *Builder) loadCachedVet(a *Action) bool { @@ -812,34 +836,34 @@ func (b *Builder) loadCachedVet(a *Action) bool { if c == nil { return false } - list, _, err := c.GetBytes(cache.Subkey(a.actionID, "gofiles")) + list, _, err := c.GetBytes(cache.Subkey(a.actionID, "srcfiles")) if err != nil { return false } - var gofiles []string + var srcfiles []string for _, name := range strings.Split(string(list), "\n") { if name == "" { // end of list continue } if strings.HasPrefix(name, "./") { - gofiles = append(gofiles, name[2:]) + srcfiles = append(srcfiles, name[2:]) continue } if err := b.loadCachedObjdirFile(a, c, name); err != nil { return false } - gofiles = append(gofiles, a.Objdir+name) + srcfiles = append(srcfiles, a.Objdir+name) } - buildVetConfig(a, gofiles) + buildVetConfig(a, srcfiles) return true } -func (b *Builder) loadCachedGoFiles(a *Action) bool { +func (b *Builder) loadCachedSrcFiles(a *Action) bool { c := cache.Default() if c == nil { return false } 
- list, _, err := c.GetBytes(cache.Subkey(a.actionID, "gofiles")) + list, _, err := c.GetBytes(cache.Subkey(a.actionID, "srcfiles")) if err != nil { return false } @@ -864,10 +888,12 @@ func (b *Builder) loadCachedGoFiles(a *Action) bool { // vetConfig is the configuration passed to vet describing a single package. type vetConfig struct { + ID string // package ID (example: "fmt [fmt.test]") Compiler string // compiler name (gc, gccgo) Dir string // directory containing package ImportPath string // canonical import path ("package path") GoFiles []string // absolute paths to package source files + NonGoFiles []string // absolute paths to package non-Go files ImportMap map[string]string // map import path in source code to package path PackageFile map[string]string // map package path to .a file with export data @@ -879,15 +905,28 @@ type vetConfig struct { SucceedOnTypecheckFailure bool // awful hack; see #18395 and below } -func buildVetConfig(a *Action, gofiles []string) { +func buildVetConfig(a *Action, srcfiles []string) { + // Classify files based on .go extension. + // srcfiles does not include raw cgo files. + var gofiles, nongofiles []string + for _, name := range srcfiles { + if strings.HasSuffix(name, ".go") { + gofiles = append(gofiles, name) + } else { + nongofiles = append(nongofiles, name) + } + } + // Pass list of absolute paths to vet, // so that vet's error messages will use absolute paths, // so that we can reformat them relative to the directory // in which the go command is invoked. vcfg := &vetConfig{ + ID: a.Package.ImportPath, Compiler: cfg.BuildToolchainName, Dir: a.Package.Dir, GoFiles: mkAbsFiles(a.Package.Dir, gofiles), + NonGoFiles: mkAbsFiles(a.Package.Dir, nongofiles), ImportPath: a.Package.ImportPath, ImportMap: make(map[string]string), PackageFile: make(map[string]string), @@ -984,6 +1023,8 @@ func (b *Builder) vet(a *Action) error { } } + // TODO(adonovan): delete this when we use the new vet printf checker. 
+ // https://github.com/golang/go/issues/28756 if vcfg.ImportMap["fmt"] == "" { a1 := a.Deps[1] vcfg.ImportMap["fmt"] = "fmt" @@ -1607,6 +1648,25 @@ func (b *Builder) writeFile(file string, text []byte) error { return ioutil.WriteFile(file, text, 0666) } +// appendFile appends the text to file. +func (b *Builder) appendFile(file string, text []byte) error { + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "cat >>%s << 'EOF' # internal\n%sEOF", file, text) + } + if cfg.BuildN { + return nil + } + f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return err + } + defer f.Close() + if _, err = f.Write(text); err != nil { + return err + } + return f.Close() +} + // Install the cgo export header file, if there is one. func (b *Builder) installHeader(a *Action) error { src := a.Objdir + "_cgo_install.h" @@ -1646,6 +1706,7 @@ func (b *Builder) cover(a *Action, dst, src string, varName string) error { var objectMagic = [][]byte{ {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive + {'<', 'b', 'i', 'g', 'a', 'f', '>', '\n'}, // Package AIX big archive {'\x7F', 'E', 'L', 'F'}, // ELF {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit @@ -1656,6 +1717,8 @@ var objectMagic = [][]byte{ {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm {0x00, 0x61, 0x73, 0x6D}, // WASM + {0x01, 0xDF}, // XCOFF 32bit + {0x01, 0xF7}, // XCOFF 64bit } func isObject(s string) bool { @@ -1703,14 +1766,14 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string if dir[len(dir)-1] == filepath.Separator { dot += string(filepath.Separator) } - cmd = strings.Replace(" "+cmd, " "+dir, dot, -1)[1:] + cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:] if b.scriptDir != dir { b.scriptDir = dir cmd = "cd " + dir + "\n" + cmd } } if b.WorkDir != "" { - cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1) + cmd = strings.ReplaceAll(cmd, b.WorkDir, 
"$WORK") } return cmd } @@ -1752,10 +1815,10 @@ func (b *Builder) showOutput(a *Action, dir, desc, out string) { prefix := "# " + desc suffix := "\n" + out if reldir := base.ShortPath(dir); reldir != dir { - suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1) - suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1) + suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir) + suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir) } - suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1) + suffix = strings.ReplaceAll(suffix, " "+b.WorkDir, " $WORK") if a != nil && a.output != nil { a.output = append(a.output, prefix...) @@ -1959,13 +2022,18 @@ func mkAbs(dir, f string) string { type toolchain interface { // gc runs the compiler in a specific directory on a set of files // and returns the name of the generated output file. - gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) + // + // TODO: This argument list is long. Consider putting it in a struct. + gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) // cc runs the toolchain's C compiler in a directory on a C file // to produce an output file. cc(b *Builder, a *Action, ofile, cfile string) error // asm runs the assembler in a specific directory on specific files // and returns a list of named output files. asm(b *Builder, a *Action, sfiles []string) ([]string, error) + // symabis scans the symbol ABIs from sfiles and returns the + // path to the output symbol ABIs file, or "" if none. + symabis(b *Builder, a *Action, sfiles []string) (string, error) // pack runs the archive packer in a specific directory to create // an archive from a set of object files. // typically it is run in the object directory. 
@@ -1996,7 +2064,7 @@ func (noToolchain) linker() string { return "" } -func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) { +func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) { return "", nil, noCompiler() } @@ -2004,6 +2072,10 @@ func (noToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) return nil, noCompiler() } +func (noToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + return "", noCompiler() +} + func (noToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { return noCompiler() } @@ -2075,14 +2147,37 @@ func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []s } // gccld runs the gcc linker to create an executable from a set of object files. -func (b *Builder) gccld(p *load.Package, objdir, out string, flags []string, objs []string) error { +func (b *Builder) gccld(p *load.Package, objdir, outfile string, flags []string, objs []string) error { var cmd []string if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 { cmd = b.GxxCmd(p.Dir, objdir) } else { cmd = b.GccCmd(p.Dir, objdir) } - return b.run(nil, p.Dir, p.ImportPath, b.cCompilerEnv(), cmd, "-o", out, objs, flags) + + cmdargs := []interface{}{cmd, "-o", outfile, objs, flags} + dir := p.Dir + out, err := b.runOut(dir, b.cCompilerEnv(), cmdargs...) + if len(out) > 0 { + // Filter out useless linker warnings caused by bugs outside Go. + // See also cmd/link/internal/ld's hostlink method. 
+ var save [][]byte + for _, line := range bytes.SplitAfter(out, []byte("\n")) { + // golang.org/issue/26073 - Apple Xcode bug + if bytes.Contains(line, []byte("ld: warning: text-based stub file")) { + continue + } + save = append(save, line) + } + out = bytes.Join(save, nil) + if len(out) > 0 { + b.showOutput(nil, dir, p.ImportPath, b.processOutput(out)) + if err != nil { + err = errPrintedOutput + } + } + } + return err } // Grab these before main helpfully overwrites them. @@ -2269,6 +2364,10 @@ func (b *Builder) gccArchArgs() []string { return []string{"-mabi=64"} case "mips", "mipsle": return []string{"-mabi=32", "-march=mips32"} + case "ppc64": + if cfg.Goos == "aix" { + return []string{"-maix64"} + } } return nil } @@ -2660,7 +2759,7 @@ func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { p := load.GoFilesPackage(srcs) - if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, false, srcs); e != nil { + if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, "", false, srcs); e != nil { return "32", nil } return "64", nil @@ -2858,7 +2957,7 @@ func useResponseFile(path string, argLen int) bool { } // On the Go build system, use response files about 10% of the - // time, just to excercise this codepath. + // time, just to exercise this codepath. 
isBuilder := os.Getenv("GO_BUILDER_NAME") != "" if isBuilder && rand.Intn(10) == 0 { return true diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index 6e5333ccbc40d..3d09f69fcc3e4 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -36,7 +36,7 @@ func (gcToolchain) linker() string { return base.Tool("link") } -func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { +func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { p := a.Package objdir := a.Objdir if archive != "" { @@ -53,6 +53,9 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, a pkgpath = "main" } gcargs := []string{"-p", pkgpath} + if p.Module != nil && p.Module.GoVersion != "" && allowedVersion(p.Module.GoVersion) { + gcargs = append(gcargs, "-lang=go"+p.Module.GoVersion) + } if p.Standard { gcargs = append(gcargs, "-std") } @@ -95,6 +98,9 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, a if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") { gcargs = append(gcargs, "-goversion", runtimeVersion) } + if symabis != "" { + gcargs = append(gcargs, "-symabis", symabis) + } gcflags := str.StringList(forcedGcflags, p.Internal.Gcflags) if compilingRuntime { @@ -168,7 +174,7 @@ CheckFlags: } // TODO: Test and delete these conditions. 
- if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 || objabi.Clobberdead_enabled != 0 { + if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 { canDashC = false } @@ -215,8 +221,7 @@ func trimDir(dir string) string { return dir } -func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { - p := a.Package +func asmArgs(a *Action, p *load.Package) []interface{} { // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. inc := filepath.Join(cfg.GOROOT, "pkg", "include") args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-trimpath", trimDir(a.Objdir), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags} @@ -238,6 +243,13 @@ func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) args = append(args, "-D", "GOMIPS64_"+cfg.GOMIPS64) } + return args +} + +func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { + p := a.Package + args := asmArgs(a, p) + var ofiles []string for _, sfile := range sfiles { ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o" @@ -250,6 +262,88 @@ func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) return ofiles, nil } +func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + mkSymabis := func(p *load.Package, sfiles []string, path string) error { + args := asmArgs(a, p) + args = append(args, "-gensymabis", "-o", path) + for _, sfile := range sfiles { + if p.ImportPath == "runtime/cgo" && strings.HasPrefix(sfile, "gcc_") { + continue + } + args = append(args, mkAbs(p.Dir, sfile)) + } + + // Supply an empty go_asm.h as if the compiler had been run. + // -gensymabis parsing is lax enough that we don't need the + // actual definitions that would appear in go_asm.h. 
+ if err := b.writeFile(a.Objdir+"go_asm.h", nil); err != nil { + return err + } + + return b.run(a, p.Dir, p.ImportPath, nil, args...) + } + + var symabis string // Only set if we actually create the file + p := a.Package + if len(sfiles) != 0 { + symabis = a.Objdir + "symabis" + if err := mkSymabis(p, sfiles, symabis); err != nil { + return "", err + } + } + + // Gather known cross-package references from assembly code. + var otherPkgs []string + if p.ImportPath == "runtime" { + // Assembly in the following packages references + // symbols in runtime. + otherPkgs = []string{"syscall", "internal/syscall/unix", "runtime/cgo"} + } else if p.ImportPath == "runtime/internal/atomic" { + // sync/atomic is an assembly wrapper around + // runtime/internal/atomic. + otherPkgs = []string{"sync/atomic"} + } + for _, p2name := range otherPkgs { + p2 := load.LoadPackage(p2name, &load.ImportStack{}) + if len(p2.SFiles) == 0 { + continue + } + + symabis2 := a.Objdir + "symabis2" + if err := mkSymabis(p2, p2.SFiles, symabis2); err != nil { + return "", err + } + + // Filter out just the symbol refs and append them to + // the symabis file. + if cfg.BuildN { + // -x will print the lines from symabis2 that are actually appended + // to symabis. With -n, we don't know what those lines will be. + b.Showcmd("", `grep '^ref' <%s | grep -v '^ref\s*""\.' 
>>%s`, symabis2, a.Objdir+"symabis") + continue + } + abis2, err := ioutil.ReadFile(symabis2) + if err != nil { + return "", err + } + var refs bytes.Buffer + for _, line := range strings.Split(string(abis2), "\n") { + fs := strings.Fields(line) + if len(fs) >= 2 && fs[0] == "ref" && !strings.HasPrefix(fs[1], `"".`) { + fmt.Fprintf(&refs, "%s\n", line) + } + } + if refs.Len() != 0 { + symabis = a.Objdir + "symabis" + if err := b.appendFile(symabis, refs.Bytes()); err != nil { + return "", err + } + } + } + + return symabis, nil +} + // toolVerify checks that the command line args writes the same output file // if run using newTool instead. // Unused now but kept around for future use. diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go index 91daf529d4111..69a25bea62ff4 100644 --- a/src/cmd/go/internal/work/gccgo.go +++ b/src/cmd/go/internal/work/gccgo.go @@ -43,6 +43,14 @@ func (gccgoToolchain) linker() string { return GccgoBin } +func (gccgoToolchain) ar() string { + ar := os.Getenv("AR") + if ar == "" { + ar = "ar" + } + return ar +} + func checkGccgoBin() { if gccgoErr == nil { return @@ -51,7 +59,7 @@ func checkGccgoBin() { os.Exit(2) } -func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { +func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { p := a.Package objdir := a.Objdir out := "_go_.o" @@ -172,6 +180,10 @@ func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]strin return ofiles, nil } +func (gccgoToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { + return "", nil +} + func gccgoArchive(basedir, imp string) string { end := filepath.FromSlash(imp + ".a") afile := filepath.Join(basedir, end) @@ -179,14 +191,22 @@ func gccgoArchive(basedir, imp 
string) string { return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile)) } -func (gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { +func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { p := a.Package objdir := a.Objdir var absOfiles []string for _, f := range ofiles { absOfiles = append(absOfiles, mkAbs(objdir, f)) } - return b.run(a, p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objdir, afile), absOfiles) + var arArgs []string + if cfg.Goos == "aix" && cfg.Goarch == "ppc64" { + // AIX puts both 32-bit and 64-bit objects in the same archive. + // Tell the AIX "ar" command to only care about 64-bit objects. + // AIX "ar" command does not know D option. + arArgs = []string{"-X64"} + } + + return b.run(a, p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", mkAbs(objdir, afile), absOfiles) } func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error { @@ -245,11 +265,11 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string return "", nil } } - err := b.run(root, root.Objdir, desc, nil, "ar", "x", newArchive, "_cgo_flags") + err := b.run(root, root.Objdir, desc, nil, tools.ar(), "x", newArchive, "_cgo_flags") if err != nil { return "", err } - err = b.run(root, ".", desc, nil, "ar", "d", newArchive, "_cgo_flags") + err = b.run(root, ".", desc, nil, tools.ar(), "d", newArchive, "_cgo_flags") if err != nil { return "", err } @@ -342,17 +362,24 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string } } - ldflags = append(ldflags, "-Wl,--whole-archive") + wholeArchive := []string{"-Wl,--whole-archive"} + noWholeArchive := []string{"-Wl,--no-whole-archive"} + if cfg.Goos == "aix" { + wholeArchive = nil + noWholeArchive = nil + } + ldflags = append(ldflags, wholeArchive...) ldflags = append(ldflags, afiles...) 
- ldflags = append(ldflags, "-Wl,--no-whole-archive") + ldflags = append(ldflags, noWholeArchive...) ldflags = append(ldflags, cgoldflags...) ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...) if root.Package != nil { ldflags = append(ldflags, root.Package.CgoLDFLAGS...) } - - ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") + if cfg.Goos != "aix" { + ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") + } if root.buildID != "" { // On systems that normally use gold or the GNU linker, @@ -363,11 +390,17 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string } } + var rLibPath string + if cfg.Goos == "aix" { + rLibPath = "-Wl,-blibpath=" + } else { + rLibPath = "-Wl,-rpath=" + } for _, shlib := range shlibs { ldflags = append( ldflags, "-L"+filepath.Dir(shlib), - "-Wl,-rpath="+filepath.Dir(shlib), + rLibPath+filepath.Dir(shlib), "-l"+strings.TrimSuffix( strings.TrimPrefix(filepath.Base(shlib), "lib"), ".so")) @@ -412,7 +445,10 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string case "c-shared": ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc") case "shared": - ldflags = append(ldflags, "-zdefs", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") + if cfg.Goos != "aix" { + ldflags = append(ldflags, "-zdefs") + } + ldflags = append(ldflags, "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") default: base.Fatalf("-buildmode=%s not supported for gccgo", buildmode) @@ -445,7 +481,7 @@ func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string switch buildmode { case "c-archive": - if err := b.run(root, ".", desc, nil, "ar", "rc", realOut, out); err != nil { + if err := b.run(root, ".", desc, nil, tools.ar(), "rc", realOut, out); err != nil { return err } } diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go index 
eb99815338757..693a53e9ab78d 100644 --- a/src/cmd/go/internal/work/init.go +++ b/src/cmd/go/internal/work/init.go @@ -10,6 +10,7 @@ import ( "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/load" + "cmd/internal/sys" "flag" "fmt" "os" @@ -42,18 +43,14 @@ func instrumentInit() { fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0]) os.Exit(2) } - if cfg.BuildMSan && (cfg.Goos != "linux" || cfg.Goarch != "amd64" && cfg.Goarch != "arm64") { + if cfg.BuildMSan && !sys.MSanSupported(cfg.Goos, cfg.Goarch) { fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch) os.Exit(2) } if cfg.BuildRace { - platform := cfg.Goos + "/" + cfg.Goarch - switch platform { - default: - fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, linux/ppc64le, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) + if !sys.RaceDetectorSupported(cfg.Goos, cfg.Goarch) { + fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) os.Exit(2) - case "linux/amd64", "linux/ppc64le", "freebsd/amd64", "netbsd/amd64", "darwin/amd64", "windows/amd64": - // race supported on these platforms } } mode := "race" @@ -85,19 +82,23 @@ func buildModeInit() { pkgsFilter = pkgsNotMain case "c-archive": pkgsFilter = oneMainPkg - switch platform { - case "darwin/arm", "darwin/arm64": - codegenArg = "-shared" - default: - switch cfg.Goos { - case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - if platform == "linux/ppc64" { - base.Fatalf("-buildmode=c-archive not supported on %s\n", platform) - } - // Use -shared so that the result is - // suitable for inclusion in a PIE or - // shared library. 
+ if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "darwin/arm", "darwin/arm64": codegenArg = "-shared" + default: + switch cfg.Goos { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + if platform == "linux/ppc64" { + base.Fatalf("-buildmode=c-archive not supported on %s\n", platform) + } + // Use -shared so that the result is + // suitable for inclusion in a PIE or + // shared library. + codegenArg = "-shared" + } } } cfg.ExeSuffix = ".a" @@ -132,6 +133,9 @@ func buildModeInit() { default: ldBuildmode = "exe" } + if gccgo { + codegenArg = "" + } case "exe": pkgsFilter = pkgsMain ldBuildmode = "exe" @@ -146,7 +150,7 @@ func buildModeInit() { base.Fatalf("-buildmode=pie not supported when -race is enabled") } if gccgo { - base.Fatalf("-buildmode=pie not supported by gccgo") + codegenArg = "-fPIE" } else { switch platform { case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go index d5d126123a4b0..1a401b8981ec2 100644 --- a/src/cmd/go/internal/work/security.go +++ b/src/cmd/go/internal/work/security.go @@ -89,7 +89,9 @@ var validCompilerFlags = []*regexp.Regexp{ re(`-m32`), re(`-m64`), re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`), + re(`-m(no-)?v?aes`), re(`-marm`), + re(`-m(no-)?avx[0-9a-z]*`), re(`-mfloat-abi=([^@\-].*)`), re(`-mfpmath=[0-9a-z,+]*`), re(`-m(no-)?avx[0-9a-z.]*`), @@ -100,6 +102,7 @@ var validCompilerFlags = []*regexp.Regexp{ re(`-miphoneos-version-min=(.+)`), re(`-mnop-fun-dllimport`), re(`-m(no-)?sse[0-9.]*`), + re(`-m(no-)?ssse3`), re(`-mthumb(-interwork)?`), re(`-mthreads`), re(`-mwindows`), @@ -170,6 +173,7 @@ var validLinkerFlags = []*regexp.Regexp{ re(`-Wl,-e[=,][a-zA-Z0-9]*`), re(`-Wl,--enable-new-dtags`), re(`-Wl,--end-group`), + re(`-Wl,--(no-)?export-dynamic`), re(`-Wl,-framework,[^,@\-][^,]+`), re(`-Wl,-headerpad_max_install_names`), re(`-Wl,--no-undefined`), diff 
--git a/src/cmd/go/main.go b/src/cmd/go/main.go index 0639b4d2ca09f..4f8ab7f55a9eb 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -93,9 +93,18 @@ func main() { *get.CmdGet = *modget.CmdGet } + if args[0] == "get" || args[0] == "help" { + // Replace get with module-aware get if appropriate. + // Note that if MustUseModules is true, this happened already above, + // but no harm in doing it again. + if modload.Init(); modload.Enabled() { + *get.CmdGet = *modget.CmdGet + } + } + cfg.CmdName = args[0] // for error messages if args[0] == "help" { - help.Help(args[1:]) + help.Help(os.Stdout, args[1:]) return } @@ -142,10 +151,10 @@ func main() { flag = flag[:i] } switch flag { - case "-sync": - fmt.Fprintf(os.Stderr, "go: go mod -sync is now go mod tidy\n") + case "-sync", "-fix": + fmt.Fprintf(os.Stderr, "go: go mod %s is now go mod tidy\n", flag) os.Exit(2) - case "-init", "-fix", "-graph", "-vendor", "-verify": + case "-init", "-graph", "-vendor", "-verify": fmt.Fprintf(os.Stderr, "go: go mod %s is now go mod %s\n", flag, flag[1:]) os.Exit(2) case "-fmt", "-json", "-module", "-require", "-droprequire", "-replace", "-dropreplace", "-exclude", "-dropexclude": @@ -161,15 +170,6 @@ func main() { os.Exit(2) } - if args[0] == "get" { - // Replace get with module-aware get if appropriate. - // Note that if MustUseModules is true, this happened already above, - // but no harm in doing it again. - if modload.Init(); modload.Enabled() { - *get.CmdGet = *modget.CmdGet - } - } - // Set environment (GOOS, GOARCH, etc) explicitly. // In theory all the commands we invoke should have // the same default computation of these as we do, @@ -199,7 +199,7 @@ BigCmdLoop: } if args[0] == "help" { // Accept 'go mod help' and 'go mod help foo' for 'go help mod' and 'go help mod foo'. 
- help.Help(append(strings.Split(cfg.CmdName, " "), args[1:]...)) + help.Help(os.Stdout, append(strings.Split(cfg.CmdName, " "), args[1:]...)) return } cfg.CmdName += " " + args[0] @@ -235,10 +235,6 @@ func init() { } func mainUsage() { - // special case "go test -h" - if len(os.Args) > 1 && os.Args[1] == "test" { - test.Usage() - } help.PrintUsage(os.Stderr, base.Go) os.Exit(2) } diff --git a/src/cmd/go/mkalldocs.sh b/src/cmd/go/mkalldocs.sh index 72886db1eac7e..f37d59d2d7431 100755 --- a/src/cmd/go/mkalldocs.sh +++ b/src/cmd/go/mkalldocs.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2012 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. @@ -6,6 +6,8 @@ set -e go build -o go.latest +# If the command used to generate alldocs.go changes, update TestDocsUpToDate in +# help_test.go. ./go.latest help documentation >alldocs.go gofmt -w alldocs.go rm go.latest diff --git a/src/cmd/go/proxy_test.go b/src/cmd/go/proxy_test.go index 212e5aa08f7a1..830cea029b7ac 100644 --- a/src/cmd/go/proxy_test.go +++ b/src/cmd/go/proxy_test.go @@ -78,7 +78,7 @@ func readModList() { if i < 0 { continue } - encPath := strings.Replace(name[:i], "_", "/", -1) + encPath := strings.ReplaceAll(name[:i], "_", "/") path, err := module.DecodePath(encPath) if err != nil { fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err) @@ -197,7 +197,13 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) { if strings.HasPrefix(f.Name, ".") { continue } - zf, err := z.Create(path + "@" + vers + "/" + f.Name) + var zipName string + if strings.HasPrefix(f.Name, "/") { + zipName = f.Name[1:] + } else { + zipName = path + "@" + vers + "/" + f.Name + } + zf, err := z.Create(zipName) if err != nil { return cached{nil, err} } @@ -256,7 +262,7 @@ func readArchive(path, vers string) *txtar.Archive { return nil } - prefix := strings.Replace(enc, "/", "_", -1) + prefix := strings.ReplaceAll(enc, "/", "_") name 
:= filepath.Join(cmdGoDir, "testdata/mod", prefix+"_"+encVers+".txt") a := archiveCache.Do(name, func() interface{} { a, err := txtar.ParseFile(name) diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go index 389485bc6582e..c56c1fd3e42a1 100644 --- a/src/cmd/go/script_test.go +++ b/src/cmd/go/script_test.go @@ -9,7 +9,9 @@ package main_test import ( "bytes" + "context" "fmt" + "go/build" "internal/testenv" "io/ioutil" "os" @@ -25,6 +27,7 @@ import ( "cmd/go/internal/imports" "cmd/go/internal/par" "cmd/go/internal/txtar" + "cmd/go/internal/work" ) // TestScript runs the tests in testdata/script/*.txt. @@ -55,21 +58,28 @@ func TestScript(t *testing.T) { // A testScript holds execution state for a single test script. type testScript struct { - t *testing.T - workdir string // temporary work dir ($WORK) - log bytes.Buffer // test execution log (printed at end of test) - mark int // offset of next log truncation - cd string // current directory during test execution; initially $WORK/gopath/src - name string // short name of test ("foo") - file string // full file name ("testdata/script/foo.txt") - lineno int // line number currently executing - line string // line currently executing - env []string // environment list (for os/exec) - envMap map[string]string // environment mapping (matches env) - stdout string // standard output from last 'go' command; for 'stdout' command - stderr string // standard error from last 'go' command; for 'stderr' command - stopped bool // test wants to stop early - start time.Time // time phase started + t *testing.T + workdir string // temporary work dir ($WORK) + log bytes.Buffer // test execution log (printed at end of test) + mark int // offset of next log truncation + cd string // current directory during test execution; initially $WORK/gopath/src + name string // short name of test ("foo") + file string // full file name ("testdata/script/foo.txt") + lineno int // line number currently executing + line string // line 
currently executing + env []string // environment list (for os/exec) + envMap map[string]string // environment mapping (matches env) + stdout string // standard output from last 'go' command; for 'stdout' command + stderr string // standard error from last 'go' command; for 'stderr' command + stopped bool // test wants to stop early + start time.Time // time phase started + background []backgroundCmd // backgrounded 'exec' and 'go' commands +} + +type backgroundCmd struct { + cmd *exec.Cmd + wait <-chan struct{} + neg bool // if true, cmd should fail } var extraEnvKeys = []string{ @@ -96,6 +106,7 @@ func (ts *testScript) setup() { "GOROOT=" + testGOROOT, tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"), "devnull=" + os.DevNull, + "goversion=" + goVersion(ts), ":=" + string(os.PathListSeparator), } @@ -122,6 +133,16 @@ func (ts *testScript) setup() { } } +// goVersion returns the current Go version. +func goVersion(ts *testScript) string { + tags := build.Default.ReleaseTags + version := tags[len(tags)-1] + if !regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`).MatchString(version) { + ts.fatalf("invalid go version %q", version) + } + return version[2:] +} + var execCache par.Cache // run runs the test script. @@ -146,6 +167,17 @@ func (ts *testScript) run() { } defer func() { + // On a normal exit from the test loop, background processes are cleaned up + // before we print PASS. If we return early (e.g., due to a test failure), + // don't print anything about the processes that were still running. + for _, bg := range ts.background { + interruptProcess(bg.cmd.Process) + } + for _, bg := range ts.background { + <-bg.wait + } + ts.background = nil + markTime() // Flush testScript log to testing.T log. 
ts.t.Log("\n" + ts.abbrev(ts.log.String())) @@ -243,12 +275,19 @@ Script: ok = testenv.HasExternalNetwork() case "link": ok = testenv.HasLink() + case "root": + ok = os.Geteuid() == 0 case "symlink": ok = testenv.HasSymlink() default: if strings.HasPrefix(cond, "exec:") { prog := cond[len("exec:"):] ok = execCache.Do(prog, func() interface{} { + if runtime.GOOS == "plan9" && prog == "git" { + // The Git command is usually not the real Git on Plan 9. + // See https://golang.org/issues/29640. + return false + } _, err := exec.LookPath(prog) return err == nil }).(bool) @@ -284,14 +323,23 @@ Script: // Command can ask script to stop early. if ts.stopped { - return + // Break instead of returning, so that we check the status of any + // background processes and print PASS. + break } } + for _, bg := range ts.background { + interruptProcess(bg.cmd.Process) + } + ts.cmdWait(false, nil) + // Final phase ended. rewind() markTime() - fmt.Fprintf(&ts.log, "PASS\n") + if !ts.stopped { + fmt.Fprintf(&ts.log, "PASS\n") + } } // scriptCmds are the script command implementations. @@ -301,8 +349,11 @@ Script: // var scriptCmds = map[string]func(*testScript, bool, []string){ "addcrlf": (*testScript).cmdAddcrlf, + "cc": (*testScript).cmdCc, "cd": (*testScript).cmdCd, + "chmod": (*testScript).cmdChmod, "cmp": (*testScript).cmdCmp, + "cmpenv": (*testScript).cmdCmpenv, "cp": (*testScript).cmdCp, "env": (*testScript).cmdEnv, "exec": (*testScript).cmdExec, @@ -317,6 +368,7 @@ var scriptCmds = map[string]func(*testScript, bool, []string){ "stdout": (*testScript).cmdStdout, "stop": (*testScript).cmdStop, "symlink": (*testScript).cmdSymlink, + "wait": (*testScript).cmdWait, } // addcrlf adds CRLF line endings to the named files. 
@@ -329,10 +381,21 @@ func (ts *testScript) cmdAddcrlf(neg bool, args []string) { file = ts.mkabs(file) data, err := ioutil.ReadFile(file) ts.check(err) - ts.check(ioutil.WriteFile(file, bytes.Replace(data, []byte("\n"), []byte("\r\n"), -1), 0666)) + ts.check(ioutil.WriteFile(file, bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n")), 0666)) } } +// cc runs the C compiler along with platform specific options. +func (ts *testScript) cmdCc(neg bool, args []string) { + if len(args) < 1 || (len(args) == 1 && args[0] == "&") { + ts.fatalf("usage: cc args... [&]") + } + + var b work.Builder + b.Init() + ts.cmdExec(neg, append(b.GccCmd(".", ""), args...)) +} + // cd changes to a different directory. func (ts *testScript) cmdCd(neg bool, args []string) { if neg { @@ -358,6 +421,24 @@ func (ts *testScript) cmdCd(neg bool, args []string) { fmt.Fprintf(&ts.log, "%s\n", ts.cd) } +// chmod changes permissions for a file or directory. +func (ts *testScript) cmdChmod(neg bool, args []string) { + if neg { + ts.fatalf("unsupported: ! chmod") + } + if len(args) < 2 { + ts.fatalf("usage: chmod perm paths...") + } + perm, err := strconv.ParseUint(args[0], 0, 32) + if err != nil || perm&uint64(os.ModePerm) != perm { + ts.fatalf("invalid mode: %s", args[0]) + } + for _, path := range args[1:] { + err := os.Chmod(path, os.FileMode(perm)) + ts.check(err) + } +} + // cmp compares two files. func (ts *testScript) cmdCmp(neg bool, args []string) { if neg { @@ -367,7 +448,21 @@ func (ts *testScript) cmdCmp(neg bool, args []string) { if len(args) != 2 { ts.fatalf("usage: cmp file1 file2") } + ts.doCmdCmp(args, false) +} +// cmpenv compares two files with environment variable substitution. +func (ts *testScript) cmdCmpenv(neg bool, args []string) { + if neg { + ts.fatalf("unsupported: ! 
cmpenv") + } + if len(args) != 2 { + ts.fatalf("usage: cmpenv file1 file2") + } + ts.doCmdCmp(args, true) +} + +func (ts *testScript) doCmdCmp(args []string, env bool) { name1, name2 := args[0], args[1] var text1, text2 string if name1 == "stdout" { @@ -384,6 +479,11 @@ func (ts *testScript) cmdCmp(neg bool, args []string) { ts.check(err) text2 = string(data) + if env { + text1 = ts.expand(text1) + text2 = ts.expand(text2) + } + if text1 == text2 { return } @@ -451,26 +551,43 @@ func (ts *testScript) cmdEnv(neg bool, args []string) { // exec runs the given command. func (ts *testScript) cmdExec(neg bool, args []string) { - if len(args) < 1 { - ts.fatalf("usage: exec program [args...]") + if len(args) < 1 || (len(args) == 1 && args[0] == "&") { + ts.fatalf("usage: exec program [args...] [&]") } + var err error - ts.stdout, ts.stderr, err = ts.exec(args[0], args[1:]...) - if ts.stdout != "" { - fmt.Fprintf(&ts.log, "[stdout]\n%s", ts.stdout) - } - if ts.stderr != "" { - fmt.Fprintf(&ts.log, "[stderr]\n%s", ts.stderr) + if len(args) > 0 && args[len(args)-1] == "&" { + var cmd *exec.Cmd + cmd, err = ts.execBackground(args[0], args[1:len(args)-1]...) + if err == nil { + wait := make(chan struct{}) + go func() { + ctxWait(testCtx, cmd) + close(wait) + }() + ts.background = append(ts.background, backgroundCmd{cmd, wait, neg}) + } + ts.stdout, ts.stderr = "", "" + } else { + ts.stdout, ts.stderr, err = ts.exec(args[0], args[1:]...) 
+ if ts.stdout != "" { + fmt.Fprintf(&ts.log, "[stdout]\n%s", ts.stdout) + } + if ts.stderr != "" { + fmt.Fprintf(&ts.log, "[stderr]\n%s", ts.stderr) + } + if err == nil && neg { + ts.fatalf("unexpected command success") + } } + if err != nil { fmt.Fprintf(&ts.log, "[%v]\n", err) - if !neg { + if testCtx.Err() != nil { + ts.fatalf("test timed out while running command") + } else if !neg { ts.fatalf("unexpected command failure") } - } else { - if neg { - ts.fatalf("unexpected command success") - } } } @@ -545,6 +662,14 @@ func (ts *testScript) cmdSkip(neg bool, args []string) { if neg { ts.fatalf("unsupported: ! skip") } + + // Before we mark the test as skipped, shut down any background processes and + // make sure they have returned the correct status. + for _, bg := range ts.background { + interruptProcess(bg.cmd.Process) + } + ts.cmdWait(false, nil) + if len(args) == 1 { ts.t.Skip(args[0]) } @@ -614,7 +739,7 @@ func scriptMatch(ts *testScript, neg bool, args []string, text, name string) { want = 2 } if len(args) != want { - ts.fatalf("usage: %s [-count=N] 'pattern' file%s", name, extraUsage) + ts.fatalf("usage: %s [-count=N] 'pattern'%s", name, extraUsage) } pattern := args[0] @@ -629,6 +754,9 @@ func scriptMatch(ts *testScript, neg bool, args []string, text, name string) { text = string(data) } + // Matching against workdir would be misleading. + text = strings.ReplaceAll(text, ts.workdir, "$WORK") + if neg { if re.MatchString(text) { if isGrep { @@ -684,11 +812,57 @@ func (ts *testScript) cmdSymlink(neg bool, args []string) { ts.check(os.Symlink(args[2], ts.mkabs(args[0]))) } +// wait waits for background commands to exit, setting stderr and stdout to their result. +func (ts *testScript) cmdWait(neg bool, args []string) { + if neg { + ts.fatalf("unsupported: ! 
wait") + } + if len(args) > 0 { + ts.fatalf("usage: wait") + } + + var stdouts, stderrs []string + for _, bg := range ts.background { + <-bg.wait + + args := append([]string{filepath.Base(bg.cmd.Args[0])}, bg.cmd.Args[1:]...) + fmt.Fprintf(&ts.log, "[background] %s: %v\n", strings.Join(args, " "), bg.cmd.ProcessState) + + cmdStdout := bg.cmd.Stdout.(*strings.Builder).String() + if cmdStdout != "" { + fmt.Fprintf(&ts.log, "[stdout]\n%s", cmdStdout) + stdouts = append(stdouts, cmdStdout) + } + + cmdStderr := bg.cmd.Stderr.(*strings.Builder).String() + if cmdStderr != "" { + fmt.Fprintf(&ts.log, "[stderr]\n%s", cmdStderr) + stderrs = append(stderrs, cmdStderr) + } + + if bg.cmd.ProcessState.Success() { + if bg.neg { + ts.fatalf("unexpected command success") + } + } else { + if testCtx.Err() != nil { + ts.fatalf("test timed out while running command") + } else if !bg.neg { + ts.fatalf("unexpected command failure") + } + } + } + + ts.stdout = strings.Join(stdouts, "") + ts.stderr = strings.Join(stderrs, "") + ts.background = nil +} + // Helpers for command implementations. // abbrev abbreviates the actual work directory in the string s to the literal string "$WORK". func (ts *testScript) abbrev(s string) string { - s = strings.Replace(s, ts.workdir, "$WORK", -1) + s = strings.ReplaceAll(s, ts.workdir, "$WORK") if *testWork { // Expose actual $WORK value in environment dump on first line of work script, // so that the user can find out what directory -testwork left behind. @@ -713,10 +887,51 @@ func (ts *testScript) exec(command string, args ...string) (stdout, stderr strin var stdoutBuf, stderrBuf strings.Builder cmd.Stdout = &stdoutBuf cmd.Stderr = &stderrBuf - err = cmd.Run() + if err = cmd.Start(); err == nil { + err = ctxWait(testCtx, cmd) + } return stdoutBuf.String(), stderrBuf.String(), err } +// execBackground starts the given command line (an actual subprocess, not simulated) +// in ts.cd with environment ts.env. 
+func (ts *testScript) execBackground(command string, args ...string) (*exec.Cmd, error) { + cmd := exec.Command(command, args...) + cmd.Dir = ts.cd + cmd.Env = append(ts.env, "PWD="+ts.cd) + var stdoutBuf, stderrBuf strings.Builder + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + return cmd, cmd.Start() +} + +// ctxWait is like cmd.Wait, but terminates cmd with os.Interrupt if ctx becomes done. +// +// This differs from exec.CommandContext in that it prefers os.Interrupt over os.Kill. +// (See https://golang.org/issue/21135.) +func ctxWait(ctx context.Context, cmd *exec.Cmd) error { + errc := make(chan error, 1) + go func() { errc <- cmd.Wait() }() + + select { + case err := <-errc: + return err + case <-ctx.Done(): + interruptProcess(cmd.Process) + return <-errc + } +} + +// interruptProcess sends os.Interrupt to p if supported, or os.Kill otherwise. +func interruptProcess(p *os.Process) { + if err := p.Signal(os.Interrupt); err != nil { + // Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on + // Windows; using it with os.Process.Signal will return an error.” + // Fall back to Kill instead. + p.Kill() + } +} + // expand applies environment variable expansion to the string s. func (ts *testScript) expand(s string) string { return os.Expand(s, func(key string) string { return ts.envMap[key] }) @@ -882,17 +1097,17 @@ var diffTests = []struct { func TestDiff(t *testing.T) { for _, tt := range diffTests { // Turn spaces into \n. - text1 := strings.Replace(tt.text1, " ", "\n", -1) + text1 := strings.ReplaceAll(tt.text1, " ", "\n") if text1 != "" { text1 += "\n" } - text2 := strings.Replace(tt.text2, " ", "\n", -1) + text2 := strings.ReplaceAll(tt.text2, " ", "\n") if text2 != "" { text2 += "\n" } out := diff(text1, text2) // Cut final \n, cut spaces, turn remaining \n into spaces. 
- out = strings.Replace(strings.Replace(strings.TrimSuffix(out, "\n"), " ", "", -1), "\n", " ", -1) + out = strings.ReplaceAll(strings.ReplaceAll(strings.TrimSuffix(out, "\n"), " ", ""), "\n", " ") if out != tt.diff { t.Errorf("diff(%q, %q) = %q, want %q", text1, text2, out, tt.diff) } diff --git a/src/cmd/go/testdata/addmod.go b/src/cmd/go/testdata/addmod.go index 19850af0f37b2..8bb6056a540ae 100644 --- a/src/cmd/go/testdata/addmod.go +++ b/src/cmd/go/testdata/addmod.go @@ -142,7 +142,7 @@ func main() { } data := txtar.Format(a) - target := filepath.Join("mod", strings.Replace(path, "/", "_", -1)+"_"+vers+".txt") + target := filepath.Join("mod", strings.ReplaceAll(path, "/", "_")+"_"+vers+".txt") if err := ioutil.WriteFile(target, data, 0666); err != nil { log.Printf("%s: %v", arg, err) exitCode = 1 diff --git a/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt new file mode 100644 index 0000000000000..7d9d1303a9043 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt @@ -0,0 +1,13 @@ +example.com/invalidpath/v1 v1.0.0 +written by hand + +-- .mod -- +module example.com/invalidpath/v1 +-- .info -- +{"Version":"v1.0.0"} +-- go.mod -- +module example.com/invalidpath/v1 +-- version.go -- +package version + +const V = "v1.0.0" diff --git a/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt b/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt new file mode 100644 index 0000000000000..bae8b13d470e2 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt @@ -0,0 +1,27 @@ +example.com/printversion v0.1.0 + +-- .mod -- +module example.com/printversion +-- .info -- +{"Version":"v0.1.0"} +-- README.txt -- +There is no go.mod file for this version of the module. 
+-- printversion.go -- +package main + +import ( + "fmt" + "os" + "runtime/debug" + + _ "example.com/version" +) + +func main() { + info, _ := debug.ReadBuildInfo() + fmt.Fprintf(os.Stdout, "path is %s\n", info.Path) + fmt.Fprintf(os.Stdout, "main is %s %s\n", info.Main.Path, info.Main.Version) + for _, m := range info.Deps { + fmt.Fprintf(os.Stdout, "using %s %s\n", m.Path, m.Version) + } +} diff --git a/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt new file mode 100644 index 0000000000000..246741821aff1 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt @@ -0,0 +1,35 @@ +example.com/printversion v1.0.0 + +-- .mod -- +module example.com/printversion + +require example.com/version v1.0.0 +replace example.com/version v1.0.0 => ../oops v0.0.0 +exclude example.com/version v1.1.0 +-- .info -- +{"Version":"v1.0.0"} +-- go.mod -- +module example.com/printversion + +require example.com/version v1.0.0 +replace example.com/version v1.0.0 => ../oops v0.0.0 +exclude example.com/version v1.0.1 +-- printversion.go -- +package main + +import ( + "fmt" + "os" + "runtime/debug" + + _ "example.com/version" +) + +func main() { + info, _ := debug.ReadBuildInfo() + fmt.Fprintf(os.Stdout, "path is %s\n", info.Path) + fmt.Fprintf(os.Stdout, "main is %s %s\n", info.Main.Path, info.Main.Version) + for _, m := range info.Deps { + fmt.Fprintf(os.Stdout, "using %s %s\n", m.Path, m.Version) + } +} diff --git a/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt b/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt new file mode 100644 index 0000000000000..d8c45b527e9d5 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt @@ -0,0 +1,11 @@ +example.com/version v1.0.0 +written by hand + +-- .mod -- +module example.com/version +-- .info -- +{"Version":"v1.0.0"} +-- version.go -- +package version + +const V = "v1.0.0" diff --git 
a/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt b/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt new file mode 100644 index 0000000000000..3bfdb0e4cdcc2 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt @@ -0,0 +1,11 @@ +example.com/version v1.0.1 +written by hand + +-- .mod -- +module example.com/version +-- .info -- +{"Version":"v1.0.1"} +-- version.go -- +package version + +const V = "v1.0.1" diff --git a/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt b/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt new file mode 100644 index 0000000000000..8109a9acc9e53 --- /dev/null +++ b/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt @@ -0,0 +1,11 @@ +example.com/version v1.1.0 +written by hand + +-- .mod -- +module example.com/version +-- .info -- +{"Version":"v1.1.0"} +-- version.go -- +package version + +const V = "v1.1.0" diff --git a/src/cmd/go/testdata/mod/research.swtch.com_vgo-tour_v1.0.0.txt b/src/cmd/go/testdata/mod/research.swtch.com_vgo-tour_v1.0.0.txt deleted file mode 100644 index 0f060dc8e32fe..0000000000000 --- a/src/cmd/go/testdata/mod/research.swtch.com_vgo-tour_v1.0.0.txt +++ /dev/null @@ -1,23 +0,0 @@ -research.swtch.com/vgo-tour@v1.0.0 - --- .mod -- -module "research.swtch.com/vgo-tour" --- .info -- -{"Version":"v1.0.0","Name":"84de74b35823c1e49634f2262f1a58cfc951ebae","Short":"84de74b35823","Time":"2018-02-20T00:04:00Z"} --- go.mod -- -module "research.swtch.com/vgo-tour" --- hello.go -- -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package main - -import ( - "fmt" - "rsc.io/quote" -) - -func main() { - fmt.Println(quote.Hello()) -} diff --git a/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt b/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt new file mode 100644 index 0000000000000..07a38fa6d7d74 --- /dev/null +++ b/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt @@ -0,0 +1,11 @@ +rsc.io/badzip v1.0.0 +written by hand + +-- .mod -- +module rsc.io/badzip +-- .info -- +{"Version":"v1.0.0"} +-- x.go -- +package x +-- /rsc.io/badzip@v1.0.0.txt -- +This file should not be here. diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README index a80233b8c3830..a7b50fff16454 100644 --- a/src/cmd/go/testdata/script/README +++ b/src/cmd/go/testdata/script/README @@ -36,6 +36,7 @@ Scripts also have access to these other environment variables: PATH= TMPDIR=$WORK/tmp devnull= + goversion= The environment variable $exe (lowercase) is an empty string on most systems, ".exe" on Windows. @@ -74,6 +75,7 @@ should only run when the condition is satisfied. The available conditions are: - [cgo], [msan], [race] for whether cgo, msan, and the race detector can be used - [net] for whether the external network can be used - [link] for testenv.HasLink() + - [root] for os.Geteuid() == 0 - [symlink] for testenv.HasSymlink() - [exec:prog] for whether prog is available for execution (found by exec.LookPath) @@ -82,9 +84,17 @@ when testing.Short() is false. The commands are: +- [!] cc args... [&] + Run the C compiler, the platform specific flags (i.e. `go env GOGCCFLAGS`) will be + added automatically before args. + - cd dir Change to the given directory for future commands. +- chmod perm path... + Change the permissions of the files or directories named by the path arguments + to be equal to perm. Only numerical permissions are supported. + - cmp file1 file2 Check that the named files have the same content. By convention, file1 is the actual data and file2 the expected data. 
@@ -92,6 +102,10 @@ The commands are: from the most recent exec or go command. (If the files have differing content, the failure prints a diff.) +- cmpenv file1 file2 + Like cmp, but environment variables are substituted in the file contents + before the comparison. For example, $GOOS is replaced by the target GOOS. + - cp src... dst Copy the listed files to the target file or existing directory. @@ -99,16 +113,23 @@ The commands are: With no arguments, print the environment (useful for debugging). Otherwise add the listed key=value pairs to the environment. -- [!] exec program [args...] +- [!] exec program [args...] [&] Run the given executable program with the arguments. It must (or must not) succeed. Note that 'exec' does not terminate the script (unlike in Unix shells). + If the last token is '&', the program executes in the background. The standard + output and standard error of the previous command is cleared, but the output + of the background process is buffered — and checking of its exit status is + delayed — until the next call to 'wait', 'skip', or 'stop' or the end of the + test. At the end of the test, any remaining background processes are + terminated using os.Interrupt (if supported) or os.Kill. + - [!] exists [-readonly] file... Each of the listed files or directories must (or must not) exist. If -readonly is given, the files or directories must be unwritable. -- [!] go args... +- [!] go args... [&] Run the (test copy of the) go command with the given arguments. It must (or must not) succeed. @@ -131,11 +152,11 @@ The commands are: - [!] stderr [-count=N] pattern Apply the grep command (see above) to the standard error - from the most recent exec or go command. + from the most recent exec, go, or wait command. - [!] stdout [-count=N] pattern Apply the grep command (see above) to the standard output - from the most recent exec or go command. + from the most recent exec, go, or wait command. 
- stop [message] Stop the test early (marking it as passing), including the message if given. @@ -143,6 +164,13 @@ The commands are: - symlink file -> target Create file as a symlink to target. The -> (like in ls -l output) is required. +- wait + Wait for all 'exec' and 'go' commands started in the background (with the '&' + token) to exit, and display success or failure status for them. + After a call to wait, the 'stderr' and 'stdout' commands will apply to the + concatenation of the corresponding streams of the background commands, + in the order in which those commands were started. + When TestScript runs a script and the script fails, by default TestScript shows the execution of the most recent phase of the script (since the last # comment) and only shows the # comments for earlier phases. For example, here is a diff --git a/src/cmd/go/testdata/script/build_GOTMPDIR.txt b/src/cmd/go/testdata/script/build_GOTMPDIR.txt index 4c387afbbabd7..ea06dcc472def 100644 --- a/src/cmd/go/testdata/script/build_GOTMPDIR.txt +++ b/src/cmd/go/testdata/script/build_GOTMPDIR.txt @@ -1,6 +1,8 @@ +# Set GOCACHE to a clean directory to ensure that 'go build' has work to report. +env GOCACHE=$WORK/gocache + # Build should use GOTMPDIR if set. env GOTMPDIR=$WORK/my-favorite-tmpdir -env GOCACHE=off mkdir $GOTMPDIR go build -work hello.go stderr ^WORK=.*my-favorite-tmpdir @@ -8,4 +10,3 @@ stderr ^WORK=.*my-favorite-tmpdir -- hello.go -- package main func main() { println("hello") } - diff --git a/src/cmd/go/testdata/script/build_cache_gomips.txt b/src/cmd/go/testdata/script/build_cache_gomips.txt new file mode 100644 index 0000000000000..c77acc3f2f32d --- /dev/null +++ b/src/cmd/go/testdata/script/build_cache_gomips.txt @@ -0,0 +1,37 @@ +# Set up fresh GOCACHE. +env GOCACHE=$WORK/gocache +mkdir $GOCACHE + +# Building for mipsle without setting GOMIPS will use floating point registers. 
+env GOARCH=mipsle +env GOOS=linux +go build -gcflags=-S f.go +stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+ + +# Clean cache +go clean -cache + +# Building with GOMIPS=softfloat will not use floating point registers +env GOMIPS=softfloat +go build -gcflags=-S f.go +! stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+ + +# Clean cache +go clean -cache + +# Build without setting GOMIPS +env GOMIPS= +go build -gcflags=-S f.go +stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+ + +# Building with GOMIPS should still not use floating point registers. +env GOMIPS=softfloat +go build -gcflags=-S f.go +! stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+ + +-- f.go -- +package f + +func F(x float64) float64 { + return x + x +} diff --git a/src/cmd/go/testdata/script/build_nocache.txt b/src/cmd/go/testdata/script/build_nocache.txt new file mode 100644 index 0000000000000..5aa46e0b77378 --- /dev/null +++ b/src/cmd/go/testdata/script/build_nocache.txt @@ -0,0 +1,33 @@ +# As of Go 1.12, the module cache is required. + +# If none of the variables we use to locate GOCACHE are set, the cache is off +# and we cannot build. +env GOCACHE= +env XDG_CACHE_HOME= +env HOME= +[plan9] env home= +[windows] env LocalAppData= +! go build -o triv triv.go +stderr 'build cache is required, but could not be located: GOCACHE is not defined and .*' + +# An explicit GOCACHE=off also disables builds. +env GOCACHE=off +! go build -o triv triv.go +stderr 'build cache is disabled by GOCACHE=off' + +# If GOCACHE is set to an unwritable directory, we should diagnose it as such. +[windows] stop # Does not support unwritable directories. +[root] skip # Can write to unwritable directories. + +mkdir $WORK/unwritable/home +chmod 0555 $WORK/unwritable/home +[!plan9] env HOME=$WORK/unwritable/home +[plan9] env home=$WORK/unwritable/home + +env GOCACHE=$WORK/unwritable/home +! 
go build -o triv triv.go +stderr 'failed to initialize build cache.* permission denied' + +-- triv.go -- +package main +func main() {} diff --git a/src/cmd/go/testdata/script/build_relative_pkgdir.txt b/src/cmd/go/testdata/script/build_relative_pkgdir.txt new file mode 100644 index 0000000000000..76098a0662ba9 --- /dev/null +++ b/src/cmd/go/testdata/script/build_relative_pkgdir.txt @@ -0,0 +1,7 @@ +# Regression test for golang.org/issue/21309: accept relative -pkgdir argument. + +[short] skip + +mkdir $WORK/gocache +env GOCACHE=$WORK/gocache +go build -i -pkgdir=. runtime diff --git a/src/cmd/go/testdata/script/build_relative_tmpdir.txt b/src/cmd/go/testdata/script/build_relative_tmpdir.txt new file mode 100644 index 0000000000000..9490a285d34fc --- /dev/null +++ b/src/cmd/go/testdata/script/build_relative_tmpdir.txt @@ -0,0 +1,16 @@ +# If GOTMPDIR is relative, 'go build' should derive an absolute $WORK directory. +cd $WORK +mkdir tmp +env GOTMPDIR=tmp +go build -work a +stderr 'WORK=\$WORK' # the test script itself converts the absolute directory back to $WORK + +# Similarly if TMP/TMPDIR is relative. +env GOTMPDIR= +env TMP=tmp # Windows +env TMPDIR=tmp # Unix +go build -work a +stderr 'WORK=\$WORK' + +-- a/a.go -- +package a diff --git a/src/cmd/go/testdata/script/build_runtime_gcflags.txt b/src/cmd/go/testdata/script/build_runtime_gcflags.txt new file mode 100644 index 0000000000000..767b768b82f87 --- /dev/null +++ b/src/cmd/go/testdata/script/build_runtime_gcflags.txt @@ -0,0 +1,8 @@ +# Set up fresh GOCACHE. +env GOCACHE=$WORK/gocache +mkdir $GOCACHE + +# Verify the standard library (specifically runtime/internal/atomic) can be +# built with -gcflags when -n is given. See golang.org/issue/29346. 
+go build -n -gcflags=all='-l' std +stderr 'compile.* -l .* runtime/internal/atomic' diff --git a/src/cmd/go/testdata/script/cache_unix.txt b/src/cmd/go/testdata/script/cache_unix.txt new file mode 100644 index 0000000000000..f700ebe3ed2cf --- /dev/null +++ b/src/cmd/go/testdata/script/cache_unix.txt @@ -0,0 +1,34 @@ +# Integration test for cache directory calculation (cmd/go/internal/cache). + +[windows] skip +[darwin] skip +[plan9] skip + +mkdir $WORK/gocache +mkdir $WORK/xdg +mkdir $WORK/home + +# Set GOCACHE, XDG_CACHE_HOME, and HOME. +env GOCACHE=$WORK/gocache +env XDG_CACHE_HOME=$WORK/xdg +env HOME=$WORK/home + +# With all three set, we should prefer GOCACHE. +go env GOCACHE +stdout '\$WORK/gocache$' + +# Without GOCACHE, we should prefer XDG_CACHE_HOME over HOME. +env GOCACHE= +go env GOCACHE +stdout '\$WORK/xdg/go-build$$' + +# With only HOME set, we should use $HOME/.cache. +env XDG_CACHE_HOME= +go env GOCACHE +stdout '\$WORK/home/.cache/go-build$' + +# With no guidance from the environment, we must disable the cache, but that +# should not cause commands that do not write to the cache to fail. +env HOME= +go env GOCACHE +stdout 'off' diff --git a/src/cmd/go/testdata/script/cgo_syso_issue29253.txt b/src/cmd/go/testdata/script/cgo_syso_issue29253.txt new file mode 100644 index 0000000000000..0d18fa91d6b7f --- /dev/null +++ b/src/cmd/go/testdata/script/cgo_syso_issue29253.txt @@ -0,0 +1,28 @@ +# This test tests that we can link in-package syso files that provides symbols +# for cgo. See issue 29253. 
+[!cgo] stop +[!gc] stop +cc -c -o pkg/o.syso ext.c +go build main.go + +-- ext.c -- +// +build ignore + +int f() { return 42; } +-- pkg/pkg.go -- +package pkg + +// extern int f(void); +import "C" + +func init() { + if v := C.f(); v != 42 { + panic(v) + } +} +-- main.go -- +package main + +import _ "pkg" + +func main() {} diff --git a/src/cmd/go/testdata/script/clean_testcache.txt b/src/cmd/go/testdata/script/clean_testcache.txt new file mode 100644 index 0000000000000..a2d592deffdda --- /dev/null +++ b/src/cmd/go/testdata/script/clean_testcache.txt @@ -0,0 +1,16 @@ +# go clean -testcache +# should work (see golang.org/issue/29757). +cd x +go test x_test.go +go clean -testcache +go test x_test.go +! stdout 'cached' + + +-- x/x_test.go -- +package x_test +import ( + "testing" +) +func TestMain(t *testing.T) { +} \ No newline at end of file diff --git a/src/cmd/go/testdata/script/gcflags_patterns.txt b/src/cmd/go/testdata/script/gcflags_patterns.txt index fe2cf6f0fb6d4..40f80b7d6e971 100644 --- a/src/cmd/go/testdata/script/gcflags_patterns.txt +++ b/src/cmd/go/testdata/script/gcflags_patterns.txt @@ -2,24 +2,28 @@ # -gcflags=-e applies to named packages, not dependencies go build -n -v -gcflags=-e z1 z2 -stderr 'compile.* -e .*-p z1' -stderr 'compile.* -e .*-p z2' +stderr 'compile.* -e.* -p z1' +stderr 'compile.* -e.* -p z2' stderr 'compile.* -p y' -! stderr 'compile.* -e .*-p [^z]' +! stderr 'compile.* -e.* -p [^z]' # -gcflags can specify package=flags, and can be repeated; last match wins go build -n -v -gcflags=-e -gcflags=z1=-N z1 z2 -stderr 'compile.* -N .*-p z1' -! stderr 'compile.* -e .*-p z1' -! stderr 'compile.* -N .*-p z2' -stderr 'compile.* -e .*-p z2' +stderr 'compile.* -N.* -p z1' +! stderr 'compile.* -e.* -p z1' +! stderr 'compile.* -N.* -p z2' +stderr 'compile.* -e.* -p z2' stderr 'compile.* -p y' -! stderr 'compile.* -e .*-p [^z]' -! stderr 'compile.* -N .*-p [^z]' +! stderr 'compile.* -e.* -p [^z]' +! 
stderr 'compile.* -N.* -p [^z]' # -gcflags can have arbitrary spaces around the flags go build -n -v -gcflags=' z1 = -e ' z1 -stderr 'compile.* -e .*-p z1' +stderr 'compile.* -e.* -p z1' + +# -gcflags='all=-e' should apply to all packages, even with go test +go test -c -n -gcflags='all=-e' z1 +stderr 'compile.* -e.* -p z3 ' # -ldflags for implicit test package applies to test binary go test -c -n -gcflags=-N -ldflags=-X=x.y=z z1 @@ -58,11 +62,15 @@ import _ "z2" -- z1/z_test.go -- package z1_test import "testing" +import _ "z3" func Test(t *testing.T) {} -- z2/z.go -- package z2 +-- z3/z.go -- +package z3 + -- y/y.go -- package y diff --git a/src/cmd/go/testdata/script/get_brace.txt b/src/cmd/go/testdata/script/get_brace.txt new file mode 100644 index 0000000000000..be81d8f4875cc --- /dev/null +++ b/src/cmd/go/testdata/script/get_brace.txt @@ -0,0 +1,49 @@ +[!exec:git] skip + +# Set up some empty repositories. +cd $WORK/_origin/foo +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git commit --allow-empty -m 'create master branch' + +cd $WORK +cd '_origin/{confusing}' +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git commit --allow-empty -m 'create master branch' + +# Clone the empty repositories into GOPATH. +# This tells the Go command where to find them: it takes the place of a user's meta-tag redirector. +mkdir $GOPATH/src/example.com +cd $GOPATH/src/example.com +exec git clone $WORK/_origin/foo +exec git clone $WORK/_origin/{confusing} + +# Commit contents to the repositories. +cd $WORK/_origin/foo +exec git add main.go +exec git commit -m 'add main' + +cd $WORK +cd '_origin/{confusing}' +exec git add confusing.go +exec git commit -m 'just try to delete this!' + +# 'go get' should refuse to download or update the confusingly-named repo. +cd $GOPATH/src/example.com/foo +! 
go get -u 'example.com/{confusing}' +stderr 'invalid char' +! go get -u example.com/foo +stderr 'invalid import path' +! exists example.com/{confusing} + +-- $WORK/_origin/foo/main.go -- +package main +import _ "example.com/{confusing}" + +func main() {} + +-- $WORK/_origin/{confusing}/confusing.go -- +package confusing diff --git a/src/cmd/go/testdata/script/get_dotfiles.txt b/src/cmd/go/testdata/script/get_dotfiles.txt new file mode 100644 index 0000000000000..1876114362690 --- /dev/null +++ b/src/cmd/go/testdata/script/get_dotfiles.txt @@ -0,0 +1,61 @@ +[!exec:git] skip + +# Set up a benign repository and a repository with a dotfile name. +cd $WORK/_origin/foo +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git commit --allow-empty -m 'create master branch' + +cd $WORK/_origin/.hidden +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git commit --allow-empty -m 'create master branch' + +# Clone the empty repositories into GOPATH. +# This tells the Go command where to find them: it takes the place of a user's meta-tag redirector. +mkdir $GOPATH/src/example.com +cd $GOPATH/src/example.com +exec git clone $WORK/_origin/foo +exec git clone $WORK/_origin/.hidden + +# Add a benign commit. +cd $WORK/_origin/foo +cp _ok/main.go main.go +exec git add main.go +exec git commit -m 'add ok' + +# 'go get' should install the benign commit. +cd $GOPATH +go get -u example.com/foo + +# Now sneak in an import of a dotfile path. +cd $WORK/_origin/.hidden +exec git add hidden.go +exec git commit -m 'nothing to see here, move along' + +cd $WORK/_origin/foo +cp _sneaky/main.go main.go +exec git add main.go +exec git commit -m 'fix typo (heh heh heh)' + +# 'go get -u' should refuse to download or update the dotfile-named repo. +cd $GOPATH/src/example.com/foo +! go get -u example.com/foo +stderr 'leading dot' +! 
exists example.com/.hidden/hidden.go + +-- $WORK/_origin/foo/_ok/main.go -- +package main + +func main() {} + +-- $WORK/_origin/foo/_sneaky/main.go -- +package main +import _ "example.com/.hidden" + +func main() {} + +-- $WORK/_origin/.hidden/hidden.go -- +package hidden diff --git a/src/cmd/go/testdata/script/get_tilde.txt b/src/cmd/go/testdata/script/get_tilde.txt new file mode 100644 index 0000000000000..08289ca4054c0 --- /dev/null +++ b/src/cmd/go/testdata/script/get_tilde.txt @@ -0,0 +1,21 @@ +# Paths containing windows short names should be rejected before attempting to fetch. +! go get example.com/longna~1.dir/thing +stderr 'trailing tilde and digits' +! go get example.com/longna~1/thing +stderr 'trailing tilde and digits' +! go get example.com/~9999999/thing +stderr 'trailing tilde and digits' + +# A path containing an element that is just a tilde, or a tilde followed by non-digits, +# should attempt to resolve. +! go get example.com/~glenda/notfound +! stderr 'trailing tilde and digits' +stderr 'unrecognized import path' + +! go get example.com/~glenda2/notfound +! stderr 'trailing tilde and digits' +stderr 'unrecognized import path' + +! go get example.com/~/notfound +! stderr 'trailing tilde and digits' +stderr 'unrecognized import path' diff --git a/src/cmd/go/testdata/script/get_unicode.txt b/src/cmd/go/testdata/script/get_unicode.txt new file mode 100644 index 0000000000000..31edcdb9f66c9 --- /dev/null +++ b/src/cmd/go/testdata/script/get_unicode.txt @@ -0,0 +1,37 @@ +[!exec:git] skip + +# Construct a repository that imports a non-ASCII path. +cd $WORK/_origin/example.com/unicode +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git add unicode.go +exec git commit -m 'add unicode.go' + +# Clone the repo into GOPATH so that 'go get -u' can find it. +mkdir $GOPATH/src/example.com/unicode +cd $GOPATH/src/example.com/unicode +exec git clone $WORK/_origin/example.com/unicode . 
+ +# Construct the imported repository. +cd $WORK/_origin/example.com/испытание +exec git init +exec git config user.name 'Nameless Gopher' +exec git config user.email 'nobody@golang.org' +exec git add испытание.go +exec git commit -m 'add испытание.go' + +# Clone that repo into GOPATH too. +mkdir $GOPATH/src/example.com/испытание +cd $GOPATH/src/example.com/испытание +exec git clone $WORK/_origin/example.com/испытание . + +# Upgrading the importer should pull from the non-ASCII repo. +cd $GOPATH +go get -u example.com/unicode + +-- $WORK/_origin/example.com/unicode/unicode.go -- +package unicode +import _ "example.com/испытание" +-- $WORK/_origin/example.com/испытание/испытание.go -- +package испытание diff --git a/src/cmd/go/testdata/script/help.txt b/src/cmd/go/testdata/script/help.txt index cbbd15404b536..9f455256f72d8 100644 --- a/src/cmd/go/testdata/script/help.txt +++ b/src/cmd/go/testdata/script/help.txt @@ -28,3 +28,21 @@ stdout 'usage: go mod tidy' # go mod --help doesn't print help but at least suggests it. ! go mod --help stderr 'Run ''go help mod'' for usage.' + +# Earlier versions of Go printed the same as 'go -h' here. +# Also make sure we print the short help line. +! go vet -h +stderr 'usage: go vet' +stderr 'Run ''go help vet'' for details' +stderr 'Run ''go tool vet -help'' for the vet tool''s flags' + +# Earlier versions of Go printed a large document here, instead of these two +# lines. +! 
go test -h +stderr 'usage: go test' +stderr 'Run ''go help test'' for details' + +# go help get shows usage for get +go help get +stdout 'usage: go get' +stdout 'get when using GOPATH' diff --git a/src/cmd/go/testdata/script/list_bad_import.txt b/src/cmd/go/testdata/script/list_bad_import.txt index ba66b0937f84a..3d9cac0d5f549 100644 --- a/src/cmd/go/testdata/script/list_bad_import.txt +++ b/src/cmd/go/testdata/script/list_bad_import.txt @@ -47,7 +47,7 @@ stdout error stdout incomplete -# The pattern "all" should match only packages that acutally exist, +# The pattern "all" should match only packages that actually exist, # ignoring those whose existence is merely implied by imports. go list -e -f '{{.ImportPath}}' all stdout example.com/direct diff --git a/src/cmd/go/testdata/script/list_find.txt b/src/cmd/go/testdata/script/list_find.txt index dbe8fb0ac98ce..63c6896e507db 100644 --- a/src/cmd/go/testdata/script/list_find.txt +++ b/src/cmd/go/testdata/script/list_find.txt @@ -5,6 +5,15 @@ stdout true go list -find -f '{{.Incomplete}} {{.Imports}}' x/y/z... stdout '^false \[\]' +# go list -find -compiled should use cached sources the second time it's run. +# It might not find the same cached sources as "go build", but the sources +# should be identical. "go build" derives action IDs (which are used as cache +# keys) from dependencies' action IDs. "go list -find" won't know what the +# dependencies are, so it's can't construct the same action IDs. +go list -find -compiled net +go list -find -compiled -x net +! stderr 'cgo' + -- x/y/z/z.go -- package z import "does/not/exist" diff --git a/src/cmd/go/testdata/script/list_importmap.txt b/src/cmd/go/testdata/script/list_importmap.txt new file mode 100644 index 0000000000000..a42dc47f2443b --- /dev/null +++ b/src/cmd/go/testdata/script/list_importmap.txt @@ -0,0 +1,25 @@ +# gccgo does not have standard packages. +[gccgo] skip + +# fmt should have no rewritten imports. 
+# The import from a/b should map c/d to a's vendor directory. +go list -f '{{.ImportPath}}: {{.ImportMap}}' fmt a/b +stdout 'fmt: map\[\]' +stdout 'a/b: map\[c/d:a/vendor/c/d\]' + +# flag [fmt.test] should import fmt [fmt.test] as fmt +# fmt.test should import testing [fmt.test] as testing +# fmt.test should not import a modified os +go list -deps -test -f '{{.ImportPath}} MAP: {{.ImportMap}}{{"\n"}}{{.ImportPath}} IMPORT: {{.Imports}}' fmt +stdout '^flag \[fmt\.test\] MAP: map\[fmt:fmt \[fmt\.test\]\]' +stdout '^fmt\.test MAP: map\[(.* )?testing:testing \[fmt\.test\]' +! stdout '^fmt\.test MAP: map\[(.* )?os:' +stdout '^fmt\.test IMPORT: \[fmt \[fmt\.test\] fmt_test \[fmt\.test\] os testing \[fmt\.test\] testing/internal/testdeps \[fmt\.test\]\]' + + +-- a/b/b.go -- +package b + +import _ "c/d" +-- a/vendor/c/d/d.go -- +package d diff --git a/src/cmd/go/testdata/script/list_std.txt b/src/cmd/go/testdata/script/list_std.txt index a63d74db1205a..046bec6ac54b0 100644 --- a/src/cmd/go/testdata/script/list_std.txt +++ b/src/cmd/go/testdata/script/list_std.txt @@ -8,5 +8,5 @@ go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' ./... # our vendored packages should be reported as standard go list std cmd -stdout golang_org/x/net/http2/hpack +stdout internal/x/net/http2/hpack stdout cmd/vendor/golang\.org/x/arch/x86/x86asm diff --git a/src/cmd/go/testdata/script/mod_clean_cache.txt b/src/cmd/go/testdata/script/mod_clean_cache.txt new file mode 100644 index 0000000000000..a9519f9d9086c --- /dev/null +++ b/src/cmd/go/testdata/script/mod_clean_cache.txt @@ -0,0 +1,59 @@ +env GO111MODULE=on + +# 'mod download' should download the module to the cache. +go mod download rsc.io/quote@v1.5.0 +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip + +# '-n' should print commands but not actually execute them. 
+go clean -modcache -n +stdout '^rm -rf .*pkg.mod$' +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip + +# 'go clean -modcache' should actually delete the files. +go clean -modcache +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod +! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip + +# 'go clean -r -modcache' should clean only the dependencies that are within the +# main module. +# BUG(golang.org/issue/28680): Today, it cleans across module boundaries. +cd r +exists ./test.out +exists ../replaced/test.out +go clean -r -modcache +! exists ./test.out +! exists ../replaced/test.out # BUG: should still exist + +# 'go clean -modcache' should not download anything before cleaning. +# BUG(golang.org/issue/28680): Today, it does. +go mod edit -require rsc.io/quote@v1.99999999.0-not-a-real-version +! go clean -modcache # BUG: should succeed +stderr 'finding rsc.io' # BUG: should not resolve module +go mod edit -droprequire rsc.io/quote + +-- go.mod -- +module m +-- m.go -- +package m + +-- r/go.mod -- +module example.com/r +require example.com/r/replaced v0.0.0 +replace example.com/r/replaced => ../replaced +-- r/r.go -- +package r +import _ "example.com/r/replaced" +-- r/test.out -- +DELETE ME + +-- replaced/go.mod -- +module example.com/r/replaced +-- replaced/replaced.go -- +package replaced +-- replaced/test.out -- +DO NOT DELETE diff --git a/src/cmd/go/testdata/script/mod_concurrent.txt b/src/cmd/go/testdata/script/mod_concurrent.txt new file mode 100644 index 0000000000000..e03e5e5edbeda --- /dev/null +++ b/src/cmd/go/testdata/script/mod_concurrent.txt @@ -0,0 +1,31 @@ +env GO111MODULE=on + +# Concurrent builds should succeed, even if they need to download modules. 
+go build ./x & +go build ./y +wait + +# Concurrent builds should update go.sum to the union of the hashes for the +# modules they read. +cmp go.sum go.sum.want + +-- go.mod -- +module golang.org/issue/26794 + +require ( + golang.org/x/text v0.3.0 + rsc.io/sampler v1.0.0 +) +-- x/x.go -- +package x + +import _ "golang.org/x/text/language" +-- y/y.go -- +package y + +import _ "rsc.io/sampler" +-- go.sum.want -- +golang.org/x/text v0.3.0 h1:ivTorhoiROmZ1mcs15mO2czVF0uy0tnezXpBVNzgrmA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +rsc.io/sampler v1.0.0 h1:SRJnjyQ07sAtq6G4RcfJEmz8JxqLyj3PoGXG2VhbDWo= +rsc.io/sampler v1.0.0/go.mod h1:cqxpM3ZVz9VtirqxZPmrWzkQ+UkiNiGtkrN+B+i8kx8= diff --git a/src/cmd/go/testdata/script/mod_download.txt b/src/cmd/go/testdata/script/mod_download.txt index ef931cfd30fd2..22f07c33c7565 100644 --- a/src/cmd/go/testdata/script/mod_download.txt +++ b/src/cmd/go/testdata/script/mod_download.txt @@ -8,6 +8,12 @@ exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip +# download of an invalid path should report the error +! go mod download this.domain.is.invalid/somemodule@v1.0.0 +stderr 'this.domain.is.invalid' +! 
go mod download -json this.domain.is.invalid/somemodule@v1.0.0 +stdout '"Error": ".*this.domain.is.invalid.*"' + # download -json with version should print JSON go mod download -json 'rsc.io/quote@<=v1.5.0' stdout '^\t"Path": "rsc.io/quote"' @@ -15,6 +21,8 @@ stdout '^\t"Version": "v1.5.0"' stdout '^\t"Info": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.0.info"' stdout '^\t"GoMod": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.0.mod"' stdout '^\t"Zip": ".*(\\\\|/)pkg(\\\\|/)mod(\\\\|/)cache(\\\\|/)download(\\\\|/)rsc.io(\\\\|/)quote(\\\\|/)@v(\\\\|/)v1.5.0.zip"' +stdout '^\t"Sum": "h1:6fJa6E\+wGadANKkUMlZ0DhXFpoKlslOQDCo259XtdIE="' # hash of testdata/mod version, not real version! +stdout '^\t"GoModSum": "h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe\+TKr0="' ! stdout '"Error"' # download queries above should not have added to go.mod. @@ -40,6 +48,21 @@ exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +# download repopulates deleted files and directories independently. 
+rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info +go mod download +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info +rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod +go mod download +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod +rm $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip +go mod download +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip +rm -r $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +go mod download +exists $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 + +# download reports the locations of downloaded files go mod download -json stdout '^\t"Path": "rsc.io/quote"' stdout '^\t"Version": "v1.5.2"' diff --git a/src/cmd/go/testdata/script/mod_download_hash.txt b/src/cmd/go/testdata/script/mod_download_hash.txt new file mode 100644 index 0000000000000..1662043207ed6 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_download_hash.txt @@ -0,0 +1,23 @@ +env GO111MODULE=on + +# Testing mod download with non semantic versions; turn off proxy. 
+[!net] skip +[!exec:git] skip +env GOPROXY= + +go mod download rsc.io/quote@a91498bed0a73d4bb9c1fb2597925f7883bc40a7 +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180709162918-a91498bed0a7.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180709162918-a91498bed0a7.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180709162918-a91498bed0a7.zip + +go mod download rsc.io/quote@master +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180710144737-5d9f230bcfba.info +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180710144737-5d9f230bcfba.mod +exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v0.0.0-20180710144737-5d9f230bcfba.zip + + +-- go.mod -- +module m + +-- m.go -- +package m \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_edit.txt b/src/cmd/go/testdata/script/mod_edit.txt index 60a6f74536197..aa714e8b3cd7b 100644 --- a/src/cmd/go/testdata/script/mod_edit.txt +++ b/src/cmd/go/testdata/script/mod_edit.txt @@ -10,37 +10,43 @@ stderr 'cannot determine module path' go mod init x.x/y/z stderr 'creating new go.mod: module x.x/y/z' -cmp go.mod $WORK/go.mod.init +cmpenv go.mod $WORK/go.mod.init ! 
go mod init -cmp go.mod $WORK/go.mod.init +cmpenv go.mod $WORK/go.mod.init # go mod edits go mod edit -droprequire=x.1 -require=x.1@v1.0.0 -require=x.2@v1.1.0 -droprequire=x.2 -exclude='x.1 @ v1.2.0' -exclude=x.1@v1.2.1 -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z' -cmp go.mod $WORK/go.mod.edit1 +cmpenv go.mod $WORK/go.mod.edit1 go mod edit -droprequire=x.1 -dropexclude=x.1@v1.2.1 -dropreplace=x.1@v1.3.0 -require=x.3@v1.99.0 -cmp go.mod $WORK/go.mod.edit2 +cmpenv go.mod $WORK/go.mod.edit2 # go mod edit -json go mod edit -json -cmp stdout $WORK/go.mod.json +cmpenv stdout $WORK/go.mod.json # go mod edit -replace go mod edit -replace=x.1@v1.3.0=y.1/v2@v2.3.5 -replace=x.1@v1.4.0=y.1/v2@v2.3.5 -cmp go.mod $WORK/go.mod.edit3 +cmpenv go.mod $WORK/go.mod.edit3 go mod edit -replace=x.1=y.1/v2@v2.3.6 -cmp go.mod $WORK/go.mod.edit4 +cmpenv go.mod $WORK/go.mod.edit4 go mod edit -dropreplace=x.1 -cmp go.mod $WORK/go.mod.edit5 +cmpenv go.mod $WORK/go.mod.edit5 # go mod edit -fmt cp $WORK/go.mod.badfmt go.mod go mod edit -fmt -print # -print should avoid writing file -cmp stdout $WORK/go.mod.edit4 +cmpenv stdout $WORK/go.mod.edit6 cmp go.mod $WORK/go.mod.badfmt go mod edit -fmt # without -print, should write file (and nothing to stdout) ! stdout . 
-cmp go.mod $WORK/go.mod.edit4 +cmpenv go.mod $WORK/go.mod.edit6 + +# go mod edit -module +cd $WORK/m +go mod init a.a/b/c +go mod edit -module x.x/y/z +cmpenv go.mod go.mod.edit -- x.go -- package x @@ -50,9 +56,13 @@ package w -- $WORK/go.mod.init -- module x.x/y/z + +go $goversion -- $WORK/go.mod.edit1 -- module x.x/y/z +go $goversion + require x.1 v1.0.0 exclude ( @@ -67,6 +77,8 @@ replace ( -- $WORK/go.mod.edit2 -- module x.x/y/z +go $goversion + exclude x.1 v1.2.0 replace x.1 v1.4.0 => ../z @@ -77,6 +89,7 @@ require x.3 v1.99.0 "Module": { "Path": "x.x/y/z" }, + "Go": "$goversion", "Require": [ { "Path": "x.3", @@ -104,6 +117,8 @@ require x.3 v1.99.0 -- $WORK/go.mod.edit3 -- module x.x/y/z +go $goversion + exclude x.1 v1.2.0 replace ( @@ -115,6 +130,8 @@ require x.3 v1.99.0 -- $WORK/go.mod.edit4 -- module x.x/y/z +go $goversion + exclude x.1 v1.2.0 replace x.1 => y.1/v2 v2.3.6 @@ -123,14 +140,32 @@ require x.3 v1.99.0 -- $WORK/go.mod.edit5 -- module x.x/y/z +go $goversion + +exclude x.1 v1.2.0 + +require x.3 v1.99.0 +-- $WORK/go.mod.edit6 -- +module x.x/y/z + +go 1.10 + exclude x.1 v1.2.0 +replace x.1 => y.1/v2 v2.3.6 + require x.3 v1.99.0 -- $WORK/go.mod.badfmt -- module x.x/y/z +go 1.10 + exclude x.1 v1.2.0 replace x.1 => y.1/v2 v2.3.6 require x.3 v1.99.0 +-- $WORK/m/go.mod.edit -- +module x.x/y/z + +go $goversion \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_edit_go.txt b/src/cmd/go/testdata/script/mod_edit_go.txt new file mode 100644 index 0000000000000..3ec8137e2d313 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_edit_go.txt @@ -0,0 +1,16 @@ +# Test support for go mod -edit to set language version. + +env GO111MODULE=on +! 
go build +stderr 'type aliases only supported as of' +go mod edit -go=1.9 +grep 'go 1.9' go.mod +go build + +-- go.mod -- +module m +go 1.8 + +-- alias.go -- +package alias +type T = int diff --git a/src/cmd/go/testdata/script/mod_enabled.txt b/src/cmd/go/testdata/script/mod_enabled.txt index 8eef870b02b35..ab5ee3d6dfa65 100644 --- a/src/cmd/go/testdata/script/mod_enabled.txt +++ b/src/cmd/go/testdata/script/mod_enabled.txt @@ -38,9 +38,9 @@ stdout z[/\\]go.mod cd $GOPATH/src/x/y go env GOMOD -! stdout . -! go list -m -stderr 'cannot find main module' +stdout 'NUL|/dev/null' +go list -m +stdout '^command-line-arguments$' cd $GOPATH/foo go env GOMOD diff --git a/src/cmd/go/testdata/script/mod_fs_patterns.txt b/src/cmd/go/testdata/script/mod_fs_patterns.txt index d7d3e0321b53d..9341a1d08305f 100644 --- a/src/cmd/go/testdata/script/mod_fs_patterns.txt +++ b/src/cmd/go/testdata/script/mod_fs_patterns.txt @@ -34,11 +34,11 @@ stderr 'import lookup disabled' ! go build -mod=readonly ./nonexist ! stderr 'import lookup disabled' -stderr '^go: no such directory ./nonexist' +stderr 'unknown import path "m/nonexist": cannot find package' ! go build -mod=readonly ./go.mod ! stderr 'import lookup disabled' -stderr '^go: ./go.mod is not a directory' +stderr 'unknown import path "m/go.mod": cannot find package' -- x/go.mod -- module m diff --git a/src/cmd/go/testdata/script/mod_get_private_vcs.txt b/src/cmd/go/testdata/script/mod_get_private_vcs.txt new file mode 100644 index 0000000000000..86d78e8381396 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_private_vcs.txt @@ -0,0 +1,10 @@ +env GO111MODULE=on + +# Testing stderr for git ls-remote; turn off proxy. +[!net] skip +[!exec:git] skip +env GOPROXY= + +! go get github.com/golang/nonexist +stderr 'If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.' +! stdout . 
diff --git a/src/cmd/go/testdata/script/mod_get_svn.txt b/src/cmd/go/testdata/script/mod_get_svn.txt new file mode 100644 index 0000000000000..b3436284aff02 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_svn.txt @@ -0,0 +1,20 @@ +[!net] skip +[!exec:svn] skip + +env GO111MODULE=on +env GOPROXY=direct # obtain llvm.org directory, not via svn. + +# Attempting to get a module zip using svn should fail with a reasonable +# message instead of a panic. +# TODO(golang.org/issue/26092): Really, it shouldn't fail at all. +! go get -d llvm.org/llvm/bindings/go/llvm +stderr 'ReadZip not implemented for svn' +! go install . +stderr 'ReadZip not implemented for svn' + +-- go.mod -- +module golang/go/issues/28943/main +-- main.go -- +package main +import _ "llvm.org/llvm/bindings/go/llvm" +func main() {} diff --git a/src/cmd/go/testdata/script/mod_git_export_subst.txt b/src/cmd/go/testdata/script/mod_git_export_subst.txt new file mode 100644 index 0000000000000..2b8e2bc7bc0b2 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_git_export_subst.txt @@ -0,0 +1,21 @@ +env GO111MODULE=on +env GOPROXY= + +# Testing that git export-subst is disabled +[!net] skip +[!exec:git] skip +go build + +-- x.go -- +package x + +import _ "github.com/jasonkeene/export-subst" + +-- go.mod -- +module x + +require github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626 + +-- go.sum -- +github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626 h1:AUkXi/xFnm7lH2pgtvVkGb7buRn1ywFHw+xDpZ29Rz0= +github.com/jasonkeene/export-subst v0.0.0-20180927204031-5845945ec626/go.mod h1:DwJXqVtrgrQkv3Giuf2Jh4YyubVe7y41S1eOIaysTJw= diff --git a/src/cmd/go/testdata/script/mod_go_version.txt b/src/cmd/go/testdata/script/mod_go_version.txt index f2de74cee87df..37f173531b699 100644 --- a/src/cmd/go/testdata/script/mod_go_version.txt +++ b/src/cmd/go/testdata/script/mod_go_version.txt @@ -3,9 +3,10 @@ env GO111MODULE=on go list -! 
go build -stderr 'module requires Go 1.999' +go build go build sub.1 +go build subver.1 +! stderr 'module requires' ! go build badsub.1 stderr 'module requires Go 1.11111' @@ -19,11 +20,13 @@ module m go 1.999 require ( sub.1 v1.0.0 + subver.1 v1.0.0 badsub.1 v1.0.0 versioned.1 v1.0.0 ) replace ( sub.1 => ./sub + subver.1 => ./subver badsub.1 => ./badsub versioned.1 v1.0.0 => ./versioned1 versioned.1 v1.1.0 => ./versioned2 @@ -39,12 +42,20 @@ go 1.11 -- sub/x.go -- package x +-- subver/go.mod -- +module m +go 1.11111 + +-- subver/x.go -- +package x + -- badsub/go.mod -- module m go 1.11111 -- badsub/x.go -- package x +invalid syntax -- versioned1/go.mod -- module versioned @@ -59,3 +70,4 @@ go 1.99999 -- versioned2/x.go -- package x +invalid syntax diff --git a/src/cmd/go/testdata/script/mod_help.txt b/src/cmd/go/testdata/script/mod_help.txt new file mode 100644 index 0000000000000..b5cd30c5219e8 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_help.txt @@ -0,0 +1,6 @@ +env GO111MODULE=on + +# go help get shows usage for get +go help get +stdout 'usage: go get' +stdout 'get using modules to manage source' \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_import_v1suffix.txt b/src/cmd/go/testdata/script/mod_import_v1suffix.txt new file mode 100644 index 0000000000000..82bb5e2a2fb6f --- /dev/null +++ b/src/cmd/go/testdata/script/mod_import_v1suffix.txt @@ -0,0 +1,11 @@ +env GO111MODULE=on + +! go get -m example.com/invalidpath/v1 +! go install . + +-- go.mod -- +module example.com +-- main.go -- +package main +import _ "example.com/invalidpath/v1" +func main() {} diff --git a/src/cmd/go/testdata/script/mod_internal.txt b/src/cmd/go/testdata/script/mod_internal.txt index e5f5a1205ee76..5a47c3fa44ae6 100644 --- a/src/cmd/go/testdata/script/mod_internal.txt +++ b/src/cmd/go/testdata/script/mod_internal.txt @@ -18,15 +18,6 @@ stderr 'use of internal package golang.org/x/.* not allowed' ! 
go build ./fromstd stderr 'use of internal package internal/testenv not allowed' -# Packages found via standard-library vendoring should not leak. -! go build ./fromstdvendor -stderr 'use of vendored package golang_org/x/net/http/httpguts not allowed' - -env GO111MODULE=off -! go build ./fromstdvendor -stderr 'cannot find package "golang_org/x/net/http/httpguts" in any of:' -env GO111MODULE=on - # Dependencies should be able to use their own internal modules... rm go.mod go mod init golang.org/notx @@ -83,10 +74,6 @@ import _ "golang.org/notx/useinternal" package fromstd import _ "internal/testenv" --- fromstdvendor/useinternal.go -- -package fromstdvendor -import _ "golang_org/x/net/http/httpguts" - -- replace/golang.org/notx/internal/go.mod -- module golang.org/x/internal diff --git a/src/cmd/go/testdata/script/mod_list_bad_import.txt b/src/cmd/go/testdata/script/mod_list_bad_import.txt index 258eb6a56711c..8a66e0b72a0a5 100644 --- a/src/cmd/go/testdata/script/mod_list_bad_import.txt +++ b/src/cmd/go/testdata/script/mod_list_bad_import.txt @@ -47,7 +47,7 @@ stdout error stdout incomplete -# The pattern "all" should match only packages that acutally exist, +# The pattern "all" should match only packages that actually exist, # ignoring those whose existence is merely implied by imports. go list -e -f '{{.ImportPath}} {{.Error}}' all stdout example.com/direct diff --git a/src/cmd/go/testdata/script/mod_list_dir.txt b/src/cmd/go/testdata/script/mod_list_dir.txt index 800f2775591a8..903651c9d58fd 100644 --- a/src/cmd/go/testdata/script/mod_list_dir.txt +++ b/src/cmd/go/testdata/script/mod_list_dir.txt @@ -10,7 +10,9 @@ stdout ^math$ go list -f '{{.ImportPath}}' . stdout ^x$ ! 
go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 -stderr '^go: no such directory.*quote@v1.5.2' +stderr 'unknown import path "rsc.io/quote": cannot find package' +go list -e -f '{{with .Error}}{{.}}{{end}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 +stdout 'unknown import path "rsc.io/quote": cannot find package' go mod download rsc.io/quote@v1.5.2 go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2 stdout '^rsc.io/quote$' diff --git a/src/cmd/go/testdata/script/mod_list_test.txt b/src/cmd/go/testdata/script/mod_list_test.txt new file mode 100644 index 0000000000000..a99e4f36cd587 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_test.txt @@ -0,0 +1,16 @@ +env GO111MODULE=on + +# go list -compiled -test must handle test-only packages +# golang.org/issue/27097. +go list -compiled -test +stdout '^m$' +stdout '^m\.test$' +stdout '^m \[m\.test\]$' + +-- go.mod -- +module m + +-- x_test.go -- +package x +import "testing" +func Test(t *testing.T) {} diff --git a/src/cmd/go/testdata/script/mod_load_badzip.txt b/src/cmd/go/testdata/script/mod_load_badzip.txt new file mode 100644 index 0000000000000..95513de4a6588 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_load_badzip.txt @@ -0,0 +1,11 @@ +# Zip files with unexpected file names inside should be rejected. +env GO111MODULE=on + +! go get -d rsc.io/badzip +stderr 'zip for rsc.io/badzip@v1.0.0 has unexpected file rsc.io/badzip@v1.0.0.txt' + +! go build rsc.io/badzip +stderr 'zip for rsc.io/badzip@v1.0.0 has unexpected file rsc.io/badzip@v1.0.0.txt' + +-- go.mod -- +module m diff --git a/src/cmd/go/testdata/script/mod_modinfo.txt b/src/cmd/go/testdata/script/mod_modinfo.txt new file mode 100644 index 0000000000000..fb31f9e43b22d --- /dev/null +++ b/src/cmd/go/testdata/script/mod_modinfo.txt @@ -0,0 +1,89 @@ +# Test to ensure runtime/debug.ReadBuildInfo parses +# the modinfo embedded in a binary by the go tool +# when module is enabled. 
+env GO111MODULE=on + +cd x +go mod edit -require=rsc.io/quote@v1.5.2 +go mod edit -replace=rsc.io/quote@v1.5.2=rsc.io/quote@v1.0.0 + +# Build a binary and ensure that it can output its own debug info. +# The debug info should be accessible before main starts (golang.org/issue/29628). +go build +exec ./x$GOEXE +stderr 'mod\s+x\s+\(devel\)' +stderr 'dep\s+rsc.io/quote\s+v1.5.2\s+' +stderr '=>\s+rsc.io/quote\s+v1.0.0\s+h1:' +stderr 'Hello, world.' + +[short] skip + +# Build a binary that accesses its debug info by reading the binary directly +# (rather than through debug.ReadBuildInfo). +# The debug info should still be present (golang.org/issue/28753). +cd unused +go build +exec ./unused$GOEXE + +-- x/go.mod -- +module x + +-- x/lib/lib.go -- +// Package lib accesses runtime/debug.modinfo before package main's init +// functions have run. +package lib + +import "runtime/debug" + +func init() { + m, ok := debug.ReadBuildInfo() + if !ok { + panic("failed debug.ReadBuildInfo") + } + println("mod", m.Main.Path, m.Main.Version) + for _, d := range m.Deps { + println("dep", d.Path, d.Version, d.Sum) + if r := d.Replace; r != nil { + println("=>", r.Path, r.Version, r.Sum) + } + } +} + +-- x/main.go -- +package main + +import ( + "rsc.io/quote" + _ "x/lib" +) + +func main() { + println(quote.Hello()) +} + +-- x/unused/main.go -- +// The unused binary does not access runtime/debug.modinfo. 
+package main + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "log" + "os" + + _ "rsc.io/quote" +) + +func main() { + b, err := ioutil.ReadFile(os.Args[0]) + if err != nil { + log.Fatal(err) + } + + infoStart, _ := hex.DecodeString("3077af0c9274080241e1c107e6d618e6") + if !bytes.Contains(b, infoStart) { + log.Fatal("infoStart not found in binary") + } + log.Println("ok") +} diff --git a/src/cmd/go/testdata/script/mod_nomod.txt b/src/cmd/go/testdata/script/mod_nomod.txt index 640d5a363120b..7e0f55a602fd6 100644 --- a/src/cmd/go/testdata/script/mod_nomod.txt +++ b/src/cmd/go/testdata/script/mod_nomod.txt @@ -16,7 +16,7 @@ go mod edit -json x.mod ! go get ! go install ! go list -! go run x.go +! go run ! go test ! go vet diff --git a/src/cmd/go/testdata/script/mod_outside.txt b/src/cmd/go/testdata/script/mod_outside.txt new file mode 100644 index 0000000000000..db994a1656703 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_outside.txt @@ -0,0 +1,249 @@ +env GO111MODULE=on + +# This script tests commands in module mode outside of any module. +# +# First, ensure that we really are in module mode, and that we really don't have +# a go.mod file. +go env GOMOD +stdout 'NUL|/dev/null' + + +# 'go list' without arguments implicitly operates on the current directory, +# which is not in a module. +! go list +stderr 'cannot find main module' +go list -m +stdout '^command-line-arguments$' +# 'go list' in the working directory should fail even if there is a a 'package +# main' present: without a main module, we do not know its package path. +! go list ./foo +stderr 'cannot find main module' + +# 'go list all' lists the transitive import graph of the main module, +# which is empty if there is no main module. +go list all +! stdout . +stderr 'warning: "all" matched no packages' +go list -m all +stderr 'warning: pattern "all" matched no module dependencies' + +# 'go list' on standard-library packages should work, since they do not depend +# on the contents of any module. 
+go list -deps cmd +stdout '^fmt$' +stdout '^cmd/go$' + +go list $GOROOT/src/fmt +stdout '^fmt$' + +# 'go list' should work with file arguments. +go list ./foo/foo.go +stdout 'command-line-arguments' + +# 'go list -m' with an explicit version should resolve that version. +go list -m example.com/version@latest +stdout 'example.com/version v1.1.0' + +# 'go list -m -versions' should succeed even without an explicit version. +go list -m -versions example.com/version +stdout 'v1.0.0\s+v1.0.1\s+v1.1.0' + +# 'go list -m <mods> all' does not include the dependencies of <mods> in the computation of 'all'. +go list -m example.com/printversion@v1.0.0 all +stdout 'example.com/printversion v1.0.0' +stderr 'warning: pattern "all" matched no module dependencies' +! stdout 'example.com/version' + + +# 'go clean' should skip the current directory if it isn't in a module. +go clean -n +! stdout . +! stderr . + +# 'go mod graph' should not display anything, since there are no active modules. +go mod graph +! stdout . +! stderr . + +# 'go mod why' should report that nothing is a dependency. +go mod why -m example.com/version +stdout 'does not need' + + +# 'go mod edit', 'go mod tidy', and 'go mod fmt' should fail: +# there is no go.mod file to edit. +! go mod tidy +stderr 'cannot find main module' +! go mod edit -fmt +stderr 'cannot find main module' +! go mod edit -require example.com/version@v1.0.0 +stderr 'cannot find main module' + + +# 'go mod download' should download exactly the requested module without dependencies. +rm -r $GOPATH/pkg/mod/cache/download/example.com +go mod download example.com/printversion@v1.0.0 +exists $GOPATH/pkg/mod/cache/download/example.com/printversion/@v/v1.0.0.zip +! exists $GOPATH/pkg/mod/cache/download/example.com/version/@v/v1.0.0.zip + +# 'go mod vendor' should fail: it starts by clearing the existing vendor +# directory, and we don't know where that is. +!
go mod vendor +stderr 'cannot find main module' + +# 'go mod verify' should succeed: we have no modules to verify. +go mod verify +stdout 'all modules verified' +! stderr . + + +# 'go get' without arguments implicitly operates on the main module, and thus +# should fail. +! go get +stderr 'cannot find main module' +! go get -u +stderr 'cannot find main module' +! go get -u ./foo +stderr 'cannot find main module' + +# 'go get -u all' upgrades the transitive import graph of the main module, +# which is empty. +go get -u all +! stdout . +stderr 'warning: "all" matched no packages' + +# 'go get -m' should check the proposed module graph for consistency, +# even though it will not be saved anywhere. +! go get -m example.com/printversion@v1.0.0 example.com/version@none +stderr 'inconsistent versions' + +# 'go get -d' should download and extract the source code needed to build the requested version. +rm -r $GOPATH/pkg/mod/example.com +go get -d example.com/printversion@v1.0.0 +exists $GOPATH/pkg/mod/example.com/printversion@v1.0.0 +exists $GOPATH/pkg/mod/example.com/version@v1.0.0 + + +# 'go build' without arguments implicitly operates on the current directory, and should fail. +cd foo +! go build +stderr 'cannot find main module' +cd .. + +# 'go build' of a non-module directory should fail too. +! go build ./foo +stderr 'cannot find main module' + +# However, 'go build' should succeed for standard-library packages. +go build -n fmt + + +# TODO(golang.org/issue/28992): 'go doc' should document the latest version. +# For now it does not. +! go doc example.com/version +stderr 'no such package' + +# 'go install' with a version should fail due to syntax. +! go install example.com/printversion@v1.0.0 +stderr 'can only use path@version syntax with' + + +# 'go fmt' should be able to format files outside of a module. +go fmt foo/foo.go + + +# The remainder of the test checks dependencies by linking and running binaries. 
+[short] stop + +# 'go get' of a binary without a go.mod should install the requested version, +# resolving outside dependencies to the latest available versions. +go get example.com/printversion@v0.1.0 +exec ../bin/printversion +stdout 'path is example.com/printversion' +stdout 'main is example.com/printversion v0.1.0' +stdout 'using example.com/version v1.1.0' + +# 'go get' of a versioned binary should build and install the latest version +# using its minimal module requirements, ignoring replacements and exclusions. +go get example.com/printversion +exec ../bin/printversion +stdout 'path is example.com/printversion' +stdout 'main is example.com/printversion v1.0.0' +stdout 'using example.com/version v1.0.0' + +# 'go get -u=patch' should patch dependencies before installing, +# again ignoring replacements and exclusions. +go get -u=patch example.com/printversion@v1.0.0 +exec ../bin/printversion +stdout 'path is example.com/printversion' +stdout 'main is example.com/printversion v1.0.0' +stdout 'using example.com/version v1.0.1' + +# 'go install' without a version should install the latest version +# using its minimal module requirements. +go install example.com/printversion +exec ../bin/printversion +stdout 'path is example.com/printversion' +stdout 'main is example.com/printversion v1.0.0' +stdout 'using example.com/version v1.0.0' + +# 'go run' should use 'main' as the effective module and import path. +go run ./foo/foo.go +stdout 'path is command-line-arguments$' +stdout 'main is command-line-arguments \(devel\)' +stdout 'using example.com/version v1.1.0' + +# 'go generate' should work with file arguments. +[exec:touch] go generate ./foo/foo.go +[exec:touch] exists ./foo/gen.txt + +# 'go install' should work with file arguments. +go install ./foo/foo.go + +# 'go test' should work with file arguments. +go test -v ./foo/foo_test.go +stdout 'foo was tested' + +# 'go vet' should work with file arguments. 
+go vet ./foo/foo.go + + +-- README.txt -- +There is no go.mod file in the working directory. + +-- foo/foo.go -- +//go:generate touch gen.txt + +package main + +import ( + "fmt" + "os" + "runtime/debug" + + _ "example.com/version" +) + +func main() { + info, ok := debug.ReadBuildInfo() + if !ok { + panic("missing build info") + } + fmt.Fprintf(os.Stdout, "path is %s\n", info.Path) + fmt.Fprintf(os.Stdout, "main is %s %s\n", info.Main.Path, info.Main.Version) + for _, m := range info.Deps { + fmt.Fprintf(os.Stdout, "using %s %s\n", m.Path, m.Version) + } +} + +-- foo/foo_test.go -- +package main + +import ( + "fmt" + "testing" +) + +func TestFoo(t *testing.T) { + fmt.Println("foo was tested") +} diff --git a/src/cmd/go/testdata/script/mod_patterns.txt b/src/cmd/go/testdata/script/mod_patterns.txt index 4fa436ba2d076..5f9ab62704745 100644 --- a/src/cmd/go/testdata/script/mod_patterns.txt +++ b/src/cmd/go/testdata/script/mod_patterns.txt @@ -34,6 +34,13 @@ env CGO_ENABLED=0 go list -f '{{.ImportPath}}: {{.Match}}' all ... example.com/m/... ./... ./xyz... ! stdout example.com/m/useC +# 'go list ./...' should not try to resolve the main module. +cd ../empty +go list -deps ./... +! stdout . +! stderr 'finding' +stderr -count=1 '^go: warning: "./..." 
matched no packages' + -- m/go.mod -- module example.com/m @@ -64,3 +71,6 @@ module example.com/m/nested -- nested/useencoding/useencoding.go -- package useencoding import _ "encoding" + +-- empty/go.mod -- +module example.com/empty diff --git a/src/cmd/go/testdata/script/mod_readonly.txt b/src/cmd/go/testdata/script/mod_readonly.txt index 1b5932e441e75..188a66d0e15fa 100644 --- a/src/cmd/go/testdata/script/mod_readonly.txt +++ b/src/cmd/go/testdata/script/mod_readonly.txt @@ -37,6 +37,8 @@ cmp go.mod go.mod.inconsistent -- go.mod -- module m +go 1.20 + -- x.go -- package x import _ "rsc.io/quote" diff --git a/src/cmd/go/testdata/script/mod_replace.txt b/src/cmd/go/testdata/script/mod_replace.txt index 5894ed69f3417..78d6729fce322 100644 --- a/src/cmd/go/testdata/script/mod_replace.txt +++ b/src/cmd/go/testdata/script/mod_replace.txt @@ -1,10 +1,14 @@ env GO111MODULE=on +cp go.mod go.mod.orig + +# Make sure the test builds without replacement. go build -o a1.exe . exec ./a1.exe stdout 'Don''t communicate by sharing memory' # Modules can be replaced by local packages. +cp go.mod.orig go.mod go mod edit -replace=rsc.io/quote/v3=./local/rsc.io/quote/v3 go build -o a2.exe . exec ./a2.exe @@ -12,16 +16,27 @@ stdout 'Concurrency is not parallelism.' # The module path of the replacement doesn't need to match. # (For example, it could be a long-running fork with its own import path.) +cp go.mod.orig go.mod go mod edit -replace=rsc.io/quote/v3=./local/not-rsc.io/quote/v3 go build -o a3.exe . exec ./a3.exe stdout 'Clear is better than clever.' # However, the same module can't be used as two different paths. -go mod edit -dropreplace=rsc.io/quote/v3 -replace=not-rsc.io/quote/v3@v3.0.0=rsc.io/quote/v3@v3.0.0 -require=not-rsc.io/quote/v3@v3.0.0 +cp go.mod.orig go.mod +go mod edit -replace=not-rsc.io/quote/v3@v3.0.0=rsc.io/quote/v3@v3.0.0 -require=not-rsc.io/quote/v3@v3.0.0 ! go build -o a4.exe . 
stderr 'rsc.io/quote/v3@v3.0.0 used for two different module paths \(not-rsc.io/quote/v3 and rsc.io/quote/v3\)' +# Modules that do not (yet) exist upstream can be replaced too. +cp go.mod.orig go.mod +go mod edit -replace=not-rsc.io/quote/v3@v3.1.0=./local/rsc.io/quote/v3 +go build -o a5.exe ./usenewmodule +! stderr 'finding not-rsc.io/quote/v3' +grep 'not-rsc.io/quote/v3 v3.1.0' go.mod +exec ./a5.exe +stdout 'Concurrency is not parallelism.' + -- go.mod -- module quoter @@ -39,6 +54,18 @@ func main() { fmt.Println(quote.GoV3()) } +-- usenewmodule/main.go -- +package main + +import ( + "fmt" + "not-rsc.io/quote/v3" +) + +func main() { + fmt.Println(quote.GoV3()) +} + -- local/rsc.io/quote/v3/go.mod -- module rsc.io/quote/v3 diff --git a/src/cmd/go/testdata/script/mod_replace_import.txt b/src/cmd/go/testdata/script/mod_replace_import.txt new file mode 100644 index 0000000000000..0da753a1a763c --- /dev/null +++ b/src/cmd/go/testdata/script/mod_replace_import.txt @@ -0,0 +1,109 @@ +env GO111MODULE=on + +# 'go list -mod=readonly' should not add requirements even if they can be +# resolved locally. +cp go.mod go.mod.orig +! go list -mod=readonly all +cmp go.mod go.mod.orig + +# 'go list' should resolve imports using replacements. +go list all +stdout 'example.com/a/b$' +stdout 'example.com/x/v3$' +stdout 'example.com/y/z/w$' +stdout 'example.com/v' + +# The selected modules should prefer longer paths, +# but should try shorter paths if needed. +# Modules with a major-version suffix should have a corresponding pseudo-version. +# Replacements that specify a version should use the latest such version. 
+go list -m all +stdout 'example.com/a/b v0.0.0-00010101000000-000000000000 => ./b' +stdout 'example.com/y v0.0.0-00010101000000-000000000000 => ./y' +stdout 'example.com/x/v3 v3.0.0-00010101000000-000000000000 => ./v3' +stdout 'example.com/v v1.12.0 => ./v12' + +-- go.mod -- +module example.com/m + +replace ( + example.com/a => ./a + example.com/a/b => ./b +) + +replace ( + example.com/x => ./x + example.com/x/v3 => ./v3 +) + +replace ( + example.com/y/z/w => ./w + example.com/y => ./y +) + +replace ( + example.com/v v1.11.0 => ./v11 + example.com/v v1.12.0 => ./v12 + example.com/v => ./v +) + +-- m.go -- +package main +import ( + _ "example.com/a/b" + _ "example.com/x/v3" + _ "example.com/y/z/w" + _ "example.com/v" +) +func main() {} + +-- a/go.mod -- +module a.localhost +-- a/a.go -- +package a +-- a/b/b.go-- +package b + +-- b/go.mod -- +module a.localhost/b +-- b/b.go -- +package b + +-- x/go.mod -- +module x.localhost +-- x/x.go -- +package x +-- x/v3.go -- +package v3 +import _ "x.localhost/v3" + +-- v3/go.mod -- +module x.localhost/v3 +-- v3/x.go -- +package x + +-- w/go.mod -- +module w.localhost +-- w/skip/skip.go -- +// Package skip is nested below nonexistent package w. 
+package skip + +-- y/go.mod -- +module y.localhost +-- y/z/w/w.go -- +package w + +-- v12/go.mod -- +module v.localhost +-- v12/v.go -- +package v + +-- v11/go.mod -- +module v.localhost +-- v11/v.go -- +package v + +-- v/go.mod -- +module v.localhost +-- v/v.go -- +package v diff --git a/src/cmd/go/testdata/script/mod_std_vendor.txt b/src/cmd/go/testdata/script/mod_std_vendor.txt new file mode 100644 index 0000000000000..7aa1bc353be85 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_std_vendor.txt @@ -0,0 +1,19 @@ +env GO111MODULE=on + +go list -f '{{.TestImports}}' +stdout net/http # from .TestImports + +go list -test -f '{{.Deps}}' +stdout internal/x/crypto # dep of .TestImports + +-- go.mod -- +module m + +-- x.go -- +package x + +-- x_test.go -- +package x +import "testing" +import _ "net/http" +func Test(t *testing.T) {} diff --git a/src/cmd/go/testdata/script/mod_string_alias.txt b/src/cmd/go/testdata/script/mod_string_alias.txt new file mode 100644 index 0000000000000..5c3d4287cc876 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_string_alias.txt @@ -0,0 +1,14 @@ +[short] skip + +env GO111MODULE=on + +go mod init golang.org/issue/27584 + +go build . + +-- main.go -- +package main + +type string = []int + +func main() {} diff --git a/src/cmd/go/testdata/script/mod_sum_replaced.txt b/src/cmd/go/testdata/script/mod_sum_replaced.txt new file mode 100644 index 0000000000000..b03982d9cff44 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_sum_replaced.txt @@ -0,0 +1,28 @@ +env GO111MODULE=on + +# After 'go get -d', the go.sum file should contain the sum for the module. +go get -d rsc.io/quote@v1.5.0 +grep 'rsc.io/quote v1.5.0' go.sum + +# If we replace the module and run 'go mod tidy', we should get a sum for the replacement. +go mod edit -replace rsc.io/quote@v1.5.0=rsc.io/quote@v1.5.1 +go mod tidy +grep 'rsc.io/quote v1.5.1' go.sum +cp go.sum go.sum.tidy + +# 'go mod vendor' should preserve that sum, and should not need to add any new entries. 
+go mod vendor +grep 'rsc.io/quote v1.5.1' go.sum +cmp go.sum go.sum.tidy + +-- go.mod -- +module golang.org/issue/27868 + +require rsc.io/quote v1.5.0 + +-- main.go -- +package main + +import _ "rsc.io/quote" + +func main() {} diff --git a/src/cmd/go/testdata/script/mod_symlink.txt b/src/cmd/go/testdata/script/mod_symlink.txt new file mode 100644 index 0000000000000..61da3cc35580b --- /dev/null +++ b/src/cmd/go/testdata/script/mod_symlink.txt @@ -0,0 +1,23 @@ +env GO111MODULE=on +[!symlink] skip + +# 'go list' should resolve modules of imported packages. +go list -deps -f '{{.Module}}' +stdout golang.org/x/text + +# They should continue to resolve if the importing file is a symlink. +mkdir links +cd links +symlink go.mod -> ../go.mod +symlink issue.go -> ../issue.go + +go list -deps -f '{{.Module}}' +stdout golang.org/x/text + +-- go.mod -- +module golang.org/issue/28107 + +-- issue.go -- +package issue + +import _ "golang.org/x/text/language" diff --git a/src/cmd/go/testdata/script/mod_test.txt b/src/cmd/go/testdata/script/mod_test.txt index caeb25ada8458..af4fd76d706cf 100644 --- a/src/cmd/go/testdata/script/mod_test.txt +++ b/src/cmd/go/testdata/script/mod_test.txt @@ -1,5 +1,8 @@ env GO111MODULE=on +# TODO(bcmills): Convert the 'go test' calls below to 'go list -test' once 'go +# list' is more sensitive to package loading errors. + # A test in the module's root package should work. cd a/ cp go.mod.empty go.mod @@ -48,6 +51,10 @@ cd ../d_test go test stdout PASS +cd ../e +go test +stdout PASS + -- a/go.mod.empty -- module example.com/user/a diff --git a/src/cmd/go/testdata/script/mod_test_files.txt b/src/cmd/go/testdata/script/mod_test_files.txt new file mode 100644 index 0000000000000..87aecb44f678e --- /dev/null +++ b/src/cmd/go/testdata/script/mod_test_files.txt @@ -0,0 +1,49 @@ +env GO111MODULE=on + +cd foo + +# Testing an explicit source file should use the same import visibility as the +# package in the same directory. 
+go list -test -deps +go list -test -deps foo_test.go + +# If the file is inside the main module's vendor directory, it should have +# visibility based on the vendor-relative import path. +mkdir vendor/example.com/foo +cp foo_test.go vendor/example.com/foo +go list -test -deps vendor/example.com/foo/foo_test.go + +# If the file is outside the main module entirely, it should be treated as outside. +cp foo_test.go ../foo_test.go +! go list -test -deps ../foo_test.go +stderr 'use of internal package' + +-- foo/go.mod -- +module example.com/foo +require example.com/internal v0.0.0 +replace example.com/internal => ../internal + +-- foo/internal.go -- +package foo +import _ "example.com/internal" + +-- foo/foo_test.go -- +package foo_test + +import ( + "testing" + "example.com/internal" +) + +func TestHacksEnabled(t *testing.T) { + if !internal.Hacks { + t.Fatal("hacks not enabled") + } +} + +-- internal/go.mod -- +module example.com/internal + +-- internal/internal.go -- +package internal +const Hacks = true diff --git a/src/cmd/go/testdata/script/mod_tidy.txt b/src/cmd/go/testdata/script/mod_tidy.txt index 449aa073a7847..de3b52e2c023c 100644 --- a/src/cmd/go/testdata/script/mod_tidy.txt +++ b/src/cmd/go/testdata/script/mod_tidy.txt @@ -5,6 +5,9 @@ go mod tidy -v stderr '^unused y.1' ! stderr '^unused [^y]' +# tidy should not touch existing go line +grep 'go 1.10' go.mod + go list -m all ! stdout '^y' stdout '^w.1 v1.2.0' @@ -12,11 +15,17 @@ stdout '^z.1 v1.2.0' # empty tidy should not crash cd triv +! 
grep 'go ' go.mod go mod tidy +# tidy should add missing go line +grep 'go ' go.mod + -- go.mod -- module m +go 1.10 + require ( x.1 v1.0.0 y.1 v1.0.0 diff --git a/src/cmd/go/testdata/script/mod_tidy_replace.txt b/src/cmd/go/testdata/script/mod_tidy_replace.txt new file mode 100644 index 0000000000000..86467a65451d3 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_tidy_replace.txt @@ -0,0 +1,110 @@ +env GO111MODULE=on + +# golang.org/issue/30166: 'go mod tidy' should not crash if a replaced module is +# involved in a cycle. +cd cycle +env GOTRACEBACK=off +go mod tidy +cd .. + +# From inside the module, 'go list -m all' should NOT include transitive +# requirements of modules that have been replaced. +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +! stdout 'rsc.io/sampler' +! stdout 'golang.org/x/text' + +# From outside the module, 'go list -m all' should include them. +cd outside +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +stdout 'rsc.io/sampler v1.3.0' +stdout 'golang.org/x/text' +cd .. + +# 'go list all' should add indirect requirements to satisfy the packages +# imported from replacement modules. +! grep 'rsc.io/sampler' go.mod +! grep 'golang.org/x/text' go.mod +go list all +grep 'rsc.io/sampler' go.mod +grep 'golang.org/x/text' go.mod + +# 'go get' and 'go mod tidy' should follow the requirements of the replacements, +# not the originals, even if that results in a set of versions that are +# misleading or redundant without those replacements. +go get rsc.io/sampler@v1.2.0 +go mod tidy +go list -m all +stdout 'rsc.io/quote/v3 v3.0.0' +stdout 'rsc.io/sampler v1.2.0' +stdout 'golang.org/x/text' + +# The requirements seen from outside may be higher (or lower) +# than those seen from within the module. 
+grep 'rsc.io/sampler v1.2.0' go.mod +cd outside +go list -m all +stdout 'rsc.io/sampler v1.3.0' + +-- go.mod -- +module example.com/tidy + +require rsc.io/quote/v3 v3.0.0 +replace rsc.io/quote/v3 => ./not-rsc.io/quote/v3 + +-- imports.go -- +package tidy + +import _ "rsc.io/quote/v3" + +-- outside/go.mod -- +module example.com/tidy/outside + +require example.com/tidy v0.0.0 +replace example.com/tidy => ./.. + +-- not-rsc.io/quote/v3/go.mod -- +module not-rsc.io/quote/v3 + +// No requirements specified! + +-- not-rsc.io/quote/v3/quote.go -- +package quote + +import ( + _ "rsc.io/sampler" + _ "golang.org/x/text/language" +) + +-- cycle/go.mod -- +module golang.org/issue/30166 + +require ( + golang.org/issue/30166/a v0.0.0 + golang.org/issue/30166/b v0.0.0 +) + +replace ( + golang.org/issue/30166/a => ./a + golang.org/issue/30166/b => ./b +) +-- cycle/cycle.go -- +package cycle + +import ( + _ "golang.org/issue/30166/a" + _ "golang.org/issue/30166/b" +) +-- cycle/a/a.go -- +package a +-- cycle/a/go.mod -- +module golang.org/issue/30166/a + +require golang.org/issue/30166/b v0.0.0 +-- cycle/b/b.go -- +package b +-- cycle/b/go.mod -- +module golang.org/issue/30166/b + +require golang.org/issue/30166/a v0.0.0 diff --git a/src/cmd/go/testdata/script/mod_vcs_missing.txt b/src/cmd/go/testdata/script/mod_vcs_missing.txt index fb146b44155eb..009bb91c3c79b 100644 --- a/src/cmd/go/testdata/script/mod_vcs_missing.txt +++ b/src/cmd/go/testdata/script/mod_vcs_missing.txt @@ -4,8 +4,25 @@ env GO111MODULE=on env GOPROXY= +cd empty ! go list launchpad.net/gocheck stderr '"bzr": executable file not found' +cd .. --- go.mod -- +# 1.11 used to give the cryptic error "cannot find module for path" here, but +# only for a main package. +cd main +! go build +stderr '"bzr": executable file not found' +cd .. 
+ +-- empty/go.mod -- +module m +-- main/go.mod -- module m +-- main/main.go -- +package main + +import _ "launchpad.net/gocheck" + +func main() {} diff --git a/src/cmd/go/testdata/script/mod_vendor.txt b/src/cmd/go/testdata/script/mod_vendor.txt index b3769a850415f..203183be881f2 100644 --- a/src/cmd/go/testdata/script/mod_vendor.txt +++ b/src/cmd/go/testdata/script/mod_vendor.txt @@ -67,6 +67,7 @@ module m require ( a v1.0.0 + diamondroot v0.0.0 mysite/myname/mypkg v1.0.0 w v1.0.0 // indirect x v1.0.0 @@ -76,6 +77,10 @@ require ( replace ( a v1.0.0 => ./a + diamondleft => ./diamondleft + diamondpoint => ./diamondpoint + diamondright => ./diamondright + diamondroot => ./diamondroot mysite/myname/mypkg v1.0.0 => ./mypkg w v1.0.0 => ./w x v1.0.0 => ./x @@ -200,6 +205,10 @@ import _ "z" package m import _ "x/x1" +-- importdiamond.go -- +package m + +import _ "diamondroot" -- w/go.mod -- module w -- w/w.go -- @@ -228,3 +237,42 @@ package y module z -- z/z.go -- package z + +-- diamondroot/go.mod -- +module diamondroot + +require ( + diamondleft v0.0.0 + diamondright v0.0.0 +) +-- diamondroot/x.go -- +package diamondroot + +import ( + _ "diamondleft" + _ "diamondright" +) +-- diamondleft/go.mod -- +module diamondleft + +require ( + diamondpoint v0.0.0 +) +-- diamondleft/x.go -- +package diamondleft + +import _ "diamondpoint" +-- diamondright/go.mod -- +module diamondright + +require ( + diamondpoint v0.0.0 +) +-- diamondright/x.go -- +package diamondright + +import _ "diamondpoint" +-- diamondpoint/go.mod -- +module diamondpoint +-- diamondpoint/x.go -- +package diamondpoint diff --git a/src/cmd/go/testdata/script/mod_vendor_replace.txt b/src/cmd/go/testdata/script/mod_vendor_replace.txt new file mode 100644 index 0000000000000..6bc1c77ed3d0d --- /dev/null +++ b/src/cmd/go/testdata/script/mod_vendor_replace.txt @@ -0,0 +1,39 @@ +env GO111MODULE=on + +# Before vendoring, we expect to see the original directory. 
+go list -f '{{.Version}} {{.Dir}}' -m rsc.io/quote/v3 +stdout 'v3.0.0' +stdout '.*[/\\]not-rsc.io[/\\]quote[/\\]v3' + +# Since all dependencies are replaced, 'go mod vendor' should not +# have to download anything from the network. +go mod vendor +! stderr 'downloading' +! stderr 'finding' + +# After vendoring, we expect to see the replacement in the vendor directory, +# without attempting to look up the non-replaced version. +cmp vendor/rsc.io/quote/v3/quote.go local/not-rsc.io/quote/v3/quote.go + +go list -mod=vendor -f '{{.Version}} {{.Dir}}' -m rsc.io/quote/v3 +stdout 'v3.0.0' +stdout '.*[/\\]vendor[/\\]rsc.io[/\\]quote[/\\]v3' +! stderr 'finding' +! stderr 'lookup disabled' + +-- go.mod -- +module example.com/replace + +require rsc.io/quote/v3 v3.0.0 +replace rsc.io/quote/v3 => ./local/not-rsc.io/quote/v3 + +-- imports.go -- +package replace + +import _ "rsc.io/quote/v3" + +-- local/not-rsc.io/quote/v3/go.mod -- +module not-rsc.io/quote/v3 + +-- local/not-rsc.io/quote/v3/quote.go -- +package quote diff --git a/src/cmd/go/testdata/script/run_wildcard.txt b/src/cmd/go/testdata/script/run_wildcard.txt new file mode 100644 index 0000000000000..cd401e00e6441 --- /dev/null +++ b/src/cmd/go/testdata/script/run_wildcard.txt @@ -0,0 +1,5 @@ +# Fix for https://github.com/golang/go/issues/28696: +# go run x/... should not panic when directory x doesn't exist. + +! go run nonexistent/... +stderr '^go run: no packages loaded from nonexistent/...$' diff --git a/src/cmd/go/testdata/script/script_wait.txt b/src/cmd/go/testdata/script/script_wait.txt new file mode 100644 index 0000000000000..0770b39523d5f --- /dev/null +++ b/src/cmd/go/testdata/script/script_wait.txt @@ -0,0 +1,22 @@ +[!exec:echo] skip +[!exec:false] skip + +exec echo foo +stdout foo + +exec echo foo & +exec echo bar & +! exec false & + +# Starting a background process should clear previous output. +! 
stdout foo + +# Wait should set the output to the concatenated outputs of the background +# programs, in the order in which they were started. +wait +stdout 'foo\nbar' + +# The end of the test should interrupt or kill any remaining background +# programs. +[!exec:sleep] skip +! exec sleep 86400 & diff --git a/src/cmd/go/testdata/script/test_devnull.txt b/src/cmd/go/testdata/script/test_devnull.txt new file mode 100644 index 0000000000000..c414e59ba3a9c --- /dev/null +++ b/src/cmd/go/testdata/script/test_devnull.txt @@ -0,0 +1,13 @@ +# go test -c -o NUL +# should work (see golang.org/issue/28035). +cd x +go test -o=$devnull -c +! exists x.test$exe + +-- x/x_test.go -- +package x_test +import ( + "testing" +) +func TestNUL(t *testing.T) { +} diff --git a/src/cmd/go/testdata/script/vet_asm.txt b/src/cmd/go/testdata/script/vet_asm.txt new file mode 100644 index 0000000000000..807e2b76f5899 --- /dev/null +++ b/src/cmd/go/testdata/script/vet_asm.txt @@ -0,0 +1,31 @@ +# Issue 27665. Verify that "go vet" analyzes non-Go files. + +env GOOS=linux +env GOARCH=amd64 +! go vet -asmdecl a +stderr 'f: invalid MOVW of x' + +# -c flag shows context +! go vet -c=2 -asmdecl a +stderr '...invalid MOVW...' +stderr '1 .*TEXT' +stderr '2 MOVW' +stderr '3 RET' +stderr '4' + +# -json causes success, even with diagnostics and errors. +go vet -json -asmdecl a +stderr '"a": {' +stderr '"asmdecl":' +stderr '"posn": ".*asm.s:2:1",' +stderr '"message": ".*invalid MOVW.*"' + +-- a/a.go -- +package a + +func f(x int8) + +-- a/asm.s -- +TEXT ·f(SB),0,$0-1 + MOVW x+0(FP), AX + RET diff --git a/src/cmd/go/testdata/testterminal18153/terminal_test.go b/src/cmd/go/testdata/testterminal18153/terminal_test.go index d662e55ee552b..71493efe98374 100644 --- a/src/cmd/go/testdata/testterminal18153/terminal_test.go +++ b/src/cmd/go/testdata/testterminal18153/terminal_test.go @@ -5,7 +5,7 @@ // +build linux // This test is run by src/cmd/dist/test.go (cmd_go_test_terminal), -// and not by cmd/go's tests. 
This is because this test requires that +// and not by cmd/go's tests. This is because this test requires // that it be called with its stdout and stderr being a terminal. // dist doesn't run `cmd/go test` against this test directory if // dist's stdout/stderr aren't terminals. diff --git a/src/cmd/go/vendor_test.go b/src/cmd/go/vendor_test.go index 22aa643b0032e..c302d7e9b5841 100644 --- a/src/cmd/go/vendor_test.go +++ b/src/cmd/go/vendor_test.go @@ -37,7 +37,7 @@ func TestVendorImports(t *testing.T) { vend/x/vendor/p/p [notfound] vend/x/vendor/r [] ` - want = strings.Replace(want+"\t", "\n\t\t", "\n", -1) + want = strings.ReplaceAll(want+"\t", "\n\t\t", "\n") want = strings.TrimPrefix(want, "\n") have := tg.stdout.String() diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index d5b7be327a502..ac6852f2e4e38 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -319,10 +319,7 @@ func backupFile(filename string, data []byte, perm os.FileMode) (string, error) } // write data to backup file - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } + _, err = f.Write(data) if err1 := f.Close(); err == nil { err = err1 } diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go index 16b653b6460d7..3008365cd237c 100644 --- a/src/cmd/gofmt/gofmt_test.go +++ b/src/cmd/gofmt/gofmt_test.go @@ -200,7 +200,7 @@ func TestDiff(t *testing.T) { } if runtime.GOOS == "windows" { - b = bytes.Replace(b, []byte{'\r', '\n'}, []byte{'\n'}, -1) + b = bytes.ReplaceAll(b, []byte{'\r', '\n'}, []byte{'\n'}) } bs := bytes.SplitN(b, []byte{'\n'}, 3) diff --git a/src/cmd/gofmt/long_test.go b/src/cmd/gofmt/long_test.go index 237b86021bf35..e2a6208f871ea 100644 --- a/src/cmd/gofmt/long_test.go +++ b/src/cmd/gofmt/long_test.go @@ -85,6 +85,12 @@ func testFile(t *testing.T, b1, b2 *bytes.Buffer, filename string) { // the first and 2nd result should be identical if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + // A known instance of 
gofmt not being idempotent + // (see Issue #24472) + if strings.HasSuffix(filename, "issue22662.go") { + t.Log("known gofmt idempotency bug (Issue #24472)") + return + } t.Errorf("gofmt %s not idempotent", filename) } } diff --git a/src/cmd/internal/buildid/buildid.go b/src/cmd/internal/buildid/buildid.go index fa3d7f37ec60d..ac238d70ea0cd 100644 --- a/src/cmd/internal/buildid/buildid.go +++ b/src/cmd/internal/buildid/buildid.go @@ -8,6 +8,7 @@ import ( "bytes" "debug/elf" "fmt" + "internal/xcoff" "io" "os" "strconv" @@ -40,6 +41,9 @@ func ReadFile(name string) (id string, err error) { return "", err } if string(buf) != "!\n" { + if string(buf) == "\n" { + return readGccgoBigArchive(name, f) + } return readBinary(name, f) } @@ -157,6 +161,85 @@ func readGccgoArchive(name string, f *os.File) (string, error) { } } +// readGccgoBigArchive tries to parse the archive as an AIX big +// archive file, and fetch the build ID from the _buildid.o entry. +// The _buildid.o entry is written by (*Builder).gccgoBuildIDXCOFFFile +// in cmd/go/internal/work/exec.go. +func readGccgoBigArchive(name string, f *os.File) (string, error) { + bad := func() (string, error) { + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} + } + + // Read fixed-length header. + if _, err := f.Seek(0, io.SeekStart); err != nil { + return "", err + } + var flhdr [128]byte + if _, err := io.ReadFull(f, flhdr[:]); err != nil { + return "", err + } + // Read first member offset. + offStr := strings.TrimSpace(string(flhdr[68:88])) + off, err := strconv.ParseInt(offStr, 10, 64) + if err != nil { + return bad() + } + for { + if off == 0 { + // No more entries, no build ID. + return "", nil + } + if _, err := f.Seek(off, io.SeekStart); err != nil { + return "", err + } + // Read member header. + var hdr [112]byte + if _, err := io.ReadFull(f, hdr[:]); err != nil { + return "", err + } + // Read member name length. 
+ namLenStr := strings.TrimSpace(string(hdr[108:112])) + namLen, err := strconv.ParseInt(namLenStr, 10, 32) + if err != nil { + return bad() + } + if namLen == 10 { + var nam [10]byte + if _, err := io.ReadFull(f, nam[:]); err != nil { + return "", err + } + if string(nam[:]) == "_buildid.o" { + sizeStr := strings.TrimSpace(string(hdr[0:20])) + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return bad() + } + off += int64(len(hdr)) + namLen + 2 + if off&1 != 0 { + off++ + } + sr := io.NewSectionReader(f, off, size) + x, err := xcoff.NewFile(sr) + if err != nil { + return bad() + } + data := x.CSect(".go.buildid") + if data == nil { + return bad() + } + return string(data), nil + } + } + + // Read next member offset. + offStr = strings.TrimSpace(string(hdr[20:40])) + off, err = strconv.ParseInt(offStr, 10, 64) + if err != nil { + return bad() + } + } +} + var ( goBuildPrefix = []byte("\xff Go build ID: \"") goBuildEnd = []byte("\"\n \xff") diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index 96fb2b765b8eb..8ad84105a4814 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -179,7 +179,7 @@ type Context interface { AddBytes(s Sym, b []byte) AddAddress(s Sym, t interface{}, ofs int64) AddSectionOffset(s Sym, size int, t interface{}, ofs int64) - AddDWARFSectionOffset(s Sym, size int, t interface{}, ofs int64) + AddDWARFAddrSectionOffset(s Sym, t interface{}, ofs int64) CurrentOffset(s Sym) int64 RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int) RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32) @@ -304,6 +304,7 @@ const ( const ( DW_ABRV_NULL = iota DW_ABRV_COMPUNIT + DW_ABRV_COMPUNIT_TEXTLESS DW_ABRV_FUNCTION DW_ABRV_FUNCTION_ABSTRACT DW_ABRV_FUNCTION_CONCRETE @@ -368,6 +369,18 @@ var abbrevs = [DW_NABRV]dwAbbrev{ }, }, + /* COMPUNIT_TEXTLESS */ + { + DW_TAG_compile_unit, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_language, 
DW_FORM_data1}, + {DW_AT_comp_dir, DW_FORM_string}, + {DW_AT_producer, DW_FORM_string}, + }, + }, + /* FUNCTION */ { DW_TAG_subprogram, @@ -882,7 +895,7 @@ func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, da case DW_FORM_data4: // constant, {line,loclist,mac,rangelist}ptr if cls == DW_CLS_PTR { // DW_AT_stmt_list and DW_AT_ranges - ctxt.AddDWARFSectionOffset(s, 4, data, value) + ctxt.AddDWARFAddrSectionOffset(s, data, value) break } ctxt.AddInt(s, 4, value) @@ -919,7 +932,7 @@ func putattr(ctxt Context, s Sym, abbrev int, form int, cls int, value int64, da if data == nil { return fmt.Errorf("dwarf: null reference in %d", abbrev) } - ctxt.AddDWARFSectionOffset(s, 4, data, value) + ctxt.AddDWARFAddrSectionOffset(s, data, value) case DW_FORM_ref1, // reference within the compilation unit DW_FORM_ref2, // reference @@ -954,7 +967,7 @@ Outer: } } -// HasChildren returns true if 'die' uses an abbrev that supports children. +// HasChildren reports whether 'die' uses an abbrev that supports children. func HasChildren(die *DWDie) bool { return abbrevs[die.Abbrev].children != 0 } diff --git a/src/cmd/internal/goobj/read.go b/src/cmd/internal/goobj/read.go index e39180cad666c..84aed6eeea496 100644 --- a/src/cmd/internal/goobj/read.go +++ b/src/cmd/internal/goobj/read.go @@ -119,10 +119,11 @@ type FuncData struct { // An InlinedCall is a node in an InlTree. // See cmd/internal/obj.InlTree for details. type InlinedCall struct { - Parent int64 - File string - Line int64 - Func SymID + Parent int64 + File string + Line int64 + Func SymID + ParentPC int64 } // A Package is a parsed Go object file or archive defining a Go package. @@ -288,18 +289,31 @@ func (r *objReader) readSymID() SymID { } func (r *objReader) readRef() { - name, vers := r.readString(), r.readInt() + name, abiOrStatic := r.readString(), r.readInt() // In a symbol name in an object file, "". denotes the // prefix for the package in which the object file has been found. // Expand it. 
- name = strings.Replace(name, `"".`, r.pkgprefix, -1) - - // An individual object file only records version 0 (extern) or 1 (static). - // To make static symbols unique across all files being read, we - // replace version 1 with the version corresponding to the current - // file number. The number is incremented on each call to parseObject. - if vers != 0 { + name = strings.ReplaceAll(name, `"".`, r.pkgprefix) + + // The ABI field records either the ABI or -1 for static symbols. + // + // To distinguish different static symbols with the same name, + // we use the symbol "version". Version 0 corresponds to + // global symbols, and each file has a unique version > 0 for + // all of its static symbols. The version is incremented on + // each call to parseObject. + // + // For global symbols, we currently ignore the ABI. + // + // TODO(austin): Record the ABI in SymID. Since this is a + // public API, we'll have to keep Version as 0 and record the + // ABI in a new field (which differs from how the linker does + // this, but that's okay). Show the ABI in things like + // objdump. 
+ var vers int64 + if abiOrStatic == -1 { + // Static symbol vers = r.p.MaxVersion } r.p.SymRefs = append(r.p.SymRefs, SymID{name, vers}) @@ -487,7 +501,7 @@ func (r *objReader) parseObject(prefix []byte) error { // TODO: extract OS + build ID if/when we need it r.readFull(r.tmp[:8]) - if !bytes.Equal(r.tmp[:8], []byte("\x00\x00go19ld")) { + if !bytes.Equal(r.tmp[:8], []byte("\x00go112ld")) { return r.error(errCorruptObject) } @@ -597,12 +611,13 @@ func (r *objReader) parseObject(prefix []byte) error { f.InlTree[i].File = r.readSymID().Name f.InlTree[i].Line = r.readInt() f.InlTree[i].Func = r.readSymID() + f.InlTree[i].ParentPC = r.readInt() } } } r.readFull(r.tmp[:7]) - if !bytes.Equal(r.tmp[:7], []byte("\xffgo19ld")) { + if !bytes.Equal(r.tmp[:7], []byte("go112ld")) { return r.error(errCorruptObject) } diff --git a/src/cmd/internal/obj/abi_string.go b/src/cmd/internal/obj/abi_string.go new file mode 100644 index 0000000000000..a439da36a34ee --- /dev/null +++ b/src/cmd/internal/obj/abi_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type ABI"; DO NOT EDIT. 
+ +package obj + +import "strconv" + +const _ABI_name = "ABI0ABIInternalABICount" + +var _ABI_index = [...]uint8{0, 4, 15, 23} + +func (i ABI) String() string { + if i >= ABI(len(_ABI_index)-1) { + return "ABI(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ABI_name[_ABI_index[i]:_ABI_index[i+1]] +} diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 3427ea9161c62..b1fb1d394411a 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -1529,7 +1529,7 @@ func buildop(ctxt *obj.Link) { return } - deferreturn = ctxt.Lookup("runtime.deferreturn") + deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) symdiv = ctxt.Lookup("runtime._div") symdivu = ctxt.Lookup("runtime._divu") @@ -2007,7 +2007,7 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { o2 = c.oprrr(p, p.As, int(p.Scond)) o2 |= REGTMP & 15 r := int(p.Reg) - if p.As == AMOVW || p.As == AMVN { + if p.As == AMVN { r = 0 } else if r == 0 { r = int(p.To.Reg) diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index c17bf2a8ac1db..34bd5d6baf1a4 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ b/src/cmd/internal/obj/arm/obj5.go @@ -723,7 +723,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.As = AMOVW p.From.Type = obj.TYPE_ADDR p.From.Reg = REGSP - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 p.Scond = C_SCOND_NE @@ -739,7 +739,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = AMOVW p.From.Type = obj.TYPE_ADDR - p.From.Offset = int64(framesize) + (objabi.StackGuard - objabi.StackSmall) + p.From.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 p.Scond = C_SCOND_NE diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go index 
9be0183edf880..18cdd10f9b134 100644 --- a/src/cmd/internal/obj/arm64/a.out.go +++ b/src/cmd/internal/obj/arm64/a.out.go @@ -407,11 +407,15 @@ const ( C_ABCON0 // could be C_ADDCON0 or C_BITCON C_ADDCON0 // 12-bit unsigned, unshifted C_ABCON // could be C_ADDCON or C_BITCON + C_AMCON // could be C_ADDCON or C_MOVCON C_ADDCON // 12-bit unsigned, shifted left by 0 or 12 C_MBCON // could be C_MOVCON or C_BITCON C_MOVCON // generated by a 16-bit constant, optionally inverted and/or shifted by multiple of 16 C_BITCON // bitfield and logical immediate masks + C_ADDCON2 // 24-bit constant C_LCON // 32-bit constant + C_MOVCON2 // a constant that can be loaded with one MOVZ/MOVN and one MOVK + C_MOVCON3 // a constant that can be loaded with one MOVZ/MOVN and two MOVKs C_VCON // 64-bit constant C_FCON // floating-point constant C_VCONADDR // 64-bit memory address @@ -594,8 +598,10 @@ const ( AHVC AIC AISB - ALDADDALD + ALDADDALB + ALDADDALH ALDADDALW + ALDADDALD ALDADDB ALDADDH ALDADDW @@ -774,9 +780,13 @@ const ( AMOVPSW AMOVPW ASWPD + ASWPALD ASWPW + ASWPALW ASWPH + ASWPALH ASWPB + ASWPALB ABEQ ABNE ABCS @@ -817,6 +827,8 @@ const ( AFCVTZUSW AFDIVD AFDIVS + AFLDPD + AFLDPS AFMOVD AFMOVS AFMULD @@ -825,6 +837,8 @@ const ( AFNEGS AFSQRTD AFSQRTS + AFSTPD + AFSTPS AFSUBD AFSUBS ASCVTFD diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 84fb40b10260c..55e2b5bafbb1a 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -95,8 +95,10 @@ var Anames = []string{ "HVC", "IC", "ISB", - "LDADDALD", + "LDADDALB", + "LDADDALH", "LDADDALW", + "LDADDALD", "LDADDB", "LDADDH", "LDADDW", @@ -275,9 +277,13 @@ var Anames = []string{ "MOVPSW", "MOVPW", "SWPD", + "SWPALD", "SWPW", + "SWPALW", "SWPH", + "SWPALH", "SWPB", + "SWPALB", "BEQ", "BNE", "BCS", @@ -318,6 +324,8 @@ var Anames = []string{ "FCVTZUSW", "FDIVD", "FDIVS", + "FLDPD", + "FLDPS", "FMOVD", "FMOVS", "FMULD", @@ -326,6 +334,8 @@ var Anames = 
[]string{ "FNEGS", "FSQRTD", "FSQRTS", + "FSTPD", + "FSTPS", "FSUBD", "FSUBS", "SCVTFD", diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go index 92f0cec94271b..e1703fc4ab8d5 100644 --- a/src/cmd/internal/obj/arm64/anames7.go +++ b/src/cmd/internal/obj/arm64/anames7.go @@ -23,11 +23,15 @@ var cnames7 = []string{ "ABCON0", "ADDCON0", "ABCON", + "AMCON", "ADDCON", "MBCON", "MOVCON", "BITCON", + "ADDCON2", "LCON", + "MOVCON2", + "MOVCON3", "VCON", "FCON", "VCONADDR", diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index e3bcce826594c..093b2228983d6 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -49,6 +49,7 @@ type ctxt7 struct { blitrl *obj.Prog elitrl *obj.Prog autosize int32 + extrasize int32 instoffset int64 pc int64 pool struct { @@ -163,6 +164,10 @@ func OPBIT(x uint32) uint32 { return 1<<30 | 0<<29 | 0xD6<<21 | 0<<16 | x<<10 } +func MOVCONST(d int64, s int, rt int) uint32 { + return uint32(((d>>uint(s*16))&0xFFFF)<<5) | uint32(s)&3<<21 | uint32(rt&31) +} + const ( LFROM = 1 << 0 LTO = 1 << 1 @@ -191,9 +196,17 @@ var optab = []Optab{ {AADD, C_BITCON, C_RSP, C_NONE, C_RSP, 62, 8, 0, 0, 0}, {AADD, C_BITCON, C_NONE, C_NONE, C_RSP, 62, 8, 0, 0, 0}, {ACMP, C_BITCON, C_RSP, C_NONE, C_NONE, 62, 8, 0, 0, 0}, - {AADD, C_VCON, C_RSP, C_NONE, C_RSP, 13, 8, 0, LFROM, 0}, - {AADD, C_VCON, C_NONE, C_NONE, C_RSP, 13, 8, 0, LFROM, 0}, - {ACMP, C_VCON, C_REG, C_NONE, C_NONE, 13, 8, 0, LFROM, 0}, + {AADD, C_ADDCON2, C_RSP, C_NONE, C_RSP, 48, 8, 0, 0, 0}, + {AADD, C_ADDCON2, C_NONE, C_NONE, C_RSP, 48, 8, 0, 0, 0}, + {AADD, C_MOVCON2, C_RSP, C_NONE, C_RSP, 13, 12, 0, 0, 0}, + {AADD, C_MOVCON2, C_NONE, C_NONE, C_RSP, 13, 12, 0, 0, 0}, + {AADD, C_MOVCON3, C_RSP, C_NONE, C_RSP, 13, 16, 0, 0, 0}, + {AADD, C_MOVCON3, C_NONE, C_NONE, C_RSP, 13, 16, 0, 0, 0}, + {AADD, C_VCON, C_RSP, C_NONE, C_RSP, 13, 20, 0, 0, 0}, + {AADD, C_VCON, C_NONE, C_NONE, C_RSP, 13, 20, 0, 0, 
0}, + {ACMP, C_MOVCON2, C_REG, C_NONE, C_NONE, 13, 12, 0, 0, 0}, + {ACMP, C_MOVCON3, C_REG, C_NONE, C_NONE, 13, 16, 0, 0, 0}, + {ACMP, C_VCON, C_REG, C_NONE, C_NONE, 13, 20, 0, 0, 0}, {AADD, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AADD, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AMVN, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, @@ -218,8 +231,6 @@ var optab = []Optab{ {AFADDS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, {AFADDS, C_FREG, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0}, - {AFADDS, C_FCON, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, - {AFADDS, C_FCON, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0}, {AFMSUBD, C_FREG, C_FREG, C_FREG, C_FREG, 15, 4, 0, 0, 0}, {AFCMPS, C_FREG, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0}, {AFCMPS, C_FCON, C_FREG, C_NONE, C_NONE, 56, 4, 0, 0, 0}, @@ -250,11 +261,21 @@ var optab = []Optab{ {AANDS, C_MOVCON, C_REG, C_NONE, C_REG, 62, 8, 0, 0, 0}, {AANDS, C_MOVCON, C_NONE, C_NONE, C_REG, 62, 8, 0, 0, 0}, {ATST, C_MOVCON, C_REG, C_NONE, C_NONE, 62, 8, 0, 0, 0}, - {AAND, C_VCON, C_REG, C_NONE, C_REG, 28, 8, 0, LFROM, 0}, - {AAND, C_VCON, C_NONE, C_NONE, C_REG, 28, 8, 0, LFROM, 0}, - {AANDS, C_VCON, C_REG, C_NONE, C_REG, 28, 8, 0, LFROM, 0}, - {AANDS, C_VCON, C_NONE, C_NONE, C_REG, 28, 8, 0, LFROM, 0}, - {ATST, C_VCON, C_REG, C_NONE, C_NONE, 28, 8, 0, LFROM, 0}, + {AAND, C_MOVCON2, C_REG, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AAND, C_MOVCON2, C_NONE, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AAND, C_MOVCON3, C_REG, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AAND, C_MOVCON3, C_NONE, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AAND, C_VCON, C_REG, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AAND, C_VCON, C_NONE, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AANDS, C_MOVCON2, C_REG, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AANDS, C_MOVCON2, C_NONE, C_NONE, C_REG, 28, 12, 0, 0, 0}, + {AANDS, C_MOVCON3, C_REG, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AANDS, C_MOVCON3, C_NONE, C_NONE, C_REG, 28, 16, 0, 0, 0}, + {AANDS, C_VCON, C_REG, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {AANDS, C_VCON, 
C_NONE, C_NONE, C_REG, 28, 20, 0, 0, 0}, + {ATST, C_MOVCON2, C_REG, C_NONE, C_NONE, 28, 12, 0, 0, 0}, + {ATST, C_MOVCON3, C_REG, C_NONE, C_NONE, 28, 16, 0, 0, 0}, + {ATST, C_VCON, C_REG, C_NONE, C_NONE, 28, 20, 0, 0, 0}, {AAND, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AAND, C_SHIFT, C_NONE, C_NONE, C_REG, 3, 4, 0, 0, 0}, {AANDS, C_SHIFT, C_REG, C_NONE, C_REG, 3, 4, 0, 0, 0}, @@ -271,12 +292,12 @@ var optab = []Optab{ /* MOVs that become MOVK/MOVN/MOVZ/ADD/SUB/OR */ {AMOVW, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, {AMOVD, C_MOVCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, - - // TODO: these don't work properly. - // { AMOVW, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0}, - // { AMOVD, C_ADDCON, C_NONE, C_REG, 2, 4, 0 , 0}, {AMOVW, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, {AMOVD, C_BITCON, C_NONE, C_NONE, C_REG, 32, 4, 0, 0, 0}, + {AMOVW, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, 0, 0}, + {AMOVD, C_MOVCON2, C_NONE, C_NONE, C_REG, 12, 8, 0, 0, 0}, + {AMOVD, C_MOVCON3, C_NONE, C_NONE, C_REG, 12, 12, 0, 0, 0}, + {AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 12, 16, 0, 0, 0}, {AMOVK, C_VCON, C_NONE, C_NONE, C_REG, 33, 4, 0, 0, 0}, {AMOVD, C_AACON, C_NONE, C_NONE, C_REG, 4, 4, REGFROM, 0, 0}, @@ -317,9 +338,7 @@ var optab = []Optab{ {AWORD, C_NONE, C_NONE, C_NONE, C_LCON, 14, 4, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_NONE, C_LEXT, 14, 4, 0, 0, 0}, {AWORD, C_NONE, C_NONE, C_NONE, C_ADDR, 14, 4, 0, 0, 0}, - {AMOVW, C_VCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0}, {AMOVW, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, 0, 0}, - {AMOVD, C_VCON, C_NONE, C_NONE, C_REG, 12, 4, 0, LFROM, 0}, {AMOVD, C_VCONADDR, C_NONE, C_NONE, C_REG, 68, 8, 0, 0, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, @@ -339,9 +358,9 @@ var optab = []Optab{ {AFMOVS, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 0, 0}, {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 64, 12, 0, 0, 0}, {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 65, 12, 0, 
0, 0}, - {AFMOVS, C_FCON, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFMOVS, C_FCON, C_NONE, C_NONE, C_FREG, 55, 4, 0, 0, 0}, {AFMOVS, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, - {AFMOVD, C_FCON, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, + {AFMOVD, C_FCON, C_NONE, C_NONE, C_FREG, 55, 4, 0, 0, 0}, {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 54, 4, 0, 0, 0}, {AFMOVS, C_REG, C_NONE, C_NONE, C_FREG, 29, 4, 0, 0, 0}, {AFMOVS, C_FREG, C_NONE, C_NONE, C_REG, 29, 4, 0, 0, 0}, @@ -400,8 +419,8 @@ var optab = []Optab{ {AMOVH, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, {AMOVW, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, - {AMOVD, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, {AMOVD, C_REG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, + {AMOVD, C_REG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, {AFMOVS, C_FREG, C_NONE, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, {AFMOVS, C_FREG, C_NONE, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, @@ -410,15 +429,15 @@ var optab = []Optab{ /* scaled 12-bit unsigned displacement load */ {AMOVB, C_UAUTO4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVB, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVB, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVBU, C_UAUTO4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVBU, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVBU, C_UOREG4K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVH, C_UAUTO8K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVH, C_UOREG8K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVH, C_UOREG8K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVW, C_UAUTO16K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVW, C_UOREG16K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVW, C_UOREG16K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVD, C_UAUTO32K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVD, C_UOREG32K, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVD, 
C_UOREG32K, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AFMOVS, C_UAUTO16K, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, {AFMOVS, C_UOREG16K, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, @@ -427,15 +446,15 @@ var optab = []Optab{ /* unscaled 9-bit signed displacement load */ {AMOVB, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVB, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVB, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVBU, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVBU, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVBU, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVH, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVH, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVH, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVW, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVW, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVW, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AMOVD, C_NSAUTO, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVD, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AMOVD, C_NSOREG, C_NONE, C_NONE, C_REG, 21, 4, 0, 0, 0}, {AFMOVS, C_NSAUTO, C_NONE, C_NONE, C_FREG, 21, 4, REGSP, 0, 0}, {AFMOVS, C_NSOREG, C_NONE, C_NONE, C_FREG, 21, 4, 0, 0, 0}, @@ -519,12 +538,16 @@ var optab = []Optab{ {AMOVH, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, {AMOVB, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, {AMOVBU, C_ROFF, C_NONE, C_NONE, C_REG, 98, 4, 0, 0, 0}, + {AFMOVS, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0}, + {AFMOVD, C_ROFF, C_NONE, C_NONE, C_FREG, 98, 4, 0, 0, 0}, /* store with extended register offset */ {AMOVD, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, {AMOVH, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AFMOVS, C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, + {AFMOVD, 
C_FREG, C_NONE, C_NONE, C_ROFF, 99, 4, 0, 0, 0}, /* pre/post-indexed/signed-offset load/store register pair (unscaled, signed 10-bit quad-aligned and long offset) */ @@ -773,7 +796,8 @@ func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { ctxt.Diag("arm64 ops not initialized, call arm64.buildop first") } - c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset&0xffffffff) + 8} + c := ctxt7{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset & 0xffffffff), extrasize: int32(p.To.Offset >> 32)} + p.To.Offset &= 0xffffffff // extrasize is no longer needed bflag := 1 pc := int64(0) @@ -1042,6 +1066,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { C_NOREG4K, C_LOREG, C_LACON, + C_ADDCON2, C_LCON, C_VCON: if a.Name == obj.NAME_EXTERN { @@ -1081,6 +1106,57 @@ func (c *ctxt7) regoff(a *obj.Addr) uint32 { return uint32(c.instoffset) } +func isSTLXRop(op obj.As) bool { + switch op { + case ASTLXR, ASTLXRW, ASTLXRB, ASTLXRH, + ASTXR, ASTXRW, ASTXRB, ASTXRH: + return true + } + return false +} + +func isSTXPop(op obj.As) bool { + switch op { + case ASTXP, ASTLXP, ASTXPW, ASTLXPW: + return true + } + return false +} + +func isANDop(op obj.As) bool { + switch op { + case AAND, AORR, AEOR, AANDS, ATST, + ABIC, AEON, AORN, ABICS: + return true + } + return false +} + +func isANDWop(op obj.As) bool { + switch op { + case AANDW, AORRW, AEORW, AANDSW, ATSTW, + ABICW, AEONW, AORNW, ABICSW: + return true + } + return false +} + +func isADDop(op obj.As) bool { + switch op { + case AADD, AADDS, ASUB, ASUBS, ACMN, ACMP: + return true + } + return false +} + +func isADDWop(op obj.As) bool { + switch op { + case AADDW, AADDSW, ASUBW, ASUBSW, ACMNW, ACMPW: + return true + } + return false +} + func isRegShiftOrExt(a *obj.Addr) bool { return (a.Index-obj.RBaseARM64)®_EXT != 0 || (a.Index-obj.RBaseARM64)®_LSL != 0 } @@ -1107,7 +1183,7 @@ func isaddcon(v int64) bool { return v <= 0xFFF } -// isbitcon returns whether a 
constant can be encoded into a logical instruction. +// isbitcon reports whether a constant can be encoded into a logical instruction. // bitcon has a binary form of repetition of a bit sequence of length 2, 4, 8, 16, 32, or 64, // which itself is a rotate (w.r.t. the length of the unit) of a sequence of ones. // special cases: 0 and -1 are not bitcon. @@ -1387,6 +1463,81 @@ func rclass(r int16) int { return C_GOK } +// con32class reclassifies the constant of 32-bit instruction. Becuase the constant type is 32-bit, +// but saved in Offset which type is int64, con32class treats it as uint32 type and reclassifies it. +func (c *ctxt7) con32class(a *obj.Addr) int { + v := uint32(a.Offset) + if v == 0 { + return C_ZCON + } + if isaddcon(int64(v)) { + if v <= 0xFFF { + if isbitcon(uint64(v)) { + return C_ABCON0 + } + return C_ADDCON0 + } + if isbitcon(uint64(v)) { + return C_ABCON + } + if movcon(int64(v)) >= 0 { + return C_AMCON + } + if movcon(int64(^v)) >= 0 { + return C_AMCON + } + return C_ADDCON + } + + t := movcon(int64(v)) + if t >= 0 { + if isbitcon(uint64(v)) { + return C_MBCON + } + return C_MOVCON + } + + t = movcon(int64(^v)) + if t >= 0 { + if isbitcon(uint64(v)) { + return C_MBCON + } + return C_MOVCON + } + + if isbitcon(uint64(v)) { + return C_BITCON + } + + if 0 <= v && v <= 0xffffff { + return C_ADDCON2 + } + return C_LCON +} + +// con64class reclassifies the constant of C_VCON and C_LCON class. 
+func (c *ctxt7) con64class(a *obj.Addr) int { + zeroCount := 0 + negCount := 0 + for i := uint(0); i < 4; i++ { + immh := uint32(a.Offset >> (i * 16) & 0xffff) + if immh == 0 { + zeroCount++ + } else if immh == 0xffff { + negCount++ + } + } + if zeroCount >= 3 || negCount >= 3 { + return C_MOVCON + } else if zeroCount == 2 || negCount == 2 { + return C_MOVCON2 + } else if zeroCount == 1 || negCount == 1 { + return C_MOVCON3 + } else { + return C_VCON + } +} + func (c *ctxt7) aclass(a *obj.Addr) int { switch a.Type { case obj.TYPE_NONE: @@ -1405,6 +1556,10 @@ func (c *ctxt7) aclass(a *obj.Addr) int { return C_LIST case obj.TYPE_MEM: + // The base register should be an integer register. + if int16(REG_F0) <= a.Reg && a.Reg <= int16(REG_V31) { + break + } switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: if a.Sym == nil { @@ -1432,7 +1587,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. a.Reg = obj.REG_NONE } - c.instoffset = int64(c.autosize) + a.Offset + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) return autoclass(c.instoffset) case obj.NAME_PARAM: @@ -1488,6 +1644,12 @@ func (c *ctxt7) aclass(a *obj.Addr) int { if isbitcon(uint64(v)) { return C_ABCON } + if movcon(v) >= 0 { + return C_AMCON + } + if movcon(^v) >= 0 { + return C_AMCON + } return C_ADDCON } @@ -1511,6 +1673,10 @@ func (c *ctxt7) aclass(a *obj.Addr) int { return C_BITCON } + if 0 <= v && v <= 0xffffff { + return C_ADDCON2 + } + if uint64(v) == uint64(uint32(v)) || v == int64(int32(v)) { return C_LCON } @@ -1532,7 +1698,8 @@ func (c *ctxt7) aclass(a *obj.Addr) int { // a.Offset is still relative to pseudo-SP. 
a.Reg = obj.REG_NONE } - c.instoffset = int64(c.autosize) + a.Offset + // The frame top 8 or 16 bytes are for FP + c.instoffset = int64(c.autosize) + a.Offset - int64(c.extrasize) case obj.NAME_PARAM: if a.Reg == REGSP { @@ -1568,8 +1735,38 @@ func (c *ctxt7) oplook(p *obj.Prog) *Optab { } a1 = int(p.From.Class) if a1 == 0 { - a1 = c.aclass(&p.From) + 1 + a0 := c.aclass(&p.From) + // do not break C_ADDCON2 when S bit is set + if (p.As == AADDS || p.As == AADDSW || p.As == ASUBS || p.As == ASUBSW) && a0 == C_ADDCON2 { + a0 = C_LCON + } + a1 = a0 + 1 p.From.Class = int8(a1) + // more specific classification of 32-bit integers + if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE { + if p.As == AMOVW || isADDWop(p.As) { + ra0 := c.con32class(&p.From) + // do not break C_ADDCON2 when S bit is set + if (p.As == AADDSW || p.As == ASUBSW) && ra0 == C_ADDCON2 { + ra0 = C_LCON + } + a1 = ra0 + 1 + p.From.Class = int8(a1) + } + if isANDWop(p.As) && a0 != C_BITCON { + // For 32-bit logical instruction with constant, + // the BITCON test is special in that it looks at + // the 64-bit which has the high 32-bit as a copy + // of the low 32-bit. We have handled that and + // don't pass it to con32class. 
+ a1 = c.con32class(&p.From) + 1 + p.From.Class = int8(a1) + } + if ((p.As == AMOVD) || isANDop(p.As) || isADDop(p.As)) && (a0 == C_LCON || a0 == C_VCON) { + a1 = c.con64class(&p.From) + 1 + p.From.Class = int8(a1) + } + } } a1-- @@ -1640,7 +1837,7 @@ func cmp(a int, b int) bool { } case C_ADDCON: - if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON { + if b == C_ZCON || b == C_ABCON0 || b == C_ADDCON0 || b == C_ABCON || b == C_AMCON { return true } @@ -1650,15 +1847,23 @@ func cmp(a int, b int) bool { } case C_MOVCON: - if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 { + if b == C_MBCON || b == C_ZCON || b == C_ADDCON0 || b == C_AMCON { + return true + } + + case C_ADDCON2: + if b == C_ZCON || b == C_ADDCON || b == C_ADDCON0 { return true } case C_LCON: - if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON { + if b == C_ZCON || b == C_BITCON || b == C_ADDCON || b == C_ADDCON0 || b == C_ABCON || b == C_ABCON0 || b == C_MBCON || b == C_MOVCON || b == C_ADDCON2 || b == C_AMCON { return true } + case C_MOVCON2: + return cmp(C_LCON, b) + case C_VCON: return cmp(C_LCON, b) @@ -2008,11 +2213,17 @@ func buildop(ctxt *obj.Link) { oprangeset(AMOVZW, t) case ASWPD: + oprangeset(ASWPALD, t) oprangeset(ASWPB, t) oprangeset(ASWPH, t) oprangeset(ASWPW, t) - oprangeset(ALDADDALD, t) + oprangeset(ASWPALB, t) + oprangeset(ASWPALH, t) + oprangeset(ASWPALW, t) + oprangeset(ALDADDALB, t) + oprangeset(ALDADDALH, t) oprangeset(ALDADDALW, t) + oprangeset(ALDADDALD, t) oprangeset(ALDADDB, t) oprangeset(ALDADDH, t) oprangeset(ALDADDW, t) @@ -2185,14 +2396,21 @@ func buildop(ctxt *obj.Link) { AWORD, ADWORD, obj.ARET, - obj.ATEXT, - ASTP, - ASTPW, - ALDP: + obj.ATEXT: break + case ALDP: + oprangeset(AFLDPD, t) + + case ASTP: + oprangeset(AFSTPD, t) + + case ASTPW: + oprangeset(AFSTPS, t) + case ALDPW: oprangeset(ALDPSW, t) + oprangeset(AFLDPS, t) case AERET: oprangeset(AWFE, t) @@ -2440,6 +2658,9 
@@ func buildop(ctxt *obj.Link) { } } +// chipfloat7() checks if the immediate constants available in FMOVS/FMOVD instructions. +// For details of the range of constants available, see +// http://infocenter.arm.com/help/topic/com.arm.doc.dui0473m/dom1359731199385.html. func (c *ctxt7) chipfloat7(e float64) int { ei := math.Float64bits(e) l := uint32(int32(ei)) @@ -2480,6 +2701,17 @@ func SYSARG4(op1 int, Cn int, Cm int, op2 int) int { return SYSARG5(0, op1, Cn, Cm, op2) } +// checkUnpredictable checks if the sourse and transfer registers are the same register. +// ARM64 manual says it is "constrained unpredictable" if the src and dst registers of STP/LDP are same. +func (c *ctxt7) checkUnpredictable(p *obj.Prog, isload bool, wback bool, rn int16, rt1 int16, rt2 int16) { + if wback && rn != REGSP && (rn == rt1 || rn == rt2) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + if isload && rt1 == rt2 { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } +} + /* checkindex checks if index >= 0 && index <= maxindex */ func (c *ctxt7) checkindex(p *obj.Prog, index, maxindex int) { if index < 0 || index > maxindex { @@ -2536,11 +2768,11 @@ func (c *ctxt7) checkShiftAmount(p *obj.Prog, a *obj.Addr) { if amount != 1 && amount != 0 { c.ctxt.Diag("invalid index shift amount: %v", p) } - case AMOVW, AMOVWU: + case AMOVW, AMOVWU, AFMOVS: if amount != 2 && amount != 0 { c.ctxt.Diag("invalid index shift amount: %v", p) } - case AMOVD: + case AMOVD, AFMOVD: if amount != 3 && amount != 0 { c.ctxt.Diag("invalid index shift amount: %v", p) } @@ -2550,6 +2782,7 @@ func (c *ctxt7) checkShiftAmount(p *obj.Prog, a *obj.Addr) { } func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { + var os [5]uint32 o1 := uint32(0) o2 := uint32(0) o3 := uint32(0) @@ -2739,13 +2972,29 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } case 12: /* movT $vcon, reg */ - o1 = c.omovlit(p.As, p, &p.From, int(p.To.Reg)) + num := c.omovlconst(p.As, p, &p.From, 
int(p.To.Reg), os[:]) + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) + } + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */ - o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) - - if !(o1 != 0) { - break + o := uint32(0) + num := uint8(0) + cls := oclass(&p.From) + if isADDWop(p.As) { + if (cls != C_LCON) && (cls != C_ADDCON2) { + c.ctxt.Diag("illegal combination: %v", p) + } + num = c.omovlconst(AMOVW, p, &p.From, REGTMP, os[:]) + } else { + num = c.omovlconst(AMOVD, p, &p.From, REGTMP, os[:]) + } + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) } rt := int(p.To.Reg) if p.To.Type == obj.TYPE_NONE { @@ -2756,16 +3005,23 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { r = rt } if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) { - o2 = c.opxrrr(p, p.As, false) - o2 |= REGTMP & 31 << 16 - o2 |= LSL0_64 + o = c.opxrrr(p, p.As, false) + o |= REGTMP & 31 << 16 + o |= LSL0_64 } else { - o2 = c.oprrr(p, p.As) - o2 |= REGTMP & 31 << 16 /* shift is 0 */ + o = c.oprrr(p, p.As) + o |= REGTMP & 31 << 16 /* shift is 0 */ } - o2 |= uint32(r&31) << 5 - o2 |= uint32(rt & 31) + o |= uint32(r&31) << 5 + o |= uint32(rt & 31) + + os[num] = o + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] + o5 = os[4] case 14: /* word */ if c.aclass(&p.To) == C_ADDR { @@ -2918,6 +3174,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */ + if p.From.Reg != REGSP && p.From.Reg == p.To.Reg { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + v := int32(p.From.Offset) if v < -256 || v > 255 { @@ -2932,6 +3192,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.From.Reg&31) << 5) | uint32(p.To.Reg&31) case 23: /* movT R,(R)O!; movT O(R)!, R -> strT */ + if p.To.Reg != REGSP && p.From.Reg == p.To.Reg { + c.ctxt.Diag("constrained 
unpredictable behavior: %v", p) + } + v := int32(p.To.Offset) if v < -256 || v > 255 { @@ -3003,10 +3267,20 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= (uint32(r&31) << 5) | uint32(rt&31) case 28: /* logop $vcon, [R], R (64 bit literal) */ - o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) + o := uint32(0) + num := uint8(0) + cls := oclass(&p.From) + if isANDWop(p.As) { + if (cls != C_LCON) && (cls != C_ADDCON) { + c.ctxt.Diag("illegal combination: %v", p) + } + num = c.omovlconst(AMOVW, p, &p.From, REGTMP, os[:]) + } else { + num = c.omovlconst(AMOVD, p, &p.From, REGTMP, os[:]) + } - if !(o1 != 0) { - break + if num == 0 { + c.ctxt.Diag("invalid constant: %v", p) } rt := int(p.To.Reg) if p.To.Type == obj.TYPE_NONE { @@ -3016,10 +3290,17 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if r == 0 { r = rt } - o2 = c.oprrr(p, p.As) - o2 |= REGTMP & 31 << 16 /* shift is 0 */ - o2 |= uint32(r&31) << 5 - o2 |= uint32(rt & 31) + o = c.oprrr(p, p.As) + o |= REGTMP & 31 << 16 /* shift is 0 */ + o |= uint32(r&31) << 5 + o |= uint32(rt & 31) + + os[num] = o + o1 = os[0] + o2 = os[1] + o3 = os[2] + o4 = os[3] + o5 = os[4] case 29: /* op Rn, Rd */ fc := c.aclass(&p.From) @@ -3150,7 +3431,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 34: /* mov $lacon,R */ o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) - if !(o1 != 0) { + if o1 == 0 { break } o2 = c.opxrrr(p, AADD, false) @@ -3383,21 +3664,21 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { rt := p.RegTo2 rb := p.To.Reg switch p.As { - case ASWPD, ALDADDALD, ALDADDD, ALDANDD, ALDEORD, ALDORD: // 64-bit + case ASWPD, ASWPALD, ALDADDALD, ALDADDD, ALDANDD, ALDEORD, ALDORD: // 64-bit o1 = 3 << 30 - case ASWPW, ALDADDALW, ALDADDW, ALDANDW, ALDEORW, ALDORW: // 32-bit + case ASWPW, ASWPALW, ALDADDALW, ALDADDW, ALDANDW, ALDEORW, ALDORW: // 32-bit o1 = 2 << 30 - case ASWPH, ALDADDH, ALDANDH, ALDEORH, ALDORH: // 16-bit + case ASWPH, ASWPALH, ALDADDALH, ALDADDH, 
ALDANDH, ALDEORH, ALDORH: // 16-bit o1 = 1 << 30 - case ASWPB, ALDADDB, ALDANDB, ALDEORB, ALDORB: // 8-bit + case ASWPB, ASWPALB, ALDADDALB, ALDADDB, ALDANDB, ALDEORB, ALDORB: // 8-bit o1 = 0 << 30 default: c.ctxt.Diag("illegal instruction: %v\n", p) } switch p.As { - case ASWPD, ASWPW, ASWPH, ASWPB: + case ASWPD, ASWPW, ASWPH, ASWPB, ASWPALD, ASWPALW, ASWPALH, ASWPALB: o1 |= 0x20 << 10 - case ALDADDALD, ALDADDALW, ALDADDD, ALDADDW, ALDADDH, ALDADDB: + case ALDADDALD, ALDADDALW, ALDADDALH, ALDADDALB, ALDADDD, ALDADDW, ALDADDH, ALDADDB: o1 |= 0x00 << 10 case ALDANDD, ALDANDW, ALDANDH, ALDANDB: o1 |= 0x04 << 10 @@ -3407,11 +3688,24 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= 0x0c << 10 } switch p.As { - case ALDADDALD, ALDADDALW: + case ALDADDALD, ALDADDALW, ALDADDALH, ALDADDALB, ASWPALD, ASWPALW, ASWPALH, ASWPALB: o1 |= 3 << 22 } o1 |= 0x1c1<<21 | uint32(rs&31)<<16 | uint32(rb&31)<<5 | uint32(rt&31) + case 48: /* ADD $C_ADDCON2, Rm, Rd */ + op := c.opirr(p, p.As) + if op&Sbit != 0 { + c.ctxt.Diag("can not break addition/subtraction when S bit is set", p) + } + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0x000fff, r, rt) + o2 = c.oaddi(p, int32(op), int32(c.regoff(&p.From))&0xfff000, rt, rt) + case 50: /* sys/sysl */ o1 = c.opirr(p, p.As) @@ -3465,19 +3759,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 54: /* floating point arith */ o1 = c.oprrr(p, p.As) - - var rf int - if p.From.Type == obj.TYPE_CONST { - rf = c.chipfloat7(p.From.Val.(float64)) - if rf < 0 || true { - c.ctxt.Diag("invalid floating-point immediate\n%v", p) - rf = 0 - } - - rf |= (1 << 3) - } else { - rf = int(p.From.Reg) - } + rf := int(p.From.Reg) rt := int(p.To.Reg) r := int(p.Reg) if (o1&(0x1F<<24)) == (0x1E<<24) && (o1&(1<<11)) == 0 { /* monadic */ @@ -3488,6 +3770,18 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } o1 |= (uint32(rf&31) << 16) | 
(uint32(r&31) << 5) | uint32(rt&31) + case 55: /* floating-point constant */ + var rf int + o1 = 0xf<<25 | 1<<21 | 1<<12 + rf = c.chipfloat7(p.From.Val.(float64)) + if rf < 0 { + c.ctxt.Diag("invalid floating-point immediate\n%v", p) + } + if p.As == AFMOVD { + o1 |= 1 << 22 + } + o1 |= (uint32(rf&0xff) << 13) | uint32(p.To.Reg&31) + case 56: /* floating point compare */ o1 = c.oprrr(p, p.As) @@ -3529,6 +3823,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= 0x1F << 16 o1 |= uint32(p.From.Reg&31) << 5 if p.As == ALDXP || p.As == ALDXPW || p.As == ALDAXP || p.As == ALDAXPW { + if int(p.To.Reg) == int(p.To.Offset) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } o1 |= uint32(p.To.Offset&31) << 10 } else { o1 |= 0x1F << 10 @@ -3536,6 +3833,19 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= uint32(p.To.Reg & 31) case 59: /* stxr/stlxr/stxp/stlxp */ + s := p.RegTo2 + n := p.To.Reg + t := p.From.Reg + if isSTLXRop(p.As) { + if s == t || (s == n && n != REGSP) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + } else if isSTXPop(p.As) { + t2 := int16(p.From.Offset) + if (s == t || s == t2) || (s == n && n != REGSP) { + c.ctxt.Diag("constrained unpredictable behavior: %v", p) + } + } o1 = c.opstore(p, p.As) if p.RegTo2 != obj.REG_NONE { @@ -3543,7 +3853,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } else { o1 |= 0x1F << 16 } - if p.As == ASTXP || p.As == ASTXPW || p.As == ASTLXP || p.As == ASTLXPW { + if isSTXPop(p.As) { o1 |= uint32(p.From.Offset&31) << 10 } o1 |= uint32(p.To.Reg&31)<<5 | uint32(p.From.Reg&31) @@ -3562,7 +3872,11 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if p.Reg == REGTMP { c.ctxt.Diag("cannot use REGTMP as source: %v\n", p) } - o1 = c.omovconst(AMOVD, p, &p.From, REGTMP) + if isADDWop(p.As) || isANDWop(p.As) { + o1 = c.omovconst(AMOVW, p, &p.From, REGTMP) + } else { + o1 = c.omovconst(AMOVD, p, &p.From, REGTMP) + } rt := 
int(p.To.Reg) if p.To.Type == obj.TYPE_NONE { @@ -6010,7 +6324,7 @@ func (c *ctxt7) oaddi(p *obj.Prog, o1 int32, v int32, r int, rt int) uint32 { } /* - * load a a literal value into dr + * load a literal value into dr */ func (c *ctxt7) omovlit(as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 int32 @@ -6087,34 +6401,185 @@ func (c *ctxt7) omovconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint3 return o1 } - r := 32 - if as == AMOVD { - r = 64 + if as == AMOVW { + d := uint32(a.Offset) + s := movcon(int64(d)) + if s < 0 || 16*s >= 32 { + d = ^d + s = movcon(int64(d)) + if s < 0 || 16*s >= 32 { + c.ctxt.Diag("impossible 32-bit move wide: %#x\n%v", uint32(a.Offset), p) + } + o1 = c.opirr(p, AMOVNW) + } else { + o1 = c.opirr(p, AMOVZW) + } + o1 |= MOVCONST(int64(d), s, rt) } - d := a.Offset - s := movcon(d) - if s < 0 || s >= r { - d = ^d - s = movcon(d) - if s < 0 || s >= r { - c.ctxt.Diag("impossible move wide: %#x\n%v", uint64(a.Offset), p) - } - if as == AMOVD { + if as == AMOVD { + d := a.Offset + s := movcon(d) + if s < 0 || 16*s >= 64 { + d = ^d + s = movcon(d) + if s < 0 || 16*s >= 64 { + c.ctxt.Diag("impossible 64-bit move wide: %#x\n%v", uint64(a.Offset), p) + } o1 = c.opirr(p, AMOVN) } else { - o1 = c.opirr(p, AMOVNW) - } - } else { - if as == AMOVD { o1 = c.opirr(p, AMOVZ) - } else { - o1 = c.opirr(p, AMOVZW) } + o1 |= MOVCONST(d, s, rt) } - o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31)) return o1 } +// load a 32-bit/64-bit large constant (LCON or VCON) in a.Offset into rt +// put the instruction sequence in os and return the number of instructions. 
+func (c *ctxt7) omovlconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int, os []uint32) (num uint8) { + switch as { + case AMOVW: + d := uint32(a.Offset) + // use MOVZW and MOVKW to load a constant to rt + os[0] = c.opirr(p, AMOVZW) + os[0] |= MOVCONST(int64(d), 0, rt) + os[1] = c.opirr(p, AMOVKW) + os[1] |= MOVCONST(int64(d), 1, rt) + return 2 + + case AMOVD: + d := a.Offset + dn := ^d + var immh [4]uint64 + var i int + zeroCount := int(0) + negCount := int(0) + for i = 0; i < 4; i++ { + immh[i] = uint64((d >> uint(i*16)) & 0xffff) + if immh[i] == 0 { + zeroCount++ + } else if immh[i] == 0xffff { + negCount++ + } + } + + if zeroCount == 4 || negCount == 4 { + c.ctxt.Diag("the immediate should be MOVCON: %v", p) + } + switch { + case zeroCount == 3: + // one MOVZ + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + break + } + } + return 1 + + case negCount == 3: + // one MOVN + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + break + } + } + return 1 + + case zeroCount == 2: + // one MOVZ and one MOVK + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + i++ + break + } + } + for ; i < 4; i++ { + if immh[i] != 0 { + os[1] = c.opirr(p, AMOVK) + os[1] |= MOVCONST(d, i, rt) + } + } + return 2 + + case negCount == 2: + // one MOVN and one MOVK + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + i++ + break + } + } + for ; i < 4; i++ { + if immh[i] != 0xffff { + os[1] = c.opirr(p, AMOVK) + os[1] |= MOVCONST(d, i, rt) + } + } + return 2 + + case zeroCount == 1: + // one MOVZ and two MOVKs + for i = 0; i < 4; i++ { + if immh[i] != 0 { + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, i, rt) + i++ + break + } + } + + for j := 1; i < 4; i++ { + if immh[i] != 0 { + os[j] = c.opirr(p, AMOVK) + os[j] |= MOVCONST(d, i, rt) + j++ + } + } + return 3 + + case 
negCount == 1: + // one MOVN and two MOVKs + for i = 0; i < 4; i++ { + if immh[i] != 0xffff { + os[0] = c.opirr(p, AMOVN) + os[0] |= MOVCONST(dn, i, rt) + i++ + break + } + } + + for j := 1; i < 4; i++ { + if immh[i] != 0xffff { + os[j] = c.opirr(p, AMOVK) + os[j] |= MOVCONST(d, i, rt) + j++ + } + } + return 3 + + default: + // one MOVZ and 3 MOVKs + os[0] = c.opirr(p, AMOVZ) + os[0] |= MOVCONST(d, 0, rt) + for i = 1; i < 4; i++ { + os[i] = c.opirr(p, AMOVK) + os[i] |= MOVCONST(d, i, rt) + } + return 4 + } + default: + return 0 + } +} + func (c *ctxt7) opbfm(p *obj.Prog, a obj.As, r int, s int, rf int, rt int) uint32 { var b uint32 o := c.opirr(p, a) @@ -6155,14 +6620,41 @@ func (c *ctxt7) opextr(p *obj.Prog, a obj.As, v int32, rn int, rm int, rt int) u /* genrate instruction encoding for LDP/LDPW/LDPSW/STP/STPW */ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uint32) uint32 { + wback := false + if o.scond == C_XPOST || o.scond == C_XPRE { + wback = true + } + switch p.As { + case ALDP, ALDPW, ALDPSW: + c.checkUnpredictable(p, true, wback, p.From.Reg, p.To.Reg, int16(p.To.Offset)) + case ASTP, ASTPW: + if wback == true { + c.checkUnpredictable(p, false, true, p.To.Reg, p.From.Reg, int16(p.From.Offset)) + } + case AFLDPD, AFLDPS: + c.checkUnpredictable(p, true, false, p.From.Reg, p.To.Reg, int16(p.To.Offset)) + } var ret uint32 + // check offset switch p.As { + case AFLDPD, AFSTPD: + if vo < -512 || vo > 504 || vo%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 8 + ret = 1<<30 | 1<<26 case ALDP, ASTP: if vo < -512 || vo > 504 || vo%8 != 0 { c.ctxt.Diag("invalid offset %v\n", p) } vo /= 8 ret = 2 << 30 + case AFLDPS, AFSTPS: + if vo < -256 || vo > 252 || vo%4 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + vo /= 4 + ret = 1 << 26 case ALDPW, ASTPW: if vo < -256 || vo > 252 || vo%4 != 0 { c.ctxt.Diag("invalid offset %v\n", p) @@ -6178,7 +6670,12 @@ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, 
ldp uin default: c.ctxt.Diag("invalid instruction %v\n", p) } + // check register pair switch p.As { + case AFLDPD, AFLDPS, AFSTPD, AFSTPS: + if rl < REG_F0 || REG_F31 < rl || rh < REG_F0 || REG_F31 < rh { + c.ctxt.Diag("invalid register pair %v\n", p) + } case ALDP, ALDPW, ALDPSW: if rl < REG_R0 || REG_R30 < rl || rh < REG_R0 || REG_R30 < rh { c.ctxt.Diag("invalid register pair %v\n", p) @@ -6188,6 +6685,7 @@ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uin c.ctxt.Diag("invalid register pair %v\n", p) } } + // other conditional flag bits switch o.scond { case C_XPOST: ret |= 1 << 23 diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go index 845fb2281701f..73d8bb76ddd0a 100644 --- a/src/cmd/internal/obj/arm64/doc.go +++ b/src/cmd/internal/obj/arm64/doc.go @@ -35,7 +35,7 @@ ldrsh, sturh, strh => MOVH. 4. Go moves conditions into opcode suffix, like BLT. -5. Go adds a V prefix for most floating-point and SIMD instrutions except cryptographic extension +5. Go adds a V prefix for most floating-point and SIMD instructions, except cryptographic extension instructions and floating-point(scalar) instructions. Examples: @@ -89,7 +89,7 @@ such as str, stur, strb, sturb, strh, sturh stlr, stlrb. stlrh, st1. Examples: MOVD R29, 384(R19) <=> str x29, [x19,#384] MOVB.P R30, 30(R4) <=> strb w30, [x4],#30 - STLRH R21, (R18) <=> stlrh w21, [x18] + STLRH R21, (R19) <=> stlrh w21, [x19] (2) MADD, MADDW, MSUB, MSUBW, SMADDL, SMSUBL, UMADDL, UMSUBL , , , @@ -127,7 +127,7 @@ such as str, stur, strb, sturb, strh, sturh stlr, stlrb. stlrh, st1. 
Examples: CCMN VS, R13, R22, $10 <=> ccmn x13, x22, #0xa, vs - CCMPW HS, R18, R14, $11 <=> ccmp w18, w14, #0xb, cs + CCMPW HS, R19, R14, $11 <=> ccmp w19, w14, #0xb, cs (9) CSEL, CSELW, CSNEG, CSNEGW, CSINC, CSINCW , , , ; FCSELD, FCSELS , , , @@ -144,19 +144,19 @@ FCSELD, FCSELS , , , Examples: STLXR ZR, (R15), R16 <=> stlxr w16, xzr, [x15] - STXRB R9, (R21), R18 <=> stxrb w18, w9, [x21] + STXRB R9, (R21), R19 <=> stxrb w19, w9, [x21] (12) STLXP, STLXPW, STXP, STXPW (, ), (), Examples: - STLXP (R17, R18), (R4), R5 <=> stlxp w5, x17, x18, [x4] + STLXP (R17, R19), (R4), R5 <=> stlxp w5, x17, x19, [x4] STXPW (R30, R25), (R22), R13 <=> stxp w13, w30, w25, [x22] 2. Expressions for special arguments. # is written as $. -Optionally-shifted immedate. +Optionally-shifted immediate. Examples: ADD $(3151<<12), R14, R20 <=> add x20, x14, #0xc4f, lsl #12 @@ -173,7 +173,7 @@ Extended registers are written as {.{<<}}. can be UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW or SXTX. Examples: - ADDS R18.UXTB<<4, R9, R26 <=> adds x26, x9, w18, uxtb #4 + ADDS R19.UXTB<<4, R9, R26 <=> adds x26, x9, w19, uxtb #4 ADDSW R14.SXTX, R14, R6 <=> adds w6, w14, w14, sxtx Memory references: [{,#0}] is written as (Rn|RSP), a base register and an immediate diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 0d832387d7f7d..e47857ab5fede 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -125,7 +125,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 @@ -140,7 +140,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + (objabi.StackGuard - objabi.StackSmall) + p.From.Offset = 
int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) p.To.Type = obj.TYPE_REG p.To.Reg = REG_R3 @@ -254,7 +254,11 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { switch p.As { case AFMOVS: if p.From.Type == obj.TYPE_FCONST { - f32 := float32(p.From.Val.(float64)) + f64 := p.From.Val.(float64) + f32 := float32(f64) + if c.chipfloat7(f64) > 0 { + break + } if math.Float32bits(f32) == 0 { p.From.Type = obj.TYPE_REG p.From.Reg = REGZERO @@ -269,6 +273,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { case AFMOVD: if p.From.Type == obj.TYPE_FCONST { f64 := p.From.Val.(float64) + if c.chipfloat7(f64) > 0 { + break + } if math.Float64bits(f64) == 0 { p.From.Type = obj.TYPE_REG p.From.Reg = REGZERO @@ -304,12 +311,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { // shared for both 32-bit and 64-bit. 32-bit ops // will zero the high 32-bit of the destination // register anyway. - switch p.As { - case AANDW, AORRW, AEORW, AANDSW, ATSTW: - if p.From.Type == obj.TYPE_CONST { - v := p.From.Offset & 0xffffffff - p.From.Offset = v | v<<32 - } + if isANDWop(p.As) && p.From.Type == obj.TYPE_CONST { + v := p.From.Offset & 0xffffffff + p.From.Offset = v | v<<32 } if c.ctxt.Flag_dynlink { @@ -542,22 +546,28 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.autosize += 8 } - if c.autosize != 0 && c.autosize&(16-1) != 0 { - // The frame includes an LR. - // If the frame size is 8, it's only an LR, - // so there's no potential for breaking references to - // local variables by growing the frame size, - // because there are no local variables. - // But otherwise, if there is a non-empty locals section, - // the author of the code is responsible for making sure - // that the frame size is 8 mod 16. 
- if c.autosize == 8 { - c.autosize += 8 - c.cursym.Func.Locals += 8 + if c.autosize != 0 { + extrasize := int32(0) + if c.autosize%16 == 8 { + // Allocate extra 8 bytes on the frame top to save FP + extrasize = 8 + } else if c.autosize&(16-1) == 0 { + // Allocate extra 16 bytes to save FP for the old frame whose size is 8 mod 16 + extrasize = 16 } else { - c.ctxt.Diag("%v: unaligned frame size %d - must be 8 mod 16 (or 0)", p, c.autosize-8) + c.ctxt.Diag("%v: unaligned frame size %d - must be 16 aligned", p, c.autosize-8) } + c.autosize += extrasize + c.cursym.Func.Locals += extrasize + + // low 32 bits for autosize + // high 32 bits for extrasize + p.To.Offset = int64(c.autosize) | int64(extrasize)<<32 + } else { + // NOFRAME + p.To.Offset = 0 } + if c.autosize == 0 && c.cursym.Func.Text.Mark&LEAF == 0 { if c.ctxt.Debugvlog { c.ctxt.Logf("save suppressed in: %s\n", c.cursym.Func.Text.From.Sym.Name) @@ -565,9 +575,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.cursym.Func.Text.Mark |= LEAF } - // FP offsets need an updated p.To.Offset. 
- p.To.Offset = int64(c.autosize) - 8 - if cursym.Func.Text.Mark&LEAF != 0 { cursym.Set(obj.AttrLeaf, true) if p.From.Sym.NoFrame() { @@ -631,6 +638,26 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q1.Spadj = aoffset } + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = AMOVD + q1.From.Type = obj.TYPE_REG + q1.From.Reg = REGFP + q1.To.Type = obj.TYPE_MEM + q1.To.Reg = REGSP + q1.To.Offset = -8 + + q1 = obj.Appendp(q1, c.newprog) + q1.Pos = p.Pos + q1.As = ASUB + q1.From.Type = obj.TYPE_CONST + q1.From.Offset = 8 + q1.Reg = REGSP + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REGFP + } + if c.cursym.Func.Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // @@ -753,9 +780,30 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.To.Type = obj.TYPE_REG p.To.Reg = REGSP p.Spadj = -c.autosize + + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + p = obj.Appendp(p, c.newprog) + p.As = ASUB + p.From.Type = obj.TYPE_CONST + p.From.Offset = 8 + p.Reg = REGSP + p.To.Type = obj.TYPE_REG + p.To.Reg = REGFP + } } } else { /* want write-back pre-indexed SP+autosize -> SP, loading REGLINK*/ + + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + p.As = AMOVD + p.From.Type = obj.TYPE_MEM + p.From.Reg = REGSP + p.From.Offset = -8 + p.To.Type = obj.TYPE_REG + p.To.Reg = REGFP + p = obj.Appendp(p, c.newprog) + } + aoffset := c.autosize if aoffset > 0xF0 { @@ -814,7 +862,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.Spadj = int32(+p.From.Offset) } } - break case obj.AGETCALLERPC: if cursym.Leaf() { @@ -828,6 +875,112 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From.Type = obj.TYPE_MEM p.From.Reg = REGSP } + + case obj.ADUFFCOPY: + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + // ADR ret_addr, R27 
+ // STP (FP, R27), -24(SP) + // SUB 24, SP, FP + // DUFFCOPY + // ret_addr: + // SUB 8, SP, FP + + q1 := p + // copy DUFFCOPY from q1 to q4 + q4 := obj.Appendp(p, c.newprog) + q4.Pos = p.Pos + q4.As = obj.ADUFFCOPY + q4.To = p.To + + q1.As = AADR + q1.From.Type = obj.TYPE_BRANCH + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REG_R27 + + q2 := obj.Appendp(q1, c.newprog) + q2.Pos = p.Pos + q2.As = ASTP + q2.From.Type = obj.TYPE_REGREG + q2.From.Reg = REGFP + q2.From.Offset = int64(REG_R27) + q2.To.Type = obj.TYPE_MEM + q2.To.Reg = REGSP + q2.To.Offset = -24 + + // maintaine FP for DUFFCOPY + q3 := obj.Appendp(q2, c.newprog) + q3.Pos = p.Pos + q3.As = ASUB + q3.From.Type = obj.TYPE_CONST + q3.From.Offset = 24 + q3.Reg = REGSP + q3.To.Type = obj.TYPE_REG + q3.To.Reg = REGFP + + q5 := obj.Appendp(q4, c.newprog) + q5.Pos = p.Pos + q5.As = ASUB + q5.From.Type = obj.TYPE_CONST + q5.From.Offset = 8 + q5.Reg = REGSP + q5.To.Type = obj.TYPE_REG + q5.To.Reg = REGFP + q1.Pcond = q5 + p = q5 + } + + case obj.ADUFFZERO: + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + // ADR ret_addr, R27 + // STP (FP, R27), -24(SP) + // SUB 24, SP, FP + // DUFFZERO + // ret_addr: + // SUB 8, SP, FP + + q1 := p + // copy DUFFZERO from q1 to q4 + q4 := obj.Appendp(p, c.newprog) + q4.Pos = p.Pos + q4.As = obj.ADUFFZERO + q4.To = p.To + + q1.As = AADR + q1.From.Type = obj.TYPE_BRANCH + q1.To.Type = obj.TYPE_REG + q1.To.Reg = REG_R27 + + q2 := obj.Appendp(q1, c.newprog) + q2.Pos = p.Pos + q2.As = ASTP + q2.From.Type = obj.TYPE_REGREG + q2.From.Reg = REGFP + q2.From.Offset = int64(REG_R27) + q2.To.Type = obj.TYPE_MEM + q2.To.Reg = REGSP + q2.To.Offset = -24 + + // maintaine FP for DUFFZERO + q3 := obj.Appendp(q2, c.newprog) + q3.Pos = p.Pos + q3.As = ASUB + q3.From.Type = obj.TYPE_CONST + q3.From.Offset = 24 + q3.Reg = REGSP + q3.To.Type = obj.TYPE_REG + q3.To.Reg = REGFP + + q5 := obj.Appendp(q4, c.newprog) + q5.Pos = p.Pos + q5.As = ASUB + q5.From.Type = obj.TYPE_CONST + q5.From.Offset = 
8 + q5.Reg = REGSP + q5.To.Type = obj.TYPE_REG + q5.To.Reg = REGFP + q1.Pcond = q5 + p = q5 + } } } } diff --git a/src/cmd/internal/obj/inl.go b/src/cmd/internal/obj/inl.go index 671239444c7b9..8860069e4771b 100644 --- a/src/cmd/internal/obj/inl.go +++ b/src/cmd/internal/obj/inl.go @@ -47,9 +47,10 @@ type InlTree struct { // InlinedCall is a node in an InlTree. type InlinedCall struct { - Parent int // index of the parent in the InlTree or < 0 if outermost call - Pos src.XPos // position of the inlined call - Func *LSym // function that was inlined + Parent int // index of the parent in the InlTree or < 0 if outermost call + Pos src.XPos // position of the inlined call + Func *LSym // function that was inlined + ParentPC int32 // PC of instruction just before inlined body. Only valid in local trees. } // Add adds a new call to the tree, returning its index. @@ -76,6 +77,10 @@ func (tree *InlTree) CallPos(inlIndex int) src.XPos { return tree.nodes[inlIndex].Pos } +func (tree *InlTree) setParentPC(inlIndex int, pc int32) { + tree.nodes[inlIndex].ParentPC = pc +} + // OutermostPos returns the outermost position corresponding to xpos, // which is where xpos was ultimately inlined to. In the example for // InlTree, main() contains inlined AST nodes from h(), but the @@ -106,6 +111,6 @@ func (ctxt *Link) InnermostPos(xpos src.XPos) src.Pos { func dumpInlTree(ctxt *Link, tree InlTree) { for i, call := range tree.nodes { pos := ctxt.PosTable.Pos(call.Pos) - ctxt.Logf("%0d | %0d | %s (%s)\n", i, call.Parent, call.Func, pos) + ctxt.Logf("%0d | %0d | %s (%s) pc=%d\n", i, call.Parent, call.Func, pos, call.ParentPC) } } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 132f7836ef1ae..f506f60d065d4 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -214,6 +214,8 @@ const ( // Indicates auto that was optimized away, but whose type // we want to preserve in the DWARF debug info. 
NAME_DELETED_AUTO + // Indicates that this is a reference to a TOC anchor. + NAME_TOCREF ) //go:generate stringer -type AddrType @@ -344,6 +346,7 @@ const ( AFUNCDATA AJMP ANOP + APCALIGN APCDATA ARET AGETCALLERPC @@ -390,11 +393,12 @@ type LSym struct { // A FuncInfo contains extra fields for STEXT symbols. type FuncInfo struct { - Args int32 - Locals int32 - Text *Prog - Autom []*Auto - Pcln Pcln + Args int32 + Locals int32 + Text *Prog + Autom []*Auto + Pcln Pcln + InlMarks []InlMark dwarfInfoSym *LSym dwarfLocSym *LSym @@ -402,13 +406,53 @@ type FuncInfo struct { dwarfAbsFnSym *LSym dwarfIsStmtSym *LSym - GCArgs LSym - GCLocals LSym - GCRegs LSym + GCArgs *LSym + GCLocals *LSym + GCRegs *LSym + StackObjects *LSym } +type InlMark struct { + // When unwinding from an instruction in an inlined body, mark + // where we should unwind to. + // id records the global inlining id of the inlined body. + // p records the location of an instruction in the parent (inliner) frame. + p *Prog + id int32 +} + +// Mark p as the instruction to set as the pc when +// "unwinding" the inlining global frame id. Usually it should be +// instruction with a file:line at the callsite, and occur +// just before the body of the inlined function. +func (fi *FuncInfo) AddInlMark(p *Prog, id int32) { + fi.InlMarks = append(fi.InlMarks, InlMark{p: p, id: id}) +} + +//go:generate stringer -type ABI + +// ABI is the calling convention of a text symbol. +type ABI uint8 + +const ( + // ABI0 is the stable stack-based ABI. It's important that the + // value of this is "0": we can't distinguish between + // references to data and ABI0 text symbols in assembly code, + // and hence this doesn't distinguish between symbols without + // an ABI and text symbols with ABI0. + ABI0 ABI = iota + + // ABIInternal is the internal ABI that may change between Go + // versions. All Go functions use the internal ABI and the + // compiler generates wrappers for calls to and from other + // ABIs. 
+ ABIInternal + + ABICount +) + // Attribute is a set of symbol attributes. -type Attribute int16 +type Attribute uint16 const ( AttrDuplicateOK Attribute = 1 << iota @@ -444,6 +488,13 @@ const ( // For function symbols; indicates that the specified function was the // target of an inline during compilation AttrWasInlined + + // attrABIBase is the value at which the ABI is encoded in + // Attribute. This must be last; all bits after this are + // assumed to be an ABI value. + // + // MUST BE LAST since all bits above this comprise the ABI. + attrABIBase ) func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } @@ -469,6 +520,12 @@ func (a *Attribute) Set(flag Attribute, value bool) { } } +func (a Attribute) ABI() ABI { return ABI(a / attrABIBase) } +func (a *Attribute) SetABI(abi ABI) { + const mask = 1 // Only one ABI bit for now. + *a = (*a &^ (mask * attrABIBase)) | Attribute(abi)*attrABIBase +} + var textAttrStrings = [...]struct { bit Attribute s string @@ -500,6 +557,12 @@ func (a Attribute) TextAttrString() string { a &^= x.bit } } + switch a.ABI() { + case ABI0: + case ABIInternal: + s += "ABIInternal|" + a.SetABI(0) // Clear ABI so we don't print below. 
+ } if a != 0 { s += fmt.Sprintf("UnknownAttribute(%d)|", a) } @@ -554,7 +617,7 @@ type Pcdata struct { type Link struct { Headtype objabi.HeadType Arch *LinkArch - Debugasm bool + Debugasm int Debugvlog bool Debugpcln string Flag_shared bool @@ -563,8 +626,9 @@ type Link struct { Flag_locationlists bool Bso *bufio.Writer Pathname string - hashmu sync.Mutex // protects hash + hashmu sync.Mutex // protects hash, funchash hash map[string]*LSym // name -> sym mapping + funchash map[string]*LSym // name -> sym mapping for ABIInternal syms statichash map[string]*LSym // name -> sym mapping for static syms PosTable src.PosTable InlTree InlTree // global inlining tree used by gc/inl.go @@ -582,6 +646,16 @@ type Link struct { // state for writing objects Text []*LSym Data []*LSym + + // ABIAliases are text symbols that should be aliased to all + // ABIs. These symbols may only be referenced and not defined + // by this object, since the need for an alias may appear in a + // different object than the definition. Hence, this + // information can't be carried in the symbol definition. + // + // TODO(austin): Replace this with ABI wrappers once the ABIs + // actually diverge. 
+ ABIAliases []*LSym } func (ctxt *Link) Diag(format string, args ...interface{}) { diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go index 5a2546af9ed42..f096c7ff14940 100644 --- a/src/cmd/internal/obj/mips/obj0.go +++ b/src/cmd/internal/obj/mips/obj0.go @@ -739,7 +739,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = add p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R2 @@ -754,7 +754,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = mov p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + objabi.StackGuard - objabi.StackSmall + p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REG_R1 diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index ef9ce4c688d4a..c6d2de4273e2d 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -25,12 +25,6 @@ type objWriter struct { // Temporary buffer for zigzag int writing. varintbuf [10]uint8 - // Provide the index of a symbol reference by symbol name. - // One map for versioned symbols and one for unversioned symbols. - // Used for deduplicating the symbol reference list. - refIdx map[string]int - vrefIdx map[string]int - // Number of objects written of each type. 
nRefs int nData int @@ -79,10 +73,8 @@ func (w *objWriter) writeLengths() { func newObjWriter(ctxt *Link, b *bufio.Writer) *objWriter { return &objWriter{ - ctxt: ctxt, - wr: b, - vrefIdx: make(map[string]int), - refIdx: make(map[string]int), + ctxt: ctxt, + wr: b, } } @@ -90,7 +82,7 @@ func WriteObjFile(ctxt *Link, b *bufio.Writer) { w := newObjWriter(ctxt, b) // Magic header - w.wr.WriteString("\x00\x00go19ld") + w.wr.WriteString("\x00go112ld") // Version w.wr.WriteByte(1) @@ -106,10 +98,25 @@ func WriteObjFile(ctxt *Link, b *bufio.Writer) { w.writeRefs(s) w.addLengths(s) } + + if ctxt.Headtype == objabi.Haix { + // Data must be sorted to keep a constant order in TOC symbols. + // As they are created during Progedit, two symbols can be switched between + // two different compilations. Therefore, BuildID will be different. + // TODO: find a better place and optimize to only sort TOC symbols + SortSlice(ctxt.Data, func(i, j int) bool { + return ctxt.Data[i].Name < ctxt.Data[j].Name + }) + } + for _, s := range ctxt.Data { w.writeRefs(s) w.addLengths(s) } + for _, s := range ctxt.ABIAliases { + w.writeRefs(s) + w.addLengths(s) + } // End symbol references w.wr.WriteByte(0xff) @@ -145,9 +152,12 @@ func WriteObjFile(ctxt *Link, b *bufio.Writer) { for _, s := range ctxt.Data { w.writeSym(s) } + for _, s := range ctxt.ABIAliases { + w.writeSym(s) + } // Magic footer - w.wr.WriteString("\xff\xffgo19ld") + w.wr.WriteString("\xffgo112ld") } // Symbols are prefixed so their content doesn't get confused with the magic footer. @@ -157,28 +167,20 @@ func (w *objWriter) writeRef(s *LSym, isPath bool) { if s == nil || s.RefIdx != 0 { return } - var m map[string]int - if !s.Static() { - m = w.refIdx - } else { - m = w.vrefIdx - } - - if idx := m[s.Name]; idx != 0 { - s.RefIdx = idx - return - } w.wr.WriteByte(symPrefix) if isPath { w.writeString(filepath.ToSlash(s.Name)) } else { w.writeString(s.Name) } - // Write "version". 
- w.writeBool(s.Static()) + // Write ABI/static information. + abi := int64(s.ABI()) + if s.Static() { + abi = -1 + } + w.writeInt(abi) w.nRefs++ s.RefIdx = w.nRefs - m[s.Name] = w.nRefs } func (w *objWriter) writeRefs(s *LSym) { @@ -238,7 +240,13 @@ func (w *objWriter) writeSymDebug(s *LSym) { fmt.Fprintf(ctxt.Bso, "\n") if s.Type == objabi.STEXT { for p := s.Func.Text; p != nil; p = p.Link { - fmt.Fprintf(ctxt.Bso, "\t%#04x %v\n", uint(int(p.Pc)), p) + var s string + if ctxt.Debugasm > 1 { + s = p.String() + } else { + s = p.InnermostString() + } + fmt.Fprintf(ctxt.Bso, "\t%#04x %s\n", uint(int(p.Pc)), s) } } for i := 0; i < len(s.P); i += 16 { @@ -281,7 +289,7 @@ func (w *objWriter) writeSymDebug(s *LSym) { func (w *objWriter) writeSym(s *LSym) { ctxt := w.ctxt - if ctxt.Debugasm { + if ctxt.Debugasm > 0 { w.writeSymDebug(s) } @@ -380,6 +388,7 @@ func (w *objWriter) writeSym(s *LSym) { w.writeRefIndex(fsym) w.writeInt(int64(l)) w.writeRefIndex(call.Func) + w.writeInt(int64(call.ParentPC)) } } @@ -459,7 +468,12 @@ func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { panic("should be used only in the linker") } -func (c dwCtxt) AddDWARFSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwCtxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { + size := 4 + if isDwarf64(c.Link) { + size = 8 + } + ls := s.(*LSym) rsym := t.(*LSym) ls.WriteAddr(c.Link, ls.Size, size, rsym, ofs) @@ -500,6 +514,10 @@ func (c dwCtxt) Logf(format string, args ...interface{}) { c.Link.Logf(format, args...) 
} +func isDwarf64(ctxt *Link) bool { + return ctxt.Headtype == objabi.Haix +} + func (ctxt *Link) dwarfSym(s *LSym) (dwarfInfoSym, dwarfLocSym, dwarfRangesSym, dwarfAbsFnSym, dwarfIsStmtSym *LSym) { if s.Type != objabi.STEXT { ctxt.Diag("dwarfSym of non-TEXT %v", s) diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go index d72d797ee5a47..84dd494930ee3 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -193,6 +193,19 @@ func (s *pcinlineState) addBranch(ctxt *Link, globalIndex int) int { return localIndex } +func (s *pcinlineState) setParentPC(ctxt *Link, globalIndex int, pc int32) { + localIndex, ok := s.globalToLocal[globalIndex] + if !ok { + // We know where to unwind to when we need to unwind a body identified + // by globalIndex. But there may be no instructions generated by that + // body (it's empty, or its instructions were CSEd with other things, etc.). + // In that case, we don't need an unwind entry. + // TODO: is this really right? Seems to happen a whole lot... + return + } + s.localTree.setParentPC(localIndex, pc) +} + // pctoinline computes the index into the local inlining tree to use at p. // If p is not the result of inlining, pctoinline returns -1. Because p.Pos // applies to p, phase == 0 (before p) takes care of the update. 
@@ -323,6 +336,9 @@ func linkpcln(ctxt *Link, cursym *LSym) { pcinlineState := new(pcinlineState) funcpctab(ctxt, &pcln.Pcinline, cursym, "pctoinline", pcinlineState.pctoinline, nil) + for _, inlMark := range cursym.Func.InlMarks { + pcinlineState.setParentPC(ctxt, int(inlMark.id), int32(inlMark.p.Pc)) + } pcln.InlTree = pcinlineState.localTree if ctxt.Debugpcln == "pctoinline" && len(pcln.InlTree.nodes) > 0 { ctxt.Logf("-- inlining tree for %s:\n", cursym) diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 0658cc73112cb..9d376f739ffcb 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -27,7 +27,7 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string var plink *Prog for p := plist.Firstpc; p != nil; p = plink { - if ctxt.Debugasm && ctxt.Debugvlog { + if ctxt.Debugasm > 0 && ctxt.Debugvlog { fmt.Printf("obj: %v\n", p) } plink = p.Link @@ -119,9 +119,6 @@ func (ctxt *Link) InitTextSym(s *LSym, flag int) { ctxt.Diag("InitTextSym double init for %s", s.Name) } s.Func = new(FuncInfo) - if s.Func.Text != nil { - ctxt.Diag("duplicate TEXT for %s", s.Name) - } if s.OnList() { ctxt.Diag("symbol %s listed multiple times", s.Name) } @@ -150,18 +147,6 @@ func (ctxt *Link) InitTextSym(s *LSym, flag int) { isstmt.Type = objabi.SDWARFMISC isstmt.Set(AttrDuplicateOK, s.DuplicateOK()) ctxt.Data = append(ctxt.Data, isstmt) - - // Set up the function's gcargs and gclocals. - // They will be filled in later if needed. 
- gcargs := &s.Func.GCArgs - gcargs.Set(AttrDuplicateOK, true) - gcargs.Type = objabi.SRODATA - gclocals := &s.Func.GCLocals - gclocals.Set(AttrDuplicateOK, true) - gclocals.Type = objabi.SRODATA - gcregs := &s.Func.GCRegs - gcregs.Set(AttrDuplicateOK, true) - gcregs.Type = objabi.SRODATA } func (ctxt *Link) Globl(s *LSym, size int64, flag int) { diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index 3c374579ec6a9..6b248d5c366c2 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -391,6 +391,7 @@ const ( C_GOK C_ADDR C_GOTADDR + C_TOCADDR C_TLS_LE C_TLS_IE C_TEXTSIZE @@ -902,6 +903,7 @@ const ( AVCMPNEZB AVCMPNEZBCC AVPERM + AVPERMXOR AVBPERMQ AVBPERMD AVSEL diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go index c04ce27e46313..fb934e96f953b 100644 --- a/src/cmd/internal/obj/ppc64/anames.go +++ b/src/cmd/internal/obj/ppc64/anames.go @@ -493,6 +493,7 @@ var Anames = []string{ "VCMPNEZB", "VCMPNEZBCC", "VPERM", + "VPERMXOR", "VBPERMQ", "VBPERMD", "VSEL", diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go index 6ec7b7b518326..4699a15d3bec9 100644 --- a/src/cmd/internal/obj/ppc64/anames9.go +++ b/src/cmd/internal/obj/ppc64/anames9.go @@ -26,6 +26,7 @@ var cnames9 = []string{ "DACON", "SBRA", "LBRA", + "LBRAPIC", "SAUTO", "LAUTO", "SEXT", @@ -42,6 +43,7 @@ var cnames9 = []string{ "GOK", "ADDR", "GOTADDR", + "TOCADDR", "TLS_LE", "TLS_IE", "TEXTSIZE", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 756170bc5529b..a2ea492710b59 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -35,6 +35,7 @@ import ( "encoding/binary" "fmt" "log" + "math" "sort" ) @@ -286,6 +287,7 @@ var optab = []Optab{ {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0}, {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0}, + {AMOVD, C_TOCADDR, C_NONE, C_NONE, C_REG, 
95, 8, 0}, /* load constant */ {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, @@ -342,6 +344,8 @@ var optab = []Optab{ {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB}, {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP}, {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO}, + {AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 24, 4, 0}, + {AFMOVD, C_ADDCON, C_NONE, C_NONE, C_FREG, 24, 8, 0}, {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0}, {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, @@ -599,6 +603,7 @@ var optab = []Optab{ {obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0}, {obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL {obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL + {obj.APCALIGN, C_LCON, C_NONE, C_NONE, C_NONE, 0, 0, 0}, // align code {obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0}, } @@ -607,6 +612,28 @@ var oprange [ALAST & obj.AMask][]Optab var xcmp [C_NCLASS][C_NCLASS]bool +// padding bytes to add to align code as requested +func addpad(pc, a int64, ctxt *obj.Link) int { + switch a { + case 8: + if pc%8 != 0 { + return 4 + } + case 16: + switch pc % 16 { + // When currently aligned to 4, avoid 3 NOPs and set to + // 8 byte alignment which should still help. 
+ case 4, 12: + return 4 + case 8: + return 8 + } + default: + ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a) + } + return 0 +} + func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p := cursym.Func.Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols @@ -629,12 +656,16 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { o = c.oplook(p) m = int(o.size) if m == 0 { - if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { - c.ctxt.Diag("zero-width instruction\n%v", p) + if p.As == obj.APCALIGN { + a := c.vregoff(&p.From) + m = addpad(pc, a, ctxt) + } else { + if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { + ctxt.Diag("zero-width instruction\n%v", p) + } + continue } - continue } - pc += int64(m) } @@ -683,10 +714,15 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { m = int(o.size) if m == 0 { - if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { - c.ctxt.Diag("zero-width instruction\n%v", p) + if p.As == obj.APCALIGN { + a := c.vregoff(&p.From) + m = addpad(pc, a, ctxt) + } else { + if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { + ctxt.Diag("zero-width instruction\n%v", p) + } + continue } - continue } pc += int64(m) @@ -695,7 +731,10 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.cursym.Size = pc } - pc += -pc & (funcAlign - 1) + if pc%funcAlign != 0 { + pc += funcAlign - (pc % funcAlign) + } + c.cursym.Size = pc /* @@ -713,10 +752,19 @@ func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if int(o.size) > 4*len(out) { log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p) } + origsize := o.size c.asmout(p, o, out[:]) - for i = 0; i < int32(o.size/4); i++ { - c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) - bp = bp[4:] + if origsize == 0 && o.size > 0 { + for i = 0; i < int32(o.size/4); i++ { + 
c.ctxt.Arch.ByteOrder.PutUint32(bp, out[0]) + bp = bp[4:] + } + o.size = origsize + } else { + for i = 0; i < int32(o.size/4); i++ { + c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) + bp = bp[4:] + } } } } @@ -799,6 +847,9 @@ func (c *ctxt9) aclass(a *obj.Addr) int { case obj.NAME_GOTREF: return C_GOTADDR + case obj.NAME_TOCREF: + return C_TOCADDR + case obj.NAME_AUTO: c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { @@ -829,6 +880,18 @@ func (c *ctxt9) aclass(a *obj.Addr) int { case obj.TYPE_TEXTSIZE: return C_TEXTSIZE + case obj.TYPE_FCONST: + // The only cases where FCONST will occur are with float64 +/- 0. + // All other float constants are generated in memory. + f64 := a.Val.(float64) + if f64 == 0 { + if math.Signbit(f64) { + return C_ADDCON + } + return C_ZCON + } + log.Fatalf("Unexpected nonzero FCONST operand %v", a) + case obj.TYPE_CONST, obj.TYPE_ADDR: switch a.Name { @@ -960,7 +1023,7 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab { } } - //print("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4); + // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4) ops := oprange[p.As&obj.AMask] c1 := &xcmp[a1] c3 := &xcmp[a3] @@ -1435,7 +1498,7 @@ func buildop(ctxt *obj.Link) { opset(AVCMPNEZBCC, r0) case AVPERM: /* vperm */ - opset(AVPERM, r0) + opset(AVPERMXOR, r0) case AVBPERMQ: /* vbpermq, vbpermd */ opset(AVBPERMD, r0) @@ -1882,6 +1945,7 @@ func buildop(ctxt *obj.Link) { obj.ATEXT, obj.AUNDEF, obj.AFUNCDATA, + obj.APCALIGN, obj.APCDATA, obj.ADUFFZERO, obj.ADUFFCOPY: @@ -2147,6 +2211,10 @@ func (c *ctxt9) opform(insn uint32) int { // Encode instructions and create relocation for accessing s+d according to the // instruction op with source or destination (as appropriate) register reg. func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) { + if c.ctxt.Headtype == objabi.Haix { + // Every symbol access must be made via a TOC anchor. 
+ c.ctxt.Diag("symbolAccess called for %s", s.Name) + } var base uint32 form := c.opform(op) if c.ctxt.Flag_shared { @@ -2290,6 +2358,19 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { prasm(p) case 0: /* pseudo ops */ + if p.As == obj.APCALIGN { + aln := c.vregoff(&p.From) + v := addpad(p.Pc, aln, c.ctxt) + if v > 0 { + for i := 0; i < 6; i++ { + out[i] = uint32(0) + } + o.size = int8(v) + out[0] = LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO) + return + } + o.size = 0 + } break case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */ @@ -2763,6 +2844,13 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { c.ctxt.Diag("%v is not supported", p) } + case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ + o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) + // This is needed for -0. + if o.size == 8 { + o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) + } + case 25: /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] 
-> rldicl */ v := c.regoff(&p.From) @@ -3567,6 +3655,26 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { /* operand order: RA, RB, CY, RT */ cy := int(c.regoff(p.GetFrom3())) o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) + + case 95: /* Retrieve TOC relative symbol */ + /* This code is for AIX only */ + v := c.vregoff(&p.From) + if v != 0 { + c.ctxt.Diag("invalid offset against TOC slot %v", p) + } + + inst := c.opload(p.As) + if c.opform(inst) != DS_FORM { + c.ctxt.Diag("invalid form for a TOC access in %v", p) + } + + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) + o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Type = objabi.R_ADDRPOWER_TOCREL_DS } out[0] = o1 @@ -4397,6 +4505,8 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case AVPERM: return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ + case AVPERMXOR: + return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ case AVSEL: return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index f42d6758050ba..30a8414d4ad40 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -67,10 +67,13 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { case AFMOVD: if p.From.Type == obj.TYPE_FCONST { f64 := p.From.Val.(float64) - p.From.Type = obj.TYPE_MEM - p.From.Sym = ctxt.Float64Sym(f64) - p.From.Name = obj.NAME_EXTERN - p.From.Offset = 0 + // Constant not needed in memory for float +/- 0 + if f64 != 0 { + p.From.Type = obj.TYPE_MEM + p.From.Sym = ctxt.Float64Sym(f64) + p.From.Name = obj.NAME_EXTERN + p.From.Offset = 0 + } } // Put >32-bit constants in memory and load them @@ -105,7 +108,121 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { } if c.ctxt.Flag_dynlink { c.rewriteToUseGot(p) + } else if c.ctxt.Headtype == 
objabi.Haix { + c.rewriteToUseTOC(p) + } +} + +// Rewrite p, if necessary, to access a symbol using its TOC anchor. +// This code is for AIX only. +func (c *ctxt9) rewriteToUseTOC(p *obj.Prog) { + if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { + return } + + var source *obj.Addr + if p.From.Name == obj.NAME_EXTERN || p.From.Name == obj.NAME_STATIC { + if p.From.Type == obj.TYPE_ADDR { + if p.As == ADWORD { + // ADWORD $sym doesn't need TOC anchor + return + } + if p.As != AMOVD { + c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v", p) + return + } + if p.To.Type != obj.TYPE_REG { + c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v", p) + return + } + } else if p.From.Type != obj.TYPE_MEM { + c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p) + return + } + source = &p.From + + } else if p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC { + if p.To.Type != obj.TYPE_MEM { + c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p) + return + } + if source != nil { + c.ctxt.Diag("cannot handle symbols on both sides in %v", p) + return + } + source = &p.To + } else { + return + + } + + if source.Sym == nil { + c.ctxt.Diag("do not know how to handle nil symbol in %v", p) + return + } + + if source.Sym.Type == objabi.STLSBSS { + return + } + + // Retrieve or create the TOC anchor. 
+ symtoc := c.ctxt.LookupInit("TOC."+source.Sym.Name, func(s *obj.LSym) { + s.Type = objabi.SDATA + s.Set(obj.AttrDuplicateOK, true) + c.ctxt.Data = append(c.ctxt.Data, s) + s.WriteAddr(c.ctxt, 0, 8, source.Sym, 0) + }) + + if source.Type == obj.TYPE_ADDR { + // MOVD $sym, Rx becomes MOVD symtoc, Rx + // MOVD $sym+, Rx becomes MOVD symtoc, Rx; ADD , Rx + p.From.Type = obj.TYPE_MEM + p.From.Sym = symtoc + p.From.Name = obj.NAME_TOCREF + + if p.From.Offset != 0 { + q := obj.Appendp(p, c.newprog) + q.As = AADD + q.From.Type = obj.TYPE_CONST + q.From.Offset = p.From.Offset + p.From.Offset = 0 + q.To = p.To + } + return + + } + + // MOVx sym, Ry becomes MOVD symtoc, REGTMP; MOVx (REGTMP), Ry + // MOVx Ry, sym becomes MOVD symtoc, REGTMP; MOVx Ry, (REGTMP) + // An addition may be inserted between the two MOVs if there is an offset. + + q := obj.Appendp(p, c.newprog) + q.As = AMOVD + q.From.Type = obj.TYPE_MEM + q.From.Sym = symtoc + q.From.Name = obj.NAME_TOCREF + q.To.Type = obj.TYPE_REG + q.To.Reg = REGTMP + + q = obj.Appendp(q, c.newprog) + q.As = p.As + q.From = p.From + q.To = p.To + if p.From.Name != obj.NAME_NONE { + q.From.Type = obj.TYPE_MEM + q.From.Reg = REGTMP + q.From.Name = obj.NAME_NONE + q.From.Sym = nil + } else if p.To.Name != obj.NAME_NONE { + q.To.Type = obj.TYPE_MEM + q.To.Reg = REGTMP + q.To.Name = obj.NAME_NONE + q.To.Sym = nil + } else { + c.ctxt.Diag("unreachable case in rewriteToUseTOC with %v", p) + } + + obj.Nopout(p) } // Rewrite p, if necessary, to access global data via the global offset table. 
@@ -924,7 +1041,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 @@ -939,7 +1056,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + objabi.StackGuard - objabi.StackSmall + p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go index babcd2af010b2..af321f6131a96 100644 --- a/src/cmd/internal/obj/s390x/a.out.go +++ b/src/cmd/internal/obj/s390x/a.out.go @@ -271,6 +271,9 @@ const ( // find leftmost one AFLOGR + // population count + APOPCNT + // integer bitwise AAND AANDW @@ -942,6 +945,9 @@ const ( AVUPLHW AVUPLF AVMSLG + AVMSLEG + AVMSLOG + AVMSLEOG // binary ABYTE diff --git a/src/cmd/internal/obj/s390x/anames.go b/src/cmd/internal/obj/s390x/anames.go index 7edbdd68dfd02..9cea9f962d93d 100644 --- a/src/cmd/internal/obj/s390x/anames.go +++ b/src/cmd/internal/obj/s390x/anames.go @@ -45,6 +45,7 @@ var Anames = []string{ "MOVDLT", "MOVDNE", "FLOGR", + "POPCNT", "AND", "ANDW", "OR", @@ -677,6 +678,9 @@ var Anames = []string{ "VUPLHW", "VUPLF", "VMSLG", + "VMSLEG", + "VMSLOG", + "VMSLEOG", "BYTE", "WORD", "DWORD", diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index ce3fe6af73cf0..7d49103be6fb6 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -246,6 +246,9 @@ var optab = []Optab{ // find leftmost one Optab{AFLOGR, C_REG, C_NONE, C_NONE, C_REG, 8, 0}, + // population count + Optab{APOPCNT, C_REG, C_NONE, C_NONE, C_REG, 9, 0}, + // compare Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 
0}, Optab{ACMP, C_REG, C_NONE, C_NONE, C_LCON, 71, 0}, @@ -1376,6 +1379,9 @@ func buildop(ctxt *obj.Link) { opset(AVSBCBIQ, r) opset(AVSBIQ, r) opset(AVMSLG, r) + opset(AVMSLEG, r) + opset(AVMSLOG, r) + opset(AVMSLEOG, r) case AVSEL: opset(AVFMADB, r) opset(AWFMADB, r) @@ -2612,6 +2618,10 @@ func (c *ctxtz) branchMask(p *obj.Prog) uint32 { func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { o := c.oplook(p) + if o == nil { + return + } + switch o.type_ { default: c.ctxt.Diag("unknown type %d", o.type_) @@ -2849,6 +2859,9 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { // FLOGR also writes a mask to p.To.Reg+1. zRRE(op_FLOGR, uint32(p.To.Reg), uint32(p.From.Reg), asm) + case 9: // population count + zRRE(op_POPCNT, uint32(p.To.Reg), uint32(p.From.Reg), asm) + case 10: // subtract reg [reg] reg r := int(p.Reg) diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index b7b8a2c7a6373..b7a2873106f6e 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -641,7 +641,7 @@ func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Pro p = obj.Appendp(p, c.newprog) p.As = AADD p.From.Type = obj.TYPE_CONST - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.Reg = REGSP p.To.Type = obj.TYPE_REG p.To.Reg = REG_R4 @@ -656,7 +656,7 @@ func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (*obj.Prog, *obj.Pro p = obj.Appendp(p, c.newprog) p.As = AMOVD p.From.Type = obj.TYPE_CONST - p.From.Offset = int64(framesize) + objabi.StackGuard - objabi.StackSmall + p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall p.To.Type = obj.TYPE_REG p.To.Reg = REGTMP diff --git a/src/cmd/internal/obj/s390x/vector.go b/src/cmd/internal/obj/s390x/vector.go index 3f1d900367d3b..62adcf6d4a523 100644 --- a/src/cmd/internal/obj/s390x/vector.go +++ b/src/cmd/internal/obj/s390x/vector.go @@ -45,7 +45,7 @@ func vop(as obj.As) (opcode, es, cs uint32) { 
return op_VAC, 0, 0 case AVACQ: return op_VAC, 4, 0 - case AVMSLG: + case AVMSLG, AVMSLEG, AVMSLOG, AVMSLEOG: return op_VMSL, 3, 0 case AVACCC: return op_VACCC, 0, 0 @@ -1058,6 +1058,12 @@ func singleElementMask(as obj.As) uint32 { AWFTCIDB, AWFIDB: return 8 + case AVMSLEG: + return 8 + case AVMSLOG: + return 4 + case AVMSLEOG: + return 12 } return 0 } diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go index 3fc17fa850711..15a501c3aa514 100644 --- a/src/cmd/internal/obj/sym.go +++ b/src/cmd/internal/obj/sym.go @@ -41,6 +41,7 @@ import ( func Linknew(arch *LinkArch) *Link { ctxt := new(Link) ctxt.hash = make(map[string]*LSym) + ctxt.funchash = make(map[string]*LSym) ctxt.statichash = make(map[string]*LSym) ctxt.Arch = arch ctxt.Pathname = objabi.WorkingDir() @@ -74,6 +75,30 @@ func (ctxt *Link) LookupStatic(name string) *LSym { return s } +// LookupABI looks up a symbol with the given ABI. +// If it does not exist, it creates it. +func (ctxt *Link) LookupABI(name string, abi ABI) *LSym { + var hash map[string]*LSym + switch abi { + case ABI0: + hash = ctxt.hash + case ABIInternal: + hash = ctxt.funchash + default: + panic("unknown ABI") + } + + ctxt.hashmu.Lock() + s := hash[name] + if s == nil { + s = &LSym{Name: name} + s.SetABI(abi) + hash[name] = s + } + ctxt.hashmu.Unlock() + return s +} + // Lookup looks up the symbol with name name. // If it does not exist, it creates it. 
func (ctxt *Link) Lookup(name string) *LSym { diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index 3453b71b3bc3b..f1517d3d5d1df 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -17,6 +17,9 @@ const REG_NONE = 0 func (p *Prog) Line() string { return p.Ctxt.OutermostPos(p.Pos).Format(false, true) } +func (p *Prog) InnermostLine() string { + return p.Ctxt.InnermostPos(p.Pos).Format(false, true) +} // InnermostLineNumber returns a string containing the line number for the // innermost inlined function (if any inlining) at p's position @@ -118,6 +121,16 @@ func (p *Prog) String() string { return fmt.Sprintf("%.5d (%v)\t%s", p.Pc, p.Line(), p.InstructionString()) } +func (p *Prog) InnermostString() string { + if p == nil { + return "" + } + if p.Ctxt == nil { + return "" + } + return fmt.Sprintf("%.5d (%v)\t%s", p.Pc, p.InnermostLine(), p.InstructionString()) +} + // InstructionString returns a string representation of the instruction without preceding // program counter or file and line number. func (p *Prog) InstructionString() string { @@ -177,7 +190,7 @@ func (ctxt *Link) NewProg() *Prog { } func (ctxt *Link) CanReuseProgs() bool { - return !ctxt.Debugasm + return ctxt.Debugasm == 0 } func Dconv(p *Prog, a *Addr) string { @@ -372,6 +385,17 @@ func Mconv(a *Addr) string { } else { str = fmt.Sprintf("%s(%s)", offConv(a.Offset), reg) } + case NAME_TOCREF: + reg := "SB" + if a.Reg != REG_NONE { + reg = Rconv(int(a.Reg)) + } + if a.Sym != nil { + str = fmt.Sprintf("%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg) + } else { + str = fmt.Sprintf("%s(%s)", offConv(a.Offset), reg) + } + } return str } @@ -386,7 +410,7 @@ func offConv(off int64) string { // opSuffixSet is like regListSet, but for opcode suffixes. // // Unlike some other similar structures, uint8 space is not -// divided by it's own values set (because the're only 256 of them). +// divided by its own values set (because there are only 256 of them). 
// Instead, every arch may interpret/format all 8 bits as they like, // as long as they register proper cconv function for it. type opSuffixSet struct { @@ -535,6 +559,7 @@ var Anames = []string{ "FUNCDATA", "JMP", "NOP", + "PCALIGN", "PCDATA", "RET", "GETCALLERPC", diff --git a/src/cmd/internal/obj/wasm/a.out.go b/src/cmd/internal/obj/wasm/a.out.go index 6f882215ff4e1..0e8196be60f91 100644 --- a/src/cmd/internal/obj/wasm/a.out.go +++ b/src/cmd/internal/obj/wasm/a.out.go @@ -246,7 +246,7 @@ const ( REG_RET1 REG_RET2 REG_RET3 - REG_RUN + REG_PAUSE // locals REG_R0 diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go index 8498b407245f0..fbea103dcb080 100644 --- a/src/cmd/internal/obj/wasm/wasmobj.go +++ b/src/cmd/internal/obj/wasm/wasmobj.go @@ -16,16 +16,16 @@ import ( ) var Register = map[string]int16{ - "PC_F": REG_PC_F, - "PC_B": REG_PC_B, - "SP": REG_SP, - "CTXT": REG_CTXT, - "g": REG_g, - "RET0": REG_RET0, - "RET1": REG_RET1, - "RET2": REG_RET2, - "RET3": REG_RET3, - "RUN": REG_RUN, + "PC_F": REG_PC_F, + "PC_B": REG_PC_B, + "SP": REG_SP, + "CTXT": REG_CTXT, + "g": REG_g, + "RET0": REG_RET0, + "RET1": REG_RET1, + "RET2": REG_RET2, + "RET3": REG_RET3, + "PAUSE": REG_PAUSE, "R0": REG_R0, "R1": REG_R1, @@ -125,9 +125,13 @@ func instinit(ctxt *obj.Link) { morestack = ctxt.Lookup("runtime.morestack") morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt") gcWriteBarrier = ctxt.Lookup("runtime.gcWriteBarrier") - sigpanic = ctxt.Lookup("runtime.sigpanic") - deferreturn = ctxt.Lookup("runtime.deferreturn") - jmpdefer = ctxt.Lookup(`"".jmpdefer`) + sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal) + deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) + // jmpdefer is defined in assembly as ABI0, but what we're + // looking for is the *call* to jmpdefer from the Go function + // deferreturn, so we're looking for the ABIInternal version + // of jmpdefer that's called by Go. 
+ jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABIInternal) } func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { @@ -241,7 +245,6 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { for p := s.Func.Text; p != nil; p = p.Link { prevBase := base base = ctxt.PosTable.Pos(p.Pos).Base() - switch p.As { case ABlock, ALoop, AIf: explicitBlockDepth++ @@ -277,8 +280,15 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { // more often to avoid bloat of the BrTable instruction. // The "base != prevBase" condition detects inlined instructions. They are an // implicit call, so entering and leaving this section affects the stack trace. - if p.As == ACALLNORESUME || p.As == obj.ANOP || p.Spadj != 0 || base != prevBase { + if p.As == ACALLNORESUME || p.As == obj.ANOP || p.As == ANop || p.Spadj != 0 || base != prevBase { pc++ + if p.To.Sym == sigpanic { + // The panic stack trace expects the PC at the call of sigpanic, + // not the next one. However, runtime.Caller subtracts 1 from the + // PC. To make both PC and PC-1 work (have the same line number), + // we advance the PC by 2 at sigpanic. + pc++ + } } } tableIdxs = append(tableIdxs, uint64(numResumePoints)) @@ -705,11 +715,42 @@ func regAddr(reg int16) obj.Addr { return obj.Addr{Type: obj.TYPE_REG, Reg: reg} } +// countRegisters returns the number of integer and float registers used by s. +// It does so by looking for the maximum I* and R* registers. 
+func countRegisters(s *obj.LSym) (numI, numF int16) { + for p := s.Func.Text; p != nil; p = p.Link { + var reg int16 + switch p.As { + case AGet: + reg = p.From.Reg + case ASet: + reg = p.To.Reg + case ATee: + reg = p.To.Reg + default: + continue + } + if reg >= REG_R0 && reg <= REG_R15 { + if n := reg - REG_R0 + 1; numI < n { + numI = n + } + } else if reg >= REG_F0 && reg <= REG_F15 { + if n := reg - REG_F0 + 1; numF < n { + numF = n + } + } + } + return +} + func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { w := new(bytes.Buffer) + numI, numF := countRegisters(s) + // Function starts with declaration of locals: numbers and types. switch s.Name { + // memchr and memcmp don't use the normal Go calling convention and need i32 variables. case "memchr": writeUleb128(w, 1) // number of sets of locals writeUleb128(w, 3) // number of locals @@ -719,11 +760,23 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { writeUleb128(w, 2) // number of locals w.WriteByte(0x7F) // i32 default: - writeUleb128(w, 2) // number of sets of locals - writeUleb128(w, 16) // number of locals - w.WriteByte(0x7E) // i64 - writeUleb128(w, 16) // number of locals - w.WriteByte(0x7C) // f64 + numTypes := 0 + if numI > 0 { + numTypes++ + } + if numF > 0 { + numTypes++ + } + + writeUleb128(w, uint64(numTypes)) + if numI > 0 { + writeUleb128(w, uint64(numI)) // number of locals + w.WriteByte(0x7E) // i64 + } + if numF > 0 { + writeUleb128(w, uint64(numF)) // number of locals + w.WriteByte(0x7C) // f64 + } } for p := s.Func.Text; p != nil; p = p.Link { @@ -734,12 +787,15 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } reg := p.From.Reg switch { - case reg >= REG_PC_F && reg <= REG_RUN: + case reg >= REG_PC_F && reg <= REG_PAUSE: w.WriteByte(0x23) // get_global writeUleb128(w, uint64(reg-REG_PC_F)) - case reg >= REG_R0 && reg <= REG_F15: - w.WriteByte(0x20) // get_local + case reg >= REG_R0 && reg <= REG_R15: + w.WriteByte(0x20) // 
get_local (i64) writeUleb128(w, uint64(reg-REG_R0)) + case reg >= REG_F0 && reg <= REG_F15: + w.WriteByte(0x20) // get_local (f64) + writeUleb128(w, uint64(numI+(reg-REG_F0))) default: panic("bad Get: invalid register") } @@ -751,7 +807,7 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } reg := p.To.Reg switch { - case reg >= REG_PC_F && reg <= REG_RUN: + case reg >= REG_PC_F && reg <= REG_PAUSE: w.WriteByte(0x24) // set_global writeUleb128(w, uint64(reg-REG_PC_F)) case reg >= REG_R0 && reg <= REG_F15: @@ -761,7 +817,11 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } else { w.WriteByte(0x21) // set_local } - writeUleb128(w, uint64(reg-REG_R0)) + if reg <= REG_R15 { + writeUleb128(w, uint64(reg-REG_R0)) + } else { + writeUleb128(w, uint64(numI+(reg-REG_F0))) + } default: panic("bad Set: invalid register") } @@ -773,9 +833,12 @@ func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) { } reg := p.To.Reg switch { - case reg >= REG_R0 && reg <= REG_F15: - w.WriteByte(0x22) // tee_local + case reg >= REG_R0 && reg <= REG_R15: + w.WriteByte(0x22) // tee_local (i64) writeUleb128(w, uint64(reg-REG_R0)) + case reg >= REG_F0 && reg <= REG_F15: + w.WriteByte(0x22) // tee_local (f64) + writeUleb128(w, uint64(numI+(reg-REG_F0))) default: panic("bad Tee: invalid register") } diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index d3389e4f1545c..c3da29ce2cbd4 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -2064,7 +2064,7 @@ func instinit(ctxt *obj.Link) { case objabi.Hplan9: plan9privates = ctxt.Lookup("_privates") case objabi.Hnacl: - deferreturn = ctxt.Lookup("runtime.deferreturn") + deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal) } for i := range avxOptab { @@ -2288,7 +2288,7 @@ func instinit(ctxt *obj.Link) { } } -var isAndroid = (objabi.GOOS == "android") +var isAndroid = objabi.GOOS == "android" func prefixof(ctxt *obj.Link, 
a *obj.Addr) int { if a.Reg < REG_CS && a.Index < REG_CS { // fast path @@ -4704,7 +4704,9 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { r = obj.Addrel(cursym) r.Off = int32(p.Pc + int64(ab.Len())) r.Sym = p.To.Sym - r.Type = objabi.R_PCREL + // Note: R_CALL instead of R_PCREL. R_CALL is more permissive in that + // it can point to a trampoline instead of the destination itself. + r.Type = objabi.R_CALL r.Siz = 4 ab.PutInt32(0) break @@ -5051,7 +5053,7 @@ func (ab *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { bad: if ctxt.Arch.Family != sys.AMD64 { // here, the assembly has failed. - // if its a byte instruction that has + // if it's a byte instruction that has // unaddressable registers, try to // exchange registers and reissue the // instruction with the operands renamed. diff --git a/src/cmd/internal/obj/x86/evex.go b/src/cmd/internal/obj/x86/evex.go index 30c0e62e0f593..d8867283fa591 100644 --- a/src/cmd/internal/obj/x86/evex.go +++ b/src/cmd/internal/obj/x86/evex.go @@ -194,7 +194,7 @@ func newEVEXSuffix() evexSuffix { return evexSuffix{rounding: rcUnset} } -// evexSuffixMap maps obj.X86suffix to it's decoded version. +// evexSuffixMap maps obj.X86suffix to its decoded version. // Filled during init(). 
var evexSuffixMap [255]evexSuffix diff --git a/src/cmd/internal/obj/x86/issue19518_test.go b/src/cmd/internal/obj/x86/issue19518_test.go index 2fe227ee3f239..fa2beb8aad39f 100644 --- a/src/cmd/internal/obj/x86/issue19518_test.go +++ b/src/cmd/internal/obj/x86/issue19518_test.go @@ -11,7 +11,6 @@ import ( "os" "os/exec" "path/filepath" - "strings" "testing" ) @@ -68,13 +67,8 @@ func objdumpOutput(t *testing.T) []byte { testenv.GoToolPath(t), "build", "-o", filepath.Join(tmpdir, "output")) - var env []string - for _, v := range os.Environ() { - if !strings.HasPrefix(v, "GOARCH=") { - env = append(env, v) - } - } - cmd.Env = append(env, "GOARCH=amd64") + cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux") + out, err := cmd.CombinedOutput() if err != nil { t.Fatalf("error %s output %s", err, out) diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 139f293b136f0..babfd38ad2351 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -1115,7 +1115,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA p.As = lea p.From.Type = obj.TYPE_MEM p.From.Reg = REG_SP - p.From.Offset = objabi.StackGuard + p.From.Offset = int64(objabi.StackGuard) p.To.Type = obj.TYPE_REG p.To.Reg = REG_AX @@ -1131,7 +1131,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA p.From.Type = obj.TYPE_REG p.From.Reg = REG_AX p.To.Type = obj.TYPE_CONST - p.To.Offset = int64(framesize) + (objabi.StackGuard - objabi.StackSmall) + p.To.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall) } // common diff --git a/src/cmd/internal/obj/x86/obj6_test.go b/src/cmd/internal/obj/x86/obj6_test.go index 2f6296ce8bb4a..c5399744f2d18 100644 --- a/src/cmd/internal/obj/x86/obj6_test.go +++ b/src/cmd/internal/obj/x86/obj6_test.go @@ -99,13 +99,7 @@ func asmOutput(t *testing.T, s string) []byte { testenv.GoToolPath(t), "tool", "asm", "-S", "-dynlink", "-o", 
filepath.Join(tmpdir, "output.6"), tmpfile.Name()) - var env []string - for _, v := range os.Environ() { - if !strings.HasPrefix(v, "GOARCH=") { - env = append(env, v) - } - } - cmd.Env = append(env, "GOARCH=amd64") + cmd.Env = append(os.Environ(), "GOARCH=amd64", "GOOS=linux") asmout, err := cmd.CombinedOutput() if err != nil { t.Fatalf("error %s output %s", err, asmout) diff --git a/src/cmd/internal/obj/x86/ytab.go b/src/cmd/internal/obj/x86/ytab.go index 14bbaf72a9437..7d0b75bf46415 100644 --- a/src/cmd/internal/obj/x86/ytab.go +++ b/src/cmd/internal/obj/x86/ytab.go @@ -22,7 +22,7 @@ type ytab struct { // Returns true if yt is compatible with args. // -// Elements from args and yt.args are used to +// Elements from args and yt.args are used // to index ycover table like `ycover[args[i]+yt.args[i]]`. // This means that args should contain values that already // multiplied by Ymax. diff --git a/src/cmd/internal/objabi/doc.go b/src/cmd/internal/objabi/doc.go index 7bd5ff63e562c..03dc9fb88bc76 100644 --- a/src/cmd/internal/objabi/doc.go +++ b/src/cmd/internal/objabi/doc.go @@ -22,7 +22,7 @@ // // The file format is: // -// - magic header: "\x00\x00go19ld" +// - magic header: "\x00go112ld" // - byte 1 - version number // - sequence of strings giving dependencies (imported packages) // - empty string (marks end of sequence) @@ -38,7 +38,7 @@ // - data, the content of the defined symbols // - sequence of defined symbols // - byte 0xff (marks end of sequence) -// - magic footer: "\xff\xffgo19ld" +// - magic footer: "\xffgo112ld" // // All integers are stored in a zigzag varint format. // See golang.org/s/go12symtab for a definition. @@ -46,7 +46,7 @@ // Data blocks and strings are both stored as an integer // followed by that many bytes. // -// A symbol reference is a string name followed by a version. +// A symbol reference is a string name followed by an ABI or -1 for static. // // A symbol points to other symbols using an index into the symbol // reference sequence. 
Index 0 corresponds to a nil symbol pointer. @@ -57,7 +57,7 @@ // // - byte 0xfe (sanity check for synchronization) // - type [byte] -// - name & version [symref index] +// - name & ABI [symref index] // - flags [int] // 1<<0 dupok // 1<<1 local diff --git a/src/cmd/internal/objabi/flag.go b/src/cmd/internal/objabi/flag.go index 30cd7dccac20f..90e944656bb8a 100644 --- a/src/cmd/internal/objabi/flag.go +++ b/src/cmd/internal/objabi/flag.go @@ -100,9 +100,18 @@ func (versionFlag) Set(s string) error { // for releases, but during development we include the full // build ID of the binary, so that if the compiler is changed and // rebuilt, we notice and rebuild all packages. - if s == "full" && strings.HasPrefix(Version, "devel") { - p += " buildID=" + buildID + if s == "full" { + // If there's an active experiment, include that, + // to distinguish go1.10.2 with an experiment + // from go1.10.2 without an experiment. + if x := Expstring(); x != "" { + p += " " + x + } + if strings.HasPrefix(Version, "devel") { + p += " buildID=" + buildID + } } + fmt.Printf("%s version %s%s%s\n", name, Version, sep, p) os.Exit(0) return nil diff --git a/src/cmd/internal/objabi/funcdata.go b/src/cmd/internal/objabi/funcdata.go index a7827125bf65c..231d11b18572c 100644 --- a/src/cmd/internal/objabi/funcdata.go +++ b/src/cmd/internal/objabi/funcdata.go @@ -18,6 +18,7 @@ const ( FUNCDATA_LocalsPointerMaps = 1 FUNCDATA_InlTree = 2 FUNCDATA_RegPointerMaps = 3 + FUNCDATA_StackObjects = 4 // ArgsSizeUnknown is set in Func.argsize to mark all functions // whose argument size is unknown (C vararg functions, and diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go index 15a63ab8b3424..a30bc3fa05a6c 100644 --- a/src/cmd/internal/objabi/funcid.go +++ b/src/cmd/internal/objabi/funcid.go @@ -4,12 +4,17 @@ package objabi +import ( + "strconv" + "strings" +) + // A FuncID identifies particular functions that need to be treated // specially by the runtime. 
// Note that in some situations involving plugins, there may be multiple // copies of a particular special runtime function. // Note: this list must match the list in runtime/symtab.go. -type FuncID uint32 +type FuncID uint8 const ( FuncID_normal FuncID = iota // not a special function @@ -30,4 +35,65 @@ const ( FuncID_gogo FuncID_externalthreadhandler FuncID_debugCallV1 + FuncID_gopanic + FuncID_panicwrap + FuncID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.) ) + +// Get the function ID for the named function in the named file. +// The function should be package-qualified. +func GetFuncID(name, file string) FuncID { + switch name { + case "runtime.main": + return FuncID_runtime_main + case "runtime.goexit": + return FuncID_goexit + case "runtime.jmpdefer": + return FuncID_jmpdefer + case "runtime.mcall": + return FuncID_mcall + case "runtime.morestack": + return FuncID_morestack + case "runtime.mstart": + return FuncID_mstart + case "runtime.rt0_go": + return FuncID_rt0_go + case "runtime.asmcgocall": + return FuncID_asmcgocall + case "runtime.sigpanic": + return FuncID_sigpanic + case "runtime.runfinq": + return FuncID_runfinq + case "runtime.gcBgMarkWorker": + return FuncID_gcBgMarkWorker + case "runtime.systemstack_switch": + return FuncID_systemstack_switch + case "runtime.systemstack": + return FuncID_systemstack + case "runtime.cgocallback_gofunc": + return FuncID_cgocallback_gofunc + case "runtime.gogo": + return FuncID_gogo + case "runtime.externalthreadhandler": + return FuncID_externalthreadhandler + case "runtime.debugCallV1": + return FuncID_debugCallV1 + case "runtime.gopanic": + return FuncID_gopanic + case "runtime.panicwrap": + return FuncID_panicwrap + } + if file == "" { + return FuncID_wrapper + } + if strings.HasPrefix(name, "runtime.call") { + if _, err := strconv.Atoi(name[12:]); err == nil { + // runtime.callXX reflect call wrappers. 
+ return FuncID_wrapper + } + } + if strings.HasSuffix(name, "-fm") { + return FuncID_wrapper + } + return FuncID_normal +} diff --git a/src/cmd/internal/objabi/head.go b/src/cmd/internal/objabi/head.go index 23c7b62daf620..db2221d6b145e 100644 --- a/src/cmd/internal/objabi/head.go +++ b/src/cmd/internal/objabi/head.go @@ -48,10 +48,13 @@ const ( Hplan9 Hsolaris Hwindows + Haix ) func (h *HeadType) Set(s string) error { switch s { + case "aix": + *h = Haix case "darwin": *h = Hdarwin case "dragonfly": @@ -82,6 +85,8 @@ func (h *HeadType) Set(s string) error { func (h *HeadType) String() string { switch *h { + case Haix: + return "aix" case Hdarwin: return "darwin" case Hdragonfly: diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index a3e2868a1bc94..355882c63880a 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -198,7 +198,7 @@ const ( R_WASMIMPORT ) -// IsDirectJump returns whether r is a relocation for a direct jump. +// IsDirectJump reports whether r is a relocation for a direct jump. // A direct jump is a CALL or JMP instruction that takes the target address // as immediate. The address is embedded into the instruction, possibly // with limited width. diff --git a/src/cmd/internal/objabi/stack.go b/src/cmd/internal/objabi/stack.go index 11433932e2dcf..62ab0398a6653 100644 --- a/src/cmd/internal/objabi/stack.go +++ b/src/cmd/internal/objabi/stack.go @@ -10,11 +10,24 @@ const ( STACKSYSTEM = 0 StackSystem = STACKSYSTEM StackBig = 4096 - StackGuard = 880*stackGuardMultiplier + StackSystem StackSmall = 128 - StackLimit = StackGuard - StackSystem - StackSmall ) const ( StackPreempt = -1314 // 0xfff...fade ) + +// Initialize StackGuard and StackLimit according to target system. 
+var StackGuard = 880*stackGuardMultiplier() + StackSystem +var StackLimit = StackGuard - StackSystem - StackSmall + +// stackGuardMultiplier returns a multiplier to apply to the default +// stack guard size. Larger multipliers are used for non-optimized +// builds that have larger stack frames or for specific targets. +func stackGuardMultiplier() int { + // On AIX, a larger stack is needed for syscalls. + if GOOS == "aix" { + return 2 + } + return stackGuardMultiplierDefault +} diff --git a/src/cmd/internal/objabi/symkind.go b/src/cmd/internal/objabi/symkind.go index b95a0d3c701fd..16b4c535ed85b 100644 --- a/src/cmd/internal/objabi/symkind.go +++ b/src/cmd/internal/objabi/symkind.go @@ -60,6 +60,13 @@ const ( SDWARFRANGE SDWARFLOC SDWARFMISC + // ABI alias. An ABI alias symbol is an empty symbol with a + // single relocation with 0 size that references the native + // function implementation symbol. + // + // TODO(austin): Remove this and all uses once the compiler + // generates real ABI wrappers rather than symbol aliases. + SABIALIAS // Update cmd/link/internal/sym/AbiSymKindToSymKind for new SymKind values. 
) diff --git a/src/cmd/internal/objabi/symkind_string.go b/src/cmd/internal/objabi/symkind_string.go index 7152d6c006901..2b9a9080e8c0f 100644 --- a/src/cmd/internal/objabi/symkind_string.go +++ b/src/cmd/internal/objabi/symkind_string.go @@ -4,9 +4,9 @@ package objabi import "strconv" -const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISC" +const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISCSABIALIAS" -var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 91} +var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 91, 100} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index a47e2f93a10e9..da49f706f6033 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -76,7 +76,7 @@ func init() { } func Framepointer_enabled(goos, goarch string) bool { - return framepointer_enabled != 0 && goarch == "amd64" && goos != "nacl" + return framepointer_enabled != 0 && (goarch == "amd64" && goos != "nacl" || goarch == "arm64" && goos == "linux") } func addexp(s string) { @@ -104,8 +104,6 @@ var ( framepointer_enabled int = 1 Fieldtrack_enabled int Preemptibleloops_enabled int - Clobberdead_enabled int - DebugCPU_enabled int ) // Toolchain experiments. 
@@ -119,8 +117,6 @@ var exper = []struct { {"fieldtrack", &Fieldtrack_enabled}, {"framepointer", &framepointer_enabled}, {"preemptibleloops", &Preemptibleloops_enabled}, - {"clobberdead", &Clobberdead_enabled}, - {"debugcpu", &DebugCPU_enabled}, } var defaultExpstring = Expstring() diff --git a/src/cmd/internal/objfile/disasm.go b/src/cmd/internal/objfile/disasm.go index fce63bfeeae0b..50fc51be87726 100644 --- a/src/cmd/internal/objfile/disasm.go +++ b/src/cmd/internal/objfile/disasm.go @@ -357,7 +357,7 @@ func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.By inst, err := ppc64asm.Decode(code, byteOrder) var text string size := inst.Len - if err != nil || size == 0 || inst.Op == 0 { + if err != nil || size == 0 { size = 4 text = "?" } else { diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go index 10307be07217d..41c5d9b9f53ca 100644 --- a/src/cmd/internal/objfile/objfile.go +++ b/src/cmd/internal/objfile/objfile.go @@ -61,6 +61,7 @@ var openers = []func(io.ReaderAt) (rawFile, error){ openMacho, openPE, openPlan9, + openXcoff, } // Open opens the named file. diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go index 80db6f0f1872f..259b59a4f4aeb 100644 --- a/src/cmd/internal/objfile/pe.go +++ b/src/cmd/internal/objfile/pe.go @@ -190,6 +190,9 @@ func (f *peFile) goarch() string { if _, err := findPESymbol(f.pe, "_rt0_amd64_windows"); err == nil { return "amd64" } + if _, err := findPESymbol(f.pe, "_rt0_arm_windows"); err == nil { + return "arm" + } return "" } diff --git a/src/cmd/internal/objfile/xcoff.go b/src/cmd/internal/objfile/xcoff.go new file mode 100644 index 0000000000000..d438c802264f6 --- /dev/null +++ b/src/cmd/internal/objfile/xcoff.go @@ -0,0 +1,165 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Parsing of XCOFF executable (AIX) + +package objfile + +import ( + "debug/dwarf" + "fmt" + "internal/xcoff" + "io" + "unicode" +) + +type xcoffFile struct { + xcoff *xcoff.File +} + +func openXcoff(r io.ReaderAt) (rawFile, error) { + f, err := xcoff.NewFile(r) + if err != nil { + return nil, err + } + return &xcoffFile{f}, nil +} + +func (f *xcoffFile) symbols() ([]Sym, error) { + var syms []Sym + for _, s := range f.xcoff.Symbols { + const ( + N_UNDEF = 0 // An undefined (extern) symbol + N_ABS = -1 // An absolute symbol (e_value is a constant, not an address) + N_DEBUG = -2 // A debugging symbol + ) + sym := Sym{Name: s.Name, Addr: s.Value, Code: '?'} + + switch s.SectionNumber { + case N_UNDEF: + sym.Code = 'U' + case N_ABS: + sym.Code = 'C' + case N_DEBUG: + sym.Code = '?' + default: + if s.SectionNumber < 0 || len(f.xcoff.Sections) < int(s.SectionNumber) { + return nil, fmt.Errorf("invalid section number in symbol table") + } + sect := f.xcoff.Sections[s.SectionNumber-1] + + // debug/xcoff returns an offset in the section not the actual address + sym.Addr += sect.VirtualAddress + + if s.AuxCSect.SymbolType&0x3 == xcoff.XTY_LD { + // The size of a function is contained in the + // AUX_FCN entry + sym.Size = s.AuxFcn.Size + } else { + sym.Size = s.AuxCSect.Length + } + + sym.Size = s.AuxCSect.Length + + switch sect.Type { + case xcoff.STYP_TEXT: + if s.AuxCSect.StorageMappingClass == xcoff.XMC_RO { + sym.Code = 'R' + } else { + sym.Code = 'T' + } + case xcoff.STYP_DATA: + sym.Code = 'D' + case xcoff.STYP_BSS: + sym.Code = 'B' + } + + if s.StorageClass == xcoff.C_HIDEXT { + // Local symbol + sym.Code = unicode.ToLower(sym.Code) + } + + } + syms = append(syms, sym) + } + + return syms, nil +} + +func (f *xcoffFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { + if sect := f.xcoff.Section(".text"); sect != nil { + textStart = sect.VirtualAddress + } + if pclntab, err = loadXCOFFTable(f.xcoff, "runtime.pclntab", "runtime.epclntab"); err != 
nil { + return 0, nil, nil, err + } + if symtab, err = loadXCOFFTable(f.xcoff, "runtime.symtab", "runtime.esymtab"); err != nil { + return 0, nil, nil, err + } + return textStart, symtab, pclntab, nil +} + +func (f *xcoffFile) text() (textStart uint64, text []byte, err error) { + sect := f.xcoff.Section(".text") + if sect == nil { + return 0, nil, fmt.Errorf("text section not found") + } + textStart = sect.VirtualAddress + text, err = sect.Data() + return +} + +func findXCOFFSymbol(f *xcoff.File, name string) (*xcoff.Symbol, error) { + for _, s := range f.Symbols { + if s.Name != name { + continue + } + if s.SectionNumber <= 0 { + return nil, fmt.Errorf("symbol %s: invalid section number %d", name, s.SectionNumber) + } + if len(f.Sections) < int(s.SectionNumber) { + return nil, fmt.Errorf("symbol %s: section number %d is larger than max %d", name, s.SectionNumber, len(f.Sections)) + } + return s, nil + } + return nil, fmt.Errorf("no %s symbol found", name) +} + +func loadXCOFFTable(f *xcoff.File, sname, ename string) ([]byte, error) { + ssym, err := findXCOFFSymbol(f, sname) + if err != nil { + return nil, err + } + esym, err := findXCOFFSymbol(f, ename) + if err != nil { + return nil, err + } + if ssym.SectionNumber != esym.SectionNumber { + return nil, fmt.Errorf("%s and %s symbols must be in the same section", sname, ename) + } + sect := f.Sections[ssym.SectionNumber-1] + data, err := sect.Data() + if err != nil { + return nil, err + } + return data[ssym.Value:esym.Value], nil +} + +func (f *xcoffFile) goarch() string { + switch f.xcoff.TargetMachine { + case xcoff.U802TOCMAGIC: + return "ppc" + case xcoff.U64_TOCMAGIC: + return "ppc64" + } + return "" +} + +func (f *xcoffFile) loadAddress() (uint64, error) { + return 0, fmt.Errorf("unknown load address") +} + +func (f *xcoffFile) dwarf() (*dwarf.Data, error) { + return f.xcoff.DWARF() +} diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go new file mode 100644 index 
0000000000000..a53da6ed2cbeb --- /dev/null +++ b/src/cmd/internal/sys/supported.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sys + +// RaceDetectorSupported reports whether goos/goarch supports the race +// detector. There is a copy of this function in cmd/dist/test.go. +func RaceDetectorSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "ppc64le" || goarch == "arm64" + case "darwin", "freebsd", "netbsd", "windows": + return goarch == "amd64" + default: + return false + } +} + +// MSanSupported reports whether goos/goarch supports the memory +// sanitizer option. There is a copy of this function in cmd/dist/test.go. +func MSanSupported(goos, goarch string) bool { + switch goos { + case "linux": + return goarch == "amd64" || goarch == "arm64" + default: + return false + } +} diff --git a/src/cmd/link/doc.go b/src/cmd/link/doc.go index 963d86a35fa28..219499be0a16a 100644 --- a/src/cmd/link/doc.go +++ b/src/cmd/link/doc.go @@ -43,8 +43,16 @@ Flags: or initialized to a constant string expression. -X will not work if the initializer makes a function call or refers to other variables. Note that before Go 1.5 this option took two separate arguments. + -a + Disassemble output. + -buildid id + Record id as Go toolchain build id. -buildmode mode Set build mode (default exe). + -c + Dump call graphs. + -compressdwarf + Compress DWARF if possible (default true). -cpuprofile file Write CPU profile to file. -d @@ -54,6 +62,10 @@ Flags: The dynamic header is on by default, even without any references to dynamic libraries, because many common system tools now assume the presence of the header. + -debugtramp int + Debug trampolines. + -dumpdep + Dump symbol dependency graph. -extar ar Set the external archive program (default "ar"). Used only for -buildmode=c-archive. 
@@ -65,9 +77,14 @@ Flags: Ignore version mismatch in the linked archives. -g Disable Go package data checks. + -importcfg file + Read import configuration from file. + In the file, set packagefile, packageshlib to specify import resolution. -installsuffix suffix Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix instead of $GOROOT/pkg/$GOOS_$GOARCH. + -k symbol + Set field tracking symbol. Use this flag when GOEXPERIMENT=fieldtrack is set. -libgcc file Set name of compiler support library. This is only used in internal link mode. @@ -85,6 +102,8 @@ Flags: Set runtime.MemProfileRate to rate. -msan Link with C/C++ memory sanitizer support. + -n + Dump symbol table. -o file Write output to file (default a.out, or a.out.exe on Windows). -pluginpath path @@ -100,6 +119,8 @@ Flags: -tmpdir dir Write temporary files to dir. Temporary files are only used in external linking mode. + -u + Reject unsafe packages. -v Print trace of linker operations. -w diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go index ff11689bbccc0..2c01456f6b0e3 100644 --- a/src/cmd/link/dwarf_test.go +++ b/src/cmd/link/dwarf_test.go @@ -122,6 +122,9 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) r.SkipChildren() continue } + if cu.Val(dwarf.AttrStmtList) == nil { + continue + } lr, err := d.LineReader(cu) if err != nil { t.Fatal(err) diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index af274444f36ae..e922fe2db94ee 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -139,7 +139,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add += int64(targ.Plt) + r.Add += int64(targ.Plt()) } return true @@ -164,7 +164,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += 4 - r.Add 
+= int64(targ.Got) + r.Add += int64(targ.Got()) return true case 256 + objabi.RelocType(elf.R_X86_64_64): @@ -190,13 +190,12 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) r.Type = objabi.R_PCREL return true } fallthrough - // fall through case 512 + ld.MACHO_X86_64_RELOC_UNSIGNED*2 + 1, 512 + ld.MACHO_X86_64_RELOC_SIGNED*2 + 1, 512 + ld.MACHO_X86_64_RELOC_SIGNED_1*2 + 1, @@ -224,7 +223,6 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { } fallthrough - // fall through case 512 + ld.MACHO_X86_64_RELOC_GOT*2 + 1: if targ.Type != sym.SDYNIMPORT { ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name) @@ -232,7 +230,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { addgotsym(ctxt, targ) r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) return true } @@ -251,7 +249,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { // Build a PLT entry and change the relocation target to that entry. 
addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) return true case objabi.R_ADDR: @@ -259,7 +257,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if ctxt.HeadType == objabi.Hsolaris { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add += int64(targ.Plt) + r.Add += int64(targ.Plt()) return true } // The code is asking for the address of an external @@ -268,7 +266,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { addgotsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".got", 0) - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) return true } @@ -412,7 +410,7 @@ func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { } case objabi.R_PCREL: if r.Siz == 4 { - if r.Xsym.Type == sym.SDYNIMPORT && r.Xsym.ElfType == elf.STT_FUNC { + if r.Xsym.Type == sym.SDYNIMPORT && r.Xsym.ElfType() == elf.STT_FUNC { ctxt.Out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32) } else { ctxt.Out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32) @@ -532,8 +530,8 @@ func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, secto return true } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { - return false +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { @@ -569,7 +567,7 @@ func elfsetupplt(ctxt *ld.Link) { } func addpltsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Plt >= 0 { + if s.Plt() >= 0 { return } @@ -608,7 +606,7 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_JMP_SLOT))) rela.AddUint64(ctxt.Arch, 0) - s.Plt = int32(plt.Size - 16) + s.SetPlt(int32(plt.Size - 16)) } else if ctxt.HeadType == objabi.Hdarwin { // To do lazy symbol lookup right, we're supposed // to tell the dynamic loader 
which library each @@ -626,29 +624,29 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { ctxt.Syms.Lookup(".linkedit.plt", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) // jmpq *got+size(IP) - s.Plt = int32(plt.Size) + s.SetPlt(int32(plt.Size)) plt.AddUint8(0xff) plt.AddUint8(0x25) - plt.AddPCRelPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) + plt.AddPCRelPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got())) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } func addgotsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Got >= 0 { + if s.Got() >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) got.AddUint64(ctxt.Arch, 0) if ctxt.IsELF { rela := ctxt.Syms.Lookup(".rela", 0) - rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got())) rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_GLOB_DAT))) rela.AddUint64(ctxt.Arch, 0) } else if ctxt.HeadType == objabi.Hdarwin { diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go index f0a510f1f07fc..efcd41d72b0f6 100644 --- a/src/cmd/link/internal/arm/asm.go +++ b/src/cmd/link/internal/arm/asm.go @@ -132,7 +132,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) + r.Add = int64(braddoff(int32(r.Add), targ.Plt()/4)) } return true @@ -150,7 +150,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { r.Type = objabi.R_CONST // write r->add during relocsym r.Sym = nil - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) return true case 256 + objabi.RelocType(elf.R_ARM_GOT_PREL): // GOT(nil) + A - nil @@ -162,7 +162,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) - r.Add += 
int64(targ.Got) + 4 + r.Add += int64(targ.Got()) + 4 return true case 256 + objabi.RelocType(elf.R_ARM_GOTOFF): // R_ARM_GOTOFF32 @@ -182,7 +182,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) + r.Add = int64(braddoff(int32(r.Add), targ.Plt()/4)) } return true @@ -216,7 +216,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) + r.Add = int64(braddoff(int32(r.Add), targ.Plt()/4)) } return true @@ -235,7 +235,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { } addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) return true case objabi.R_ADDR: @@ -411,6 +411,35 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, se return true } +func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { + rs := r.Xsym + + if rs.Dynid < 0 { + ld.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) + return false + } + + out.Write32(uint32(sectoff)) + out.Write32(uint32(rs.Dynid)) + + var v uint32 + switch r.Type { + default: + // unsupported relocation type + return false + + case objabi.R_DWARFSECREF: + v = ld.IMAGE_REL_ARM_SECREL + + case objabi.R_ADDR: + v = ld.IMAGE_REL_ARM_ADDR32 + } + + out.Write16(uint16(v)) + + return true +} + // sign extend a 24-bit integer func signext24(x int64) int32 { return (int32(x) << 8) >> 8 @@ -568,7 +597,7 @@ func gentrampdyn(arch *sys.Arch, tramp, target *sym.Symbol, offset int64) { } } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, 
s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { switch r.Type { case objabi.R_CALLARM: @@ -602,20 +631,17 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { ld.Errorf(s, "direct call too far %d", r.Xadd/4) } - *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4)))) - return true + return int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4)))), true } - return false + return -1, false } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true // The following three arch specific relocations are only for generation of // Linux/ARM ELF's PLT entry (3 assembler instruction) @@ -623,16 +649,11 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { if ld.Symaddr(ctxt.Syms.Lookup(".got.plt", 0)) < ld.Symaddr(ctxt.Syms.Lookup(".plt", 0)) { ld.Errorf(s, ".got.plt should be placed after .plt section.") } - *val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add)) >> 20)) - return true + return 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add)) >> 20)), true case objabi.R_PLT1: // add ip, ip, #0xYY000 - *val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+4)) >> 12)) - - return true + return 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+4)) >> 12)), true case objabi.R_PLT2: // ldr pc, [ip, #0xZZZ]! 
- *val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+8))) - - return true + return 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+8))), true case objabi.R_CALLARM: // bl XXXXXX or b YYYYYY // r.Add is the instruction // low 24-bit encodes the target address @@ -640,12 +661,10 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { if t > 0x7fffff || t < -0x800000 { ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t) } - *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&t))) - - return true + return int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&t))), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { @@ -659,7 +678,7 @@ func addpltreloc(ctxt *ld.Link, plt *sym.Symbol, got *sym.Symbol, s *sym.Symbol, r.Off = int32(plt.Size) r.Siz = 4 r.Type = typ - r.Add = int64(s.Got) - 8 + r.Add = int64(s.Got()) - 8 plt.Attr |= sym.AttrReachable plt.Size += 4 @@ -667,7 +686,7 @@ func addpltreloc(ctxt *ld.Link, plt *sym.Symbol, got *sym.Symbol, s *sym.Symbol, } func addpltsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Plt >= 0 { + if s.Plt() >= 0 { return } @@ -682,7 +701,7 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { } // .got entry - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) // In theory, all GOT should point to the first PLT entry, // Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's @@ -690,14 +709,14 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { got.AddAddrPlus(ctxt.Arch, plt, 0) // .plt entry, this depends on the .got entry - s.Plt = int32(plt.Size) + s.SetPlt(int32(plt.Size)) addpltreloc(ctxt, plt, got, s, objabi.R_PLT0) // add lr, pc, #0xXX00000 addpltreloc(ctxt, plt, got, s, objabi.R_PLT1) // add lr, lr, #0xYY000 addpltreloc(ctxt, plt, got, s, objabi.R_PLT2) // ldr 
pc, [lr, #0xZZZ]! // rel - rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got())) rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_ARM_JUMP_SLOT))) } else { @@ -706,12 +725,12 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { } func addgotsyminternal(ctxt *ld.Link, s *sym.Symbol) { - if s.Got >= 0 { + if s.Got() >= 0 { return } got := ctxt.Syms.Lookup(".got", 0) - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) got.AddAddrPlus(ctxt.Arch, s, 0) @@ -722,18 +741,18 @@ func addgotsyminternal(ctxt *ld.Link, s *sym.Symbol) { } func addgotsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Got >= 0 { + if s.Got() >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) got.AddUint32(ctxt.Arch, 0) if ctxt.IsELF { rel := ctxt.Syms.Lookup(".rel", 0) - rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got())) rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_ARM_GLOB_DAT))) } else { ld.Errorf(s, "addgotsym: unsupported binary format") @@ -809,6 +828,10 @@ func asmb(ctxt *ld.Link) { case objabi.Hdarwin: symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink)) + + case objabi.Hwindows: + symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) + symo = uint32(ld.Rnd(int64(symo), ld.PEFILEALIGN)) } ctxt.Out.SeekSet(int64(symo)) @@ -838,6 +861,11 @@ func asmb(ctxt *ld.Link) { ctxt.Out.Flush() } + case objabi.Hwindows: + if ctxt.Debugvlog != 0 { + ctxt.Logf("%5.2f dwarf\n", ld.Cputime()) + } + case objabi.Hdarwin: if ctxt.LinkMode == ld.LinkExternal { ld.Machoemitreloc(ctxt) @@ -870,6 +898,9 @@ func asmb(ctxt *ld.Link) { case objabi.Hdarwin: ld.Asmbmacho(ctxt) + + case objabi.Hwindows: + ld.Asmbpe(ctxt) } ctxt.Out.Flush() diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go index 
788be68522fa5..77716bb954dfd 100644 --- a/src/cmd/link/internal/arm/obj.go +++ b/src/cmd/link/internal/arm/obj.go @@ -57,6 +57,7 @@ func Init() (*sys.Arch, ld.Arch) { Elfsetupplt: elfsetupplt, Gentext: gentext, Machoreloc1: machoreloc1, + PEreloc1: pereloc1, Linuxdynld: "/lib/ld-linux.so.3", // 2 for OABI, 3 for EABI Freebsddynld: "/usr/libexec/ld-elf.so.1", @@ -130,6 +131,10 @@ func archinit(ctxt *ld.Link) { if *ld.FlagRound == -1 { *ld.FlagRound = 4096 } + + case objabi.Hwindows: /* PE executable */ + // ld.HEADR, ld.FlagTextAddr, ld.FlagDataAddr and ld.FlagRound are set in ld.Peinit + return } if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 { diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 5b3b9e588049b..5ba038d14772c 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -234,19 +234,19 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, se return true } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return false + return val, false case objabi.R_ARM64_GOTPCREL: var o1, o2 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o1 = uint32(*val >> 32) - o2 = uint32(*val) + o1 = uint32(val >> 32) + o2 = uint32(val) } else { - o1 = uint32(*val) - o2 = uint32(*val >> 32) + o1 = uint32(val) + o2 = uint32(val >> 32) } // Any relocation against a function symbol is redirected to // be against a local symbol instead (see putelfsym in @@ -256,7 +256,7 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { // (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So // we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp; // add + R_ADDRARM64. 
- if !(r.Sym.Version != 0 || r.Sym.Attr.VisibilityHidden() || r.Sym.Attr.Local()) && r.Sym.Type == sym.STEXT && ctxt.DynlinkingGo() { + if !(r.Sym.IsFileLocal() || r.Sym.Attr.VisibilityHidden() || r.Sym.Attr.Local()) && r.Sym.Type == sym.STEXT && ctxt.DynlinkingGo() { if o2&0xffc00000 != 0xf9400000 { ld.Errorf(s, "R_ARM64_GOTPCREL against unexpected instruction %x", o2) } @@ -264,9 +264,9 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { r.Type = objabi.R_ADDRARM64 } if ctxt.Arch.ByteOrder == binary.BigEndian { - *val = int64(o1)<<32 | int64(o2) + val = int64(o1)<<32 | int64(o2) } else { - *val = int64(o2)<<32 | int64(o1) + val = int64(o2)<<32 | int64(o1) } fallthrough case objabi.R_ADDRARM64: @@ -294,11 +294,11 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { var o0, o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o0 = uint32(*val >> 32) - o1 = uint32(*val) + o0 = uint32(val >> 32) + o1 = uint32(val) } else { - o0 = uint32(*val) - o1 = uint32(*val >> 32) + o0 = uint32(val) + o1 = uint32(val >> 32) } // Mach-O wants the addend to be encoded in the instruction // Note that although Mach-O supports ARM64_RELOC_ADDEND, it @@ -311,30 +311,28 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { // when laid out, the instruction order must always be o1, o2. 
if ctxt.Arch.ByteOrder == binary.BigEndian { - *val = int64(o0)<<32 | int64(o1) + val = int64(o0)<<32 | int64(o1) } else { - *val = int64(o1)<<32 | int64(o0) + val = int64(o1)<<32 | int64(o0) } } - return true + return val, true case objabi.R_CALLARM64, objabi.R_ARM64_TLS_LE, objabi.R_ARM64_TLS_IE: r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return true + return val, true } } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true case objabi.R_ADDRARM64: t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff) if t >= 1<<32 || t < -1<<32 { @@ -344,11 +342,11 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { var o0, o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o0 = uint32(*val >> 32) - o1 = uint32(*val) + o0 = uint32(val >> 32) + o1 = uint32(val) } else { - o0 = uint32(*val) - o1 = uint32(*val >> 32) + o0 = uint32(val) + o1 = uint32(val >> 32) } o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5) @@ -356,11 +354,9 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { // when laid out, the instruction order must always be o1, o2. 
if ctxt.Arch.ByteOrder == binary.BigEndian { - *val = int64(o0)<<32 | int64(o1) - } else { - *val = int64(o1)<<32 | int64(o0) + return int64(o0)<<32 | int64(o1), true } - return true + return int64(o1)<<32 | int64(o0), true case objabi.R_ARM64_TLS_LE: r.Done = false if ctxt.HeadType != objabi.Hlinux { @@ -372,18 +368,16 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { if v < 0 || v >= 32678 { ld.Errorf(s, "TLS offset out of range %d", v) } - *val |= v << 5 - return true + return val | (v << 5), true case objabi.R_CALLARM64: t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off)) if t >= 1<<27 || t < -1<<27 { ld.Errorf(s, "program too large, call relocation distance = %d", t) } - *val |= (t >> 2) & 0x03ffffff - return true + return val | ((t >> 2) & 0x03ffffff), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { diff --git a/src/cmd/link/internal/ld/ar.go b/src/cmd/link/internal/ld/ar.go index ae7554c929609..f41e30d6e73c4 100644 --- a/src/cmd/link/internal/ld/ar.go +++ b/src/cmd/link/internal/ld/ar.go @@ -105,7 +105,8 @@ func hostArchive(ctxt *Link, name string) { for any { var load []uint64 for _, s := range ctxt.Syms.Allsym { - for _, r := range s.R { + for i := range s.R { + r := &s.R[i] // Copying sym.Reloc has measurable impact on performance if r.Sym != nil && r.Sym.Type == sym.SXREF { if off := armap[r.Sym.Name]; off != 0 && !loaded[off] { load = append(load, off) diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 18fbea62eef98..60b6491859574 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -60,7 +60,7 @@ func (mode *BuildMode) Set(s string) error { } case "windows": switch objabi.GOARCH { - case "amd64", "386": + case "amd64", "386", "arm": default: return badmode() } @@ -199,8 +199,8 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { // When the race 
flag is set, the LLVM tsan relocatable file is linked // into the final binary, which means external linking is required because // internal linking does not support it. - if *flagRace && ctxt.Arch.InFamily(sys.PPC64) { - return true, "race on ppc64le" + if *flagRace && ctxt.Arch.InFamily(sys.PPC64, sys.ARM64) { + return true, "race on " + objabi.GOARCH } // Some build modes require work the internal linker cannot do (yet). @@ -248,7 +248,7 @@ func determineLinkMode(ctxt *Link) { ctxt.LinkMode = LinkInternal case "1": if objabi.GOARCH == "ppc64" { - Exitf("external linking requested via GO_EXTLINK_ENABLED but not supported for linux/ppc64") + Exitf("external linking requested via GO_EXTLINK_ENABLED but not supported for %s/ppc64", objabi.GOOS) } ctxt.LinkMode = LinkExternal default: @@ -262,7 +262,7 @@ func determineLinkMode(ctxt *Link) { ctxt.LinkMode = LinkInternal } if objabi.GOARCH == "ppc64" && ctxt.LinkMode == LinkExternal { - Exitf("external linking is not supported for linux/ppc64") + Exitf("external linking is not supported for %s/ppc64", objabi.GOOS) } } case LinkInternal: @@ -271,7 +271,7 @@ func determineLinkMode(ctxt *Link) { } case LinkExternal: if objabi.GOARCH == "ppc64" { - Exitf("external linking not supported for linux/ppc64") + Exitf("external linking not supported for %s/ppc64", objabi.GOOS) } } } diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 0ae93f1018555..e0fad1acfdc90 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -48,7 +48,7 @@ import ( "sync" ) -// isRuntimeDepPkg returns whether pkg is the runtime package or its dependency +// isRuntimeDepPkg reports whether pkg is the runtime package or its dependency func isRuntimeDepPkg(pkg string) bool { switch pkg { case "runtime", @@ -111,7 +111,20 @@ func trampoline(ctxt *Link, s *sym.Symbol) { } -// resolve relocations in s. +// relocsym resolve relocations in "s". 
The main loop walks through +// the list of relocations attached to "s" and resolves them where +// applicable. Relocations are often architecture-specific, requiring +// calls into the 'archreloc' and/or 'archrelocvariant' functions for +// the architecture. When external linking is in effect, it may not be +// possible to completely resolve the address/offset for a symbol, in +// which case the goal is to lay the groundwork for turning a given +// relocation into an external reloc (to be applied by the external +// linker). For more on how relocations work in general, see +// +// "Linkers and Loaders", by John R. Levine (Morgan Kaufmann, 1999), ch. 7 +// +// This is a performance-critical function for the linker; be careful +// to avoid introducing unnecessary allocations in the main loop. func relocsym(ctxt *Link, s *sym.Symbol) { for ri := int32(0); ri < int32(len(s.R)); ri++ { r := &s.R[ri] @@ -162,8 +175,8 @@ func relocsym(ctxt *Link, s *sym.Symbol) { } // We need to be able to reference dynimport symbols when linking against - // shared libraries, and Solaris and Darwin need it always - if ctxt.HeadType != objabi.Hsolaris && ctxt.HeadType != objabi.Hdarwin && r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT && !ctxt.DynlinkingGo() && !r.Sym.Attr.SubSymbol() { + // shared libraries, and Solaris, Darwin and AIX need it always + if ctxt.HeadType != objabi.Hsolaris && ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Haix && r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT && !ctxt.DynlinkingGo() && !r.Sym.Attr.SubSymbol() { if !(ctxt.Arch.Family == sys.PPC64 && ctxt.LinkMode == LinkExternal && r.Sym.Name == ".TOC.") { Errorf(s, "unhandled relocation for %s (type %d (%s) rtype %d (%s))", r.Sym.Name, r.Sym.Type, r.Sym.Type, r.Type, sym.RelocName(ctxt.Arch, r.Type)) } @@ -198,7 +211,9 @@ func relocsym(ctxt *Link, s *sym.Symbol) { case 8: o = int64(ctxt.Arch.ByteOrder.Uint64(s.P[off:])) } - if !thearch.Archreloc(ctxt, r, s, &o) { + if offset, ok := 
thearch.Archreloc(ctxt, r, s, o); ok { + o = offset + } else { Errorf(s, "unknown reloc to %v: %d (%s)", r.Sym.Name, r.Type, sym.RelocName(ctxt.Arch, r.Type)) } case objabi.R_TLS_LE: @@ -300,6 +315,21 @@ func relocsym(ctxt *Link, s *sym.Symbol) { break } + // On AIX, a second relocation must be done by the loader, + // as section addresses can change once loaded. + // The "default" symbol address is still needed by the loader so + // the current relocation can't be skipped. + if ctxt.HeadType == objabi.Haix && r.Sym.Type != sym.SDYNIMPORT { + // It's not possible to make a loader relocation in a + // symbol which is not inside .data section. + // FIXME: It should be forbidden to have R_ADDR from a + // symbol which isn't in .data. However, as .text has the + // same address once loaded, this is possible. + if s.Sect.Seg == &Segdata { + Xcoffadddynrel(ctxt, s, r) + } + } + o = Symaddr(r.Sym) + r.Add // On amd64, 4-byte offsets will be sign-extended, so it is impossible to @@ -514,11 +544,7 @@ func (ctxt *Link) reloc() { } } -func windynrelocsym(ctxt *Link, s *sym.Symbol) { - rel := ctxt.Syms.Lookup(".rel", 0) - if s == rel { - return - } +func windynrelocsym(ctxt *Link, rel, s *sym.Symbol) { for ri := range s.R { r := &s.R[ri] targ := r.Sym @@ -531,40 +557,61 @@ func windynrelocsym(ctxt *Link, s *sym.Symbol) { } Errorf(s, "dynamic relocation to unreachable symbol %s", targ.Name) } - if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files. - targ.Plt = int32(rel.Size) + if r.Sym.Plt() == -2 && r.Sym.Got() != -2 { // make dynimport JMP table for PE object files. 
+ targ.SetPlt(int32(rel.Size)) r.Sym = rel - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) // jmp *addr - if ctxt.Arch.Family == sys.I386 { + switch ctxt.Arch.Family { + default: + Errorf(s, "unsupported arch %v", ctxt.Arch.Family) + return + case sys.I386: rel.AddUint8(0xff) rel.AddUint8(0x25) rel.AddAddr(ctxt.Arch, targ) rel.AddUint8(0x90) rel.AddUint8(0x90) - } else { + case sys.AMD64: rel.AddUint8(0xff) rel.AddUint8(0x24) rel.AddUint8(0x25) rel.AddAddrPlus4(targ, 0) rel.AddUint8(0x90) } - } else if r.Sym.Plt >= 0 { + } else if r.Sym.Plt() >= 0 { r.Sym = rel - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) } } } -func dynrelocsym(ctxt *Link, s *sym.Symbol) { - if ctxt.HeadType == objabi.Hwindows { - if ctxt.LinkMode == LinkInternal { - windynrelocsym(ctxt, s) - } +// windynrelocsyms generates jump table to C library functions that will be +// added later. windynrelocsyms writes the table into .rel symbol. +func (ctxt *Link) windynrelocsyms() { + if !(ctxt.HeadType == objabi.Hwindows && iscgo && ctxt.LinkMode == LinkInternal) { return } + if ctxt.Debugvlog != 0 { + ctxt.Logf("%5.2f windynrelocsyms\n", Cputime()) + } + + /* relocation table */ + rel := ctxt.Syms.Lookup(".rel", 0) + rel.Attr |= sym.AttrReachable + rel.Type = sym.STEXT + ctxt.Textp = append(ctxt.Textp, rel) + + for _, s := range ctxt.Textp { + if s == rel { + continue + } + windynrelocsym(ctxt, rel, s) + } +} +func dynrelocsym(ctxt *Link, s *sym.Symbol) { for ri := range s.R { r := &s.R[ri] if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal { @@ -574,6 +621,7 @@ func dynrelocsym(ctxt *Link, s *sym.Symbol) { thearch.Adddynrel(ctxt, s, r) continue } + if r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT || r.Type >= 256 { if r.Sym != nil && !r.Sym.Attr.Reachable() { Errorf(s, "dynamic relocation to unreachable symbol %s", r.Sym.Name) @@ -586,9 +634,12 @@ func dynrelocsym(ctxt *Link, s *sym.Symbol) { } func dynreloc(ctxt *Link, data *[sym.SXREF][]*sym.Symbol) { + if ctxt.HeadType 
== objabi.Hwindows { + return + } // -d suppresses dynamic loader format, so we may as well not // compute these sections or mark their symbols as reachable. - if *FlagD && ctxt.HeadType != objabi.Hwindows { + if *FlagD { return } if ctxt.Debugvlog != 0 { @@ -770,7 +821,8 @@ func Datblk(ctxt *Link, addr int64, size int64) { if ctxt.LinkMode != LinkExternal { continue } - for _, r := range sym.R { + for i := range sym.R { + r := &sym.R[i] // Copying sym.Reloc has measurable impact on performance rsname := "" if r.Sym != nil { rsname = r.Sym.Name @@ -1261,7 +1313,7 @@ func (ctxt *Link) dodata() { case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: hasinitarr = true } - if hasinitarr { + if hasinitarr && len(data[sym.SINITARR]) > 0 { sect := addsection(ctxt.Arch, &Segdata, ".init_array", 06) sect.Align = dataMaxAlign[sym.SINITARR] datsize = Rnd(datsize, int64(sect.Align)) @@ -1293,6 +1345,14 @@ func (ctxt *Link) dodata() { gc.AddSym(s) datsize += s.Size } + // On AIX, TOC entries must be the last of .data + for _, s := range data[sym.SXCOFFTOC] { + s.Sect = sect + s.Type = sym.SDATA + datsize = aligndatsize(datsize, s) + s.Value = int64(uint64(datsize) - sect.Vaddr) + datsize += s.Size + } checkdatsize(ctxt, datsize, sym.SDATA) sect.Length = uint64(datsize) - sect.Vaddr gc.End(int64(sect.Length)) @@ -1582,7 +1642,7 @@ func (ctxt *Link) dodata() { datap = append(datap, data[symn]...) } - dwarfgeneratedebugsyms(ctxt) + dwarfGenerateDebugSyms(ctxt) var i int for ; i < len(dwarfp); i++ { @@ -1976,6 +2036,11 @@ func (ctxt *Link) address() []*sym.Segment { } va = uint64(Rnd(int64(va), int64(*FlagRound))) + if ctxt.HeadType == objabi.Haix { + // Data sections are moved to an unreachable segment + // to ensure that they are position-independent. 
+ va += uint64(XCOFFDATABASE) - uint64(XCOFFTEXTBASE) + } order = append(order, &Segdata) Segdata.Rwx = 06 Segdata.Vaddr = va diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 540f4068cb8ea..8f582174c549c 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -60,8 +60,8 @@ func deadcode(ctxt *Link) { d.init() d.flood() - callSym := ctxt.Syms.ROLookup("reflect.Value.Call", 0) - methSym := ctxt.Syms.ROLookup("reflect.Value.Method", 0) + callSym := ctxt.Syms.ROLookup("reflect.Value.Call", sym.SymVerABIInternal) + methSym := ctxt.Syms.ROLookup("reflect.Value.Method", sym.SymVerABIInternal) reflectSeen := false if ctxt.DynlinkingGo() { @@ -245,8 +245,8 @@ func (d *deadcodepass) init() { // but we do keep the symbols it refers to. exports := d.ctxt.Syms.ROLookup("go.plugin.exports", 0) if exports != nil { - for _, r := range exports.R { - d.mark(r.Sym, nil) + for i := range exports.R { + d.mark(exports.R[i].Sym, nil) } } } @@ -257,7 +257,10 @@ func (d *deadcodepass) init() { } for _, name := range names { + // Mark symbol as an data/ABI0 symbol. d.mark(d.ctxt.Syms.ROLookup(name, 0), nil) + // Also mark any Go functions (internal ABI). + d.mark(d.ctxt.Syms.ROLookup(name, sym.SymVerABIInternal), nil) } } @@ -308,6 +311,11 @@ func (d *deadcodepass) flood() { // reachable. continue } + if r.Sym.Type == sym.SABIALIAS { + // Patch this relocation through the + // ABI alias before marking. 
+ r.Sym = resolveABIAlias(r.Sym) + } if r.Type != objabi.R_METHODOFF { d.mark(r.Sym, s) continue diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index ae6f90b079c5c..a150306df9174 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -5,7 +5,7 @@ // TODO/NICETOHAVE: // - eliminate DW_CLS_ if not used // - package info in compilation units -// - assign global variables and types to their packages +// - assign types to their packages // - gdb uses c syntax, meaning clumsy quoting is needed for go identifiers. eg // ptype struct '[]uint8' and qualifiers need to be quoted away // - file:line info for variables @@ -21,6 +21,7 @@ import ( "cmd/link/internal/sym" "fmt" "log" + "sort" "strings" ) @@ -66,7 +67,12 @@ func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64 r.Add = ofs } -func (c dwctxt) AddDWARFSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { +func (c dwctxt) AddDWARFAddrSectionOffset(s dwarf.Sym, t interface{}, ofs int64) { + size := 4 + if isDwarf64(c.linkctxt) { + size = 8 + } + c.AddSectionOffset(s, size, t, ofs) ls := s.(*sym.Symbol) ls.R[len(ls.R)-1].Type = objabi.R_DWARFSECREF @@ -94,6 +100,10 @@ func (c dwctxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets [] panic("should be used only in the compiler") } +func isDwarf64(ctxt *Link) bool { + return ctxt.HeadType == objabi.Haix +} + var gdbscript string var dwarfp []*sym.Symbol @@ -105,15 +115,8 @@ func writeabbrev(ctxt *Link) *sym.Symbol { return s } -/* - * Root DIEs for compilation units, types and global variables. 
- */ -var dwroot dwarf.DWDie - var dwtypes dwarf.DWDie -var dwglobals dwarf.DWDie - func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data interface{}) *dwarf.DWAttr { a := new(dwarf.DWAttr) a.Link = die.Attr @@ -339,19 +342,39 @@ func lookupOrDiag(ctxt *Link, n string) *sym.Symbol { return s } -func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) { +// dwarfFuncSym looks up a DWARF metadata symbol for function symbol s. +// If the symbol does not exist, it creates it if create is true, +// or returns nil otherwise. +func dwarfFuncSym(ctxt *Link, s *sym.Symbol, meta string, create bool) *sym.Symbol { + // All function ABIs use symbol version 0 for the DWARF data. + // + // TODO(austin): It may be useful to have DWARF info for ABI + // wrappers, in which case we may want these versions to + // align. Better yet, replace these name lookups with a + // general way to attach metadata to a symbol. + ver := 0 + if s.IsFileLocal() { + ver = int(s.Version) + } + if create { + return ctxt.Syms.Lookup(meta+s.Name, ver) + } + return ctxt.Syms.ROLookup(meta+s.Name, ver) +} + +func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) *dwarf.DWDie { // Only emit typedefs for real names. if strings.HasPrefix(name, "map[") { - return + return nil } if strings.HasPrefix(name, "struct {") { - return + return nil } if strings.HasPrefix(name, "chan ") { - return + return nil } if name[0] == '[' || name[0] == '*' { - return + return nil } if def == nil { Errorf(nil, "dwarf: bad def in dotypedef") @@ -369,6 +392,8 @@ func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) { die := newdie(ctxt, parent, dwarf.DW_ABRV_TYPEDECL, name, 0) newrefattr(die, dwarf.DW_AT_type, s) + + return die } // Define gotype, for composite ones recurse into constituents. 
@@ -398,7 +423,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { kind := decodetypeKind(ctxt.Arch, gotype) bytesize := decodetypeSize(ctxt.Arch, gotype) - var die *dwarf.DWDie + var die, typedefdie *dwarf.DWDie switch kind { case objabi.KindBool: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0) @@ -438,7 +463,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { case objabi.KindArray: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_ARRAYTYPE, name, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) s := decodetypeArrayElem(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s)) @@ -460,7 +485,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { case objabi.KindFunc: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_FUNCTYPE, name, 0) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) nfields := decodetypeFuncInCount(ctxt.Arch, gotype) for i := 0; i < nfields; i++ { s := decodetypeFuncInType(ctxt.Arch, gotype, i) @@ -480,7 +505,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { case objabi.KindInterface: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_IFACETYPE, name, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) nfields := int(decodetypeIfaceMethodCount(ctxt.Arch, gotype)) var s *sym.Symbol if nfields == 0 { @@ -502,13 +527,13 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { case objabi.KindPtr: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_PTRTYPE, name, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) s := decodetypePtrElem(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s)) case objabi.KindSlice: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_SLICETYPE, 
name, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) s := decodetypeArrayElem(ctxt.Arch, gotype) elem := defgotype(ctxt, s) @@ -520,7 +545,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { case objabi.KindStruct: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_STRUCTTYPE, name, 0) - dotypedef(ctxt, &dwtypes, name, die) + typedefdie = dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) nfields := decodetypeStructFieldCount(ctxt.Arch, gotype) for i := 0; i < nfields; i++ { @@ -556,6 +581,9 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { prototypedies[gotype.Name] = die } + if typedefdie != nil { + return typedefdie + } return die } @@ -828,6 +856,20 @@ func synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { } } +func dwarfDefineGlobal(ctxt *Link, s *sym.Symbol, str string, v int64, gotype *sym.Symbol) { + lib := s.Lib + if lib == nil { + lib = ctxt.LibraryByPkg["runtime"] + } + dv := newdie(ctxt, ctxt.compUnitByPackage[lib].dwinfo, dwarf.DW_ABRV_VARIABLE, str, int(s.Version)) + newabslocexprattr(dv, v, s) + if !s.IsFileLocal() { + newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0) + } + dt := defgotype(ctxt, gotype) + newrefattr(dv, dwarf.DW_AT_type, dt) +} + // For use with pass.c::genasmsym func defdwsymb(ctxt *Link, s *sym.Symbol, str string, t SymbolType, v int64, gotype *sym.Symbol) { if strings.HasPrefix(str, "go.string.") { @@ -837,32 +879,53 @@ func defdwsymb(ctxt *Link, s *sym.Symbol, str string, t SymbolType, v int64, got return } - if strings.HasPrefix(str, "type.") && str != "type.*" && !strings.HasPrefix(str, "type..") { - defgotype(ctxt, s) - return - } - - var dv *dwarf.DWDie - - var dt *sym.Symbol switch t { - default: - return - case DataSym, BSSSym: - dv = newdie(ctxt, &dwglobals, dwarf.DW_ABRV_VARIABLE, str, int(s.Version)) - 
newabslocexprattr(dv, v, s) - if s.Version == 0 { - newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0) + switch s.Type { + case sym.SDATA, sym.SNOPTRDATA, sym.STYPE, sym.SBSS, sym.SNOPTRBSS, sym.STLSBSS: + // ok + case sym.SRODATA: + if gotype != nil { + defgotype(ctxt, gotype) + } + return + default: + return } - fallthrough + if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) { + return + } + dwarfDefineGlobal(ctxt, s, str, v, gotype) case AutoSym, ParamSym, DeletedAutoSym: - dt = defgotype(ctxt, gotype) + defgotype(ctxt, gotype) } +} - if dv != nil { - newrefattr(dv, dwarf.DW_AT_type, dt) +// createUnitLength creates the initial length field with value v and update +// offset of unit_length if needed. +func createUnitLength(ctxt *Link, s *sym.Symbol, v uint64) { + if isDwarf64(ctxt) { + s.AddUint32(ctxt.Arch, 0xFFFFFFFF) + } + addDwarfAddrField(ctxt, s, v) +} + +// addDwarfAddrField adds a DWARF field in DWARF 64bits or 32bits. +func addDwarfAddrField(ctxt *Link, s *sym.Symbol, v uint64) { + if isDwarf64(ctxt) { + s.AddUint(ctxt.Arch, v) + } else { + s.AddUint32(ctxt.Arch, uint32(v)) + } +} + +// addDwarfAddrRef adds a DWARF pointer in DWARF 64bits or 32bits. +func addDwarfAddrRef(ctxt *Link, s *sym.Symbol, t *sym.Symbol) { + if isDwarf64(ctxt) { + adddwarfref(ctxt, s, t, 8) + } else { + adddwarfref(ctxt, s, t, 4) } } @@ -875,27 +938,17 @@ type compilationUnit struct { dwinfo *dwarf.DWDie // CU root DIE funcDIEs []*sym.Symbol // Function DIE subtrees absFnDIEs []*sym.Symbol // Abstract function DIE subtrees + rangeSyms []*sym.Symbol // symbols for debug_range } -// getCompilationUnits divides the symbols in ctxt.Textp by package. -func getCompilationUnits(ctxt *Link) []*compilationUnit { - units := []*compilationUnit{} - index := make(map[*sym.Library]*compilationUnit) +// calcCompUnitRanges calculates the PC ranges of the compilation units. 
+func calcCompUnitRanges(ctxt *Link) { var prevUnit *compilationUnit for _, s := range ctxt.Textp { if s.FuncInfo == nil { continue } - unit := index[s.Lib] - if unit == nil { - unit = &compilationUnit{lib: s.Lib} - if s := ctxt.Syms.ROLookup(dwarf.ConstInfoPrefix+s.Lib.Pkg, 0); s != nil { - importInfoSymbol(ctxt, s) - unit.consts = s - } - units = append(units, unit) - index[s.Lib] = unit - } + unit := ctxt.compUnitByPackage[s.Lib] // Update PC ranges. // @@ -910,13 +963,13 @@ func getCompilationUnits(ctxt *Link) []*compilationUnit { } unit.pcs[len(unit.pcs)-1].End = s.Value - unit.lib.Textp[0].Value + s.Size } - return units } -func movetomodule(parent *dwarf.DWDie) { - die := dwroot.Child.Child +func movetomodule(ctxt *Link, parent *dwarf.DWDie) { + runtimelib := ctxt.LibraryByPkg["runtime"] + die := ctxt.compUnitByPackage[runtimelib].dwinfo.Child if die == nil { - dwroot.Child.Child = parent.Child + ctxt.compUnitByPackage[runtimelib].dwinfo.Child = parent.Child return } for die.Link != nil { @@ -1061,63 +1114,16 @@ func getCompilationDir() string { func importInfoSymbol(ctxt *Link, dsym *sym.Symbol) { dsym.Attr |= sym.AttrNotInSymbolTable | sym.AttrReachable dsym.Type = sym.SDWARFINFO - for _, r := range dsym.R { + for i := range dsym.R { + r := &dsym.R[i] // Copying sym.Reloc has measurable impact on performance if r.Type == objabi.R_DWARFSECREF && r.Sym.Size == 0 { - if ctxt.BuildMode == BuildModeShared { - // These type symbols may not be present in BuildModeShared. Skip. - continue - } n := nameFromDIESym(r.Sym) defgotype(ctxt, ctxt.Syms.Lookup("type."+n, 0)) } } } -// For the specified function, collect symbols corresponding to any -// "abstract" subprogram DIEs referenced. The first case of interest -// is a concrete subprogram DIE, which will refer to its corresponding -// abstract subprogram DIE, and then there can be references from a -// non-abstract subprogram DIE to the abstract subprogram DIEs for any -// functions inlined into this one. 
-// -// A given abstract subprogram DIE can be referenced in numerous -// places (even within the same DIE), so it is important to make sure -// it gets imported and added to the absfuncs lists only once. - -func collectAbstractFunctions(ctxt *Link, fn *sym.Symbol, dsym *sym.Symbol, absfuncs []*sym.Symbol) []*sym.Symbol { - - var newabsfns []*sym.Symbol - - // Walk the relocations on the primary subprogram DIE and look for - // references to abstract funcs. - for _, reloc := range dsym.R { - candsym := reloc.Sym - if reloc.Type != objabi.R_DWARFSECREF { - continue - } - if !strings.HasPrefix(candsym.Name, dwarf.InfoPrefix) { - continue - } - if !strings.HasSuffix(candsym.Name, dwarf.AbstractFuncSuffix) { - continue - } - if candsym.Attr.OnList() { - continue - } - candsym.Attr |= sym.AttrOnList - newabsfns = append(newabsfns, candsym) - } - - // Import any new symbols that have turned up. - for _, absdsym := range newabsfns { - importInfoSymbol(ctxt, absdsym) - absfuncs = append(absfuncs, absdsym) - } - - return absfuncs -} - -func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbol) (dwinfo *dwarf.DWDie, funcs []*sym.Symbol, absfuncs []*sym.Symbol) { +func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) { var dwarfctxt dwarf.Context = dwctxt{ctxt} is_stmt := uint8(1) // initially = recommended default_is_stmt = 1, tracks is_stmt toggles. @@ -1126,38 +1132,16 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo headerstart := int64(-1) headerend := int64(-1) - lang := dwarf.DW_LANG_Go - - dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, lib.Pkg, 0) - newattr(dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(lang), 0) - newattr(dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, ls.Size, ls) - // OS X linker requires compilation dir or absolute path in comp unit name to output debug info. 
- compDir := getCompilationDir() - // TODO: Make this be the actual compilation directory, not - // the linker directory. If we move CU construction into the - // compiler, this should happen naturally. - newattr(dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir) - producerExtra := ctxt.Syms.Lookup(dwarf.CUInfoPrefix+"producer."+lib.Pkg, 0) - producer := "Go cmd/compile " + objabi.Version - if len(producerExtra.P) > 0 { - // We put a semicolon before the flags to clearly - // separate them from the version, which can be long - // and have lots of weird things in it in development - // versions. We promise not to put a semicolon in the - // version, so it should be safe for readers to scan - // forward to the semicolon. - producer += "; " + string(producerExtra.P) - } - newattr(dwinfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer) + newattr(unit.dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, ls.Size, ls) // Write .debug_line Line Number Program Header (sec 6.2.4) // Fields marked with (*) must be changed for 64-bit dwarf unitLengthOffset := ls.Size - ls.AddUint32(ctxt.Arch, 0) // unit_length (*), filled in at end. + createUnitLength(ctxt, ls, 0) // unit_length (*), filled in at end unitstart = ls.Size ls.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) -- version 3 is incompatible w/ XCode 9.0's dsymutil, latest supported on OSX 10.12 as of 2018-05 headerLengthOffset := ls.Size - ls.AddUint32(ctxt.Arch, 0) // header_length (*), filled in at end. + addDwarfAddrField(ctxt, ls, 0) // header_length (*), filled in at end headerstart = ls.Size // cpos == unitstart + 4 + 2 + 4 @@ -1181,7 +1165,8 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo // Create the file table. fileNums maps from global file // indexes (created by numberfile) to CU-local indexes. fileNums := make(map[int]int) - for _, s := range textp { // textp has been dead-code-eliminated already. 
+ for _, s := range unit.lib.Textp { // textp has been dead-code-eliminated already. + dsym := dwarfFuncSym(ctxt, s, dwarf.InfoPrefix, true) for _, f := range s.FuncInfo.File { if _, ok := fileNums[int(f.Value)]; ok { continue @@ -1193,26 +1178,21 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo ls.AddUint8(0) ls.AddUint8(0) } - - // Look up the .debug_info sym for the function. We do this - // now so that we can walk the sym's relocations to discover - // files that aren't mentioned in S.FuncInfo.File (for - // example, files mentioned only in an inlined subroutine). - dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version)) - importInfoSymbol(ctxt, dsym) - for ri := range dsym.R { + for ri := 0; ri < len(dsym.R); ri++ { r := &dsym.R[ri] if r.Type != objabi.R_DWARFFILEREF { continue } - _, ok := fileNums[int(r.Sym.Value)] - if !ok { - fileNums[int(r.Sym.Value)] = len(fileNums) + 1 - Addstring(ls, r.Sym.Name) - ls.AddUint8(0) - ls.AddUint8(0) - ls.AddUint8(0) + // A file that is only mentioned in an inlined subroutine will appear + // as a R_DWARFFILEREF but not in s.FuncInfo.File + if _, ok := fileNums[int(r.Sym.Value)]; ok { + continue } + fileNums[int(r.Sym.Value)] = len(fileNums) + 1 + Addstring(ls, r.Sym.Name) + ls.AddUint8(0) + ls.AddUint8(0) + ls.AddUint8(0) } } @@ -1225,7 +1205,7 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo dwarf.Uleb128put(dwarfctxt, ls, 1+int64(ctxt.Arch.PtrSize)) ls.AddUint8(dwarf.DW_LNE_set_address) - s := textp[0] + s := unit.lib.Textp[0] pc := s.Value line := 1 file := 1 @@ -1234,23 +1214,19 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo var pcfile Pciter var pcline Pciter var pcstmt Pciter - for i, s := range textp { - dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version)) - funcs = append(funcs, dsym) - absfuncs = collectAbstractFunctions(ctxt, s, dsym, absfuncs) - + for i, s := range unit.lib.Textp { 
finddebugruntimepath(s) - isStmtsSym := ctxt.Syms.ROLookup(dwarf.IsStmtPrefix+s.Name, int(s.Version)) - pctostmtData := sym.Pcdata{P: isStmtsSym.P} - pciterinit(ctxt, &pcfile, &s.FuncInfo.Pcfile) pciterinit(ctxt, &pcline, &s.FuncInfo.Pcline) - pciterinit(ctxt, &pcstmt, &pctostmtData) - if pcstmt.done != 0 { + isStmtSym := dwarfFuncSym(ctxt, s, dwarf.IsStmtPrefix, false) + if isStmtSym != nil && len(isStmtSym.P) > 0 { + pciterinit(ctxt, &pcstmt, &sym.Pcdata{P: isStmtSym.P}) + } else { // Assembly files lack a pcstmt section, we assume that every instruction // is a valid statement. + pcstmt.done = 1 pcstmt.value = 1 } @@ -1310,7 +1286,7 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo pciternext(&pcline) } } - if is_stmt == 0 && i < len(textp)-1 { + if is_stmt == 0 && i < len(unit.lib.Textp)-1 { // If there is more than one function, ensure default value is established. is_stmt = 1 ls.AddUint8(uint8(dwarf.DW_LNS_negate_stmt)) @@ -1321,8 +1297,16 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo dwarf.Uleb128put(dwarfctxt, ls, 1) ls.AddUint8(dwarf.DW_LNE_end_sequence) - ls.SetUint32(ctxt.Arch, unitLengthOffset, uint32(ls.Size-unitstart)) - ls.SetUint32(ctxt.Arch, headerLengthOffset, uint32(headerend-headerstart)) + if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(".debug_line", unit.lib.String(), uint64(ls.Size-unitLengthOffset)) + } + if isDwarf64(ctxt) { + ls.SetUint(ctxt.Arch, unitLengthOffset+4, uint64(ls.Size-unitstart)) // +4 because of 0xFFFFFFFF + ls.SetUint(ctxt.Arch, headerLengthOffset, uint64(headerend-headerstart)) + } else { + ls.SetUint32(ctxt.Arch, unitLengthOffset, uint32(ls.Size-unitstart)) + ls.SetUint32(ctxt.Arch, headerLengthOffset, uint32(headerend-headerstart)) + } // Apply any R_DWARFFILEREF relocations, since we now know the // line table file indices for this compilation unit. 
Note that @@ -1331,7 +1315,7 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo // DIE flavors (ex: variables) then those DIEs would need to // be included below. missing := make(map[int]interface{}) - for _, f := range funcs { + for _, f := range unit.funcDIEs { for ri := range f.R { r := &f.R[ri] if r.Type != objabi.R_DWARFFILEREF { @@ -1361,8 +1345,6 @@ func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbo } } } - - return dwinfo, funcs, absfuncs } // writepcranges generates the DW_AT_ranges table for compilation unit cu. @@ -1414,8 +1396,8 @@ func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { if haslinkregister(ctxt) { cieReserve = 32 } - fs.AddUint32(ctxt.Arch, cieReserve) // initial length, must be multiple of thearch.ptrsize - fs.AddUint32(ctxt.Arch, 0xffffffff) // cid. + createUnitLength(ctxt, fs, uint64(cieReserve)) // initial length, must be multiple of thearch.ptrsize + addDwarfAddrField(ctxt, fs, 0xffffffff) // cid. 
fs.AddUint8(3) // dwarf version (appendix F) fs.AddUint8(0) // augmentation "" dwarf.Uleb128put(dwarfctxt, fs, 1) // code_alignment_factor @@ -1503,9 +1485,9 @@ func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { // ptrsize: address range fs.AddUint32(ctxt.Arch, uint32(4+2*ctxt.Arch.PtrSize+len(deltaBuf))) // length (excludes itself) if ctxt.LinkMode == LinkExternal { - adddwarfref(ctxt, fs, fs, 4) + addDwarfAddrRef(ctxt, fs, fs) } else { - fs.AddUint32(ctxt.Arch, 0) // CIE offset + addDwarfAddrField(ctxt, fs, 0) // CIE offset } fs.AddAddr(ctxt.Arch, s) fs.AddUintXX(ctxt.Arch, uint64(s.Size), ctxt.Arch.PtrSize) // address range @@ -1514,24 +1496,6 @@ func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { return syms } -func writeranges(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { - for _, s := range ctxt.Textp { - rangeSym := ctxt.Syms.ROLookup(dwarf.RangePrefix+s.Name, int(s.Version)) - if rangeSym == nil || rangeSym.Size == 0 { - continue - } - rangeSym.Attr |= sym.AttrReachable | sym.AttrNotInSymbolTable - rangeSym.Type = sym.SDWARFRANGE - // LLVM doesn't support base address entries. Strip them out so LLDB and dsymutil don't get confused. - if ctxt.HeadType == objabi.Hdarwin { - fn := ctxt.Syms.ROLookup(dwarf.InfoPrefix+s.Name, int(s.Version)) - removeDwarfAddrListBaseAddress(ctxt, fn, rangeSym, false) - } - syms = append(syms, rangeSym) - } - return syms -} - /* * Walk DWarfDebugInfoEntries, and emit .debug_info */ @@ -1547,24 +1511,22 @@ func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevs var dwarfctxt dwarf.Context = dwctxt{ctxt} - // Re-index per-package information by its CU die. 
- unitByDIE := make(map[*dwarf.DWDie]*compilationUnit) for _, u := range units { - unitByDIE[u.dwinfo] = u - } - - for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link { + compunit := u.dwinfo s := dtolsym(compunit.Sym) - u := unitByDIE[compunit] + + if len(u.lib.Textp) == 0 && u.dwinfo.Child == nil { + continue + } // Write .debug_info Compilation Unit Header (sec 7.5.1) // Fields marked with (*) must be changed for 64-bit dwarf // This must match COMPUNITHEADERSIZE above. - s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. - s.AddUint16(ctxt.Arch, 4) // dwarf version (appendix F) + createUnitLength(ctxt, s, 0) // unit_length (*), will be filled in later. + s.AddUint16(ctxt.Arch, 4) // dwarf version (appendix F) // debug_abbrev_offset (*) - adddwarfref(ctxt, s, abbrevsym, 4) + addDwarfAddrRef(ctxt, s, abbrevsym) s.AddUint8(uint8(ctxt.Arch.PtrSize)) // address_size @@ -1582,8 +1544,17 @@ func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevs for _, child := range cu { cusize += child.Size } - cusize -= 4 // exclude the length field. - s.SetUint32(ctxt.Arch, 0, uint32(cusize)) + // Save size for AIX symbol table. + if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(".debug_info", getPkgFromCUSym(s), uint64(cusize)) + } + if isDwarf64(ctxt) { + cusize -= 12 // exclude the length field. + s.SetUint(ctxt.Arch, 4, uint64(cusize)) // 4 because of 0XFFFFFFFF + } else { + cusize -= 4 // exclude the length field. + s.SetUint32(ctxt.Arch, 0, uint32(cusize)) + } // Leave a breadcrumb for writepub. This does not // appear in the DWARF output. 
newattr(compunit, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, cusize, 0) @@ -1615,15 +1586,19 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*s s.Type = sym.SDWARFSECT syms = append(syms, s) - for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link { + for _, u := range ctxt.compUnits { + if len(u.lib.Textp) == 0 && u.dwinfo.Child == nil { + continue + } + compunit := u.dwinfo sectionstart := s.Size culength := uint32(getattr(compunit, dwarf.DW_AT_byte_size).Value) + 4 // Write .debug_pubnames/types Header (sec 6.1.1) - s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. - s.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) - adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4) // debug_info_offset (of the Comp unit Header) - s.AddUint32(ctxt.Arch, culength) // debug_info_length + createUnitLength(ctxt, s, 0) // unit_length (*), will be filled in later. + s.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) + addDwarfAddrRef(ctxt, s, dtolsym(compunit.Sym)) // debug_info_offset (of the Comp unit Header) + addDwarfAddrField(ctxt, s, uint64(culength)) // debug_info_length for die := compunit.Child; die != nil; die = die.Link { if !ispub(die) { @@ -1634,19 +1609,31 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*s if die.Sym == nil { fmt.Println("Missing sym for ", name) } - adddwarfref(ctxt, s, dtolsym(die.Sym), 4) + addDwarfAddrRef(ctxt, s, dtolsym(die.Sym)) Addstring(s, name) } - s.AddUint32(ctxt.Arch, 0) + addDwarfAddrField(ctxt, s, 0) // Null offset - s.SetUint32(ctxt.Arch, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. + // On AIX, save the current size of this compilation unit. + if ctxt.HeadType == objabi.Haix { + saveDwsectCUSize(sname, getPkgFromCUSym(dtolsym(compunit.Sym)), uint64(s.Size-sectionstart)) + } + if isDwarf64(ctxt) { + s.SetUint(ctxt.Arch, sectionstart+4, uint64(s.Size-sectionstart)-12) // exclude the length field. 
+ } else { + s.SetUint32(ctxt.Arch, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. + } } return syms } func writegdbscript(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { + // TODO (aix): make it available + if ctxt.HeadType == objabi.Haix { + return syms + } if ctxt.LinkMode == LinkExternal && ctxt.HeadType == objabi.Hwindows && ctxt.BuildMode == BuildModeCArchive { // gcc on Windows places .debug_gdb_scripts in the wrong location, which // causes the program not to run. See https://golang.org/issue/20183 @@ -1670,24 +1657,15 @@ func writegdbscript(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { var prototypedies map[string]*dwarf.DWDie -/* - * This is the main entry point for generating dwarf. After emitting - * the mandatory debug_abbrev section, it calls writelines() to set up - * the per-compilation unit part of the DIE tree, while simultaneously - * emitting the debug_line section. When the final tree contains - * forward references, it will write the debug_info section in 2 - * passes. - * - */ -func dwarfgeneratedebugsyms(ctxt *Link) { +func dwarfEnabled(ctxt *Link) bool { if *FlagW { // disable dwarf - return + return false } if *FlagS && ctxt.HeadType != objabi.Hdarwin { - return + return false } if ctxt.HeadType == objabi.Hplan9 || ctxt.HeadType == objabi.Hjs { - return + return false } if ctxt.LinkMode == LinkExternal { @@ -1696,14 +1674,27 @@ func dwarfgeneratedebugsyms(ctxt *Link) { case ctxt.HeadType == objabi.Hdarwin: case ctxt.HeadType == objabi.Hwindows: default: - return + return false } } - if ctxt.Debugvlog != 0 { - ctxt.Logf("%5.2f dwarf\n", Cputime()) + return true +} + +// dwarfGenerateDebugInfo generated debug info entries for all types, +// variables and functions in the program. 
+// Along with dwarfGenerateDebugSyms they are the two main entry points into +// dwarf generation: dwarfGenerateDebugInfo does all the work that should be +// done before symbol names are mangled while dwarfgeneratedebugsyms does +// all the work that can only be done after addresses have been assigned to +// text symbols. +func dwarfGenerateDebugInfo(ctxt *Link) { + if !dwarfEnabled(ctxt) { + return } + ctxt.compUnitByPackage = make(map[*sym.Library]*compilationUnit) + // Forctxt.Diagnostic messages. newattr(&dwtypes, dwarf.DW_AT_name, dwarf.DW_CLS_STRING, int64(len("dwtypes")), "dwtypes") @@ -1746,12 +1737,109 @@ func dwarfgeneratedebugsyms(ctxt *Link) { defgotype(ctxt, lookupOrDiag(ctxt, typ)) } + // fake root DIE for compile unit DIEs + var dwroot dwarf.DWDie + + for _, lib := range ctxt.Library { + unit := &compilationUnit{lib: lib} + if s := ctxt.Syms.ROLookup(dwarf.ConstInfoPrefix+lib.Pkg, 0); s != nil { + importInfoSymbol(ctxt, s) + unit.consts = s + } + ctxt.compUnits = append(ctxt.compUnits, unit) + ctxt.compUnitByPackage[lib] = unit + + unit.dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, unit.lib.Pkg, 0) + newattr(unit.dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(dwarf.DW_LANG_Go), 0) + // OS X linker requires compilation dir or absolute path in comp unit name to output debug info. + compDir := getCompilationDir() + // TODO: Make this be the actual compilation directory, not + // the linker directory. If we move CU construction into the + // compiler, this should happen naturally. + newattr(unit.dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir) + producerExtra := ctxt.Syms.Lookup(dwarf.CUInfoPrefix+"producer."+unit.lib.Pkg, 0) + producer := "Go cmd/compile " + objabi.Version + if len(producerExtra.P) > 0 { + // We put a semicolon before the flags to clearly + // separate them from the version, which can be long + // and have lots of weird things in it in development + // versions. 
We promise not to put a semicolon in the + // version, so it should be safe for readers to scan + // forward to the semicolon. + producer += "; " + string(producerExtra.P) + } + newattr(unit.dwinfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer) + + if len(lib.Textp) == 0 { + unit.dwinfo.Abbrev = dwarf.DW_ABRV_COMPUNIT_TEXTLESS + } + + // Scan all functions in this compilation unit, create DIEs for all + // referenced types, create the file table for debug_line, find all + // referenced abstract functions. + // Collect all debug_range symbols in unit.rangeSyms + for _, s := range lib.Textp { // textp has been dead-code-eliminated already. + dsym := dwarfFuncSym(ctxt, s, dwarf.InfoPrefix, false) + dsym.Attr |= sym.AttrNotInSymbolTable | sym.AttrReachable + dsym.Type = sym.SDWARFINFO + unit.funcDIEs = append(unit.funcDIEs, dsym) + + rangeSym := dwarfFuncSym(ctxt, s, dwarf.RangePrefix, false) + if rangeSym != nil && rangeSym.Size > 0 { + rangeSym.Attr |= sym.AttrReachable | sym.AttrNotInSymbolTable + rangeSym.Type = sym.SDWARFRANGE + // LLVM doesn't support base address entries. Strip them out so LLDB and dsymutil don't get confused. + if ctxt.HeadType == objabi.Hdarwin { + removeDwarfAddrListBaseAddress(ctxt, dsym, rangeSym, false) + } + unit.rangeSyms = append(unit.rangeSyms, rangeSym) + } + + for ri := 0; ri < len(dsym.R); ri++ { + r := &dsym.R[ri] + if r.Type == objabi.R_DWARFSECREF { + rsym := r.Sym + if strings.HasPrefix(rsym.Name, dwarf.InfoPrefix) && strings.HasSuffix(rsym.Name, dwarf.AbstractFuncSuffix) && !rsym.Attr.OnList() { + // abstract function + rsym.Attr |= sym.AttrOnList + unit.absFnDIEs = append(unit.absFnDIEs, rsym) + importInfoSymbol(ctxt, rsym) + } else if rsym.Size == 0 { + // a type we do not have a DIE for + n := nameFromDIESym(rsym) + defgotype(ctxt, ctxt.Syms.Lookup("type."+n, 0)) + } + } + } + } + } + + // Create DIEs for global variables and the types they use. 
genasmsym(ctxt, defdwsymb) + synthesizestringtypes(ctxt, dwtypes.Child) + synthesizeslicetypes(ctxt, dwtypes.Child) + synthesizemaptypes(ctxt, dwtypes.Child) + synthesizechantypes(ctxt, dwtypes.Child) +} + +// dwarfGenerateDebugSyms constructs debug_line, debug_frame, debug_loc, +// debug_pubnames and debug_pubtypes. It also writes out the debug_info +// section using symbols generated in dwarfGenerateDebugInfo. +func dwarfGenerateDebugSyms(ctxt *Link) { + if !dwarfEnabled(ctxt) { + return + } + + if ctxt.Debugvlog != 0 { + ctxt.Logf("%5.2f dwarf\n", Cputime()) + } + abbrev := writeabbrev(ctxt) syms := []*sym.Symbol{abbrev} - units := getCompilationUnits(ctxt) + calcCompUnitRanges(ctxt) + sort.Sort(compilationUnitByStartPC(ctxt.compUnits)) // Write per-package line and range tables and start their CU DIEs. debugLine := ctxt.Syms.Lookup(".debug_line", 0) @@ -1760,29 +1848,24 @@ func dwarfgeneratedebugsyms(ctxt *Link) { debugRanges.Type = sym.SDWARFRANGE debugRanges.Attr |= sym.AttrReachable syms = append(syms, debugLine) - for _, u := range units { - u.dwinfo, u.funcDIEs, u.absFnDIEs = writelines(ctxt, u.lib, u.lib.Textp, debugLine) + for _, u := range ctxt.compUnits { + reversetree(&u.dwinfo.Child) + if u.dwinfo.Abbrev == dwarf.DW_ABRV_COMPUNIT_TEXTLESS { + continue + } + writelines(ctxt, u, debugLine) writepcranges(ctxt, u.dwinfo, u.lib.Textp[0], u.pcs, debugRanges) } - synthesizestringtypes(ctxt, dwtypes.Child) - synthesizeslicetypes(ctxt, dwtypes.Child) - synthesizemaptypes(ctxt, dwtypes.Child) - synthesizechantypes(ctxt, dwtypes.Child) - // newdie adds DIEs to the *beginning* of the parent's DIE list. // Now that we're done creating DIEs, reverse the trees so DIEs // appear in the order they were created. 
- reversetree(&dwroot.Child) reversetree(&dwtypes.Child) - reversetree(&dwglobals.Child) - - movetomodule(&dwtypes) - movetomodule(&dwglobals) + movetomodule(ctxt, &dwtypes) // Need to reorder symbols so sym.SDWARFINFO is after all sym.SDWARFSECT // (but we need to generate dies before writepub) - infosyms := writeinfo(ctxt, nil, units, abbrev) + infosyms := writeinfo(ctxt, nil, ctxt.compUnits, abbrev) syms = writeframes(ctxt, syms) syms = writepub(ctxt, ".debug_pubnames", ispubname, syms) @@ -1791,9 +1874,11 @@ func dwarfgeneratedebugsyms(ctxt *Link) { // Now we're done writing SDWARFSECT symbols, so we can write // other SDWARF* symbols. syms = append(syms, infosyms...) - syms = collectlocs(ctxt, syms, units) + syms = collectlocs(ctxt, syms, ctxt.compUnits) syms = append(syms, debugRanges) - syms = writeranges(ctxt, syms) + for _, unit := range ctxt.compUnits { + syms = append(syms, unit.rangeSyms...) + } dwarfp = syms } @@ -1801,7 +1886,8 @@ func collectlocs(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit) []*sy empty := true for _, u := range units { for _, fn := range u.funcDIEs { - for _, reloc := range fn.R { + for i := range fn.R { + reloc := &fn.R[i] // Copying sym.Reloc has measurable impact on performance if reloc.Type == objabi.R_DWARFSECREF && strings.HasPrefix(reloc.Sym.Name, dwarf.LocPrefix) { reloc.Sym.Attr |= sym.AttrReachable | sym.AttrNotInSymbolTable syms = append(syms, reloc.Sym) @@ -2003,3 +2089,45 @@ func dwarfcompress(ctxt *Link) { } Segdwarf.Length = pos - Segdwarf.Vaddr } + +type compilationUnitByStartPC []*compilationUnit + +func (v compilationUnitByStartPC) Len() int { return len(v) } +func (v compilationUnitByStartPC) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +func (v compilationUnitByStartPC) Less(i, j int) bool { + switch { + case len(v[i].lib.Textp) == 0 && len(v[j].lib.Textp) == 0: + return v[i].lib.Pkg < v[j].lib.Pkg + case len(v[i].lib.Textp) != 0 && len(v[j].lib.Textp) == 0: + return true + case len(v[i].lib.Textp) == 
0 && len(v[j].lib.Textp) != 0: + return false + default: + return v[i].lib.Textp[0].Value < v[j].lib.Textp[0].Value + } +} + +// On AIX, the symbol table needs to know where are the compilation units parts +// for a specific package in each .dw section. +// dwsectCUSize map will save the size of a compilation unit for +// the corresponding .dw section. +// This size can later be retrieved with the index "sectionName.pkgName". +var dwsectCUSize map[string]uint64 + +// getDwsectCUSize retrieves the corresponding package size inside the current section. +func getDwsectCUSize(sname string, pkgname string) uint64 { + return dwsectCUSize[sname+"."+pkgname] +} + +func saveDwsectCUSize(sname string, pkgname string, size uint64) { + dwsectCUSize[sname+"."+pkgname] = size +} + +// getPkgFromCUSym returns the package name for the compilation unit +// represented by s. +// The prefix dwarf.InfoPrefix+".pkg." needs to be removed in order to get +// the package name. +func getPkgFromCUSym(s *sym.Symbol) string { + return strings.TrimPrefix(s.Name, dwarf.InfoPrefix+".pkg.") +} diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index 157bebbb41256..7bbe2b710cca2 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -5,6 +5,7 @@ package ld import ( + intdwarf "cmd/internal/dwarf" objfilepkg "cmd/internal/objfile" // renamed to avoid conflict with objfile function "debug/dwarf" "errors" @@ -29,6 +30,7 @@ const ( ) func TestRuntimeTypesPresent(t *testing.T) { + t.Parallel() testenv.MustHaveGoBuild(t) if runtime.GOOS == "plan9" { @@ -145,6 +147,7 @@ func gobuildTestdata(t *testing.T, tdir string, gopathdir string, packtobuild st } func TestEmbeddedStructMarker(t *testing.T) { + t.Parallel() testenv.MustHaveGoBuild(t) if runtime.GOOS == "plan9" { @@ -224,7 +227,7 @@ func main() { func findMembers(rdr *dwarf.Reader) (map[string]bool, error) { memberEmbedded := map[string]bool{} // TODO(hyangah): 
define in debug/dwarf package - const goEmbeddedStruct = dwarf.Attr(0x2903) + const goEmbeddedStruct = dwarf.Attr(intdwarf.DW_AT_go_embedded_field) for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { if err != nil { return nil, err @@ -245,6 +248,7 @@ func TestSizes(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } + t.Parallel() // DWARF sizes should never be -1. // See issue #21097 @@ -292,6 +296,7 @@ func TestFieldOverlap(t *testing.T) { if runtime.GOOS == "plan9" { t.Skip("skipping on plan9; no DWARF symbol table in executables") } + t.Parallel() // This test grew out of issue 21094, where specific sudog DWARF types // had elem fields set to values instead of pointers. @@ -348,6 +353,7 @@ func main() { } func varDeclCoordsAndSubrogramDeclFile(t *testing.T, testpoint string, expectFile int, expectLine int, directive string) { + t.Parallel() prog := fmt.Sprintf("package main\n\nfunc main() {\n%s\nvar i int\ni = i\n}\n", directive) @@ -526,7 +532,7 @@ func (ex *examiner) entryFromOffset(off dwarf.Offset) *dwarf.Entry { return nil } -// Return the ID that that examiner uses to refer to the DIE at offset off +// Return the ID that examiner uses to refer to the DIE at offset off func (ex *examiner) idxFromOffset(off dwarf.Offset) int { if idx, found := ex.idxByOffset[off]; found { return idx @@ -584,6 +590,8 @@ func TestInlinedRoutineRecords(t *testing.T) { t.Skip("skipping on solaris and darwin, pending resolution of issue #23168") } + t.Parallel() + const prog = ` package main @@ -720,6 +728,7 @@ func main() { } func abstractOriginSanity(t *testing.T, gopathdir string, flags string) { + t.Parallel() dir, err := ioutil.TempDir("", "TestAbstractOriginSanity") if err != nil { @@ -861,6 +870,10 @@ func TestRuntimeTypeAttrInternal(t *testing.T) { t.Skip("skipping on plan9; no DWARF symbol table in executables") } + if runtime.GOOS == "windows" && runtime.GOARCH == "arm" { + t.Skip("skipping 
on windows/arm; test is incompatible with relocatable binaries") + } + testRuntimeTypeAttr(t, "-ldflags=-linkmode=internal") } @@ -881,6 +894,8 @@ func TestRuntimeTypeAttrExternal(t *testing.T) { } func testRuntimeTypeAttr(t *testing.T, flags string) { + t.Parallel() + const prog = ` package main @@ -939,7 +954,7 @@ func main() { if len(dies) != 1 { t.Fatalf("wanted 1 DIE named *main.X, found %v", len(dies)) } - rtAttr := dies[0].Val(0x2904) + rtAttr := dies[0].Val(intdwarf.DW_AT_go_runtime_type) if rtAttr == nil { t.Fatalf("*main.X DIE had no runtime type attr. DIE: %v", dies[0]) } @@ -948,3 +963,194 @@ func main() { t.Errorf("DWARF type offset was %#x+%#x, but test program said %#x", rtAttr.(uint64), types.Addr, addr) } } + +func TestIssue27614(t *testing.T) { + // Type references in debug_info should always use the DW_TAG_typedef_type + // for the type, when that's generated. + + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + t.Parallel() + + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + const prog = `package main + +import "fmt" + +type astruct struct { + X int +} + +type bstruct struct { + X float32 +} + +var globalptr *astruct +var globalvar astruct +var bvar0, bvar1, bvar2 bstruct + +func main() { + fmt.Println(globalptr, globalvar, bvar0, bvar1, bvar2) +} +` + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + data, err := f.DWARF() + if err != nil { + t.Fatal(err) + } + + rdr := data.Reader() + + var astructTypeDIE, bstructTypeDIE, ptrastructTypeDIE *dwarf.Entry + var globalptrDIE, globalvarDIE *dwarf.Entry + var bvarDIE [3]*dwarf.Entry + + for { + e, err := rdr.Next() + if err != nil { + t.Fatal(err) + } + if e == nil { + break + } + + name, _ := e.Val(dwarf.AttrName).(string) + + switch e.Tag { + case dwarf.TagTypedef: + switch name { + case "main.astruct": + astructTypeDIE = e + case 
"main.bstruct": + bstructTypeDIE = e + } + case dwarf.TagPointerType: + if name == "*main.astruct" { + ptrastructTypeDIE = e + } + case dwarf.TagVariable: + switch name { + case "main.globalptr": + globalptrDIE = e + case "main.globalvar": + globalvarDIE = e + default: + const bvarprefix = "main.bvar" + if strings.HasPrefix(name, bvarprefix) { + i, _ := strconv.Atoi(name[len(bvarprefix):]) + bvarDIE[i] = e + } + } + } + } + + typedieof := func(e *dwarf.Entry) dwarf.Offset { + return e.Val(dwarf.AttrType).(dwarf.Offset) + } + + if off := typedieof(ptrastructTypeDIE); off != astructTypeDIE.Offset { + t.Errorf("type attribute of *main.astruct references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset) + } + + if off := typedieof(globalptrDIE); off != ptrastructTypeDIE.Offset { + t.Errorf("type attribute of main.globalptr references %#x, not *main.astruct DIE at %#x\n", off, ptrastructTypeDIE.Offset) + } + + if off := typedieof(globalvarDIE); off != astructTypeDIE.Offset { + t.Errorf("type attribute of main.globalvar1 references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset) + } + + for i := range bvarDIE { + if off := typedieof(bvarDIE[i]); off != bstructTypeDIE.Offset { + t.Errorf("type attribute of main.bvar%d references %#x, not main.bstruct DIE at %#x\n", i, off, bstructTypeDIE.Offset) + } + } +} + +func TestStaticTmp(t *testing.T) { + // Checks that statictmp variables do not appear in debug_info or the + // symbol table. 
+ // Also checks that statictmp variables do not collide with user defined + // variables (issue #25113) + + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + t.Parallel() + + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + const prog = `package main + +var stmp_0 string +var a []int + +func init() { + a = []int{ 7 } +} + +func main() { + println(a[0]) +} +` + + f := gobuild(t, dir, prog, NoOpt) + + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + for { + e, err := rdr.Next() + if err != nil { + t.Fatal(err) + } + if e == nil { + break + } + if e.Tag != dwarf.TagVariable { + continue + } + name, ok := e.Val(dwarf.AttrName).(string) + if !ok { + continue + } + if strings.Contains(name, "stmp") { + t.Errorf("statictmp variable found in debug_info: %s at %x", name, e.Offset) + } + } + + syms, err := f.Symbols() + if err != nil { + t.Fatalf("error reading symbols: %v", err) + } + for _, sym := range syms { + if strings.Contains(sym.Name, "stmp") { + t.Errorf("statictmp variable found in symbol table: %s", sym.Name) + } + } +} diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 4ecbff86a9c5d..c2a2b3a7baf25 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -506,7 +506,7 @@ func Elfinit(ctxt *Link) { } elf64 = true - ehdr.phoff = ELF64HDRSIZE /* Must be be ELF64HDRSIZE: first PHdr must follow ELF header */ + ehdr.phoff = ELF64HDRSIZE /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */ ehdr.shoff = ELF64HDRSIZE /* Will move as we add PHeaders */ ehdr.ehsize = ELF64HDRSIZE /* Must be ELF64HDRSIZE */ ehdr.phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */ @@ -533,7 +533,7 @@ func Elfinit(ctxt *Link) { fallthrough default: ehdr.phoff = ELF32HDRSIZE - /* Must be be 
ELF32HDRSIZE: first PHdr must follow ELF header */ + /* Must be ELF32HDRSIZE: first PHdr must follow ELF header */ ehdr.shoff = ELF32HDRSIZE /* Will move as we add PHeaders */ ehdr.ehsize = ELF32HDRSIZE /* Must be ELF32HDRSIZE */ ehdr.phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */ @@ -1034,7 +1034,7 @@ func elfdynhash(ctxt *Link) { need[sy.Dynid] = addelflib(&needlib, sy.Dynimplib(), sy.Dynimpvers()) } - name := sy.Extname + name := sy.Extname() hc := elfhash(name) b := hc % uint32(nbucket) @@ -2254,7 +2254,7 @@ func elfadddynsym(ctxt *Link, s *sym.Symbol) { d := ctxt.Syms.Lookup(".dynsym", 0) - name := s.Extname + name := s.Extname() d.AddUint32(ctxt.Arch, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) /* type */ @@ -2297,7 +2297,7 @@ func elfadddynsym(ctxt *Link, s *sym.Symbol) { d := ctxt.Syms.Lookup(".dynsym", 0) /* name */ - name := s.Extname + name := s.Extname() d.AddUint32(ctxt.Arch, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index 06ee6968c6804..80d7ac32f524b 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -25,6 +25,17 @@ func expandpkg(t0 string, pkg string) string { return strings.Replace(t0, `"".`, pkg+".", -1) } +func resolveABIAlias(s *sym.Symbol) *sym.Symbol { + if s.Type != sym.SABIALIAS { + return s + } + target := s.R[0].Sym + if target.Type == sym.SABIALIAS { + panic(fmt.Sprintf("ABI alias %s references another ABI alias %s", s, target)) + } + return target +} + // TODO: // generate debugging section in binary. 
// once the dust settles, try to move some code to @@ -156,13 +167,14 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { s := ctxt.Syms.Lookup(local, 0) if s.Type == 0 || s.Type == sym.SXREF || s.Type == sym.SHOSTOBJ { s.SetDynimplib(lib) - s.Extname = remote + s.SetExtname(remote) s.SetDynimpvers(q) if s.Type != sym.SHOSTOBJ { s.Type = sym.SDYNIMPORT } havedynamic = 1 } + continue case "cgo_import_static": @@ -187,6 +199,11 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { } local = expandpkg(local, pkg) + // The compiler arranges for an ABI0 wrapper + // to be available for all cgo-exported + // functions. Link.loadlib will resolve any + // ABI aliases we find here (since we may not + // yet know it's an alias). s := ctxt.Syms.Lookup(local, 0) switch ctxt.BuildMode { @@ -200,15 +217,15 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { // see issue 4878. if s.Dynimplib() != "" { s.ResetDyninfo() - s.Extname = "" + s.SetExtname("") s.Type = 0 } if !s.Attr.CgoExport() { - s.Extname = remote + s.SetExtname(remote) dynexp = append(dynexp, s) - } else if s.Extname != remote { - fmt.Fprintf(os.Stderr, "%s: conflicting cgo_export directives: %s as %s and %s\n", os.Args[0], s.Name, s.Extname, remote) + } else if s.Extname() != remote { + fmt.Fprintf(os.Stderr, "%s: conflicting cgo_export directives: %s as %s and %s\n", os.Args[0], s.Name, s.Extname(), remote) nerrors++ return } @@ -276,7 +293,7 @@ func Adddynsym(ctxt *Link, s *sym.Symbol) { if ctxt.IsELF { elfadddynsym(ctxt, s) } else if ctxt.HeadType == objabi.Hdarwin { - Errorf(s, "adddynsym: missed symbol (Extname=%s)", s.Extname) + Errorf(s, "adddynsym: missed symbol (Extname=%s)", s.Extname()) } else if ctxt.HeadType == objabi.Hwindows { // already taken care of } else { @@ -317,7 +334,8 @@ func fieldtrack(ctxt *Link) { } func (ctxt *Link) addexport() { - if ctxt.HeadType == objabi.Hdarwin { + // TODO(aix) + if ctxt.HeadType == objabi.Hdarwin || ctxt.HeadType == objabi.Haix 
{ return } diff --git a/src/cmd/link/internal/ld/ld.go b/src/cmd/link/internal/ld/ld.go index 896e1c87d242f..9e5e2f98726ef 100644 --- a/src/cmd/link/internal/ld/ld.go +++ b/src/cmd/link/internal/ld/ld.go @@ -136,13 +136,13 @@ func findlib(ctxt *Link, lib string) (string, bool) { // try dot, -L "libdir", and then goroot. for _, dir := range ctxt.Libdir { if ctxt.linkShared { - pname = dir + "/" + pkg + ".shlibname" + pname = filepath.Join(dir, pkg+".shlibname") if _, err := os.Stat(pname); err == nil { isshlib = true break } } - pname = dir + "/" + name + pname = filepath.Join(dir, name) if _, err := os.Stat(pname); err == nil { break } diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index 4884a07d05a93..081642931652b 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -14,6 +14,7 @@ import ( ) func TestUndefinedRelocErrors(t *testing.T) { + t.Parallel() testenv.MustHaveGoBuild(t) dir, err := ioutil.TempDir("", "go-build") if err != nil { diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 220aab310f06d..2cb7ae72e4d2d 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -39,6 +39,7 @@ import ( "cmd/link/internal/loadelf" "cmd/link/internal/loadmacho" "cmd/link/internal/loadpe" + "cmd/link/internal/loadxcoff" "cmd/link/internal/objfile" "cmd/link/internal/sym" "crypto/sha1" @@ -91,28 +92,47 @@ import ( // THE SOFTWARE. 
type Arch struct { - Funcalign int - Maxalign int - Minalign int - Dwarfregsp int - Dwarfreglr int - Linuxdynld string - Freebsddynld string - Netbsddynld string - Openbsddynld string - Dragonflydynld string - Solarisdynld string - Adddynrel func(*Link, *sym.Symbol, *sym.Reloc) bool - Archinit func(*Link) - Archreloc func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool - Archrelocvariant func(*Link, *sym.Reloc, *sym.Symbol, int64) int64 - Trampoline func(*Link, *sym.Reloc, *sym.Symbol) - Asmb func(*Link) - Elfreloc1 func(*Link, *sym.Reloc, int64) bool - Elfsetupplt func(*Link) - Gentext func(*Link) - Machoreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool - PEreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool + Funcalign int + Maxalign int + Minalign int + Dwarfregsp int + Dwarfreglr int + Linuxdynld string + Freebsddynld string + Netbsddynld string + Openbsddynld string + Dragonflydynld string + Solarisdynld string + Adddynrel func(*Link, *sym.Symbol, *sym.Reloc) bool + Archinit func(*Link) + // Archreloc is an arch-specific hook that assists in + // relocation processing (invoked by 'relocsym'); it handles + // target-specific relocation tasks. Here "rel" is the current + // relocation being examined, "sym" is the symbol containing the + // chunk of data to which the relocation applies, and "off" is the + // contents of the to-be-relocated data item (from sym.P). Return + // value is the appropriately relocated value (to be written back + // to the same spot in sym.P) and a boolean indicating + // success/failure (a failing value indicates a fatal error). + Archreloc func(link *Link, rel *sym.Reloc, sym *sym.Symbol, + offset int64) (relocatedOffset int64, success bool) + // Archrelocvariant is a second arch-specific hook used for + // relocation processing; it handles relocations where r.Type is + // insufficient to describe the relocation (r.Variant != + // sym.RV_NONE). 
Here "rel" is the relocation being applied, "sym" + // is the symbol containing the chunk of data to which the + // relocation applies, and "off" is the contents of the + // to-be-relocated data item (from sym.P). Return is an updated + // offset value. + Archrelocvariant func(link *Link, rel *sym.Reloc, sym *sym.Symbol, + offset int64) (relocatedOffset int64) + Trampoline func(*Link, *sym.Reloc, *sym.Symbol) + Asmb func(*Link) + Elfreloc1 func(*Link, *sym.Reloc, int64) bool + Elfsetupplt func(*Link) + Gentext func(*Link) + Machoreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool + PEreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool // TLSIEtoLE converts a TLS Initial Executable relocation to // a TLS Local Executable relocation. @@ -138,7 +158,7 @@ const ( MINFUNC = 16 // minimum size for a function ) -// DynlinkingGo returns whether we are producing Go code that can live +// DynlinkingGo reports whether we are producing Go code that can live // in separate shared libraries linked together at runtime. func (ctxt *Link) DynlinkingGo() bool { if !ctxt.Loaded { @@ -147,12 +167,12 @@ func (ctxt *Link) DynlinkingGo() bool { return ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() } -// CanUsePlugins returns whether a plugins can be used +// CanUsePlugins reports whether a plugins can be used func (ctxt *Link) CanUsePlugins() bool { - return ctxt.Syms.ROLookup("plugin.Open", 0) != nil + return ctxt.Syms.ROLookup("plugin.Open", sym.SymVerABIInternal) != nil } -// UseRelro returns whether to make use of "read only relocations" aka +// UseRelro reports whether to make use of "read only relocations" aka // relro. func (ctxt *Link) UseRelro() bool { switch ctxt.BuildMode { @@ -416,7 +436,7 @@ func (ctxt *Link) loadlib() { // cgo_import_static and cgo_import_dynamic, // then we want to make it cgo_import_dynamic // now. 
- if s.Extname != "" && s.Dynimplib() != "" && !s.Attr.CgoExport() { + if s.Extname() != "" && s.Dynimplib() != "" && !s.Attr.CgoExport() { s.Type = sym.SDYNIMPORT } else { s.Type = 0 @@ -503,7 +523,8 @@ func (ctxt *Link) loadlib() { // objects, try to read them from the libgcc file. any := false for _, s := range ctxt.Syms.Allsym { - for _, r := range s.R { + for i := range s.R { + r := &s.R[i] // Copying sym.Reloc has measurable impact on performance if r.Sym != nil && r.Sym.Type == sym.SXREF && r.Sym.Name != ".got" { any = true break @@ -514,6 +535,12 @@ func (ctxt *Link) loadlib() { if *flagLibGCC == "" { *flagLibGCC = ctxt.findLibPathCmd("--print-libgcc-file-name", "libgcc") } + if runtime.GOOS == "openbsd" && *flagLibGCC == "libgcc.a" { + // On OpenBSD `clang --print-libgcc-file-name` returns "libgcc.a". + // In this case we fail to load libgcc.a and can encounter link + // errors - see if we can find libcompiler_rt.a instead. + *flagLibGCC = ctxt.findLibPathCmd("--print-file-name=libcompiler_rt.a", "libcompiler_rt") + } if *flagLibGCC != "none" { hostArchive(ctxt, *flagLibGCC) } @@ -557,27 +584,6 @@ func (ctxt *Link) loadlib() { } } - // If type. symbols are visible in the symbol table, rename them - // using a SHA-1 prefix. This reduces binary size (the full - // string of a type symbol can be multiple kilobytes) and removes - // characters that upset external linkers. - // - // Keep the type.. prefix, which parts of the linker (like the - // DWARF generator) know means the symbol is not decodable. - // - // Leave type.runtime. symbols alone, because other parts of - // the linker manipulates them, and also symbols whose names - // would not be shortened by this process. 
- if typeSymbolMangling(ctxt) { - *FlagW = true // disable DWARF generation - for _, s := range ctxt.Syms.Allsym { - newName := typeSymbolMangle(s.Name) - if newName != s.Name { - ctxt.Syms.Rename(s.Name, newName, int(s.Version)) - } - } - } - // If package versioning is required, generate a hash of the // packages used in the link. if ctxt.BuildMode == BuildModeShared || ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() { @@ -588,8 +594,8 @@ func (ctxt *Link) loadlib() { } } - if ctxt.Arch == sys.Arch386 { - if (ctxt.BuildMode == BuildModeCArchive && ctxt.IsELF) || (ctxt.BuildMode == BuildModeCShared && ctxt.HeadType != objabi.Hwindows) || ctxt.BuildMode == BuildModePIE || ctxt.DynlinkingGo() { + if ctxt.Arch == sys.Arch386 && ctxt.HeadType != objabi.Hwindows { + if (ctxt.BuildMode == BuildModeCArchive && ctxt.IsELF) || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE || ctxt.DynlinkingGo() { got := ctxt.Syms.Lookup("_GLOBAL_OFFSET_TABLE_", 0) got.Type = sym.SDYNIMPORT got.Attr |= sym.AttrReachable @@ -635,25 +641,53 @@ func (ctxt *Link) loadlib() { } ctxt.Textp = textp } + + // Resolve ABI aliases in the list of cgo-exported functions. + // This is necessary because we load the ABI0 symbol for all + // cgo exports. + for i, s := range dynexp { + if s.Type != sym.SABIALIAS { + continue + } + t := resolveABIAlias(s) + t.Attr |= s.Attr + t.SetExtname(s.Extname()) + dynexp[i] = t + } } -// typeSymbolMangling reports whether the linker should shorten the -// names of symbols that represent Go types. +// mangleTypeSym shortens the names of symbols that represent Go types +// if they are visible in the symbol table. // // As the names of these symbols are derived from the string of // the type, they can run to many kilobytes long. So we shorten // them using a SHA-1 when the name appears in the final binary. +// This also removes characters that upset external linkers. // // These are the symbols that begin with the prefix 'type.' 
and // contain run-time type information used by the runtime and reflect -// packages. All Go binaries contain these symbols, but only only +// packages. All Go binaries contain these symbols, but only // those programs loaded dynamically in multiple parts need these // symbols to have entries in the symbol table. -func typeSymbolMangling(ctxt *Link) bool { - return ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil +func (ctxt *Link) mangleTypeSym() { + if ctxt.BuildMode != BuildModeShared && !ctxt.linkShared && ctxt.BuildMode != BuildModePlugin && !ctxt.CanUsePlugins() { + return + } + + for _, s := range ctxt.Syms.Allsym { + newName := typeSymbolMangle(s.Name) + if newName != s.Name { + ctxt.Syms.Rename(s.Name, newName, int(s.Version), ctxt.Reachparent) + } + } } // typeSymbolMangle mangles the given symbol name into something shorter. +// +// Keep the type.. prefix, which parts of the linker (like the +// DWARF generator) know means the symbol is not decodable. +// Leave type.runtime. symbols alone, because other parts of +// the linker manipulates them. func typeSymbolMangle(name string) string { if !strings.HasPrefix(name, "type.") { return name @@ -841,6 +875,15 @@ func loadobjfile(ctxt *Link, lib *sym.Library) { continue } + // Skip other special (non-object-file) sections that + // build tools may have added. Such sections must have + // short names so that the suffix is not truncated. 
+ if len(arhdr.name) < 16 { + if ext := filepath.Ext(arhdr.name); ext != ".o" && ext != ".syso" { + continue + } + } + pname := fmt.Sprintf("%s(%s)", lib.File, arhdr.name) l = atolwhex(arhdr.size) ldobj(ctxt, f, lib, l, pname, lib.File) @@ -982,6 +1025,7 @@ func hostobjCopy() (paths []string) { if err != nil { Exitf("cannot reopen %s: %v", h.pn, err) } + defer f.Close() if _, err := f.Seek(h.off, 0); err != nil { Exitf("cannot seek %s: %v", h.pn, err) } @@ -1087,7 +1131,7 @@ func (ctxt *Link) hostlink() { switch ctxt.HeadType { case objabi.Hdarwin: argv = append(argv, "-Wl,-headerpad,1144") - if ctxt.DynlinkingGo() { + if ctxt.DynlinkingGo() && !ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { argv = append(argv, "-Wl,-flat_namespace") } if ctxt.BuildMode == BuildModeExe && !ctxt.Arch.InFamily(sys.ARM64) { @@ -1323,9 +1367,24 @@ func (ctxt *Link) hostlink() { ctxt.Logf("\n") } - if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil { + out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput() + if err != nil { Exitf("running %s failed: %v\n%s", argv[0], err, out) - } else if len(out) > 0 { + } + + // Filter out useless linker warnings caused by bugs outside Go. + // See also cmd/go/internal/work/exec.go's gccld method. + var save [][]byte + for _, line := range bytes.SplitAfter(out, []byte("\n")) { + // golang.org/issue/26073 - Apple Xcode bug + if bytes.Contains(line, []byte("ld: warning: text-based stub file")) { + continue + } + save = append(save, line) + } + out = bytes.Join(save, nil) + + if len(out) > 0 { // always print external output even if the command is successful, so that we don't // swallow linker warnings (see https://golang.org/issue/17935). 
ctxt.Logf("%s", out) @@ -1365,7 +1424,61 @@ func linkerFlagSupported(linker, flag string) bool { } }) - cmd := exec.Command(linker, flag, "trivial.c") + flagsWithNextArgSkip := []string{ + "-F", + "-l", + "-L", + "-framework", + "-Wl,-framework", + "-Wl,-rpath", + "-Wl,-undefined", + } + flagsWithNextArgKeep := []string{ + "-arch", + "-isysroot", + "--sysroot", + "-target", + } + prefixesToKeep := []string{ + "-f", + "-m", + "-p", + "-Wl,", + "-arch", + "-isysroot", + "--sysroot", + "-target", + } + + var flags []string + keep := false + skip := false + extldflags := strings.Fields(*flagExtldflags) + for _, f := range append(extldflags, ldflag...) { + if keep { + flags = append(flags, f) + keep = false + } else if skip { + skip = false + } else if f == "" || f[0] != '-' { + } else if contains(flagsWithNextArgSkip, f) { + skip = true + } else if contains(flagsWithNextArgKeep, f) { + flags = append(flags, f) + keep = true + } else { + for _, p := range prefixesToKeep { + if strings.HasPrefix(f, p) { + flags = append(flags, f) + break + } + } + } + } + + flags = append(flags, flag, "trivial.c") + + cmd := exec.Command(linker, flags...) cmd.Dir = *flagTmpdir cmd.Env = append([]string{"LC_ALL=C"}, os.Environ()...) out, err := cmd.CombinedOutput() @@ -1449,6 +1562,18 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, return ldhostobj(ldpe, ctxt.HeadType, f, pkg, length, pn, file) } + if c1 == 0x01 && (c2 == 0xD7 || c2 == 0xF7) { + ldxcoff := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + textp, err := loadxcoff.Load(ctxt.Arch, ctxt.Syms, f, pkg, length, pn) + if err != nil { + Errorf(nil, "%v", err) + return + } + ctxt.Textp = append(ctxt.Textp, textp...) 
+ } + return ldhostobj(ldxcoff, ctxt.HeadType, f, pkg, length, pn, file) + } + /* check the header */ line, err := f.ReadString('\n') if err != nil { @@ -1497,7 +1622,7 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, // // Note: It's possible for "\n!\n" to appear within the binary // package export data format. To avoid truncating the package - // definition prematurely (issue 21703), we keep keep track of + // definition prematurely (issue 21703), we keep track of // how many "$$" delimiters we've seen. import0 := f.Offset() @@ -1686,7 +1811,7 @@ func ldshlibsyms(ctxt *Link, shlib string) { continue } lsym.Type = sym.SDYNIMPORT - lsym.ElfType = elf.ST_TYPE(elfsym.Info) + lsym.SetElfType(elf.ST_TYPE(elfsym.Info)) lsym.Size = int64(elfsym.Size) if elfsym.Section != elf.SHN_UNDEF { // Set .File for the library that actually defines the symbol. @@ -1698,6 +1823,21 @@ func ldshlibsyms(ctxt *Link, shlib string) { gcdataLocations[elfsym.Value+2*uint64(ctxt.Arch.PtrSize)+8+1*uint64(ctxt.Arch.PtrSize)] = lsym } } + // For function symbols, we don't know what ABI is + // available, so alias it under both ABIs. + // + // TODO(austin): This is almost certainly wrong once + // the ABIs are actually different. We might have to + // mangle Go function names in the .so to include the + // ABI. 
+ if elf.ST_TYPE(elfsym.Info) == elf.STT_FUNC { + alias := ctxt.Syms.Lookup(elfsym.Name, sym.SymVerABIInternal) + if alias.Type != 0 { + continue + } + alias.Type = sym.SABIALIAS + alias.R = []sym.Reloc{{Sym: lsym}} + } } gcdataAddresses := make(map[*sym.Symbol]uint64) if ctxt.Arch.Family == sys.ARM64 { @@ -1738,26 +1878,6 @@ func addsection(arch *sys.Arch, seg *sym.Segment, name string, rwx int) *sym.Sec return sect } -func Le16(b []byte) uint16 { - return uint16(b[0]) | uint16(b[1])<<8 -} - -func Le32(b []byte) uint32 { - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func Le64(b []byte) uint64 { - return uint64(Le32(b)) | uint64(Le32(b[4:]))<<32 -} - -func Be16(b []byte) uint16 { - return uint16(b[0])<<8 | uint16(b[1]) -} - -func Be32(b []byte) uint32 { - return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3]) -} - type chain struct { sym *sym.Symbol up *chain @@ -1795,6 +1915,10 @@ func (ctxt *Link) dostkcheck() { ch.up = nil ch.limit = objabi.StackLimit - callsize(ctxt) + if objabi.GOARCH == "arm64" { + // need extra 8 bytes below SP to save FP + ch.limit -= 8 + } // Check every function, but do the nosplit functions in a first pass, // to make the printed failure chains as short as possible. @@ -1883,7 +2007,7 @@ func stkcheck(ctxt *Link, up *chain, depth int) int { if s.FuncInfo != nil { locals = s.FuncInfo.Locals } - limit = int(objabi.StackLimit+locals) + int(ctxt.FixedFrameSize()) + limit = objabi.StackLimit + int(locals) + int(ctxt.FixedFrameSize()) } // Walk through sp adjustments in function, consuming relocs. @@ -2042,7 +2166,7 @@ func genasmsym(ctxt *Link, put func(*Link, *sym.Symbol, string, SymbolType, int6 if s.Attr.NotInSymbolTable() { continue } - if (s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC." { + if (s.Name == "" || s.Name[0] == '.') && !s.IsFileLocal() && s.Name != ".rathole" && s.Name != ".TOC." 
{ continue } switch s.Type { @@ -2092,7 +2216,7 @@ func genasmsym(ctxt *Link, put func(*Link, *sym.Symbol, string, SymbolType, int6 if !s.Attr.Reachable() { continue } - put(ctxt, s, s.Extname, UndefinedSym, 0, nil) + put(ctxt, s, s.Extname(), UndefinedSym, 0, nil) case sym.STLSBSS: if ctxt.LinkMode == LinkExternal { @@ -2191,7 +2315,7 @@ func Entryvalue(ctxt *Link) int64 { if s.Type == 0 { return *FlagTextAddr } - if s.Type != sym.STEXT { + if ctxt.HeadType != objabi.Haix && s.Type != sym.STEXT { Errorf(s, "entry not text") } return s.Value diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index bf5754435786f..8ed5c6e27e900 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -32,6 +32,7 @@ package ld import ( "bufio" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/sym" @@ -89,6 +90,9 @@ type Link struct { // Used to implement field tracking. Reachparent map[*sym.Symbol]*sym.Symbol + + compUnits []*compilationUnit // DWARF compilation units + compUnitByPackage map[*sym.Library]*compilationUnit } type unresolvedSymKey struct { @@ -105,9 +109,28 @@ func (ctxt *Link) ErrorUnresolved(s *sym.Symbol, r *sym.Reloc) { k := unresolvedSymKey{from: s, to: r.Sym} if !ctxt.unresolvedSymSet[k] { ctxt.unresolvedSymSet[k] = true + + // Try to find symbol under another ABI. + var reqABI, haveABI obj.ABI + haveABI = ^obj.ABI(0) + reqABI, ok := sym.VersionToABI(int(r.Sym.Version)) + if ok { + for abi := obj.ABI(0); abi < obj.ABICount; abi++ { + v := sym.ABIToVersion(abi) + if v == -1 { + continue + } + if rs := ctxt.Syms.ROLookup(r.Sym.Name, v); rs != nil && rs.Type != sym.Sxxx { + haveABI = abi + } + } + } + // Give a special error message for main symbol (see #24809). 
if r.Sym.Name == "main.main" { Errorf(s, "function main is undeclared in the main package") + } else if haveABI != ^obj.ABI(0) { + Errorf(s, "relocation target %s not defined for %s (but is defined for %s)", r.Sym.Name, reqABI, haveABI) } else { Errorf(s, "relocation target %s not defined", r.Sym.Name) } diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 8315de5152d04..b935814ff0a43 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -724,7 +724,7 @@ func (x machoscmp) Less(i, j int) bool { return k1 < k2 } - return s1.Extname < s2.Extname + return s1.Extname() < s2.Extname() } func machogenasmsym(ctxt *Link) { @@ -763,7 +763,7 @@ func machoShouldExport(ctxt *Link, s *sym.Symbol) bool { if !ctxt.DynlinkingGo() || s.Attr.Local() { return false } - if ctxt.BuildMode == BuildModePlugin && strings.HasPrefix(s.Extname, objabi.PathToPrefix(*flagPluginPath)) { + if ctxt.BuildMode == BuildModePlugin && strings.HasPrefix(s.Extname(), objabi.PathToPrefix(*flagPluginPath)) { return true } if strings.HasPrefix(s.Name, "go.itab.") { @@ -798,13 +798,13 @@ func machosymtab(ctxt *Link) { // symbols like crosscall2 are in pclntab and end up // pointing at the host binary, breaking unwinding. // See Issue #18190. - cexport := !strings.Contains(s.Extname, ".") && (ctxt.BuildMode != BuildModePlugin || onlycsymbol(s)) + cexport := !strings.Contains(s.Extname(), ".") && (ctxt.BuildMode != BuildModePlugin || onlycsymbol(s)) if cexport || export { symstr.AddUint8('_') } // replace "·" as ".", because DTrace cannot handle it. 
- Addstring(symstr, strings.Replace(s.Extname, "·", ".", -1)) + Addstring(symstr, strings.Replace(s.Extname(), "·", ".", -1)) if s.Type == sym.SDYNIMPORT || s.Type == sym.SHOSTOBJ { symtab.AddUint8(0x01) // type N_EXT, external symbol diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index 23462f1154b7e..b87ee8094fb6a 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -208,9 +208,11 @@ func Main(arch *sys.Arch, theArch Arch) { ctxt.dostrdata() deadcode(ctxt) + dwarfGenerateDebugInfo(ctxt) if objabi.Fieldtrack_enabled != 0 { fieldtrack(ctxt) } + ctxt.mangleTypeSym() ctxt.callgraph() ctxt.doelf() @@ -220,7 +222,12 @@ func Main(arch *sys.Arch, theArch Arch) { ctxt.dostkcheck() if ctxt.HeadType == objabi.Hwindows { ctxt.dope() + ctxt.windynrelocsyms() } + if ctxt.HeadType == objabi.Haix { + ctxt.doxcoff() + } + ctxt.addexport() thearch.Gentext(ctxt) // trampolines, call stubs, etc. ctxt.textbuildid() diff --git a/src/cmd/link/internal/ld/nooptcgolink_test.go b/src/cmd/link/internal/ld/nooptcgolink_test.go index e019a39bf7348..4d2ff1acf223f 100644 --- a/src/cmd/link/internal/ld/nooptcgolink_test.go +++ b/src/cmd/link/internal/ld/nooptcgolink_test.go @@ -15,6 +15,11 @@ import ( ) func TestNooptCgoBuild(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + t.Parallel() + testenv.MustHaveGoBuild(t) testenv.MustHaveCGO(t) dir, err := ioutil.TempDir("", "go-build") diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 7b7f7068e7411..e4db834622dd4 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -7,6 +7,7 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/src" + "cmd/internal/sys" "cmd/link/internal/sym" "log" "os" @@ -312,45 +313,32 @@ func (ctxt *Link) pclntab() { } off = int32(ftab.SetUint32(ctxt.Arch, int64(off), args)) - // funcID uint32 - funcID := objabi.FuncID_normal - switch s.Name { - case 
"runtime.main": - funcID = objabi.FuncID_runtime_main - case "runtime.goexit": - funcID = objabi.FuncID_goexit - case "runtime.jmpdefer": - funcID = objabi.FuncID_jmpdefer - case "runtime.mcall": - funcID = objabi.FuncID_mcall - case "runtime.morestack": - funcID = objabi.FuncID_morestack - case "runtime.mstart": - funcID = objabi.FuncID_mstart - case "runtime.rt0_go": - funcID = objabi.FuncID_rt0_go - case "runtime.asmcgocall": - funcID = objabi.FuncID_asmcgocall - case "runtime.sigpanic": - funcID = objabi.FuncID_sigpanic - case "runtime.runfinq": - funcID = objabi.FuncID_runfinq - case "runtime.gcBgMarkWorker": - funcID = objabi.FuncID_gcBgMarkWorker - case "runtime.systemstack_switch": - funcID = objabi.FuncID_systemstack_switch - case "runtime.systemstack": - funcID = objabi.FuncID_systemstack - case "runtime.cgocallback_gofunc": - funcID = objabi.FuncID_cgocallback_gofunc - case "runtime.gogo": - funcID = objabi.FuncID_gogo - case "runtime.externalthreadhandler": - funcID = objabi.FuncID_externalthreadhandler - case "runtime.debugCallV1": - funcID = objabi.FuncID_debugCallV1 + // deferreturn + deferreturn := uint32(0) + lastWasmAddr := uint32(0) + for _, r := range s.R { + if ctxt.Arch.Family == sys.Wasm && r.Type == objabi.R_ADDR { + // Wasm does not have a live variable set at the deferreturn + // call itself. Instead it has one identified by the + // resumption point immediately preceding the deferreturn. + // The wasm code has a R_ADDR relocation which is used to + // set the resumption point to PC_B. + lastWasmAddr = uint32(r.Add) + } + if r.Sym != nil && r.Sym.Name == "runtime.deferreturn" && r.Add == 0 { + if ctxt.Arch.Family == sys.Wasm { + deferreturn = lastWasmAddr + } else { + // Note: the relocation target is in the call instruction, but + // is not necessarily the whole instruction (for instance, on + // x86 the relocation applies to bytes [1:5] of the 5 byte call + // instruction). 
+ deferreturn = uint32(r.Off) + } + break // only need one + } } - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(funcID))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), deferreturn)) if pcln != &pclntabZpcln { renumberfiles(ctxt, pcln.File, &pcln.Pcfile) @@ -380,10 +368,13 @@ func (ctxt *Link) pclntab() { numberfile(ctxt, call.File) nameoff := nameToOffset(call.Func.Name) - inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+0), uint32(call.Parent)) - inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+4), uint32(call.File.Value)) - inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+8), uint32(call.Line)) - inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+12), uint32(nameoff)) + inlTreeSym.SetUint16(ctxt.Arch, int64(i*20+0), uint16(call.Parent)) + inlTreeSym.SetUint8(ctxt.Arch, int64(i*20+2), uint8(objabi.GetFuncID(call.Func.Name, call.Func.File))) + // byte 3 is unused + inlTreeSym.SetUint32(ctxt.Arch, int64(i*20+4), uint32(call.File.Value)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*20+8), uint32(call.Line)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*20+12), uint32(nameoff)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*20+16), uint32(call.ParentPC)) } pcln.Funcdata[objabi.FUNCDATA_InlTree] = inlTreeSym @@ -396,7 +387,21 @@ func (ctxt *Link) pclntab() { off = addpctab(ctxt, ftab, off, &pcln.Pcfile) off = addpctab(ctxt, ftab, off, &pcln.Pcline) off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Pcdata)))) - off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Funcdata)))) + + // funcID uint8 + var file string + if s.FuncInfo != nil && len(s.FuncInfo.File) > 0 { + file = s.FuncInfo.File[0].Name + } + funcID := objabi.GetFuncID(s.Name, file) + + off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(funcID))) + + // unused + off += 2 + + // nfuncdata must be the final entry. 
+ off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(len(pcln.Funcdata)))) for i := range pcln.Pcdata { off = addpctab(ctxt, ftab, off, &pcln.Pcdata[i]) } @@ -487,10 +492,8 @@ func (ctxt *Link) findfunctab() { // find min and max address min := ctxt.Textp[0].Value - max := int64(0) - for _, s := range ctxt.Textp { - max = s.Value + s.Size - } + lastp := ctxt.Textp[len(ctxt.Textp)-1] + max := lastp.Value + lastp.Size // for each subbucket, compute the minimum of all symbol indexes // that map to that subbucket. diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index c81e3d6af5776..68251786ed908 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -54,41 +54,45 @@ var ( ) const ( - IMAGE_FILE_MACHINE_I386 = 0x14c - IMAGE_FILE_MACHINE_AMD64 = 0x8664 - IMAGE_FILE_RELOCS_STRIPPED = 0x0001 - IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002 - IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004 - IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020 - IMAGE_FILE_32BIT_MACHINE = 0x0100 - IMAGE_FILE_DEBUG_STRIPPED = 0x0200 - IMAGE_SCN_CNT_CODE = 0x00000020 - IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 - IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 - IMAGE_SCN_MEM_EXECUTE = 0x20000000 - IMAGE_SCN_MEM_READ = 0x40000000 - IMAGE_SCN_MEM_WRITE = 0x80000000 - IMAGE_SCN_MEM_DISCARDABLE = 0x2000000 - IMAGE_SCN_LNK_NRELOC_OVFL = 0x1000000 - IMAGE_SCN_ALIGN_32BYTES = 0x600000 - IMAGE_DIRECTORY_ENTRY_EXPORT = 0 - IMAGE_DIRECTORY_ENTRY_IMPORT = 1 - IMAGE_DIRECTORY_ENTRY_RESOURCE = 2 - IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3 - IMAGE_DIRECTORY_ENTRY_SECURITY = 4 - IMAGE_DIRECTORY_ENTRY_BASERELOC = 5 - IMAGE_DIRECTORY_ENTRY_DEBUG = 6 - IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7 - IMAGE_DIRECTORY_ENTRY_ARCHITECTURE = 7 - IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8 - IMAGE_DIRECTORY_ENTRY_TLS = 9 - IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10 - IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11 - IMAGE_DIRECTORY_ENTRY_IAT = 12 - IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT = 13 - 
IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14 - IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 - IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 + IMAGE_FILE_MACHINE_I386 = 0x14c + IMAGE_FILE_MACHINE_AMD64 = 0x8664 + IMAGE_FILE_MACHINE_ARM = 0x1c0 + IMAGE_FILE_MACHINE_ARMNT = 0x1c4 + IMAGE_FILE_RELOCS_STRIPPED = 0x0001 + IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002 + IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004 + IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020 + IMAGE_FILE_32BIT_MACHINE = 0x0100 + IMAGE_FILE_DEBUG_STRIPPED = 0x0200 + IMAGE_SCN_CNT_CODE = 0x00000020 + IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 + IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 + IMAGE_SCN_MEM_EXECUTE = 0x20000000 + IMAGE_SCN_MEM_READ = 0x40000000 + IMAGE_SCN_MEM_WRITE = 0x80000000 + IMAGE_SCN_MEM_DISCARDABLE = 0x2000000 + IMAGE_SCN_LNK_NRELOC_OVFL = 0x1000000 + IMAGE_SCN_ALIGN_32BYTES = 0x600000 + IMAGE_DIRECTORY_ENTRY_EXPORT = 0 + IMAGE_DIRECTORY_ENTRY_IMPORT = 1 + IMAGE_DIRECTORY_ENTRY_RESOURCE = 2 + IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3 + IMAGE_DIRECTORY_ENTRY_SECURITY = 4 + IMAGE_DIRECTORY_ENTRY_BASERELOC = 5 + IMAGE_DIRECTORY_ENTRY_DEBUG = 6 + IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7 + IMAGE_DIRECTORY_ENTRY_ARCHITECTURE = 7 + IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8 + IMAGE_DIRECTORY_ENTRY_TLS = 9 + IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10 + IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11 + IMAGE_DIRECTORY_ENTRY_IAT = 12 + IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT = 13 + IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR = 14 + IMAGE_SUBSYSTEM_WINDOWS_GUI = 2 + IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 + IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE = 0x0040 + IMAGE_DLLCHARACTERISTICS_NX_COMPAT = 0x0100 ) // TODO(crawshaw): add these constants to debug/pe. 
@@ -109,6 +113,15 @@ const ( IMAGE_REL_AMD64_ADDR32 = 0x0002 IMAGE_REL_AMD64_REL32 = 0x0004 IMAGE_REL_AMD64_SECREL = 0x000B + + IMAGE_REL_ARM_ABSOLUTE = 0x0000 + IMAGE_REL_ARM_ADDR32 = 0x0001 + IMAGE_REL_ARM_ADDR32NB = 0x0002 + IMAGE_REL_ARM_BRANCH24 = 0x0003 + IMAGE_REL_ARM_BRANCH11 = 0x0004 + IMAGE_REL_ARM_SECREL = 0x000F + + IMAGE_REL_BASED_HIGHLOW = 3 ) // Copyright 2009 The Go Authors. All rights reserved. @@ -477,6 +490,8 @@ func (f *peFile) addInitArray(ctxt *Link) *peSection { size = 4 case "amd64": size = 8 + case "arm": + size = 4 } sect := f.addSection(".ctors", size, size) sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ @@ -487,7 +502,7 @@ func (f *peFile) addInitArray(ctxt *Link) *peSection { init_entry := ctxt.Syms.Lookup(*flagEntrySymbol, 0) addr := uint64(init_entry.Value) - init_entry.Sect.Vaddr switch objabi.GOARCH { - case "386": + case "386", "arm": ctxt.Out.Write32(uint32(addr)) case "amd64": ctxt.Out.Write64(addr) @@ -592,6 +607,8 @@ dwarfLoop: ctxt.Out.Write16(IMAGE_REL_I386_DIR32) case "amd64": ctxt.Out.Write16(IMAGE_REL_AMD64_ADDR64) + case "arm": + ctxt.Out.Write16(IMAGE_REL_ARM_ADDR32) } return 1 }) @@ -687,7 +704,7 @@ func (f *peFile) writeSymbols(ctxt *Link) { } } class := IMAGE_SYM_CLASS_EXTERNAL - if s.Version != 0 || s.Attr.VisibilityHidden() || s.Attr.Local() { + if s.IsFileLocal() || s.Attr.VisibilityHidden() || s.Attr.Local() { class = IMAGE_SYM_CLASS_STATIC } f.writeSymbol(ctxt.Out, s, value, sect, typ, uint8(class)) @@ -743,6 +760,8 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) fh.Machine = IMAGE_FILE_MACHINE_AMD64 case sys.I386: fh.Machine = IMAGE_FILE_MACHINE_I386 + case sys.ARM: + fh.Machine = IMAGE_FILE_MACHINE_ARMNT } fh.NumberOfSections = uint16(len(f.sections)) @@ -754,7 +773,14 @@ func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) if linkmode == LinkExternal { fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED } else { - 
fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + switch arch.Family { + default: + Exitf("write COFF(ext): unknown PE architecture: %v", arch.Family) + case sys.AMD64, sys.I386: + fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + case sys.ARM: + fh.Characteristics = IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + } } if pe64 != 0 { var oh64 pe.OptionalHeader64 @@ -831,6 +857,12 @@ func (f *peFile) writeOptionalHeader(ctxt *Link) { oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI } + switch ctxt.Arch.Family { + case sys.ARM: + oh64.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE | IMAGE_DLLCHARACTERISTICS_NX_COMPAT + oh.DllCharacteristics = IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE | IMAGE_DLLCHARACTERISTICS_NX_COMPAT + } + // Disable stack growth as we don't want Windows to // fiddle with the thread stack limits, which we set // ourselves to circumvent the stack checks in the @@ -1008,14 +1040,15 @@ func initdynimport(ctxt *Link) *Dll { // of uinptrs this function consumes. Store the argsize and discard // the %n suffix if any. 
m.argsize = -1 - if i := strings.IndexByte(s.Extname, '%'); i >= 0 { + extName := s.Extname() + if i := strings.IndexByte(extName, '%'); i >= 0 { var err error - m.argsize, err = strconv.Atoi(s.Extname[i+1:]) + m.argsize, err = strconv.Atoi(extName[i+1:]) if err != nil { Errorf(s, "failed to parse stdcall decoration: %v", err) } m.argsize *= ctxt.Arch.PtrSize - s.Extname = s.Extname[:i] + s.SetExtname(extName[:i]) } m.s = s @@ -1029,7 +1062,7 @@ func initdynimport(ctxt *Link) *Dll { for m = d.ms; m != nil; m = m.next { m.s.Type = sym.SDATA m.s.Grow(int64(ctxt.Arch.PtrSize)) - dynName := m.s.Extname + dynName := m.s.Extname() // only windows/386 requires stdcall decoration if ctxt.Arch.Family == sys.I386 && m.argsize >= 0 { dynName += fmt.Sprintf("@%d", m.argsize) @@ -1100,7 +1133,7 @@ func addimports(ctxt *Link, datsect *peSection) { for m := d.ms; m != nil; m = m.next { m.off = uint64(pefile.nextSectOffset) + uint64(ctxt.Out.Offset()) - uint64(startoff) ctxt.Out.Write16(0) // hint - strput(ctxt.Out, m.s.Extname) + strput(ctxt.Out, m.s.Extname()) } } @@ -1185,7 +1218,7 @@ type byExtname []*sym.Symbol func (s byExtname) Len() int { return len(s) } func (s byExtname) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byExtname) Less(i, j int) bool { return s[i].Extname < s[j].Extname } +func (s byExtname) Less(i, j int) bool { return s[i].Extname() < s[j].Extname() } func initdynexport(ctxt *Link) { nexport = 0 @@ -1210,7 +1243,7 @@ func addexports(ctxt *Link) { size := binary.Size(&e) + 10*nexport + len(*flagOutfile) + 1 for i := 0; i < nexport; i++ { - size += len(dexport[i].Extname) + 1 + size += len(dexport[i].Extname()) + 1 } if nexport == 0 { @@ -1254,7 +1287,7 @@ func addexports(ctxt *Link) { for i := 0; i < nexport; i++ { out.Write32(uint32(v)) - v += len(dexport[i].Extname) + 1 + v += len(dexport[i].Extname()) + 1 } // put EXPORT Ordinal Table @@ -1266,18 +1299,168 @@ func addexports(ctxt *Link) { out.WriteStringN(*flagOutfile, len(*flagOutfile)+1) for i := 
0; i < nexport; i++ { - out.WriteStringN(dexport[i].Extname, len(dexport[i].Extname)+1) + out.WriteStringN(dexport[i].Extname(), len(dexport[i].Extname())+1) } sect.pad(out, uint32(size)) } -func (ctxt *Link) dope() { - /* relocation table */ - rel := ctxt.Syms.Lookup(".rel", 0) +// peBaseRelocEntry represents a single relocation entry. +type peBaseRelocEntry struct { + typeOff uint16 + rel *sym.Reloc + sym *sym.Symbol // For debug +} + +// peBaseRelocBlock represents a Base Relocation Block. A block +// is a collection of relocation entries in a page, where each +// entry describes a single relocation. +// The block page RVA (Relative Virtual Address) is the index +// into peBaseRelocTable.blocks. +type peBaseRelocBlock struct { + entries []peBaseRelocEntry +} + +// pePages is a type used to store the list of pages for which there +// are base relocation blocks. This is defined as a type so that +// it can be sorted. +type pePages []uint32 + +func (p pePages) Len() int { return len(p) } +func (p pePages) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p pePages) Less(i, j int) bool { return p[i] < p[j] } + +// A PE base relocation table is a list of blocks, where each block +// contains relocation information for a single page. The blocks +// must be emitted in order of page virtual address. +// See https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format#the-reloc-section-image-only +type peBaseRelocTable struct { + blocks map[uint32]peBaseRelocBlock + + // pePages is a list of keys into blocks map. + // It is stored separately for ease of sorting. + pages pePages +} + +func (rt *peBaseRelocTable) init(ctxt *Link) { + rt.blocks = make(map[uint32]peBaseRelocBlock) +} + +func (rt *peBaseRelocTable) addentry(ctxt *Link, s *sym.Symbol, r *sym.Reloc) { + // pageSize is the size in bytes of a page + // described by a base relocation block. 
+ const pageSize = 0x1000 + const pageMask = pageSize - 1 + + addr := s.Value + int64(r.Off) - int64(PEBASE) + page := uint32(addr &^ pageMask) + off := uint32(addr & pageMask) + + b, ok := rt.blocks[page] + if !ok { + rt.pages = append(rt.pages, page) + } + + e := peBaseRelocEntry{ + typeOff: uint16(off & 0xFFF), + rel: r, + sym: s, + } + + // Set entry type + switch r.Siz { + default: + Exitf("unsupported relocation size %d\n", r.Siz) + case 4: + e.typeOff |= uint16(IMAGE_REL_BASED_HIGHLOW << 12) + } + + b.entries = append(b.entries, e) + rt.blocks[page] = b +} + +func (rt *peBaseRelocTable) write(ctxt *Link) { + out := ctxt.Out + + // sort the pages array + sort.Sort(rt.pages) + + for _, p := range rt.pages { + b := rt.blocks[p] + const sizeOfPEbaseRelocBlock = 8 // 2 * sizeof(uint32) + blockSize := uint32(sizeOfPEbaseRelocBlock + len(b.entries)*2) + out.Write32(p) + out.Write32(blockSize) + + for _, e := range b.entries { + out.Write16(e.typeOff) + } + } +} + +func addPEBaseRelocSym(ctxt *Link, s *sym.Symbol, rt *peBaseRelocTable) { + for ri := 0; ri < len(s.R); ri++ { + r := &s.R[ri] + + if r.Sym == nil { + continue + } + if !r.Sym.Attr.Reachable() { + continue + } + if r.Type >= 256 { + continue + } + if r.Siz == 0 { // informational relocation + continue + } + if r.Type == objabi.R_DWARFFILEREF { + continue + } + + switch r.Type { + default: + case objabi.R_ADDR: + rt.addentry(ctxt, s, r) + } + } +} - rel.Attr |= sym.AttrReachable - rel.Type = sym.SELFROSECT +func addPEBaseReloc(ctxt *Link) { + // We only generate base relocation table for ARM (and ... 
ARM64), x86, and AMD64 are marked as legacy + // archs and can use fixed base with no base relocation information + switch ctxt.Arch.Family { + default: + return + case sys.ARM: + } + + var rt peBaseRelocTable + rt.init(ctxt) + // Get relocation information + for _, s := range ctxt.Textp { + addPEBaseRelocSym(ctxt, s, &rt) + } + for _, s := range datap { + addPEBaseRelocSym(ctxt, s, &rt) + } + + // Write relocation information + startoff := ctxt.Out.Offset() + rt.write(ctxt) + size := ctxt.Out.Offset() - startoff + + // Add a PE section and pad it at the end + rsect := pefile.addSection(".reloc", int(size), int(size)) + rsect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE + rsect.checkOffset(startoff) + rsect.pad(ctxt.Out, uint32(size)) + + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].VirtualAddress = rsect.virtualAddress + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_BASERELOC].Size = rsect.virtualSize +} + +func (ctxt *Link) dope() { initdynimport(ctxt) initdynexport(ctxt) } @@ -1326,7 +1509,7 @@ func Asmbpe(ctxt *Link) { switch ctxt.Arch.Family { default: Exitf("unknown PE architecture: %v", ctxt.Arch.Family) - case sys.AMD64, sys.I386: + case sys.AMD64, sys.I386, sys.ARM: } t := pefile.addSection(".text", int(Segtext.Length), int(Segtext.Length)) @@ -1345,9 +1528,6 @@ func Asmbpe(ctxt *Link) { // some data symbols (e.g. masks) end up in the .rdata section, and they normally // expect larger alignment requirement than the default text section alignment. 
ro.characteristics |= IMAGE_SCN_ALIGN_32BYTES - } else { - // TODO(brainman): should not need IMAGE_SCN_MEM_EXECUTE, but I do not know why it carshes without it - ro.characteristics |= IMAGE_SCN_MEM_EXECUTE } ro.checkSegment(&Segrodata) pefile.rdataSect = ro @@ -1380,6 +1560,7 @@ func Asmbpe(ctxt *Link) { if ctxt.LinkMode != LinkExternal { addimports(ctxt, d) addexports(ctxt) + addPEBaseReloc(ctxt) } pefile.writeSymbolTableAndStringTable(ctxt) addpersrc(ctxt) diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go index 3aa90c17dc8a1..a487b5e5f6ca1 100644 --- a/src/cmd/link/internal/ld/sym.go +++ b/src/cmd/link/internal/ld/sym.go @@ -66,7 +66,7 @@ func (ctxt *Link) computeTLSOffset() { default: log.Fatalf("unknown thread-local storage offset for %v", ctxt.HeadType) - case objabi.Hplan9, objabi.Hwindows, objabi.Hjs: + case objabi.Hplan9, objabi.Hwindows, objabi.Hjs, objabi.Haix: break /* diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 88d476710b937..7c296d766c44f 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -93,7 +93,7 @@ func putelfsym(ctxt *Link, x *sym.Symbol, s string, t SymbolType, addr int64, go case UndefinedSym: // ElfType is only set for symbols read from Go shared libraries, but // for other symbols it is left as STT_NOTYPE which is fine. - typ = int(x.ElfType) + typ = int(x.ElfType()) case TLSSym: typ = STT_TLS @@ -128,7 +128,7 @@ func putelfsym(ctxt *Link, x *sym.Symbol, s string, t SymbolType, addr int64, go // maybe one day STB_WEAK. 
bind := STB_GLOBAL - if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { + if x.IsFileLocal() || x.Attr.VisibilityHidden() || x.Attr.Local() { bind = STB_LOCAL } @@ -224,7 +224,7 @@ func putplan9sym(ctxt *Link, x *sym.Symbol, s string, typ SymbolType, addr int64 t := int(typ) switch typ { case TextSym, DataSym, BSSSym: - if x.Version != 0 { + if x.IsFileLocal() { t += 'a' - 'A' } fallthrough @@ -432,6 +432,10 @@ func (ctxt *Link) symtab() { // just defined above will be first. // hide the specific symbols. for _, s := range ctxt.Syms.Allsym { + if ctxt.LinkMode != LinkExternal && isStaticTemp(s.Name) { + s.Attr |= sym.AttrNotInSymbolTable + } + if !s.Attr.Reachable() || s.Attr.Special() || s.Type != sym.SRODATA { continue } @@ -502,7 +506,7 @@ func (ctxt *Link) symtab() { abihashgostr.AddAddr(ctxt.Arch, hashsym) abihashgostr.AddUint(ctxt.Arch, uint64(hashsym.Size)) } - if ctxt.BuildMode == BuildModePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil { + if ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() { for _, l := range ctxt.Library { s := ctxt.Syms.Lookup("go.link.pkghashbytes."+l.Pkg, 0) s.Attr |= sym.AttrReachable @@ -676,3 +680,10 @@ func (ctxt *Link) symtab() { lastmoduledatap.AddAddr(ctxt.Arch, moduledata) } } + +func isStaticTemp(name string) bool { + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i:] + } + return strings.Contains(name, "..stmp_") +} diff --git a/src/cmd/link/internal/ld/util.go b/src/cmd/link/internal/ld/util.go index b80e6106ba03e..b5b02296a14d2 100644 --- a/src/cmd/link/internal/ld/util.go +++ b/src/cmd/link/internal/ld/util.go @@ -89,3 +89,13 @@ var start = time.Now() func elapsed() float64 { return time.Since(start).Seconds() } + +// contains reports whether v is in s. 
+func contains(s []string, v string) bool { + for _, x := range s { + if x == v { + return true + } + } + return false +} diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go new file mode 100644 index 0000000000000..1561ce8cd0d17 --- /dev/null +++ b/src/cmd/link/internal/ld/xcoff.go @@ -0,0 +1,1321 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bytes" + "cmd/internal/objabi" + "cmd/link/internal/sym" + "encoding/binary" + "strings" +) + +// This file handles all algorithms related to XCOFF files generation. +// Most of them are adaptations of the ones in cmd/link/internal/pe.go +// as PE and XCOFF are based on COFF files. +// XCOFF files generated are 64 bits. + +const ( + // Total amount of space to reserve at the start of the file + // for File Header, Auxiliary Header, and Section Headers. + // May waste some. + XCOFFHDRRESERVE = FILHSZ_64 + AOUTHSZ_EXEC64 + SCNHSZ_64*23 + XCOFFSECTALIGN int64 = 32 // base on dump -o + + // XCOFF binaries should normally have all its sections position-independent. + // However, this is not yet possible for .text because of some R_ADDR relocations + // inside RODATA symbols. + // .data and .bss are position-independent so their address start inside a unreachable + // segment during execution to force segfault if something is wrong. 
+ XCOFFTEXTBASE = 0x100000000 // Start of text address + XCOFFDATABASE = 0x200000000 // Start of data address +) + +// File Header +type XcoffFileHdr64 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint64 // Byte offset to symbol table start + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags + Fnsyms int32 // Number of entries in symbol table +} + +const ( + U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF +) + +// Flags that describe the type of the object file. +const ( + F_RELFLG = 0x0001 + F_EXEC = 0x0002 + F_LNNO = 0x0004 + F_FDPR_PROF = 0x0010 + F_FDPR_OPTI = 0x0020 + F_DSA = 0x0040 + F_VARPG = 0x0100 + F_DYNLOAD = 0x1000 + F_SHROBJ = 0x2000 + F_LOADONLY = 0x4000 +) + +// Auxiliary Header +type XcoffAoutHdr64 struct { + Omagic int16 // Flags - Ignored If Vstamp Is 1 + Ovstamp int16 // Version + Odebugger uint32 // Reserved For Debugger + Otextstart uint64 // Virtual Address Of Text + Odatastart uint64 // Virtual Address Of Data + Otoc uint64 // Toc Address + Osnentry int16 // Section Number For Entry Point + Osntext int16 // Section Number For Text + Osndata int16 // Section Number For Data + Osntoc int16 // Section Number For Toc + Osnloader int16 // Section Number For Loader + Osnbss int16 // Section Number For Bss + Oalgntext int16 // Max Text Alignment + Oalgndata int16 // Max Data Alignment + Omodtype [2]byte // Module Type Field + Ocpuflag uint8 // Bit Flags - Cputypes Of Objects + Ocputype uint8 // Reserved for CPU type + Otextpsize uint8 // Requested text page size + Odatapsize uint8 // Requested data page size + Ostackpsize uint8 // Requested stack page size + Oflags uint8 // Flags And TLS Alignment + Otsize uint64 // Text Size In Bytes + Odsize uint64 // Data Size In Bytes + Obsize uint64 // Bss Size In Bytes + Oentry uint64 // Entry Point Address + Omaxstack uint64 // Max Stack Size Allowed + Omaxdata uint64 // Max Data Size Allowed + 
Osntdata int16 // Section Number For Tdata Section + Osntbss int16 // Section Number For Tbss Section + Ox64flags uint16 // Additional Flags For 64-Bit Objects + Oresv3a int16 // Reserved + Oresv3 [2]int32 // Reserved + +} + +// Section Header +type XcoffScnHdr64 struct { + Sname [8]byte // Section Name + Spaddr uint64 // Physical Address + Svaddr uint64 // Virtual Address + Ssize uint64 // Section Size + Sscnptr uint64 // File Offset To Raw Data + Srelptr uint64 // File Offset To Relocation + Slnnoptr uint64 // File Offset To Line Numbers + Snreloc uint32 // Number Of Relocation Entries + Snlnno uint32 // Number Of Line Number Entries + Sflags uint32 // flags +} + +// Flags defining the section type. +const ( + STYP_DWARF = 0x0010 + STYP_TEXT = 0x0020 + STYP_DATA = 0x0040 + STYP_BSS = 0x0080 + STYP_EXCEPT = 0x0100 + STYP_INFO = 0x0200 + STYP_TDATA = 0x0400 + STYP_TBSS = 0x0800 + STYP_LOADER = 0x1000 + STYP_DEBUG = 0x2000 + STYP_TYPCHK = 0x4000 + STYP_OVRFLO = 0x8000 +) +const ( + SSUBTYP_DWINFO = 0x10000 // DWARF info section + SSUBTYP_DWLINE = 0x20000 // DWARF line-number section + SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section + SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section + SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section + SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section + SSUBTYP_DWSTR = 0x70000 // DWARF strings section + SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section + SSUBTYP_DWLOC = 0x90000 // DWARF location lists section + SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section + SSUBTYP_DWMAC = 0xB0000 // DWARF macros section +) + +// Headers size +const ( + FILHSZ_32 = 20 + FILHSZ_64 = 24 + AOUTHSZ_EXEC32 = 72 + AOUTHSZ_EXEC64 = 120 + SCNHSZ_32 = 40 + SCNHSZ_64 = 72 + LDHDRSZ_32 = 32 + LDHDRSZ_64 = 56 + LDSYMSZ_64 = 24 +) + +// Symbol Table Entry +type XcoffSymEnt64 struct { + Nvalue uint64 // Symbol value + Noffset uint32 // Offset of the name in string table or .debug section + Nscnum int16 // Section number of symbol + Ntype 
uint16 // Basic and derived type specification + Nsclass uint8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +const SYMESZ = 18 + +const ( + // Nscnum + N_DEBUG = -2 + N_ABS = -1 + N_UNDEF = 0 + + //Ntype + SYM_V_INTERNAL = 0x1000 + SYM_V_HIDDEN = 0x2000 + SYM_V_PROTECTED = 0x3000 + SYM_V_EXPORTED = 0x4000 + SYM_TYPE_FUNC = 0x0020 // is function +) + +// Storage Class. +const ( + C_NULL = 0 // Symbol table entry marked for deletion + C_EXT = 2 // External symbol + C_STAT = 3 // Static symbol + C_BLOCK = 100 // Beginning or end of inner block + C_FCN = 101 // Beginning or end of function + C_FILE = 103 // Source file name and compiler information + C_HIDEXT = 107 // Unnamed external symbol + C_BINCL = 108 // Beginning of include file + C_EINCL = 109 // End of include file + C_WEAKEXT = 111 // Weak external symbol + C_DWARF = 112 // DWARF symbol + C_GSYM = 128 // Global variable + C_LSYM = 129 // Automatic variable allocated on stack + C_PSYM = 130 // Argument to subroutine allocated on stack + C_RSYM = 131 // Register variable + C_RPSYM = 132 // Argument to function or procedure stored in register + C_STSYM = 133 // Statically allocated symbol + C_BCOMM = 135 // Beginning of common block + C_ECOML = 136 // Local member of common block + C_ECOMM = 137 // End of common block + C_DECL = 140 // Declaration of object + C_ENTRY = 141 // Alternate entry + C_FUN = 142 // Function or procedure + C_BSTAT = 143 // Beginning of static block + C_ESTAT = 144 // End of static block + C_GTLS = 145 // Global thread-local variable + C_STTLS = 146 // Static thread-local variable +) + +// File Auxiliary Entry +type XcoffAuxFile64 struct { + Xfname [8]byte // Name or offset inside string table + Xftype uint8 // Source file string type + Xauxtype uint8 // Type of auxiliary entry +} + +// Function Auxiliary Entry +type XcoffAuxFcn64 struct { + Xlnnoptr uint64 // File pointer to line number + Xfsize uint32 // Size of function in bytes + Xendndx uint32 // 
Symbol table index of next entry + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// csect Auxiliary Entry. +type XcoffAuxCSect64 struct { + Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xscnlenhi uint32 // Upper 4 bytes of length or symbol table index + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// Auxiliary type +const ( + _AUX_EXCEPT = 255 + _AUX_FCN = 254 + _AUX_SYM = 253 + _AUX_FILE = 252 + _AUX_CSECT = 251 + _AUX_SECT = 250 +) + +// Xftype field +const ( + XFT_FN = 0 // Source File Name + XFT_CT = 1 // Compile Time Stamp + XFT_CV = 2 // Compiler Version Number + XFT_CD = 128 // Compiler Defined Information/ + +) + +// Symbol type field. +const ( + XTY_ER = 0 // External reference + XTY_SD = 1 // Section definition + XTY_LD = 2 // Label definition + XTY_CM = 3 // Common csect definition + XTY_WK = 0x8 // Weak symbol + XTY_EXP = 0x10 // Exported symbol + XTY_ENT = 0x20 // Entry point symbol + XTY_IMP = 0x40 // Imported symbol +) + +// Storage-mapping class. 
+const ( + XMC_PR = 0 // Program code + XMC_RO = 1 // Read-only constant + XMC_DB = 2 // Debug dictionary table + XMC_TC = 3 // TOC entry + XMC_UA = 4 // Unclassified + XMC_RW = 5 // Read/Write data + XMC_GL = 6 // Global linkage + XMC_XO = 7 // Extended operation + XMC_SV = 8 // 32-bit supervisor call descriptor + XMC_BS = 9 // BSS class + XMC_DS = 10 // Function descriptor + XMC_UC = 11 // Unnamed FORTRAN common + XMC_TC0 = 15 // TOC anchor + XMC_TD = 16 // Scalar data entry in the TOC + XMC_SV64 = 17 // 64-bit supervisor call descriptor + XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit + XMC_TL = 20 // Read/Write thread-local data + XMC_UL = 21 // Read/Write thread-local data (.tbss) + XMC_TE = 22 // TOC entry +) + +// Loader Header +type XcoffLdHdr64 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Lstlen uint32 // Length of string table + Limpoff uint64 // Offset to start of import file IDs + Lstoff uint64 // Offset to start of string table + Lsymoff uint64 // Offset to start of symbol table + Lrldoff uint64 // Offset to start of relocation entries +} + +// Loader Symbol +type XcoffLdSym64 struct { + Lvalue uint64 // Address field + Loffset uint32 // Byte offset into string table of symbol name + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type xcoffLoaderSymbol struct { + sym *sym.Symbol + smtype int8 + smclas int8 +} + +type XcoffLdImportFile64 struct { + Limpidpath string + Limpidbase string + Limpidmem string +} + +type XcoffLdRel64 struct { + Lvaddr uint64 // Address Field + Lrtype uint16 // Relocation Size and 
Type + Lrsecnm int16 // Section Number being relocated + Lsymndx int32 // Loader-Section symbol table index +} + +// xcoffLoaderReloc holds information about a relocation made by the loader. +type xcoffLoaderReloc struct { + sym *sym.Symbol + rel *sym.Reloc + rtype uint16 + symndx int32 +} + +const ( + XCOFF_R_POS = 0x00 // A(sym) Positive Relocation +) + +type XcoffLdStr64 struct { + size uint16 + name string +} + +// xcoffFile is used to build XCOFF file. +type xcoffFile struct { + xfhdr XcoffFileHdr64 + xahdr XcoffAoutHdr64 + sections []*XcoffScnHdr64 + stringTable xcoffStringTable + sectNameToScnum map[string]int16 + loaderSize uint64 + symtabOffset int64 // offset to the start of symbol table + symbolCount uint32 // number of symbol table records written + dynLibraries map[string]int // Dynamic libraries in .loader section. The integer represents its import file number (- 1) + loaderSymbols []*xcoffLoaderSymbol // symbols inside .loader symbol table + loaderReloc []*xcoffLoaderReloc // Reloc that must be made inside loader +} + +// Var used by XCOFF Generation algorithms +var ( + xfile xcoffFile +) + +// xcoffStringTable is a XCOFF string table. +type xcoffStringTable struct { + strings []string + stringsLen int +} + +// size returns size of string table t. +func (t *xcoffStringTable) size() int { + // string table starts with 4-byte length at the beginning + return t.stringsLen + 4 +} + +// add adds string str to string table t. +func (t *xcoffStringTable) add(str string) int { + off := t.size() + t.strings = append(t.strings, str) + t.stringsLen += len(str) + 1 // each string will have 0 appended to it + return off +} + +// write writes string table t into the output file. +func (t *xcoffStringTable) write(out *OutBuf) { + out.Write32(uint32(t.size())) + for _, s := range t.strings { + out.WriteString(s) + out.Write8(0) + } +} + +// write writes XCOFF section sect into the output file. 
+func (sect *XcoffScnHdr64) write(ctxt *Link) { + binary.Write(ctxt.Out, binary.BigEndian, sect) + ctxt.Out.Write32(0) // Add 4 empty bytes at the end to match alignment +} + +// addSection adds section to the XCOFF file f. +func (f *xcoffFile) addSection(name string, addr uint64, size uint64, fileoff uint64, flags uint32) *XcoffScnHdr64 { + sect := &XcoffScnHdr64{ + Spaddr: addr, + Svaddr: addr, + Ssize: size, + Sscnptr: fileoff, + Sflags: flags, + } + copy(sect.Sname[:], name) // copy string to [8]byte + f.sections = append(f.sections, sect) + f.sectNameToScnum[name] = int16(len(f.sections)) + return sect +} + +// addDwarfSection adds a dwarf section to the XCOFF file f. +// This function is similar to addSection, but Dwarf section names +// must be modified to conventional names and they are various subtypes. +func (f *xcoffFile) addDwarfSection(s *sym.Section) *XcoffScnHdr64 { + newName, subtype := xcoffGetDwarfSubtype(s.Name) + return f.addSection(newName, 0, s.Length, s.Seg.Fileoff+s.Vaddr-s.Seg.Vaddr, STYP_DWARF|subtype) +} + +// xcoffGetDwarfSubtype returns the XCOFF name of the DWARF section str +// and its subtype constant. +func xcoffGetDwarfSubtype(str string) (string, uint32) { + switch str { + default: + Exitf("unknown DWARF section name for XCOFF: %s", str) + case ".debug_abbrev": + return ".dwabrev", SSUBTYP_DWABREV + case ".debug_info": + return ".dwinfo", SSUBTYP_DWINFO + case ".debug_frame": + return ".dwframe", SSUBTYP_DWFRAME + case ".debug_line": + return ".dwline", SSUBTYP_DWLINE + case ".debug_loc": + return ".dwloc", SSUBTYP_DWLOC + case ".debug_pubnames": + return ".dwpbnms", SSUBTYP_DWPBNMS + case ".debug_pubtypes": + return ".dwpbtyp", SSUBTYP_DWPBTYP + case ".debug_ranges": + return ".dwrnge", SSUBTYP_DWRNGES + } + // never used + return "", 0 +} + +// getXCOFFscnum returns the XCOFF section number of a Go section. 
+func (f *xcoffFile) getXCOFFscnum(sect *sym.Section) int16 { + switch sect.Seg { + case &Segtext: + return f.sectNameToScnum[".text"] + case &Segdata: + if sect.Name == ".noptrdata" || sect.Name == ".data" { + return f.sectNameToScnum[".data"] + } + if sect.Name == ".noptrbss" || sect.Name == ".bss" { + return f.sectNameToScnum[".bss"] + } + Errorf(nil, "unknown XCOFF segment data section: %s", sect.Name) + case &Segdwarf: + name, _ := xcoffGetDwarfSubtype(sect.Name) + return f.sectNameToScnum[name] + } + Errorf(nil, "getXCOFFscnum not implemented for section %s", sect.Name) + return -1 +} + +// Xcoffinit initialised some internal value and setups +// already known header information +func Xcoffinit(ctxt *Link) { + xfile.dynLibraries = make(map[string]int) + + HEADR = int32(Rnd(XCOFFHDRRESERVE, XCOFFSECTALIGN)) + if *FlagTextAddr != -1 { + Errorf(nil, "-T not available on AIX") + } + *FlagTextAddr = XCOFFTEXTBASE + int64(HEADR) + *FlagDataAddr = 0 + if *FlagRound != -1 { + Errorf(nil, "-R not available on AIX") + } + *FlagRound = int(XCOFFSECTALIGN) + +} + +// SYMBOL TABLE + +// type records C_FILE information needed for genasmsym in XCOFF. +type xcoffSymSrcFile struct { + name string + fileSymNb uint32 // Symbol number of this C_FILE + csectSymNb uint64 // Symbol number for the current .csect + csectSize int64 +} + +var ( + currDwscnoff = make(map[string]uint64) // Needed to create C_DWARF symbols + currSymSrcFile xcoffSymSrcFile +) + +// writeSymbol writes a symbol or an auxiliary symbol entry on ctxt.out. 
+func (f *xcoffFile) writeSymbol(out *OutBuf, byteOrder binary.ByteOrder, sym interface{}) { + binary.Write(out, byteOrder, sym) + f.symbolCount++ +} + +// Write symbols needed when a new file appared : +// - a C_FILE with one auxiliary entry for its name +// - C_DWARF symbols to provide debug information +// - a C_HIDEXT which will be a csect containing all of its functions +// It needs several parameters to create .csect symbols such as its entry point and its section number. +// +// Currently, a new file is in fact a new package. It seems to be OK, but it might change +// in the future. +func (f *xcoffFile) writeSymbolNewFile(ctxt *Link, name string, firstEntry uint64, extnum int16) { + /* C_FILE */ + s := &XcoffSymEnt64{ + Noffset: uint32(f.stringTable.add(".file")), + Nsclass: C_FILE, + Nscnum: N_DEBUG, + Ntype: 0, // Go isn't inside predefined language. + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + // Auxiliary entry for file name. + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(f.stringTable.add(name))) + ctxt.Out.Write32(0) // 6 bytes empty + ctxt.Out.Write16(0) + ctxt.Out.Write8(XFT_FN) + ctxt.Out.Write16(0) // 2 bytes empty + ctxt.Out.Write8(_AUX_FILE) + f.symbolCount++ + + /* Dwarf */ + for _, sect := range Segdwarf.Sections { + // Find the size of this corresponding package DWARF compilation unit. + // This size is set during DWARF generation (see dwarf.go). 
+ dwsize := getDwsectCUSize(sect.Name, name) + // .debug_abbrev is commun to all packages and not found with the previous function + if sect.Name == ".debug_abbrev" { + s := ctxt.Syms.Lookup(sect.Name, 0) + dwsize = uint64(s.Size) + } + + // get XCOFF name + name, _ := xcoffGetDwarfSubtype(sect.Name) + s := &XcoffSymEnt64{ + Nvalue: currDwscnoff[sect.Name], + Noffset: uint32(f.stringTable.add(name)), + Nsclass: C_DWARF, + Nscnum: f.getXCOFFscnum(sect), + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + // update the DWARF section offset in this file + if sect.Name != ".debug_abbrev" { + currDwscnoff[sect.Name] += dwsize + } + + // Auxiliary dwarf section + ctxt.Out.Write64(dwsize) // section length + ctxt.Out.Write64(0) // nreloc + ctxt.Out.Write8(0) // pad + ctxt.Out.Write8(_AUX_SECT) + f.symbolCount++ + } + + /* .csect */ + // Check if extnum is in text. + // This is temporary and only here to check if this algorithm is correct. + if extnum != 1 { + Exitf("XCOFF symtab: A new file was detected with its first symbol not in .text") + } + + currSymSrcFile.csectSymNb = uint64(f.symbolCount) + currSymSrcFile.csectSize = 0 + + // No offset because no name + s = &XcoffSymEnt64{ + Nvalue: firstEntry, + Nscnum: extnum, + Nsclass: C_HIDEXT, + Ntype: 0, // check visibility ? + Nnumaux: 1, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + + aux := &XcoffAuxCSect64{ + Xsmclas: XMC_PR, + Xsmtyp: XTY_SD | 5<<3, // align = 5 + Xauxtype: _AUX_CSECT, + } + f.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, aux) + +} + +// Update values for the previous package. +// - Svalue of the C_FILE symbol: if it is the last one, this Svalue must be -1 +// - Xsclen of the csect symbol. 
+func (f *xcoffFile) updatePreviousFile(ctxt *Link, last bool) { + // first file + if currSymSrcFile.fileSymNb == 0 { + return + } + + prevOff := f.symtabOffset + int64(currSymSrcFile.fileSymNb*SYMESZ) + currOff := ctxt.Out.Offset() + + // Update C_FILE + ctxt.Out.SeekSet(prevOff) + if last { + ctxt.Out.Write64(0xFFFFFFFFFFFFFFFF) + } else { + ctxt.Out.Write64(uint64(f.symbolCount)) + } + + // update csect scnlen in this auxiliary entry + prevOff = f.symtabOffset + int64((currSymSrcFile.csectSymNb+1)*SYMESZ) + ctxt.Out.SeekSet(prevOff) + ctxt.Out.Write32(uint32(currSymSrcFile.csectSize & 0xFFFFFFFF)) + prevOff += 12 + ctxt.Out.SeekSet(prevOff) + ctxt.Out.Write32(uint32(currSymSrcFile.csectSize >> 32)) + + ctxt.Out.SeekSet(currOff) + +} + +// Write symbol representing a .text function. +// The symbol table is split with C_FILE corresponding to each package +// and not to each source file as it should be. +func (f *xcoffFile) writeSymbolFunc(ctxt *Link, x *sym.Symbol) []interface{} { + // New XCOFF symbols which will be written. + syms := []interface{}{} + + // Check if a new file is detected. + if x.File == "" { // Undefined global symbol + // If this happens, the algorithme must be redone. + if currSymSrcFile.name != "" { + Exitf("undefined global symbol found inside another file") + } + } else { + // Current file has changed. New C_FILE, C_DWARF, etc must be generated. 
+ if currSymSrcFile.name != x.File { + // update previous file values + xfile.updatePreviousFile(ctxt, false) + currSymSrcFile.name = x.File + currSymSrcFile.fileSymNb = f.symbolCount + f.writeSymbolNewFile(ctxt, x.File, uint64(x.Value), xfile.getXCOFFscnum(x.Sect)) + } + } + + s := &XcoffSymEnt64{ + Nsclass: C_EXT, + Noffset: uint32(xfile.stringTable.add(x.Name)), + Nvalue: uint64(x.Value), + Nscnum: f.getXCOFFscnum(x.Sect), + Ntype: SYM_TYPE_FUNC, + Nnumaux: 2, + } + + if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { + s.Nsclass = C_HIDEXT + } + + syms = append(syms, s) + + // Update current csect size + currSymSrcFile.csectSize += x.Size + + // create auxiliary entries + a2 := &XcoffAuxFcn64{ + Xfsize: uint32(x.Size), + Xlnnoptr: 0, // TODO + Xendndx: xfile.symbolCount + 3, // this symbol + 2 aux entries + Xauxtype: _AUX_FCN, + } + syms = append(syms, a2) + + a4 := &XcoffAuxCSect64{ + Xscnlenlo: uint32(currSymSrcFile.csectSymNb & 0xFFFFFFFF), + Xscnlenhi: uint32(currSymSrcFile.csectSymNb >> 32), + Xsmclas: XMC_PR, // Program Code + Xsmtyp: XTY_LD, // label definition (based on C) + Xauxtype: _AUX_CSECT, + } + syms = append(syms, a4) + return syms +} + +// put function used by genasmsym to write symbol table +func putaixsym(ctxt *Link, x *sym.Symbol, str string, t SymbolType, addr int64, go_ *sym.Symbol) { + + // All XCOFF symbols generated by this GO symbols + // Can be a symbol entry or a auxiliary entry + syms := []interface{}{} + + switch t { + default: + return + + case TextSym: + if x.FuncInfo != nil { + // Function within a file + syms = xfile.writeSymbolFunc(ctxt, x) + } else { + // Only runtime.text and runtime.etext come through this way + if x.Name != "runtime.text" && x.Name != "runtime.etext" && x.Name != "go.buildid" { + Exitf("putaixsym: unknown text symbol %s", x.Name) + } + s := &XcoffSymEnt64{ + Nsclass: C_HIDEXT, + Noffset: uint32(xfile.stringTable.add(str)), + Nvalue: uint64(x.Value), + Nscnum: xfile.getXCOFFscnum(x.Sect), + 
Ntype: SYM_TYPE_FUNC, + Nnumaux: 1, + } + syms = append(syms, s) + + size := uint64(x.Size) + a4 := &XcoffAuxCSect64{ + Xauxtype: _AUX_CSECT, + Xscnlenlo: uint32(size & 0xFFFFFFFF), + Xscnlenhi: uint32(size >> 32), + Xsmclas: XMC_PR, + Xsmtyp: XTY_SD, + } + syms = append(syms, a4) + + } + + case DataSym, BSSSym: + s := &XcoffSymEnt64{ + Nsclass: C_EXT, + Noffset: uint32(xfile.stringTable.add(str)), + Nvalue: uint64(x.Value), + Nscnum: xfile.getXCOFFscnum(x.Sect), + Nnumaux: 1, + } + + if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { + // There is more symbols in the case of a global data + // which are related to the assembly generated + // to access such symbols. + // But as Golang as its own way to check if a symbol is + // global or local (the capital letter), we don't need to + // implement them yet. + s.Nsclass = C_HIDEXT + } + + syms = append(syms, s) + + // Create auxiliary entry + + // Normally, size should be the size of csect containing all + // the data and bss symbols of one file/package. + // However, it's easier to just have a csect for each symbol. 
+ // It might change + size := uint64(x.Size) + a4 := &XcoffAuxCSect64{ + Xauxtype: _AUX_CSECT, + Xscnlenlo: uint32(size & 0xFFFFFFFF), + Xscnlenhi: uint32(size >> 32), + } + // Read only data + if x.Type >= sym.STYPE && x.Type <= sym.SPCLNTAB { + a4.Xsmclas = XMC_RO + } else { + a4.Xsmclas = XMC_RW + } + if t == DataSym { + a4.Xsmtyp |= XTY_SD + } else { + a4.Xsmtyp |= XTY_CM + } + + syms = append(syms, a4) + + case UndefinedSym: + if x.Type != sym.SDYNIMPORT && x.Type != sym.SHOSTOBJ { + return + } + s := &XcoffSymEnt64{ + Nsclass: C_EXT, + Noffset: uint32(xfile.stringTable.add(str)), + Nnumaux: 1, + } + syms = append(syms, s) + + a4 := &XcoffAuxCSect64{ + Xauxtype: _AUX_CSECT, + Xsmclas: XMC_DS, + Xsmtyp: XTY_ER | XTY_IMP, + } + + if x.Name == "__n_pthreads" { + // Currently, all imported symbols made by cgo_import_dynamic are + // syscall functions, except __n_pthreads which is a variable. + // TODO(aix): Find a way to detect variables imported by cgo. + a4.Xsmclas = XMC_RW + } + + syms = append(syms, a4) + } + + for _, s := range syms { + xfile.writeSymbol(ctxt.Out, ctxt.Arch.ByteOrder, s) + } +} + +// Generate XCOFF Symbol table and XCOFF String table +func (f *xcoffFile) asmaixsym(ctxt *Link) { + // write symbol table + genasmsym(ctxt, putaixsym) + + // update last file Svalue + xfile.updatePreviousFile(ctxt, true) + + // write string table + xfile.stringTable.write(ctxt.Out) +} + +func (f *xcoffFile) genDynSym(ctxt *Link) { + var dynsyms []*sym.Symbol + for _, s := range ctxt.Syms.Allsym { + if s.Type != sym.SHOSTOBJ && s.Type != sym.SDYNIMPORT { + continue + } + dynsyms = append(dynsyms, s) + } + + for _, s := range dynsyms { + f.adddynimpsym(ctxt, s) + + if _, ok := f.dynLibraries[s.Dynimplib()]; !ok { + f.dynLibraries[s.Dynimplib()] = len(f.dynLibraries) + } + + } + +} + +// (*xcoffFile)adddynimpsym adds the dynamic symbol "s" to a XCOFF file. 
+// A new symbol named s.Extname() is created to be the actual dynamic symbol +// in the .loader section and in the symbol table as an External Reference. +// The symbol "s" is transformed to SXCOFFTOC to end up in .data section. +// However, there is no writing protection on those symbols and +// it might need to be added. +// TODO(aix): Handles dynamic symbols without library. +func (f *xcoffFile) adddynimpsym(ctxt *Link, s *sym.Symbol) { + // Check that library name is given. + // Pattern is already checked when compiling. + if s.Dynimplib() == "" { + Errorf(s, "imported symbol must have a given library") + } + + s.Type = sym.SXCOFFTOC + + // Create new dynamic symbol + extsym := ctxt.Syms.Lookup(s.Extname(), 0) + extsym.Type = sym.SDYNIMPORT + extsym.Attr |= sym.AttrReachable + extsym.SetDynimplib(s.Dynimplib()) + extsym.SetExtname(s.Extname()) + extsym.SetDynimpvers(s.Dynimpvers()) + + // Add loader symbol + lds := &xcoffLoaderSymbol{ + sym: extsym, + smtype: XTY_IMP, + smclas: XMC_DS, + } + if s.Name == "__n_pthreads" { + // Currently, all imported symbols made by cgo_import_dynamic are + // syscall functions, except __n_pthreads which is a variable. + // TODO(aix): Find a way to detect variables imported by cgo. + lds.smclas = XMC_RW + } + f.loaderSymbols = append(f.loaderSymbols, lds) + + // Relocation to retrieve the external address + s.AddBytes(make([]byte, 8)) + s.SetAddr(ctxt.Arch, 0, extsym) + +} + +// Xcoffadddynrel adds a dynamic relocation in a XCOFF file. +// This relocation will be made by the loader. 
+func Xcoffadddynrel(ctxt *Link, s *sym.Symbol, r *sym.Reloc) bool { + if s.Type <= sym.SPCLNTAB { + Errorf(s, "cannot have a relocation to %s in a text section symbol", r.Sym.Name) + return false + } + + ldr := &xcoffLoaderReloc{ + sym: s, + rel: r, + } + + switch r.Type { + default: + Errorf(s, "unexpected .loader relocation to symbol: %s (type: %s)", r.Sym.Name, r.Type.String()) + return false + case objabi.R_ADDR: + if s.Type == sym.SXCOFFTOC && r.Sym.Type == sym.SDYNIMPORT { + // Imported symbol relocation + for i, dynsym := range xfile.loaderSymbols { + if dynsym.sym.Name == r.Sym.Name { + ldr.symndx = int32(i + 3) // +3 because of 3 section symbols + break + } + } + } else if s.Type == sym.SDATA { + switch r.Sym.Sect.Seg { + default: + Errorf(s, "unknown segment for .loader relocation with symbol %s", r.Sym.Name) + case &Segtext: + case &Segrodata: + ldr.symndx = 0 // .text + case &Segdata: + if r.Sym.Type == sym.SBSS || r.Sym.Type == sym.SNOPTRBSS { + ldr.symndx = 2 // .bss + } else { + ldr.symndx = 1 // .data + } + + } + + } else { + Errorf(s, "unexpected type for .loader relocation R_ADDR for symbol %s: %s to %s", r.Sym.Name, s.Type, r.Sym.Type) + return false + } + + ldr.rtype = 0x3F<<8 + XCOFF_R_POS + } + + xfile.loaderReloc = append(xfile.loaderReloc, ldr) + return true +} + +func (ctxt *Link) doxcoff() { + if *FlagD { + // All XCOFF files have dynamic symbols because of the syscalls. + Exitf("-d is not available on AIX") + } + + // Initial map used to store compilation unit size for each DWARF section (see dwarf.go). + dwsectCUSize = make(map[string]uint64) + + // TOC + toc := ctxt.Syms.Lookup("TOC", 0) + toc.Type = sym.SXCOFFTOC + toc.Attr |= sym.AttrReachable + + // XCOFF does not allow relocations of data symbol address to a text symbol. + // Such case occurs when a RODATA symbol retrieves a data symbol address. + // When it happens, this RODATA symbol is moved to .data section. 
+ // runtime.algarray is a readonly symbol but stored inside .data section. + // If it stays in .data, all type symbols will be moved to .data which + // cannot be done. + algarray := ctxt.Syms.Lookup("runtime.algarray", 0) + algarray.Type = sym.SRODATA + for { + again := false + for _, s := range ctxt.Syms.Allsym { + if s.Type != sym.SRODATA { + continue + } + for ri := range s.R { + r := &s.R[ri] + if r.Type != objabi.R_ADDR { + continue + } + if r.Sym.Type != sym.Sxxx && r.Sym.Type != sym.STEXT && r.Sym.Type != sym.SRODATA { + s.Type = sym.SDATA + again = true + break + } + } + + } + if !again { + break + } + } + + // Add entry point to .loader symbols. + ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0) + if !ep.Attr.Reachable() { + Exitf("wrong entry point") + } + xfile.loaderSymbols = append(xfile.loaderSymbols, &xcoffLoaderSymbol{ + sym: ep, + smtype: XTY_ENT | XTY_SD, + smclas: XMC_DS, + }) + + xfile.genDynSym(ctxt) + + for _, s := range ctxt.Syms.Allsym { + if strings.HasPrefix(s.Name, "TOC.") { + s.Type = sym.SXCOFFTOC + } + } +} + +// Loader section +// Currently, this section is created from scratch when assembling the XCOFF file +// according to information retrieved in xfile object. + +// Create loader section and returns its size +func Loaderblk(ctxt *Link, off uint64) { + xfile.writeLdrScn(ctxt, off) +} + +func (f *xcoffFile) writeLdrScn(ctxt *Link, globalOff uint64) { + var symtab []*XcoffLdSym64 + var strtab []*XcoffLdStr64 + var importtab []*XcoffLdImportFile64 + var reloctab []*XcoffLdRel64 + var dynimpreloc []*XcoffLdRel64 + + // As the string table is updated in any loader subsection, + // its length must be computed at the same time. 
+ stlen := uint32(0) + + // Loader Header + hdr := &XcoffLdHdr64{ + Lversion: 2, + Lsymoff: LDHDRSZ_64, + } + + /* Symbol table */ + for _, s := range f.loaderSymbols { + lds := &XcoffLdSym64{ + Loffset: uint32(stlen + 2), + Lsmtype: s.smtype, + Lsmclas: s.smclas, + } + switch s.smtype { + default: + Errorf(s.sym, "unexpected loader symbol type: 0x%x", s.smtype) + case XTY_ENT | XTY_SD: + lds.Lvalue = uint64(s.sym.Value) + lds.Lscnum = f.getXCOFFscnum(s.sym.Sect) + case XTY_IMP: + lds.Lifile = int32(f.dynLibraries[s.sym.Dynimplib()] + 1) + } + ldstr := &XcoffLdStr64{ + size: uint16(len(s.sym.Name) + 1), // + null terminator + name: s.sym.Name, + } + stlen += uint32(2 + ldstr.size) // 2 = sizeof ldstr.size + symtab = append(symtab, lds) + strtab = append(strtab, ldstr) + + } + + hdr.Lnsyms = int32(len(symtab)) + hdr.Lrldoff = hdr.Lsymoff + uint64(24*hdr.Lnsyms) // 24 = sizeof one symbol + off := hdr.Lrldoff // current offset is the same of reloc offset + + /* Reloc */ + ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0) + ldr := &XcoffLdRel64{ + Lvaddr: uint64(ep.Value), + Lrtype: 0x3F00, + Lrsecnm: f.getXCOFFscnum(ep.Sect), + Lsymndx: 0, + } + off += 16 + reloctab = append(reloctab, ldr) + + off += uint64(16 * len(f.loaderReloc)) + for _, r := range f.loaderReloc { + ldr = &XcoffLdRel64{ + Lvaddr: uint64(r.sym.Value + int64(r.rel.Off)), + Lrtype: r.rtype, + Lsymndx: r.symndx, + } + + if r.sym.Sect != nil { + ldr.Lrsecnm = f.getXCOFFscnum(r.sym.Sect) + } + + reloctab = append(reloctab, ldr) + } + + off += uint64(16 * len(dynimpreloc)) + reloctab = append(reloctab, dynimpreloc...) 
+ + hdr.Lnreloc = int32(len(reloctab)) + hdr.Limpoff = off + + /* Import */ + // Default import: /usr/lib:/lib + ldimpf := &XcoffLdImportFile64{ + Limpidpath: "/usr/lib:/lib", + } + off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter + importtab = append(importtab, ldimpf) + + // The map created by adddynimpsym associates the name to a number + // This number represents the librairie index (- 1) in this import files section + // Therefore, they must be sorted before being put inside the section + libsOrdered := make([]string, len(f.dynLibraries)) + for key, val := range f.dynLibraries { + if libsOrdered[val] != "" { + continue + } + libsOrdered[val] = key + } + + for _, lib := range libsOrdered { + // lib string is defined as base.a/mem.o or path/base.a/mem.o + n := strings.Split(lib, "/") + path := "" + base := n[len(n)-2] + mem := n[len(n)-1] + if len(n) > 2 { + path = lib[:len(lib)-len(base)-len(mem)-2] + + } + ldimpf = &XcoffLdImportFile64{ + Limpidpath: path, + Limpidbase: base, + Limpidmem: mem, + } + off += uint64(len(ldimpf.Limpidpath) + len(ldimpf.Limpidbase) + len(ldimpf.Limpidmem) + 3) // + null delimiter + importtab = append(importtab, ldimpf) + } + + hdr.Lnimpid = int32(len(importtab)) + hdr.Listlen = uint32(off - hdr.Limpoff) + hdr.Lstoff = off + hdr.Lstlen = stlen + + /* Writing */ + ctxt.Out.SeekSet(int64(globalOff)) + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, hdr) + + for _, s := range symtab { + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, s) + + } + for _, r := range reloctab { + binary.Write(ctxt.Out, ctxt.Arch.ByteOrder, r) + } + for _, f := range importtab { + ctxt.Out.WriteString(f.Limpidpath) + ctxt.Out.Write8(0) + ctxt.Out.WriteString(f.Limpidbase) + ctxt.Out.Write8(0) + ctxt.Out.WriteString(f.Limpidmem) + ctxt.Out.Write8(0) + } + for _, s := range strtab { + ctxt.Out.Write16(s.size) + ctxt.Out.WriteString(s.name) + ctxt.Out.Write8(0) // null terminator + } + + f.loaderSize = off + 
uint64(stlen) + ctxt.Out.Flush() + + /* again for printing */ + if !*flagA { + return + } + + ctxt.Logf("\n.loader section") + // write in buf + var buf bytes.Buffer + + binary.Write(&buf, ctxt.Arch.ByteOrder, hdr) + for _, s := range symtab { + binary.Write(&buf, ctxt.Arch.ByteOrder, s) + + } + for _, f := range importtab { + buf.WriteString(f.Limpidpath) + buf.WriteByte(0) + buf.WriteString(f.Limpidbase) + buf.WriteByte(0) + buf.WriteString(f.Limpidmem) + buf.WriteByte(0) + } + for _, s := range strtab { + binary.Write(&buf, ctxt.Arch.ByteOrder, s.size) + buf.WriteString(s.name) + buf.WriteByte(0) // null terminator + } + + // Log buffer + ctxt.Logf("\n\t%.8x|", globalOff) + for i, b := range buf.Bytes() { + if i > 0 && i%16 == 0 { + ctxt.Logf("\n\t%.8x|", uint64(globalOff)+uint64(i)) + } + ctxt.Logf(" %.2x", b) + } + ctxt.Logf("\n") + +} + +// XCOFF assembling and writing file + +func (f *xcoffFile) writeFileHeader(ctxt *Link) { + // File header + f.xfhdr.Fmagic = U64_TOCMAGIC + f.xfhdr.Fnscns = uint16(len(f.sections)) + f.xfhdr.Ftimedat = 0 + + if !*FlagS { + f.xfhdr.Fsymptr = uint64(f.symtabOffset) + f.xfhdr.Fnsyms = int32(f.symbolCount) + } + + if ctxt.BuildMode == BuildModeExe { + f.xfhdr.Fopthdr = AOUTHSZ_EXEC64 + f.xfhdr.Fflags = F_EXEC + + // auxiliary header + f.xahdr.Ovstamp = 1 // based on dump -o + f.xahdr.Omagic = 0x10b + copy(f.xahdr.Omodtype[:], "1L") + entry := ctxt.Syms.ROLookup(*flagEntrySymbol, 0) + f.xahdr.Oentry = uint64(entry.Value) + f.xahdr.Osnentry = f.getXCOFFscnum(entry.Sect) + toc := ctxt.Syms.ROLookup("TOC", 0) + f.xahdr.Otoc = uint64(toc.Value) + f.xahdr.Osntoc = f.getXCOFFscnum(toc.Sect) + + // Based on dump -o + f.xahdr.Oalgntext = 0x5 + f.xahdr.Oalgndata = 0x5 + + binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr) + binary.Write(ctxt.Out, binary.BigEndian, &f.xahdr) + } else { + f.xfhdr.Fopthdr = 0 + binary.Write(ctxt.Out, binary.BigEndian, &f.xfhdr) + } + +} + +func xcoffwrite(ctxt *Link) { + ctxt.Out.SeekSet(0) + + 
xfile.writeFileHeader(ctxt) + + for _, sect := range xfile.sections { + sect.write(ctxt) + } +} + +// Generate XCOFF assembly file +func Asmbxcoff(ctxt *Link, fileoff int64) { + xfile.sectNameToScnum = make(map[string]int16) + + // Add sections + s := xfile.addSection(".text", Segtext.Vaddr, Segtext.Length, Segtext.Fileoff, STYP_TEXT) + xfile.xahdr.Otextstart = s.Svaddr + xfile.xahdr.Osntext = xfile.sectNameToScnum[".text"] + xfile.xahdr.Otsize = s.Ssize + + s = xfile.addSection(".data", Segdata.Vaddr, Segdata.Filelen, Segdata.Fileoff, STYP_DATA) + xfile.xahdr.Odatastart = s.Svaddr + xfile.xahdr.Osndata = xfile.sectNameToScnum[".data"] + xfile.xahdr.Odsize = s.Ssize + + s = xfile.addSection(".bss", Segdata.Vaddr+Segdata.Filelen, Segdata.Length-Segdata.Filelen, 0, STYP_BSS) + xfile.xahdr.Osnbss = xfile.sectNameToScnum[".bss"] + xfile.xahdr.Obsize = s.Ssize + + // add dwarf sections + for _, sect := range Segdwarf.Sections { + xfile.addDwarfSection(sect) + } + + // add and write remaining sections + if ctxt.LinkMode == LinkInternal { + // Loader section + if ctxt.BuildMode == BuildModeExe { + Loaderblk(ctxt, uint64(fileoff)) + s = xfile.addSection(".loader", 0, xfile.loaderSize, uint64(fileoff), STYP_LOADER) + xfile.xahdr.Osnloader = xfile.sectNameToScnum[".loader"] + } + } else { + // TODO: Relocation + } + + // Write symbol table + symo := Rnd(ctxt.Out.Offset(), int64(*FlagRound)) + xfile.symtabOffset = symo + ctxt.Out.SeekSet(int64(symo)) + xfile.asmaixsym(ctxt) + + // write headers + xcoffwrite(ctxt) +} diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go index 8e32e7dee6190..d85d91948a750 100644 --- a/src/cmd/link/internal/loadelf/ldelf.go +++ b/src/cmd/link/internal/loadelf/ldelf.go @@ -820,7 +820,7 @@ func Load(arch *sys.Arch, syms *sym.Symbols, f *bio.Reader, pkg string, length i if elfobj.machine == ElfMachPower64 { flag := int(elfsym.other) >> 5 if 2 <= flag && flag <= 6 { - s.Localentry = 1 << uint(flag-2) + 
s.SetLocalentry(1 << uint(flag-2)) } else if flag == 7 { return errorf("%v: invalid sym.other 0x%x", s, elfsym.other) } diff --git a/src/cmd/link/internal/loadpe/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go index c8fae3789839e..ac07d5c35d20e 100644 --- a/src/cmd/link/internal/loadpe/ldpe.go +++ b/src/cmd/link/internal/loadpe/ldpe.go @@ -101,6 +101,19 @@ const ( IMAGE_REL_AMD64_SREL32 = 0x000E IMAGE_REL_AMD64_PAIR = 0x000F IMAGE_REL_AMD64_SSPAN32 = 0x0010 + IMAGE_REL_ARM_ABSOLUTE = 0x0000 + IMAGE_REL_ARM_ADDR32 = 0x0001 + IMAGE_REL_ARM_ADDR32NB = 0x0002 + IMAGE_REL_ARM_BRANCH24 = 0x0003 + IMAGE_REL_ARM_BRANCH11 = 0x0004 + IMAGE_REL_ARM_SECTION = 0x000E + IMAGE_REL_ARM_SECREL = 0x000F + IMAGE_REL_ARM_MOV32 = 0x0010 + IMAGE_REL_THUMB_MOV32 = 0x0011 + IMAGE_REL_THUMB_BRANCH20 = 0x0012 + IMAGE_REL_THUMB_BRANCH24 = 0x0014 + IMAGE_REL_THUMB_BLX23 = 0x0015 + IMAGE_REL_ARM_PAIR = 0x0016 ) // TODO(crawshaw): de-duplicate these symbols with cmd/internal/ld, ideally in debug/pe. @@ -241,30 +254,56 @@ func Load(arch *sys.Arch, syms *sym.Symbols, input *bio.Reader, pkg string, leng rp.Sym = gosym rp.Siz = 4 rp.Off = int32(r.VirtualAddress) - switch r.Type { + switch arch.Family { default: - return nil, nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, sectsyms[rsect], r.Type) + return nil, nil, fmt.Errorf("%s: unsupported arch %v", pn, arch.Family) + case sys.I386, sys.AMD64: + switch r.Type { + default: + return nil, nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, sectsyms[rsect], r.Type) - case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32, - IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32 - IMAGE_REL_AMD64_ADDR32NB: - rp.Type = objabi.R_PCREL + case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32, + IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32 + IMAGE_REL_AMD64_ADDR32NB: + rp.Type = objabi.R_PCREL - rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) - case 
IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32: - rp.Type = objabi.R_ADDR + case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32: + rp.Type = objabi.R_ADDR - // load addend from image - rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) + // load addend from image + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) - case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64 - rp.Siz = 8 + case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64 + rp.Siz = 8 - rp.Type = objabi.R_ADDR + rp.Type = objabi.R_ADDR - // load addend from image - rp.Add = int64(binary.LittleEndian.Uint64(sectdata[rsect][rp.Off:])) + // load addend from image + rp.Add = int64(binary.LittleEndian.Uint64(sectdata[rsect][rp.Off:])) + } + + case sys.ARM: + switch r.Type { + default: + return nil, nil, fmt.Errorf("%s: %v: unknown ARM relocation type %v", pn, sectsyms[rsect], r.Type) + + case IMAGE_REL_ARM_SECREL: + rp.Type = objabi.R_PCREL + + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) + + case IMAGE_REL_ARM_ADDR32: + rp.Type = objabi.R_ADDR + + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) + + case IMAGE_REL_ARM_BRANCH24: + rp.Type = objabi.R_CALLARM + + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) + } } // ld -r could generate multiple section symbols for the @@ -319,7 +358,7 @@ func Load(arch *sys.Arch, syms *sym.Symbols, input *bio.Reader, pkg string, leng if pesym.SectionNumber == 0 { // extern if s.Type == sym.SDYNIMPORT { - s.Plt = -2 // flag for dynimport in PE object files. + s.SetPlt(-2) // flag for dynimport in PE object files. 
} if s.Type == sym.SXREF && pesym.Value > 0 { // global data s.Type = sym.SNOPTRDATA @@ -440,7 +479,7 @@ func readpesym(arch *sys.Arch, syms *sym.Symbols, f *pe.File, pesym *pe.COFFSymb s.Type = sym.SXREF } if strings.HasPrefix(symname, "__imp_") { - s.Got = -2 // flag for __imp_ + s.SetGot(-2) // flag for __imp_ } return s, nil diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go new file mode 100644 index 0000000000000..7c863d79c5832 --- /dev/null +++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go @@ -0,0 +1,225 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadxcoff implements a XCOFF file reader. +package loadxcoff + +import ( + "cmd/internal/bio" + "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/sym" + "errors" + "fmt" + "internal/xcoff" +) + +// ldSection is an XCOFF section with its symbols. +type ldSection struct { + xcoff.Section + sym *sym.Symbol +} + +// TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating xcoffBiobuf + +// xcoffBiobuf makes bio.Reader look like io.ReaderAt. +type xcoffBiobuf bio.Reader + +func (f *xcoffBiobuf) ReadAt(p []byte, off int64) (int, error) { + ret := ((*bio.Reader)(f)).Seek(off, 0) + if ret < 0 { + return 0, errors.New("fail to seek") + } + n, err := f.Read(p) + if err != nil { + return 0, err + } + return n, nil +} + +// Load loads the Xcoff file pn from f. +// Symbols are written into syms, and a slice of the text symbols is returned. 
+func Load(arch *sys.Arch, syms *sym.Symbols, input *bio.Reader, pkg string, length int64, pn string) (textp []*sym.Symbol, err error) { + errorf := func(str string, args ...interface{}) ([]*sym.Symbol, error) { + return nil, fmt.Errorf("loadxcoff: %v: %v", pn, fmt.Sprintf(str, args...)) + } + localSymVersion := syms.IncVersion() + + var ldSections []*ldSection + + f, err := xcoff.NewFile((*xcoffBiobuf)(input)) + if err != nil { + return nil, err + } + defer f.Close() + + for _, sect := range f.Sections { + //only text, data and bss section + if sect.Type < xcoff.STYP_TEXT || sect.Type > xcoff.STYP_BSS { + continue + } + lds := new(ldSection) + lds.Section = *sect + name := fmt.Sprintf("%s(%s)", pkg, lds.Name) + s := syms.Lookup(name, localSymVersion) + + switch lds.Type { + default: + return errorf("unrecognized section type 0x%x", lds.Type) + case xcoff.STYP_TEXT: + s.Type = sym.STEXT + case xcoff.STYP_DATA: + s.Type = sym.SNOPTRDATA + case xcoff.STYP_BSS: + s.Type = sym.SNOPTRBSS + } + + s.Size = int64(lds.Size) + if s.Type != sym.SNOPTRBSS { + data, err := lds.Section.Data() + if err != nil { + return nil, err + } + s.P = data + } + + lds.sym = s + ldSections = append(ldSections, lds) + } + + // sx = symbol from file + // s = symbol for syms + for _, sx := range f.Symbols { + // get symbol type + stype, errmsg := getSymbolType(f, sx) + if errmsg != "" { + return errorf("error reading symbol %s: %s", sx.Name, errmsg) + } + if stype == sym.Sxxx { + continue + } + + s := syms.Lookup(sx.Name, 0) + + // Text symbol + if s.Type == sym.STEXT { + if s.Attr.OnList() { + return errorf("symbol %s listed multiple times", s.Name) + } + s.Attr |= sym.AttrOnList + textp = append(textp, s) + } + } + + // Read relocations + for _, sect := range ldSections { + // TODO(aix): Dwarf section relocation if needed + if sect.Type != xcoff.STYP_TEXT && sect.Type != xcoff.STYP_DATA { + continue + } + rs := make([]sym.Reloc, sect.Nreloc) + for i, rx := range sect.Relocs { + r := &rs[i] + 
+ r.Sym = syms.Lookup(rx.Symbol.Name, 0) + if uint64(int32(rx.VirtualAddress)) != rx.VirtualAddress { + return errorf("virtual address of a relocation is too big: 0x%x", rx.VirtualAddress) + } + r.Off = int32(rx.VirtualAddress) + switch rx.Type { + default: + return errorf("section %s: unknown relocation of type 0x%x", sect.Name, rx.Type) + case xcoff.R_POS: + // Reloc the address of r.Sym + // Length should be 64 + if rx.Length != 64 { + return errorf("section %s: relocation R_POS has length different from 64: %d", sect.Name, rx.Length) + } + r.Siz = 8 + r.Type = objabi.R_CONST + r.Add = int64(rx.Symbol.Value) + + case xcoff.R_RBR: + r.Siz = 4 + r.Type = objabi.R_CALLPOWER + r.Add = 0 // + + } + } + s := sect.sym + s.R = rs + s.R = s.R[:sect.Nreloc] + } + return textp, nil + +} + +// Convert symbol xcoff type to sym.SymKind +// Returns nil if this shouldn't be added into syms (like .file or .dw symbols ) +func getSymbolType(f *xcoff.File, s *xcoff.Symbol) (stype sym.SymKind, err string) { + // .file symbol + if s.SectionNumber == -2 { + if s.StorageClass == xcoff.C_FILE { + return sym.Sxxx, "" + } + return sym.Sxxx, "unrecognised StorageClass for sectionNumber = -2" + } + + // extern symbols + // TODO(aix) + if s.SectionNumber == 0 { + return sym.Sxxx, "" + } + + sectType := f.Sections[s.SectionNumber-1].SectionHeader.Type + switch sectType { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Section type 0x%x not implemented", sectType) + case xcoff.STYP_DWARF, xcoff.STYP_DEBUG: + return sym.Sxxx, "" + case xcoff.STYP_DATA, xcoff.STYP_BSS, xcoff.STYP_TEXT: + } + + switch s.StorageClass { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x not implemented", s.StorageClass) + case xcoff.C_HIDEXT, xcoff.C_EXT, xcoff.C_WEAKEXT: + switch s.AuxCSect.StorageMappingClass { + default: + return sym.Sxxx, fmt.Sprintf("getSymbolType for Storage class 0x%x and Storage Map 0x%x not implemented", s.StorageClass, 
s.AuxCSect.StorageMappingClass) + + // Program Code + case xcoff.XMC_PR: + if sectType == xcoff.STYP_TEXT { + return sym.STEXT, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_PR", sectType, s.StorageClass) + + // Read/Write Data + case xcoff.XMC_RW: + if sectType == xcoff.STYP_DATA { + return sym.SDATA, "" + } + if sectType == xcoff.STYP_BSS { + return sym.SBSS, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_RW", sectType, s.StorageClass) + + // Function descriptor + case xcoff.XMC_DS: + if sectType == xcoff.STYP_DATA { + return sym.SDATA, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_DS", sectType, s.StorageClass) + + // TOC anchor and TOC entry + case xcoff.XMC_TC0, xcoff.XMC_TE: + if sectType == xcoff.STYP_DATA { + return sym.SXCOFFTOC, "" + } + return sym.Sxxx, fmt.Sprintf("unrecognised Section Type 0x%x for Storage Class 0x%x with Storage Map XMC_DS", sectType, s.StorageClass) + + } + } +} diff --git a/src/cmd/link/internal/mips/asm.go b/src/cmd/link/internal/mips/asm.go index 306d53f571579..8409e43afcbcf 100644 --- a/src/cmd/link/internal/mips/asm.go +++ b/src/cmd/link/internal/mips/asm.go @@ -82,23 +82,25 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, se return false } -func applyrel(arch *sys.Arch, r *sym.Reloc, s *sym.Symbol, val *int64, t int64) { +func applyrel(arch *sys.Arch, r *sym.Reloc, s *sym.Symbol, val int64, t int64) int64 { o := arch.ByteOrder.Uint32(s.P[r.Off:]) switch r.Type { case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSTLS: - *val = int64(o&0xffff0000 | uint32(t)&0xffff) + return int64(o&0xffff0000 | uint32(t)&0xffff) case objabi.R_ADDRMIPSU: - *val = int64(o&0xffff0000 | uint32((t+(1<<15))>>16)&0xffff) + return int64(o&0xffff0000 | uint32((t+(1<<15))>>16)&0xffff) case objabi.R_CALLMIPS, objabi.R_JMPMIPS: - *val = 
int64(o&0xfc000000 | uint32(t>>2)&^0xfc000000) + return int64(o&0xfc000000 | uint32(t>>2)&^0xfc000000) + default: + return val } } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return false + return val, false case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: r.Done = false @@ -114,28 +116,23 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs - applyrel(ctxt.Arch, r, s, val, r.Xadd) - return true + return applyrel(ctxt.Arch, r, s, val, r.Xadd), true case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - applyrel(ctxt.Arch, r, s, val, r.Add) - return true + return applyrel(ctxt.Arch, r, s, val, r.Add), true } } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: t := ld.Symaddr(r.Sym) + r.Add - applyrel(ctxt.Arch, r, s, val, t) - return true + return applyrel(ctxt.Arch, r, s, val, t), true case objabi.R_CALLMIPS, objabi.R_JMPMIPS: t := ld.Symaddr(r.Sym) + r.Add @@ -148,19 +145,17 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t) } - applyrel(ctxt.Arch, r, s, val, t) - return true + return applyrel(ctxt.Arch, r, s, val, t), true case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ld.Symaddr(r.Sym) + r.Add - 0x7000 if t < -32768 || t >= 32678 { ld.Errorf(s, "TLS offset out of range %d", t) } - applyrel(ctxt.Arch, r, s, val, t) - return true 
+ return applyrel(ctxt.Arch, r, s, val, t), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index 295a0aafaed71..51eba596dc796 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -99,11 +99,11 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, se return false } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return false + return val, false case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: r.Done = false @@ -121,34 +121,30 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { } r.Xsym = rs - return true + return val, true case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return true + return val, true } } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: t := ld.Symaddr(r.Sym) + r.Add o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) if r.Type == objabi.R_ADDRMIPS { - *val = int64(o1&0xffff0000 | uint32(t)&0xffff) - } else { - *val = int64(o1&0xffff0000 | uint32((t+1<<15)>>16)&0xffff) + return int64(o1&0xffff0000 | uint32(t)&0xffff), true } - return true + return int64(o1&0xffff0000 | uint32((t+1<<15)>>16)&0xffff), true case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ld.Symaddr(r.Sym) + r.Add - 0x7000 @@ -156,18 +152,16 @@ func archreloc(ctxt 
*ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { ld.Errorf(s, "TLS offset out of range %d", t) } o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) - *val = int64(o1&0xffff0000 | uint32(t)&0xffff) - return true + return int64(o1&0xffff0000 | uint32(t)&0xffff), true case objabi.R_CALLMIPS, objabi.R_JMPMIPS: // Low 26 bits = (S + A) >> 2 t := ld.Symaddr(r.Sym) + r.Add o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) - *val = int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000) - return true + return int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { diff --git a/src/cmd/link/internal/objfile/objfile.go b/src/cmd/link/internal/objfile/objfile.go index 67868be2a1926..b39e0521066bd 100644 --- a/src/cmd/link/internal/objfile/objfile.go +++ b/src/cmd/link/internal/objfile/objfile.go @@ -13,6 +13,7 @@ import ( "bytes" "cmd/internal/bio" "cmd/internal/dwarf" + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/sym" @@ -23,8 +24,8 @@ import ( ) const ( - startmagic = "\x00\x00go19ld" - endmagic = "\xff\xffgo19ld" + startmagic = "\x00go112ld" + endmagic = "\xffgo112ld" ) var emptyPkg = []byte(`"".`) @@ -203,6 +204,7 @@ func (r *objReader) readSym() { overwrite: s.File = pkg + s.Lib = r.lib if dupok { s.Attr |= sym.AttrDuplicateOK } @@ -316,9 +318,9 @@ overwrite: pc.InlTree[i].File = r.readSymIndex() pc.InlTree[i].Line = r.readInt32() pc.InlTree[i].Func = r.readSymIndex() + pc.InlTree[i].ParentPC = r.readInt32() } - s.Lib = r.lib if !dupok { if s.Attr.OnList() { log.Fatalf("symbol %s listed multiple times", s.Name) @@ -380,17 +382,20 @@ func (r *objReader) readRef() { log.Fatalf("readSym out of sync") } name := r.readSymName() - v := r.readInt() - if v != 0 && v != 1 { - log.Fatalf("invalid symbol version for %q: %d", name, v) - } - if v == 1 { + var v int + if abi := r.readInt(); abi == -1 { + // Static v = r.localSymVersion + } 
else if abiver := sym.ABIToVersion(obj.ABI(abi)); abiver != -1 { + // Note that data symbols are "ABI0", which maps to version 0. + v = abiver + } else { + log.Fatalf("invalid symbol ABI for %q: %d", name, abi) } s := r.syms.Lookup(name, v) r.refs = append(r.refs, s) - if s == nil || v != 0 { + if s == nil || v == r.localSymVersion { return } if s.Name[0] == '$' && len(s.Name) > 5 && s.Type == 0 && len(s.P) == 0 { diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index 11fdf1fb0501e..6e31668e28e2b 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -39,6 +39,7 @@ import ( "encoding/binary" "fmt" "log" + "strings" ) func genplt(ctxt *ld.Link) { @@ -133,7 +134,7 @@ func genplt(ctxt *ld.Link) { } func genaddmoduledata(ctxt *ld.Link) { - addmoduledata := ctxt.Syms.ROLookup("runtime.addmoduledata", 0) + addmoduledata := ctxt.Syms.ROLookup("runtime.addmoduledata", sym.SymVerABI0) if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { return } @@ -236,7 +237,7 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *sym.Symbol, targ *sym.Symbol) r.Off = int32(stub.Size) r.Sym = plt - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) r.Siz = 2 if ctxt.Arch.ByteOrder == binary.BigEndian { r.Off += int32(r.Siz) @@ -247,7 +248,7 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *sym.Symbol, targ *sym.Symbol) r = stub.AddRel() r.Off = int32(stub.Size) r.Sym = plt - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) r.Siz = 2 if ctxt.Arch.ByteOrder == binary.BigEndian { r.Off += int32(r.Siz) @@ -262,6 +263,14 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *sym.Symbol, targ *sym.Symbol) } func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { + if ctxt.IsELF { + return addelfdynrel(ctxt, s, r) + } else if ctxt.HeadType == objabi.Haix { + return ld.Xcoffadddynrel(ctxt, s, r) + } + return false +} +func addelfdynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { 
targ := r.Sym switch r.Type { @@ -280,7 +289,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { // callee. Hence, we need to go to the local entry // point. (If we don't do this, the callee will try // to use r12 to compute r2.) - r.Add += int64(r.Sym.Localentry) * 4 + r.Add += int64(r.Sym.Localentry()) * 4 if targ.Type == sym.SDYNIMPORT { // Should have been handled in elfsetupplt @@ -374,6 +383,13 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { } func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + // Beware that bit0~bit15 start from the third byte of a instruction in Big-Endian machines. + if r.Type == objabi.R_ADDR || r.Type == objabi.R_POWER_TLS || r.Type == objabi.R_CALLPOWER { + } else { + if ctxt.Arch.ByteOrder == binary.BigEndian { + sectoff += 2 + } + } ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() @@ -474,14 +490,79 @@ func symtoc(ctxt *ld.Link, s *sym.Symbol) int64 { return toc.Value } -func archrelocaddr(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +// archreloctoc relocates a TOC relative symbol. +// If the symbol pointed by this TOC relative symbol is in .data or .bss, the +// default load instruction can be changed to an addi instruction and the +// symbol address can be used directly. +// This code is for AIX only. +func archreloctoc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) int64 { + if ctxt.HeadType == objabi.Hlinux { + ld.Errorf(s, "archrelocaddr called for %s relocation\n", r.Sym.Name) + } + var o1, o2 uint32 + + o1 = uint32(val >> 32) + o2 = uint32(val) + + var t int64 + useAddi := false + const prefix = "TOC." 
+ var tarSym *sym.Symbol + if strings.HasPrefix(r.Sym.Name, prefix) { + tarSym = ctxt.Syms.ROLookup(strings.TrimPrefix(r.Sym.Name, prefix), 0) + } else { + ld.Errorf(s, "archreloctoc called for a symbol without TOC anchor") + } + + if tarSym != nil && tarSym.Attr.Reachable() && (tarSym.Sect.Seg == &ld.Segdata) { + t = ld.Symaddr(tarSym) + r.Add - ctxt.Syms.ROLookup("TOC", 0).Value + // change ld to addi in the second instruction + o2 = (o2 & 0x03FF0000) | 0xE<<26 + useAddi = true + } else { + t = ld.Symaddr(r.Sym) + r.Add - ctxt.Syms.ROLookup("TOC", 0).Value + } + + if t != int64(int32(t)) { + ld.Errorf(s, "TOC relocation for %s is too big to relocate %s: 0x%x", s.Name, r.Sym, t) + } + + if t&0x8000 != 0 { + t += 0x10000 + } + + o1 |= uint32((t >> 16) & 0xFFFF) + + switch r.Type { + case objabi.R_ADDRPOWER_TOCREL_DS: + if useAddi { + o2 |= uint32(t) & 0xFFFF + } else { + if t&3 != 0 { + ld.Errorf(s, "bad DS reloc for %s: %d", s.Name, ld.Symaddr(r.Sym)) + } + o2 |= uint32(t) & 0xFFFC + } + default: + return -1 + } + + return int64(o1)<<32 | int64(o2) +} + +// archrelocaddr relocates a symbol address. +// This code is for AIX only. 
+func archrelocaddr(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) int64 { + if ctxt.HeadType == objabi.Haix { + ld.Errorf(s, "archrelocaddr called for %s relocation\n", r.Sym.Name) + } var o1, o2 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o1 = uint32(*val >> 32) - o2 = uint32(*val) + o1 = uint32(val >> 32) + o2 = uint32(val) } else { - o1 = uint32(*val) - o2 = uint32(*val >> 32) + o1 = uint32(val) + o2 = uint32(val >> 32) } // We are spreading a 31-bit address across two instructions, putting the @@ -493,7 +574,7 @@ func archrelocaddr(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool t := ld.Symaddr(r.Sym) + r.Add if t < 0 || t >= 1<<31 { - ld.Errorf(s, "relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym)) + ld.Errorf(s, "relocation for %s is too big (>=2G): 0x%x", s.Name, ld.Symaddr(r.Sym)) } if t&0x8000 != 0 { t += 0x10000 @@ -510,15 +591,13 @@ func archrelocaddr(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool } o2 |= uint32(t) & 0xfffc default: - return false + return -1 } if ctxt.Arch.ByteOrder == binary.BigEndian { - *val = int64(o1)<<32 | int64(o2) - } else { - *val = int64(o2)<<32 | int64(o1) + return int64(o1)<<32 | int64(o2) } - return true + return int64(o2)<<32 | int64(o1) } // resolve direct jump relocation r in s, and add trampoline if necessary @@ -623,17 +702,17 @@ func gentramp(arch *sys.Arch, linkmode ld.LinkMode, tramp, target *sym.Symbol, o arch.ByteOrder.PutUint32(tramp.P[12:], o4) } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return false + return val, false case objabi.R_POWER_TLS, objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE: r.Done = false // check Outer is nil, Type is TLSBSS? 
r.Xadd = r.Add r.Xsym = r.Sym - return true + return val, true case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, objabi.R_ADDRPOWER_TOCREL, @@ -655,24 +734,24 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { } r.Xsym = rs - return true + return val, true case objabi.R_CALLPOWER: r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return true + return val, true } } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true + case objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS: + return archreloctoc(ctxt, r, s, val), true case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS: - return archrelocaddr(ctxt, r, s, val) + return archrelocaddr(ctxt, r, s, val), true case objabi.R_CALLPOWER: // Bits 6 through 29 = (S + A - P) >> 2 @@ -686,26 +765,28 @@ func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { if int64(int32(t<<6)>>6) != t { ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t) } - *val |= int64(uint32(t) &^ 0xfc000003) - return true + return val | int64(uint32(t)&^0xfc000003), true case objabi.R_POWER_TOC: // S + A - .TOC. - *val = ld.Symaddr(r.Sym) + r.Add - symtoc(ctxt, s) + return ld.Symaddr(r.Sym) + r.Add - symtoc(ctxt, s), true - return true case objabi.R_POWER_TLS_LE: // The thread pointer points 0x7000 bytes after the start of the // thread local storage area as documented in section "3.7.2 TLS // Runtime Handling" of "Power Architecture 64-Bit ELF V2 ABI // Specification". v := r.Sym.Value - 0x7000 + if ctxt.HeadType == objabi.Haix { + // On AIX, the thread pointer points 0x7800 bytes after + // the TLS. 
+ v -= 0x800 + } if int64(int16(v)) != v { ld.Errorf(s, "TLS offset out of range %d", v) } - *val = (*val &^ 0xffff) | (v & 0xffff) - return true + return (val &^ 0xffff) | (v & 0xffff), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { @@ -723,9 +804,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 // overflow depends on the instruction var o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o1 = ld.Be32(s.P[r.Off-2:]) + o1 = binary.BigEndian.Uint32(s.P[r.Off-2:]) } else { - o1 = ld.Le32(s.P[r.Off:]) + o1 = binary.LittleEndian.Uint32(s.P[r.Off:]) } switch o1 >> 26 { case 24, // ori @@ -757,9 +838,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 // overflow depends on the instruction var o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o1 = ld.Be32(s.P[r.Off-2:]) + o1 = binary.BigEndian.Uint32(s.P[r.Off-2:]) } else { - o1 = ld.Le32(s.P[r.Off:]) + o1 = binary.LittleEndian.Uint32(s.P[r.Off:]) } switch o1 >> 26 { case 25, // oris @@ -781,9 +862,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 case sym.RV_POWER_DS: var o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { - o1 = uint32(ld.Be16(s.P[r.Off:])) + o1 = uint32(binary.BigEndian.Uint16(s.P[r.Off:])) } else { - o1 = uint32(ld.Le16(s.P[r.Off:])) + o1 = uint32(binary.LittleEndian.Uint16(s.P[r.Off:])) } if t&3 != 0 { ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t) @@ -800,7 +881,7 @@ overflow: } func addpltsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Plt >= 0 { + if s.Plt() >= 0 { return } @@ -832,11 +913,11 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { // JMP_SLOT dynamic relocation for it. 
// // TODO(austin): ABI v1 is different - s.Plt = int32(plt.Size) + s.SetPlt(int32(plt.Size)) plt.Size += 8 - rela.AddAddrPlus(ctxt.Arch, plt, int64(s.Plt)) + rela.AddAddrPlus(ctxt.Arch, plt, int64(s.Plt())) rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_PPC64_JMP_SLOT))) rela.AddUint64(ctxt.Arch, 0) } else { @@ -967,6 +1048,9 @@ func asmb(ctxt *ld.Link) { case objabi.Hplan9: symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) + + case objabi.Haix: + // Nothing to do } ctxt.Out.SeekSet(int64(symo)) @@ -995,6 +1079,10 @@ func asmb(ctxt *ld.Link) { ctxt.Out.Write(sym.P) ctxt.Out.Flush() } + + case objabi.Haix: + // symtab must be added once sections have been created in ld.Asmbxcoff + ctxt.Out.Flush() } } @@ -1020,6 +1108,11 @@ func asmb(ctxt *ld.Link) { objabi.Hopenbsd, objabi.Hnacl: ld.Asmbelf(ctxt, int64(symo)) + + case objabi.Haix: + fileoff := uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) + fileoff = uint32(ld.Rnd(int64(fileoff), int64(*ld.FlagRound))) + ld.Asmbxcoff(ctxt, int64(fileoff)) } ctxt.Out.Flush() diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go index 273d9b42cb9da..fbedc728d9bae 100644 --- a/src/cmd/link/internal/ppc64/obj.go +++ b/src/cmd/link/internal/ppc64/obj.go @@ -93,9 +93,6 @@ func archinit(ctxt *ld.Link) { } case objabi.Hlinux: /* ppc64 elf */ - if ctxt.Arch == sys.ArchPPC64 { - *ld.FlagD = true // TODO(austin): ELF ABI v1 not supported yet - } ld.Elfinit(ctxt) ld.HEADR = ld.ELFRESERVE if *ld.FlagTextAddr == -1 { @@ -121,6 +118,10 @@ func archinit(ctxt *ld.Link) { if *ld.FlagRound == -1 { *ld.FlagRound = 0x10000 } + + case objabi.Haix: + ld.Xcoffinit(ctxt) + } if *ld.FlagDataAddr != 0 && *ld.FlagRound != 0 { diff --git a/src/cmd/link/internal/s390x/asm.go b/src/cmd/link/internal/s390x/asm.go index 634ba98dd3b44..88199f3a562c7 100644 --- a/src/cmd/link/internal/s390x/asm.go +++ b/src/cmd/link/internal/s390x/asm.go @@ -157,7 +157,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, 
r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add += int64(targ.Plt) + r.Add += int64(targ.Plt()) } return true @@ -168,7 +168,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add += int64(targ.Plt) + r.Add += int64(targ.Plt()) } return true @@ -224,7 +224,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { r.Type = objabi.R_PCREL r.Variant = sym.RV_390_DBL r.Sym = ctxt.Syms.Lookup(".got", 0) - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) r.Add += int64(r.Siz) return true } @@ -285,7 +285,7 @@ func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { case objabi.R_PCRELDBL, objabi.R_CALL: isdbl = true } - if r.Xsym.Type == sym.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == objabi.R_CALL) { + if r.Xsym.Type == sym.SDYNIMPORT && (r.Xsym.ElfType() == elf.STT_FUNC || r.Type == objabi.R_CALL) { if isdbl { switch r.Siz { case 2: @@ -384,21 +384,19 @@ func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, se return false } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { - return false + return val, false } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { @@ -419,7 +417,7 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 } func addpltsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Plt >= 0 
{ + if s.Plt() >= 0 { return } @@ -474,7 +472,7 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_390_JMP_SLOT))) rela.AddUint64(ctxt.Arch, 0) - s.Plt = int32(plt.Size - 32) + s.SetPlt(int32(plt.Size - 32)) } else { ld.Errorf(s, "addpltsym: unsupported binary format") @@ -482,18 +480,18 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { } func addgotsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Got >= 0 { + if s.Got() >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) got.AddUint64(ctxt.Arch, 0) if ctxt.IsELF { rela := ctxt.Syms.Lookup(".rela", 0) - rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got())) rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_390_GLOB_DAT))) rela.AddUint64(ctxt.Arch, 0) } else { diff --git a/src/cmd/link/internal/sym/reloc.go b/src/cmd/link/internal/sym/reloc.go index fc62c385f4025..da696d327b326 100644 --- a/src/cmd/link/internal/sym/reloc.go +++ b/src/cmd/link/internal/sym/reloc.go @@ -83,11 +83,11 @@ func RelocName(arch *sys.Arch, r objabi.RelocType) string { case sys.I386: return elf.R_386(nr).String() case sys.MIPS, sys.MIPS64: - // return elf.R_MIPS(nr).String() + return elf.R_MIPS(nr).String() case sys.PPC64: - // return elf.R_PPC64(nr).String() + return elf.R_PPC64(nr).String() case sys.S390X: - // return elf.R_390(nr).String() + return elf.R_390(nr).String() default: panic("unreachable") } diff --git a/src/cmd/link/internal/sym/sizeof_test.go b/src/cmd/link/internal/sym/sizeof_test.go index 2f2dfc79ed0d1..da4602a1619e7 100644 --- a/src/cmd/link/internal/sym/sizeof_test.go +++ b/src/cmd/link/internal/sym/sizeof_test.go @@ -23,7 +23,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Symbol{}, 132, 216}, + {Symbol{}, 108, 176}, } for _, tt := range 
tests { diff --git a/src/cmd/link/internal/sym/symbol.go b/src/cmd/link/internal/sym/symbol.go index ea0eb89e2bc99..8b70d6184628b 100644 --- a/src/cmd/link/internal/sym/symbol.go +++ b/src/cmd/link/internal/sym/symbol.go @@ -5,6 +5,7 @@ package sym import ( + "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/sys" "debug/elf" @@ -15,39 +16,66 @@ import ( // Symbol is an entry in the symbol table. type Symbol struct { Name string - Extname string Type SymKind Version int16 Attr Attribute - Localentry uint8 Dynid int32 - Plt int32 - Got int32 Align int32 Elfsym int32 LocalElfsym int32 Value int64 Size int64 - // ElfType is set for symbols read from shared libraries by ldshlibsyms. It - // is not set for symbols defined by the packages being linked or by symbols - // read by ldelf (and so is left as elf.STT_NOTYPE). - ElfType elf.SymType - Sub *Symbol - Outer *Symbol - Gotype *Symbol - File string - dyninfo *dynimp - Sect *Section - FuncInfo *FuncInfo - Lib *Library // Package defining this symbol + Sub *Symbol + Outer *Symbol + Gotype *Symbol + File string // actually package! + auxinfo *AuxSymbol + Sect *Section + FuncInfo *FuncInfo + Lib *Library // Package defining this symbol // P contains the raw symbol data. P []byte R []Reloc } -type dynimp struct { +// AuxSymbol contains less-frequently used sym.Symbol fields. +type AuxSymbol struct { + extname string dynimplib string dynimpvers string + localentry uint8 + plt int32 + got int32 + // ElfType is set for symbols read from shared libraries by ldshlibsyms. It + // is not set for symbols defined by the packages being linked or by symbols + // read by ldelf (and so is left as elf.STT_NOTYPE). 
+ elftype elf.SymType +} + +const ( + SymVerABI0 = 0 + SymVerABIInternal = 1 + SymVerStatic = 10 // Minimum version used by static (file-local) syms +) + +func ABIToVersion(abi obj.ABI) int { + switch abi { + case obj.ABI0: + return SymVerABI0 + case obj.ABIInternal: + return SymVerABIInternal + } + return -1 +} + +func VersionToABI(v int) (obj.ABI, bool) { + switch v { + case SymVerABI0: + return obj.ABI0, true + case SymVerABIInternal: + return obj.ABIInternal, true + } + return ^obj.ABI(0), false } func (s *Symbol) String() string { @@ -57,6 +85,10 @@ func (s *Symbol) String() string { return fmt.Sprintf("%s<%d>", s.Name, s.Version) } +func (s *Symbol) IsFileLocal() bool { + return s.Version >= SymVerStatic +} + func (s *Symbol) ElfsymForReloc() int32 { // If putelfsym created a local version of this symbol, use that in all // relocations. @@ -128,6 +160,10 @@ func (s *Symbol) SetUint8(arch *sys.Arch, r int64, v uint8) int64 { return s.setUintXX(arch, r, uint64(v), 1) } +func (s *Symbol) SetUint16(arch *sys.Arch, r int64, v uint16) int64 { + return s.setUintXX(arch, r, uint64(v), 2) +} + func (s *Symbol) SetUint32(arch *sys.Arch, r int64, v uint32) int64 { return s.setUintXX(arch, r, uint64(v), 4) } @@ -268,38 +304,130 @@ func (s *Symbol) setUintXX(arch *sys.Arch, off int64, v uint64, wid int64) int64 return off + wid } +func (s *Symbol) makeAuxInfo() { + if s.auxinfo == nil { + s.auxinfo = &AuxSymbol{extname: s.Name, plt: -1, got: -1} + } +} + +func (s *Symbol) Extname() string { + if s.auxinfo == nil { + return s.Name + } + return s.auxinfo.extname +} + +func (s *Symbol) SetExtname(n string) { + if s.auxinfo == nil { + if s.Name == n { + return + } + s.makeAuxInfo() + } + s.auxinfo.extname = n +} + func (s *Symbol) Dynimplib() string { - if s.dyninfo == nil { + if s.auxinfo == nil { return "" } - return s.dyninfo.dynimplib + return s.auxinfo.dynimplib } func (s *Symbol) Dynimpvers() string { - if s.dyninfo == nil { + if s.auxinfo == nil { return "" } - return 
s.dyninfo.dynimpvers + return s.auxinfo.dynimpvers } func (s *Symbol) SetDynimplib(lib string) { - if s.dyninfo == nil { - s.dyninfo = &dynimp{dynimplib: lib} - } else { - s.dyninfo.dynimplib = lib + if s.auxinfo == nil { + s.makeAuxInfo() } + s.auxinfo.dynimplib = lib } func (s *Symbol) SetDynimpvers(vers string) { - if s.dyninfo == nil { - s.dyninfo = &dynimp{dynimpvers: vers} - } else { - s.dyninfo.dynimpvers = vers + if s.auxinfo == nil { + s.makeAuxInfo() } + s.auxinfo.dynimpvers = vers } func (s *Symbol) ResetDyninfo() { - s.dyninfo = nil + if s.auxinfo != nil { + s.auxinfo.dynimplib = "" + s.auxinfo.dynimpvers = "" + } +} + +func (s *Symbol) Localentry() uint8 { + if s.auxinfo == nil { + return 0 + } + return s.auxinfo.localentry +} + +func (s *Symbol) SetLocalentry(val uint8) { + if s.auxinfo == nil { + if val != 0 { + return + } + s.makeAuxInfo() + } + s.auxinfo.localentry = val +} + +func (s *Symbol) Plt() int32 { + if s.auxinfo == nil { + return -1 + } + return s.auxinfo.plt +} + +func (s *Symbol) SetPlt(val int32) { + if s.auxinfo == nil { + if val == -1 { + return + } + s.makeAuxInfo() + } + s.auxinfo.plt = val +} + +func (s *Symbol) Got() int32 { + if s.auxinfo == nil { + return -1 + } + return s.auxinfo.got +} + +func (s *Symbol) SetGot(val int32) { + if s.auxinfo == nil { + if val == -1 { + return + } + s.makeAuxInfo() + } + s.auxinfo.got = val +} + +func (s *Symbol) ElfType() elf.SymType { + if s.auxinfo == nil { + return elf.STT_NOTYPE + } + return s.auxinfo.elftype +} + +func (s *Symbol) SetElfType(val elf.SymType) { + if s.auxinfo == nil { + if val == elf.STT_NOTYPE { + return + } + s.makeAuxInfo() + } + s.auxinfo.elftype = val } // SortSub sorts a linked-list (by Sub) of *Symbol by Value. @@ -393,10 +521,11 @@ type FuncInfo struct { // InlinedCall is a node in a local inlining tree (FuncInfo.InlTree). 
type InlinedCall struct { - Parent int32 // index of parent in InlTree - File *Symbol // file of the inlined call - Line int32 // line number of the inlined call - Func *Symbol // function that was inlined + Parent int32 // index of parent in InlTree + File *Symbol // file of the inlined call + Line int32 // line number of the inlined call + Func *Symbol // function that was inlined + ParentPC int32 // PC of the instruction just before the inlined body (offset from function start) } type Pcdata struct { diff --git a/src/cmd/link/internal/sym/symbols.go b/src/cmd/link/internal/sym/symbols.go index 98a5ae67b8bd3..f0fcf2361b178 100644 --- a/src/cmd/link/internal/sym/symbols.go +++ b/src/cmd/link/internal/sym/symbols.go @@ -40,12 +40,13 @@ type Symbols struct { } func NewSymbols() *Symbols { + hash := make([]map[string]*Symbol, SymVerStatic) + // Preallocate about 2mb for hash of non static symbols + hash[0] = make(map[string]*Symbol, 100000) + // And another 1mb for internal ABI text symbols. + hash[SymVerABIInternal] = make(map[string]*Symbol, 50000) return &Symbols{ - hash: []map[string]*Symbol{ - // preallocate about 2mb for hash of - // non static symbols - make(map[string]*Symbol, 100000), - }, + hash: hash, Allsym: make([]*Symbol, 0, 100000), } } @@ -59,8 +60,6 @@ func (syms *Symbols) Newsym(name string, v int) *Symbol { syms.symbolBatch = batch[1:] s.Dynid = -1 - s.Plt = -1 - s.Got = -1 s.Name = name s.Version = int16(v) syms.Allsym = append(syms.Allsym, s) @@ -77,7 +76,6 @@ func (syms *Symbols) Lookup(name string, v int) *Symbol { return s } s = syms.Newsym(name, v) - s.Extname = s.Name m[name] = s return s } @@ -95,11 +93,12 @@ func (syms *Symbols) IncVersion() int { } // Rename renames a symbol. 
-func (syms *Symbols) Rename(old, new string, v int) { +func (syms *Symbols) Rename(old, new string, v int, reachparent map[*Symbol]*Symbol) { s := syms.hash[v][old] + oldExtName := s.Extname() s.Name = new - if s.Extname == old { - s.Extname = new + if oldExtName == old { + s.SetExtname(new) } delete(syms.hash[v], old) @@ -108,8 +107,16 @@ func (syms *Symbols) Rename(old, new string, v int) { syms.hash[v][new] = s } else { if s.Type == 0 { + dup.Attr |= s.Attr + if s.Attr.Reachable() && reachparent != nil { + reachparent[dup] = reachparent[s] + } *s = *dup } else if dup.Type == 0 { + s.Attr |= dup.Attr + if dup.Attr.Reachable() && reachparent != nil { + reachparent[s] = reachparent[dup] + } *dup = *s syms.hash[v][new] = s } diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go index 2e21cc1f00a8a..82e4b9eda48c4 100644 --- a/src/cmd/link/internal/sym/symkind.go +++ b/src/cmd/link/internal/sym/symkind.go @@ -89,6 +89,7 @@ const ( SNOPTRDATA SINITARR SDATA + SXCOFFTOC SBSS SNOPTRBSS STLSBSS @@ -108,6 +109,9 @@ const ( SDWARFRANGE SDWARFLOC SDWARFMISC // Not really a section; informs/affects other DWARF section generation + + // ABI aliases (these never appear in the output) + SABIALIAS ) // AbiSymKindToSymKind maps values read from object files (which are @@ -125,6 +129,7 @@ var AbiSymKindToSymKind = [...]SymKind{ SDWARFRANGE, SDWARFLOC, SDWARFMISC, + SABIALIAS, } // ReadOnly are the symbol kinds that form read-only sections. 
In some diff --git a/src/cmd/link/internal/sym/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go index e7e56c4003d04..9c8e1569f6bd1 100644 --- a/src/cmd/link/internal/sym/symkind_string.go +++ b/src/cmd/link/internal/sym/symkind_string.go @@ -4,9 +4,9 @@ package sym import "strconv" -const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISC" +const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASXCOFFTOCSBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISCSABIALIAS" -var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 271, 280, 287, 292, 304, 316, 333, 350, 359, 365, 375, 383, 393, 403, 414, 423, 433} +var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 276, 280, 289, 296, 301, 313, 325, 342, 359, 368, 374, 384, 392, 402, 412, 423, 432, 442, 451} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index bffbc7c8a671e..737de599285d8 100644 --- 
a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -54,7 +54,11 @@ type wasmFuncType struct { } var wasmFuncTypes = map[string]*wasmFuncType{ - "_rt0_wasm_js": &wasmFuncType{Params: []byte{I32, I32}}, // argc, argv + "_rt0_wasm_js": &wasmFuncType{Params: []byte{}}, // + "wasm_export_run": &wasmFuncType{Params: []byte{I32, I32}}, // argc, argv + "wasm_export_resume": &wasmFuncType{Params: []byte{}}, // + "wasm_export_getsp": &wasmFuncType{Results: []byte{I32}}, // sp + "wasm_pc_f_loop": &wasmFuncType{Params: []byte{}}, // "runtime.wasmMove": &wasmFuncType{Params: []byte{I32, I32, I32}}, // dst, src, len "runtime.wasmZero": &wasmFuncType{Params: []byte{I32, I32}}, // ptr, len "runtime.wasmDiv": &wasmFuncType{Params: []byte{I64, I64}, Results: []byte{I64}}, // x, y -> x/y @@ -162,9 +166,6 @@ func asmb(ctxt *ld.Link) { fns[i] = &wasmFunc{Name: name, Type: typ, Code: wfn.Bytes()} } - // look up program entry point - rt0 := uint32(len(hostImports)) + uint32(ctxt.Syms.ROLookup("_rt0_wasm_js", 0).Value>>16) - funcValueOffset - ctxt.Out.Write([]byte{0x00, 0x61, 0x73, 0x6d}) // magic ctxt.Out.Write([]byte{0x01, 0x00, 0x00, 0x00}) // version @@ -180,7 +181,7 @@ func asmb(ctxt *ld.Link) { writeTableSec(ctxt, fns) writeMemorySec(ctxt) writeGlobalSec(ctxt) - writeExportSec(ctxt, rt0) + writeExportSec(ctxt, len(hostImports)) writeElementSec(ctxt, uint64(len(hostImports)), uint64(len(fns))) writeCodeSec(ctxt, fns) writeDataSec(ctxt) @@ -326,7 +327,7 @@ func writeGlobalSec(ctxt *ld.Link) { I64, // 6: RET1 I64, // 7: RET2 I64, // 8: RET3 - I32, // 9: RUN + I32, // 9: PAUSE } writeUleb128(ctxt.Out, uint64(len(globalRegs))) // number of globals @@ -348,15 +349,18 @@ func writeGlobalSec(ctxt *ld.Link) { // writeExportSec writes the section that declares exports. // Exports can be accessed by the WebAssembly host, usually JavaScript. -// Currently _rt0_wasm_js (program entry point) and the linear memory get exported. 
-func writeExportSec(ctxt *ld.Link, rt0 uint32) { +// The wasm_export_* functions and the linear memory get exported. +func writeExportSec(ctxt *ld.Link, lenHostImports int) { sizeOffset := writeSecHeader(ctxt, sectionExport) - writeUleb128(ctxt.Out, 2) // number of exports + writeUleb128(ctxt.Out, 4) // number of exports - writeName(ctxt.Out, "run") // inst.exports.run in wasm_exec.js - ctxt.Out.WriteByte(0x00) // func export - writeUleb128(ctxt.Out, uint64(rt0)) // funcidx + for _, name := range []string{"run", "resume", "getsp"} { + idx := uint32(lenHostImports) + uint32(ctxt.Syms.ROLookup("wasm_export_"+name, 0).Value>>16) - funcValueOffset + writeName(ctxt.Out, name) // inst.exports.run/resume/getsp in wasm_exec.js + ctxt.Out.WriteByte(0x00) // func export + writeUleb128(ctxt.Out, uint64(idx)) // funcidx + } writeName(ctxt.Out, "mem") // inst.exports.mem in wasm_exec.js ctxt.Out.WriteByte(0x02) // mem export diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go index 3150aac6cfe36..1744ab4d9969f 100644 --- a/src/cmd/link/internal/x86/asm.go +++ b/src/cmd/link/internal/x86/asm.go @@ -197,7 +197,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add += int64(targ.Plt) + r.Add += int64(targ.Plt()) } return true @@ -230,7 +230,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { addgotsym(ctxt, targ) r.Type = objabi.R_CONST // write r->add during relocsym r.Sym = nil - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) return true case 256 + objabi.RelocType(elf.R_386_GOTOFF): @@ -261,7 +261,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) r.Type = objabi.R_PCREL return true } @@ -285,7 +285,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, 
r *sym.Reloc) bool { addgotsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".got", 0) - r.Add += int64(targ.Got) + r.Add += int64(targ.Got()) r.Type = objabi.R_PCREL return true } @@ -303,7 +303,7 @@ func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { } addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) + r.Add = int64(targ.Plt()) return true case objabi.R_ADDR: @@ -491,20 +491,18 @@ func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, secto return true } -func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) { if ctxt.LinkMode == ld.LinkExternal { - return false + return val, false } switch r.Type { case objabi.R_CONST: - *val = r.Add - return true + return r.Add, true case objabi.R_GOTOFF: - *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return true + return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true } - return false + return val, false } func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { @@ -540,7 +538,7 @@ func elfsetupplt(ctxt *ld.Link) { } func addpltsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Plt >= 0 { + if s.Plt() >= 0 { return } @@ -578,7 +576,7 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_386_JMP_SLOT))) - s.Plt = int32(plt.Size - 16) + s.SetPlt(int32(plt.Size - 16)) } else if ctxt.HeadType == objabi.Hdarwin { // Same laziness as in 6l. 
@@ -589,29 +587,29 @@ func addpltsym(ctxt *ld.Link, s *sym.Symbol) { ctxt.Syms.Lookup(".linkedit.plt", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) // jmpq *got+size(IP) - s.Plt = int32(plt.Size) + s.SetPlt(int32(plt.Size)) plt.AddUint8(0xff) plt.AddUint8(0x25) - plt.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) + plt.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got())) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } func addgotsym(ctxt *ld.Link, s *sym.Symbol) { - if s.Got >= 0 { + if s.Got() >= 0 { return } ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) - s.Got = int32(got.Size) + s.SetGot(int32(got.Size)) got.AddUint32(ctxt.Arch, 0) if ctxt.IsELF { rel := ctxt.Syms.Lookup(".rel", 0) - rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got())) rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_386_GLOB_DAT))) } else if ctxt.HeadType == objabi.Hdarwin { ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 4ec03abc85871..e0aae0288491c 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -6,6 +6,8 @@ import ( "os" "os/exec" "path/filepath" + "regexp" + "strings" "testing" ) @@ -70,3 +72,102 @@ func main() {} t.Fatalf("failed to link main.o: %v, output: %s\n", err, out) } } + +// TestIssue28429 ensures that the linker does not attempt to link +// sections not named *.o. Such sections may be used by a build system +// to, for example, save facts produced by a modular static analysis +// such as golang.org/x/tools/go/analysis. 
+func TestIssue28429(t *testing.T) { + testenv.MustHaveGoBuild(t) + + tmpdir, err := ioutil.TempDir("", "issue28429-") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpdir) + + write := func(name, content string) { + err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) + if err != nil { + t.Fatal(err) + } + } + + runGo := func(args ...string) { + cmd := exec.Command(testenv.GoToolPath(t), args...) + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("'go %s' failed: %v, output: %s", + strings.Join(args, " "), err, out) + } + } + + // Compile a main package. + write("main.go", "package main; func main() {}") + runGo("tool", "compile", "-p", "main", "main.go") + runGo("tool", "pack", "c", "main.a", "main.o") + + // Add an extra section with a short, non-.o name. + // This simulates an alternative build system. + write(".facts", "this is not an object file") + runGo("tool", "pack", "r", "main.a", ".facts") + + // Verify that the linker does not attempt + // to compile the extra section. + runGo("tool", "link", "main.a") +} + +func TestUnresolved(t *testing.T) { + testenv.MustHaveGoBuild(t) + + tmpdir, err := ioutil.TempDir("", "unresolved-") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpdir) + + write := func(name, content string) { + err := ioutil.WriteFile(filepath.Join(tmpdir, name), []byte(content), 0666) + if err != nil { + t.Fatal(err) + } + } + + // Test various undefined references. Because of issue #29852, + // this used to give confusing error messages because the + // linker would find an undefined reference to "zero" created + // by the runtime package. 
+ + write("main.go", `package main + +func main() { + x() +} + +func x() +`) + write("main.s", ` +TEXT ·x(SB),0,$0 + MOVD zero<>(SB), AX + MOVD zero(SB), AX + MOVD ·zero(SB), AX + RET +`) + cmd := exec.Command(testenv.GoToolPath(t), "build") + cmd.Dir = tmpdir + cmd.Env = append(os.Environ(), []string{"GOARCH=amd64", "GOOS=linux"}...) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("expected build to fail, but it succeeded") + } + out = regexp.MustCompile("(?m)^#.*\n").ReplaceAll(out, nil) + got := string(out) + want := `main.x: relocation target zero not defined +main.x: relocation target zero not defined +main.x: relocation target main.zero not defined +` + if want != got { + t.Fatalf("want:\n%sgot:\n%s", want, got) + } +} diff --git a/src/cmd/nm/nm_test.go b/src/cmd/nm/nm_test.go index ccf5682d695ae..1b5bd21ad5dc0 100644 --- a/src/cmd/nm/nm_test.go +++ b/src/cmd/nm/nm_test.go @@ -5,8 +5,6 @@ package main import ( - "bufio" - "bytes" "fmt" "internal/testenv" "io/ioutil" @@ -55,18 +53,20 @@ func testMain(m *testing.M) int { } func TestNonGoExecs(t *testing.T) { + t.Parallel() testfiles := []string{ - "elf/testdata/gcc-386-freebsd-exec", - "elf/testdata/gcc-amd64-linux-exec", - "macho/testdata/gcc-386-darwin-exec", - "macho/testdata/gcc-amd64-darwin-exec", - // "pe/testdata/gcc-amd64-mingw-exec", // no symbols! - "pe/testdata/gcc-386-mingw-exec", - "plan9obj/testdata/amd64-plan9-exec", - "plan9obj/testdata/386-plan9-exec", + "debug/elf/testdata/gcc-386-freebsd-exec", + "debug/elf/testdata/gcc-amd64-linux-exec", + "debug/macho/testdata/gcc-386-darwin-exec", + "debug/macho/testdata/gcc-amd64-darwin-exec", + // "debug/pe/testdata/gcc-amd64-mingw-exec", // no symbols! 
+ "debug/pe/testdata/gcc-386-mingw-exec", + "debug/plan9obj/testdata/amd64-plan9-exec", + "debug/plan9obj/testdata/386-plan9-exec", + "internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec", } for _, f := range testfiles { - exepath := filepath.Join(runtime.GOROOT(), "src", "debug", f) + exepath := filepath.Join(runtime.GOROOT(), "src", f) cmd := exec.Command(testnmpath, exepath) out, err := cmd.CombinedOutput() if err != nil { @@ -76,6 +76,7 @@ func TestNonGoExecs(t *testing.T) { } func testGoExec(t *testing.T, iscgo, isexternallinker bool) { + t.Parallel() tmpdir, err := ioutil.TempDir("", "TestGoExec") if err != nil { t.Fatal(err) @@ -139,17 +140,35 @@ func testGoExec(t *testing.T, iscgo, isexternallinker bool) { if err != nil { t.Fatalf("go tool nm: %v\n%s", err, string(out)) } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) + + relocated := func(code string) bool { + if runtime.GOOS == "aix" { + // On AIX, .data and .bss addresses are changed by the loader. + // Therefore, the values returned by the exec aren't the same + // than the ones inside the symbol table. 
+ switch code { + case "D", "d", "B", "b": + return true + } + } + if runtime.GOOS == "windows" && runtime.GOARCH == "arm" { + return true + } + return false + } + dups := make(map[string]bool) - for scanner.Scan() { - f := strings.Fields(scanner.Text()) + for _, line := range strings.Split(string(out), "\n") { + f := strings.Fields(line) if len(f) < 3 { continue } name := f[2] if addr, found := names[name]; found { if want, have := addr, "0x"+f[0]; have != want { - t.Errorf("want %s address for %s symbol, but have %s", want, name, have) + if !relocated(f[1]) { + t.Errorf("want %s address for %s symbol, but have %s", want, name, have) + } } delete(names, name) } @@ -167,10 +186,6 @@ func testGoExec(t *testing.T, iscgo, isexternallinker bool) { delete(runtimeSyms, name) } } - err = scanner.Err() - if err != nil { - t.Fatalf("error reading nm output: %v", err) - } if len(names) > 0 { t.Errorf("executable is missing %v symbols", names) } @@ -184,6 +199,7 @@ func TestGoExec(t *testing.T) { } func testGoLib(t *testing.T, iscgo bool) { + t.Parallel() tmpdir, err := ioutil.TempDir("", "TestGoLib") if err != nil { t.Fatal(err) @@ -252,9 +268,9 @@ func testGoLib(t *testing.T, iscgo bool) { syms = append(syms, symType{"T", "cgofunc", true, false}) } } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - for scanner.Scan() { - f := strings.Fields(scanner.Text()) + + for _, line := range strings.Split(string(out), "\n") { + f := strings.Fields(line) var typ, name string var csym bool if iscgo { @@ -281,10 +297,6 @@ func testGoLib(t *testing.T, iscgo bool) { } } } - err = scanner.Err() - if err != nil { - t.Fatalf("error reading nm output: %v", err) - } for _, sym := range syms { if !sym.Found { t.Errorf("cannot found symbol %s %s", sym.Type, sym.Name) diff --git a/src/cmd/objdump/main.go b/src/cmd/objdump/main.go index 71636990a1d12..6a60697ebd4d6 100644 --- a/src/cmd/objdump/main.go +++ b/src/cmd/objdump/main.go @@ -75,6 +75,7 @@ func main() { if err != nil { log.Fatal(err) 
} + defer f.Close() dis, err := f.Disasm() if err != nil { @@ -87,7 +88,6 @@ func main() { case 1: // disassembly of entire object dis.Print(os.Stdout, symRE, 0, ^uint64(0), *printCode) - os.Exit(0) case 3: // disassembly of PC range @@ -100,6 +100,5 @@ func main() { log.Fatalf("invalid end PC: %v", err) } dis.Print(os.Stdout, symRE, start, end, *printCode) - os.Exit(0) } } diff --git a/src/cmd/pprof/readlineui.go b/src/cmd/pprof/readlineui.go index bf2f321184457..5b9701a0e22f5 100644 --- a/src/cmd/pprof/readlineui.go +++ b/src/cmd/pprof/readlineui.go @@ -101,7 +101,7 @@ func colorize(msg string) string { return colorEscape + msg + colorResetEscape } -// IsTerminal returns whether the UI is known to be tied to an +// IsTerminal reports whether the UI is known to be tied to an // interactive terminal (as opposed to being redirected to a file). func (r *readlineUI) IsTerminal() bool { const stdout = 1 diff --git a/src/cmd/trace/annotations.go b/src/cmd/trace/annotations.go index 96c109e0f251a..24984156811c4 100644 --- a/src/cmd/trace/annotations.go +++ b/src/cmd/trace/annotations.go @@ -438,8 +438,8 @@ func (task *taskDesc) complete() bool { return task.create != nil && task.end != nil } -// descendents returns all the task nodes in the subtree rooted from this task. -func (task *taskDesc) decendents() []*taskDesc { +// descendants returns all the task nodes in the subtree rooted from this task. +func (task *taskDesc) descendants() []*taskDesc { if task == nil { return nil } @@ -507,7 +507,7 @@ func (task *taskDesc) overlappingGCDuration(evs []*trace.Event) (overlapping tim return overlapping } -// overlappingInstant returns true if the instantaneous event, ev, occurred during +// overlappingInstant reports whether the instantaneous event, ev, occurred during // any of the task's region if ev is a goroutine-local event, or overlaps with the // task's lifetime if ev is a global event. 
func (task *taskDesc) overlappingInstant(ev *trace.Event) bool { @@ -538,7 +538,7 @@ func (task *taskDesc) overlappingInstant(ev *trace.Event) bool { return false } -// overlappingDuration returns whether the durational event, ev, overlaps with +// overlappingDuration reports whether the durational event, ev, overlaps with // any of the task's region if ev is a goroutine-local event, or overlaps with // the task's lifetime if ev is a global event. It returns the overlapping time // as well. diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go index 0c98b85c374e1..f94586abf3da6 100644 --- a/src/cmd/trace/main.go +++ b/src/cmd/trace/main.go @@ -188,7 +188,7 @@ var templMain = template.Must(template.New("").Parse(` {{if $}} {{range $e := $}} - View trace ({{$e.Name}})
    + View trace ({{$e.Name}})
    {{end}}
    {{else}} @@ -201,6 +201,7 @@ var templMain = template.Must(template.New("").Parse(` Scheduler latency profile ()
    User-defined tasks
    User-defined regions
    +Minimum mutator utilization
    `)) diff --git a/src/cmd/trace/mmu.go b/src/cmd/trace/mmu.go new file mode 100644 index 0000000000000..b92fac652cce8 --- /dev/null +++ b/src/cmd/trace/mmu.go @@ -0,0 +1,403 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimum mutator utilization (MMU) graphing. + +// TODO: +// +// In worst window list, show break-down of GC utilization sources +// (STW, assist, etc). Probably requires a different MutatorUtil +// representation. +// +// When a window size is selected, show a second plot of the mutator +// utilization distribution for that window size. +// +// Render plot progressively so rough outline is visible quickly even +// for very complex MUTs. Start by computing just a few window sizes +// and then add more window sizes. +// +// Consider using sampling to compute an approximate MUT. This would +// work by sampling the mutator utilization at randomly selected +// points in time in the trace to build an empirical distribution. We +// could potentially put confidence intervals on these estimates and +// render this progressively as we refine the distributions. 
+ +package main + +import ( + "encoding/json" + "fmt" + "internal/trace" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" +) + +func init() { + http.HandleFunc("/mmu", httpMMU) + http.HandleFunc("/mmuPlot", httpMMUPlot) + http.HandleFunc("/mmuDetails", httpMMUDetails) +} + +var utilFlagNames = map[string]trace.UtilFlags{ + "perProc": trace.UtilPerProc, + "stw": trace.UtilSTW, + "background": trace.UtilBackground, + "assist": trace.UtilAssist, + "sweep": trace.UtilSweep, +} + +type mmuCacheEntry struct { + init sync.Once + util [][]trace.MutatorUtil + mmuCurve *trace.MMUCurve + err error +} + +var mmuCache struct { + m map[trace.UtilFlags]*mmuCacheEntry + lock sync.Mutex +} + +func init() { + mmuCache.m = make(map[trace.UtilFlags]*mmuCacheEntry) +} + +func getMMUCurve(r *http.Request) ([][]trace.MutatorUtil, *trace.MMUCurve, error) { + var flags trace.UtilFlags + for _, flagStr := range strings.Split(r.FormValue("flags"), "|") { + flags |= utilFlagNames[flagStr] + } + + mmuCache.lock.Lock() + c := mmuCache.m[flags] + if c == nil { + c = new(mmuCacheEntry) + mmuCache.m[flags] = c + } + mmuCache.lock.Unlock() + + c.init.Do(func() { + events, err := parseEvents() + if err != nil { + c.err = err + } else { + c.util = trace.MutatorUtilization(events, flags) + c.mmuCurve = trace.NewMMUCurve(c.util) + } + }) + return c.util, c.mmuCurve, c.err +} + +// httpMMU serves the MMU plot page. +func httpMMU(w http.ResponseWriter, r *http.Request) { + http.ServeContent(w, r, "", time.Time{}, strings.NewReader(templMMU)) +} + +// httpMMUPlot serves the JSON data for the MMU plot. 
+func httpMMUPlot(w http.ResponseWriter, r *http.Request) { + mu, mmuCurve, err := getMMUCurve(r) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError) + return + } + + var quantiles []float64 + for _, flagStr := range strings.Split(r.FormValue("flags"), "|") { + if flagStr == "mut" { + quantiles = []float64{0, 1 - .999, 1 - .99, 1 - .95} + break + } + } + + // Find a nice starting point for the plot. + xMin := time.Second + for xMin > 1 { + if mmu := mmuCurve.MMU(xMin); mmu < 0.0001 { + break + } + xMin /= 1000 + } + // Cover six orders of magnitude. + xMax := xMin * 1e6 + // But no more than the length of the trace. + minEvent, maxEvent := mu[0][0].Time, mu[0][len(mu[0])-1].Time + for _, mu1 := range mu[1:] { + if mu1[0].Time < minEvent { + minEvent = mu1[0].Time + } + if mu1[len(mu1)-1].Time > maxEvent { + maxEvent = mu1[len(mu1)-1].Time + } + } + if maxMax := time.Duration(maxEvent - minEvent); xMax > maxMax { + xMax = maxMax + } + // Compute MMU curve. + logMin, logMax := math.Log(float64(xMin)), math.Log(float64(xMax)) + const samples = 100 + plot := make([][]float64, samples) + for i := 0; i < samples; i++ { + window := time.Duration(math.Exp(float64(i)/(samples-1)*(logMax-logMin) + logMin)) + if quantiles == nil { + plot[i] = make([]float64, 2) + plot[i][1] = mmuCurve.MMU(window) + } else { + plot[i] = make([]float64, 1+len(quantiles)) + copy(plot[i][1:], mmuCurve.MUD(window, quantiles)) + } + plot[i][0] = float64(window) + } + + // Create JSON response. + err = json.NewEncoder(w).Encode(map[string]interface{}{"xMin": int64(xMin), "xMax": int64(xMax), "quantiles": quantiles, "curve": plot}) + if err != nil { + log.Printf("failed to serialize response: %v", err) + return + } +} + +var templMMU = ` + + + + + + + + + +
    +
    Loading plot...
    +
    +

    + View
    + + ?Consider whole system utilization. For example, if one of four procs is available to the mutator, mutator utilization will be 0.25. This is the standard definition of an MMU.
    + + ?Consider per-goroutine utilization. When even one goroutine is interrupted by GC, mutator utilization is 0.
    +

    +

    + Include
    + + ?Stop-the-world stops all goroutines simultaneously.
    + + ?Background workers are GC-specific goroutines. 25% of the CPU is dedicated to background workers during GC.
    + + ?Mark assists are performed by allocation to prevent the mutator from outpacing GC.
    + + ?Sweep reclaims unused memory between GCs. (Enabling this may be very slow.).
    +

    +

    + Display
    + + ?Display percentile mutator utilization in addition to minimum. E.g., p99 MU drops the worst 1% of windows.
    +

    +
    +
    +
    Select a point for details.
    + + +` + +// httpMMUDetails serves details of an MMU graph at a particular window. +func httpMMUDetails(w http.ResponseWriter, r *http.Request) { + _, mmuCurve, err := getMMUCurve(r) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError) + return + } + + windowStr := r.FormValue("window") + window, err := strconv.ParseUint(windowStr, 10, 64) + if err != nil { + http.Error(w, fmt.Sprintf("failed to parse window parameter %q: %v", windowStr, err), http.StatusBadRequest) + return + } + worst := mmuCurve.Examples(time.Duration(window), 10) + + // Construct a link for each window. + var links []linkedUtilWindow + for _, ui := range worst { + links = append(links, newLinkedUtilWindow(ui, time.Duration(window))) + } + + err = json.NewEncoder(w).Encode(links) + if err != nil { + log.Printf("failed to serialize trace: %v", err) + return + } +} + +type linkedUtilWindow struct { + trace.UtilWindow + URL string +} + +func newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow { + // Find the range containing this window. 
+ var r Range + for _, r = range ranges { + if r.EndTime > ui.Time { + break + } + } + return linkedUtilWindow{ui, fmt.Sprintf("%s#%v:%v", r.URL(), float64(ui.Time)/1e6, float64(ui.Time+int64(window))/1e6)} +} diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 62ff4d68c51c5..f39a397d0d650 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -38,7 +38,7 @@ func httpTrace(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - html := strings.Replace(templTrace, "{{PARAMS}}", r.Form.Encode(), -1) + html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode()) w.Write([]byte(html)) } @@ -220,7 +220,7 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { params.startTime = task.firstTimestamp() - 1 params.endTime = task.lastTimestamp() + 1 params.maing = goid - params.tasks = task.decendents() + params.tasks = task.descendants() gs := map[uint64]bool{} for _, t := range params.tasks { // find only directly involved goroutines @@ -244,7 +244,7 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { params.mode = modeTaskOriented params.startTime = task.firstTimestamp() - 1 params.endTime = task.lastTimestamp() + 1 - params.tasks = task.decendents() + params.tasks = task.descendants() } start := int64(0) @@ -271,9 +271,15 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { } type Range struct { - Name string - Start int - End int + Name string + Start int + End int + StartTime int64 + EndTime int64 +} + +func (r Range) URL() string { + return fmt.Sprintf("/trace?start=%d&end=%d", r.Start, r.End) } // splitTrace splits the trace into a number of ranges, @@ -344,10 +350,14 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) { start := 0 for i, ev := range sizes { if sum+ev.Sz > max { + startTime := time.Duration(sizes[start].Time * 1000) + endTime := time.Duration(ev.Time * 1000) ranges = append(ranges, Range{ - Name: 
fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(ev.Time*1000)), - Start: start, - End: i + 1, + Name: fmt.Sprintf("%v-%v", startTime, endTime), + Start: start, + End: i + 1, + StartTime: int64(startTime), + EndTime: int64(endTime), }) start = i + 1 sum = minSize @@ -362,9 +372,11 @@ func splittingTraceConsumer(max int) (*splitter, traceConsumer) { if end := len(sizes) - 1; start < end { ranges = append(ranges, Range{ - Name: fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)), - Start: start, - End: end, + Name: fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)), + Start: start, + End: end, + StartTime: int64(sizes[start].Time * 1000), + EndTime: int64(sizes[end].Time * 1000), }) } s.Ranges = ranges @@ -685,13 +697,14 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { } ctx.emitSlice(&fakeMarkStart, text) case trace.EvGCSweepStart: - slice := ctx.emitSlice(ev, "SWEEP") + slice := ctx.makeSlice(ev, "SWEEP") if done := ev.Link; done != nil && done.Args[0] != 0 { slice.Arg = struct { Swept uint64 `json:"Swept bytes"` Reclaimed uint64 `json:"Reclaimed bytes"` }{done.Args[0], done.Args[1]} } + ctx.emit(slice) case trace.EvGoStart, trace.EvGoStartLabel: info := getGInfo(ev.G) if ev.Type == trace.EvGoStartLabel { @@ -846,7 +859,11 @@ func (ctx *traceContext) proc(ev *trace.Event) uint64 { } } -func (ctx *traceContext) emitSlice(ev *trace.Event, name string) *ViewerEvent { +func (ctx *traceContext) emitSlice(ev *trace.Event, name string) { + ctx.emit(ctx.makeSlice(ev, name)) +} + +func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *ViewerEvent { // If ViewerEvent.Dur is not a positive value, // trace viewer handles it as a non-terminating time interval. // Avoid it by setting the field with a small value. 
@@ -885,7 +902,6 @@ func (ctx *traceContext) emitSlice(ev *trace.Event, name string) *ViewerEvent { sl.Cname = colorLightGrey } } - ctx.emit(sl) return sl } diff --git a/src/cmd/vendor/README b/src/cmd/vendor/README index 740905c652fdb..7eb97a1b9b16f 100644 --- a/src/cmd/vendor/README +++ b/src/cmd/vendor/README @@ -19,3 +19,7 @@ make govendor work and will create the .cache folder in $GOROOT as a side-effect. Please make sure to delete the directory and not to include the directory in the commit by accident. + +The vendored copy of golang.org/x/tools is maintained by +running the update-xtools.sh script in this directory, +not by govendor. \ No newline at end of file diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/driver/driver.go index 3735d6ace9900..b1c745bacdb59 100644 --- a/src/cmd/vendor/github.com/google/pprof/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -17,6 +17,7 @@ package driver import ( "io" + "net/http" "regexp" "time" @@ -48,13 +49,14 @@ func (o *Options) internalOptions() *plugin.Options { } } return &plugin.Options{ - Writer: o.Writer, - Flagset: o.Flagset, - Fetch: o.Fetch, - Sym: sym, - Obj: obj, - UI: o.UI, - HTTPServer: httpServer, + Writer: o.Writer, + Flagset: o.Flagset, + Fetch: o.Fetch, + Sym: sym, + Obj: obj, + UI: o.UI, + HTTPServer: httpServer, + HTTPTransport: o.HTTPTransport, } } @@ -64,13 +66,14 @@ type HTTPServerArgs plugin.HTTPServerArgs // Options groups all the optional plugins into pprof. 
type Options struct { - Writer Writer - Flagset FlagSet - Fetch Fetcher - Sym Symbolizer - Obj ObjTool - UI UI - HTTPServer func(*HTTPServerArgs) error + Writer Writer + Flagset FlagSet + Fetch Fetcher + Sym Symbolizer + Obj ObjTool + UI UI + HTTPServer func(*HTTPServerArgs) error + HTTPTransport http.RoundTripper } // Writer provides a mechanism to write data under a certain name, @@ -100,12 +103,16 @@ type FlagSet interface { // single flag StringList(name string, def string, usage string) *[]*string - // ExtraUsage returns any additional text that should be - // printed after the standard usage message. - // The typical use of ExtraUsage is to show any custom flags - // defined by the specific pprof plugins being used. + // ExtraUsage returns any additional text that should be printed after the + // standard usage message. The extra usage message returned includes all text + // added with AddExtraUsage(). + // The typical use of ExtraUsage is to show any custom flags defined by the + // specific pprof plugins being used. ExtraUsage() string + // AddExtraUsage appends additional text to the end of the extra usage message. + AddExtraUsage(eu string) + // Parse initializes the flags with their values for this run // and returns the non-flag command line arguments. 
// If an unknown flag is encountered or there are no arguments, diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go index 12b6a5c4b262e..309561112ce2c 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils.go @@ -18,11 +18,14 @@ package binutils import ( "debug/elf" "debug/macho" + "encoding/binary" "fmt" + "io" "os" "os/exec" "path/filepath" "regexp" + "runtime" "strings" "sync" @@ -173,12 +176,8 @@ func (bu *Binutils) Open(name string, start, limit, offset uint64) (plugin.ObjFi b := bu.get() // Make sure file is a supported executable. - // The pprof driver uses Open to sniff the difference - // between an executable and a profile. - // For now, only ELF is supported. - // Could read the first few bytes of the file and - // use a table of prefixes if we need to support other - // systems at some point. + // This uses magic numbers, mainly to provide better error messages but + // it should also help speed. if _, err := os.Stat(name); err != nil { // For testing, do not require file name to exist. @@ -188,21 +187,54 @@ func (bu *Binutils) Open(name string, start, limit, offset uint64) (plugin.ObjFi return nil, err } - if f, err := b.openELF(name, start, limit, offset); err == nil { + // Read the first 4 bytes of the file. + + f, err := os.Open(name) + if err != nil { + return nil, fmt.Errorf("error opening %s: %v", name, err) + } + defer f.Close() + + var header [4]byte + if _, err = io.ReadFull(f, header[:]); err != nil { + return nil, fmt.Errorf("error reading magic number from %s: %v", name, err) + } + + elfMagic := string(header[:]) + + // Match against supported file types. 
+ if elfMagic == elf.ELFMAG { + f, err := b.openELF(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading ELF file %s: %v", name, err) + } return f, nil } - if f, err := b.openMachO(name, start, limit, offset); err == nil { + + // Mach-O magic numbers can be big or little endian. + machoMagicLittle := binary.LittleEndian.Uint32(header[:]) + machoMagicBig := binary.BigEndian.Uint32(header[:]) + + if machoMagicLittle == macho.Magic32 || machoMagicLittle == macho.Magic64 || + machoMagicBig == macho.Magic32 || machoMagicBig == macho.Magic64 { + f, err := b.openMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading Mach-O file %s: %v", name, err) + } + return f, nil + } + if machoMagicLittle == macho.MagicFat || machoMagicBig == macho.MagicFat { + f, err := b.openFatMachO(name, start, limit, offset) + if err != nil { + return nil, fmt.Errorf("error reading fat Mach-O file %s: %v", name, err) + } return f, nil } - return nil, fmt.Errorf("unrecognized binary: %s", name) + + return nil, fmt.Errorf("unrecognized binary format: %s", name) } -func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { - of, err := macho.Open(name) - if err != nil { - return nil, fmt.Errorf("error parsing %s: %v", name, err) - } - defer of.Close() +func (b *binrep) openMachOCommon(name string, of *macho.File, start, limit, offset uint64) (plugin.ObjFile, error) { // Subtract the load address of the __TEXT section. Usually 0 for shared // libraries or 0x100000000 for executables. 
You can check this value by @@ -225,6 +257,53 @@ func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.Obj return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil } +func (b *binrep) openFatMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.OpenFat(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + if len(of.Arches) == 0 { + return nil, fmt.Errorf("empty fat Mach-O file: %s", name) + } + + var arch macho.Cpu + // Use the host architecture. + // TODO: This is not ideal because the host architecture may not be the one + // that was profiled. E.g. an amd64 host can profile a 386 program. + switch runtime.GOARCH { + case "386": + arch = macho.Cpu386 + case "amd64", "amd64p32": + arch = macho.CpuAmd64 + case "arm", "armbe", "arm64", "arm64be": + arch = macho.CpuArm + case "ppc": + arch = macho.CpuPpc + case "ppc64", "ppc64le": + arch = macho.CpuPpc64 + default: + return nil, fmt.Errorf("unsupported host architecture for %s: %s", name, runtime.GOARCH) + } + for i := range of.Arches { + if of.Arches[i].Cpu == arch { + return b.openMachOCommon(name, of.Arches[i].File, start, limit, offset) + } + } + return nil, fmt.Errorf("architecture not found in %s: %s", name, runtime.GOARCH) +} + +func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + return b.openMachOCommon(name, of, start, limit, offset) +} + func (b *binrep) openELF(name string, start, limit, offset uint64) (plugin.ObjFile, error) { ef, err := elf.Open(name) if err != nil { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils_test.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils_test.go index d844ed7e4e7e0..17d4225a87fd1 100644 --- 
a/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/binutils_test.go @@ -22,6 +22,7 @@ import ( "reflect" "regexp" "runtime" + "strings" "testing" "github.com/google/pprof/internal/plugin" @@ -361,3 +362,31 @@ func TestLLVMSymbolizer(t *testing.T) { } } } + +func TestOpenMalformedELF(t *testing.T) { + // Test that opening a malformed ELF file will report an error containing + // the word "ELF". + bu := &Binutils{} + _, err := bu.Open(filepath.Join("testdata", "malformed_elf"), 0, 0, 0) + if err == nil { + t.Fatalf("Open: unexpected success") + } + + if !strings.Contains(err.Error(), "ELF") { + t.Errorf("Open: got %v, want error containing 'ELF'", err) + } +} + +func TestOpenMalformedMachO(t *testing.T) { + // Test that opening a malformed Mach-O file will report an error containing + // the word "Mach-O". + bu := &Binutils{} + _, err := bu.Open(filepath.Join("testdata", "malformed_macho"), 0, 0, 0) + if err == nil { + t.Fatalf("Open: unexpected success") + } + + if !strings.Contains(err.Error(), "Mach-O") { + t.Errorf("Open: got %v, want error containing 'Mach-O'", err) + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_elf b/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_elf new file mode 100644 index 0000000000000..f0b503b0b6c52 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_elf @@ -0,0 +1 @@ +ELF \ No newline at end of file diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_macho b/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_macho new file mode 100644 index 0000000000000..b01ddf69a9a27 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/testdata/malformed_macho @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go index a5153e151132b..dfedf9d8491cd 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/cli.go @@ -45,8 +45,8 @@ type source struct { func parseFlags(o *plugin.Options) (*source, []string, error) { flag := o.Flagset // Comparisons. - flagBase := flag.StringList("base", "", "Source for base profile for profile subtraction") - flagDiffBase := flag.StringList("diff_base", "", "Source for diff base profile for comparison") + flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison") + flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction") // Source options. flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization") flagBuildID := flag.String("buildid", "", "Override build id for first mapping") @@ -312,7 +312,8 @@ var usageMsgSrc = "\n\n" + " -buildid Override build id for main binary\n" + " -add_comment Free-form annotation to add to the profile\n" + " Displayed on some reports or with pprof -comments\n" + - " -base source Source of profile to use as baseline\n" + + " -diff_base source Source of base profile for comparison\n" + + " -base source Source of base profile for profile subtraction\n" + " profile.pb.gz Profile in compressed protobuf format\n" + " legacy_profile Profile in legacy pprof format\n" + " http://host/profile URL for profile handler to retrieve\n" + diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go index 91d32d1e716d4..ab073d878d547 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go @@ -228,17 +228,17 @@ var pprofVariables = variables{ // Output granularity 
"functions": &variable{boolKind, "t", "granularity", helpText( "Aggregate at the function level.", - "Takes into account the filename/lineno where the function was defined.")}, + "Ignores the filename where the function was defined.")}, + "filefunctions": &variable{boolKind, "t", "granularity", helpText( + "Aggregate at the function level.", + "Takes into account the filename where the function was defined.")}, "files": &variable{boolKind, "f", "granularity", "Aggregate at the file level."}, "lines": &variable{boolKind, "f", "granularity", "Aggregate at the source code line level."}, "addresses": &variable{boolKind, "f", "granularity", helpText( - "Aggregate at the function level.", + "Aggregate at the address level.", "Includes functions' addresses in the output.")}, - "noinlines": &variable{boolKind, "f", "granularity", helpText( - "Aggregate at the function level.", - "Attributes inlined functions to their first out-of-line caller.")}, - "addressnoinlines": &variable{boolKind, "f", "granularity", helpText( - "Aggregate at the function level, including functions' addresses in the output.", + "noinlines": &variable{boolKind, "f", "", helpText( + "Ignore inlines.", "Attributes inlined functions to their first out-of-line caller.")}, } @@ -337,21 +337,27 @@ func listHelp(c string, redirect bool) string { // browsers returns a list of commands to attempt for web visualization. func browsers() []string { - cmds := []string{"chrome", "google-chrome", "firefox"} + var cmds []string + if userBrowser := os.Getenv("BROWSER"); userBrowser != "" { + cmds = append(cmds, userBrowser) + } switch runtime.GOOS { case "darwin": - return append(cmds, "/usr/bin/open") + cmds = append(cmds, "/usr/bin/open") case "windows": - return append(cmds, "cmd /c start") + cmds = append(cmds, "cmd /c start") default: - userBrowser := os.Getenv("BROWSER") - if userBrowser != "" { - cmds = append([]string{userBrowser, "sensible-browser"}, cmds...) 
- } else { - cmds = append([]string{"sensible-browser"}, cmds...) + // Commands opening browsers are prioritized over xdg-open, so browser() + // command can be used on linux to open the .svg file generated by the -web + // command (the .svg file includes embedded javascript so is best viewed in + // a browser). + cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox", "sensible-browser"}...) + if os.Getenv("DISPLAY") != "" { + // xdg-open is only for use in a desktop environment. + cmds = append(cmds, "xdg-open") } - return append(cmds, "xdg-open") } + return cmds } var kcachegrind = []string{"kcachegrind"} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go index 2dabc3017b57e..45f1846749cbe 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go @@ -152,20 +152,33 @@ func generateReport(p *profile.Profile, cmd []string, vars variables, o *plugin. } func applyCommandOverrides(cmd string, outputFormat int, v variables) variables { - trim, tagfilter, filter := v["trim"].boolValue(), true, true + // Some report types override the trim flag to false below. This is to make + // sure the default heuristics of excluding insignificant nodes and edges + // from the call graph do not apply. One example where it is important is + // annotated source or disassembly listing. Those reports run on a specific + // function (or functions), but the trimming is applied before the function + // data is selected. So, with trimming enabled, the report could end up + // showing no data if the specified function is "uninteresting" as far as the + // trimming is concerned. 
+ trim := v["trim"].boolValue() switch cmd { - case "callgrind", "kcachegrind": - trim = false - v.set("addresses", "t") case "disasm", "weblist": trim = false - v.set("addressnoinlines", "t") + v.set("addresses", "t") + // Force the 'noinlines' mode so that source locations for a given address + // collapse and there is only one for the given address. Without this + // cumulative metrics would be double-counted when annotating the assembly. + // This is because the merge is done by address and in case of an inlined + // stack each of the inlined entries is a separate callgraph node. + v.set("noinlines", "t") case "peek": - trim, tagfilter, filter = false, false, false + trim = false case "list": - v.set("nodecount", "0") + trim = false v.set("lines", "t") + // Do not force 'noinlines' to be false so that specifying + // "-list foo -noinlines" is supported and works as expected. case "text", "top", "topproto": if v["nodecount"].intValue() == -1 { v.set("nodecount", "0") @@ -176,9 +189,11 @@ func applyCommandOverrides(cmd string, outputFormat int, v variables) variables } } - if outputFormat == report.Proto || outputFormat == report.Raw { - trim, tagfilter, filter = false, false, false + switch outputFormat { + case report.Proto, report.Raw, report.Callgrind: + trim = false v.set("addresses", "t") + v.set("noinlines", "f") } if !trim { @@ -186,43 +201,32 @@ func applyCommandOverrides(cmd string, outputFormat int, v variables) variables v.set("nodefraction", "0") v.set("edgefraction", "0") } - if !tagfilter { - v.set("tagfocus", "") - v.set("tagignore", "") - } - if !filter { - v.set("focus", "") - v.set("ignore", "") - v.set("hide", "") - v.set("show", "") - v.set("show_from", "") - } return v } func aggregate(prof *profile.Profile, v variables) error { - var inlines, function, filename, linenumber, address bool + var function, filename, linenumber, address bool + inlines := !v["noinlines"].boolValue() switch { case v["addresses"].boolValue(): - return nil + if 
inlines { + return nil + } + function = true + filename = true + linenumber = true + address = true case v["lines"].boolValue(): - inlines = true function = true filename = true linenumber = true case v["files"].boolValue(): - inlines = true filename = true case v["functions"].boolValue(): - inlines = true - function = true - case v["noinlines"].boolValue(): function = true - case v["addressnoinlines"].boolValue(): + case v["filefunctions"].boolValue(): function = true filename = true - linenumber = true - address = true default: return fmt.Errorf("unexpected granularity") } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_test.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_test.go index ff6afe9cff7e5..90f89dc7bc85a 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver_test.go @@ -53,6 +53,9 @@ func TestParse(t *testing.T) { flags, source string }{ {"text,functions,flat", "cpu"}, + {"text,functions,noinlines,flat", "cpu"}, + {"text,filefunctions,noinlines,flat", "cpu"}, + {"text,addresses,noinlines,flat", "cpu"}, {"tree,addresses,flat,nodecount=4", "cpusmall"}, {"text,functions,flat,nodecount=5,call_tree", "unknown"}, {"text,alloc_objects,flat", "heap_alloc"}, @@ -63,6 +66,7 @@ func TestParse(t *testing.T) { {"text,lines,cum,show=[12]00", "cpu"}, {"text,lines,cum,hide=line[X3]0,focus=[12]00", "cpu"}, {"topproto,lines,cum,hide=mangled[X3]0", "cpu"}, + {"topproto,lines", "cpu"}, {"tree,lines,cum,focus=[24]00", "heap"}, {"tree,relative_percentages,cum,focus=[24]00", "heap"}, {"tree,lines,cum,show_from=line2", "cpu"}, @@ -92,6 +96,8 @@ func TestParse(t *testing.T) { {"peek=line.*01", "cpu"}, {"weblist=line[13],addresses,flat", "cpu"}, {"tags,tagfocus=400kb:", "heap_request"}, + {"dot", "longNameFuncs"}, + {"text", "longNameFuncs"}, } baseVars := pprofVariables @@ -108,9 +114,6 @@ func TestParse(t *testing.T) { flags := 
strings.Split(tc.flags, ",") - // Skip the output format in the first flag, to output to a proto - addFlags(&f, flags[1:]) - // Encode profile into a protobuf and decode it again. protoTempFile, err := ioutil.TempFile("", "profile_proto") if err != nil { @@ -123,11 +126,13 @@ func TestParse(t *testing.T) { if flags[0] == "topproto" { f.bools["proto"] = false f.bools["topproto"] = true + f.bools["addresses"] = true } // First pprof invocation to save the profile into a profile.proto. - o1 := setDefaults(nil) - o1.Flagset = f + // Pass in flag set hen setting defaults, because otherwise default + // transport will try to add flags to the default flag set. + o1 := setDefaults(&plugin.Options{Flagset: f}) o1.Fetch = testFetcher{} o1.Sym = testSymbolizer{} o1.UI = testUI @@ -144,28 +149,28 @@ func TestParse(t *testing.T) { } defer os.Remove(outputTempFile.Name()) defer outputTempFile.Close() + + f = baseFlags() f.strings["output"] = outputTempFile.Name() f.args = []string{protoTempFile.Name()} - var solution string + delete(f.bools, "proto") + addFlags(&f, flags) + solution := solutionFilename(tc.source, &f) // Apply the flags for the second pprof run, and identify name of // the file containing expected results if flags[0] == "topproto" { + addFlags(&f, flags) solution = solutionFilename(tc.source, &f) delete(f.bools, "topproto") f.bools["text"] = true - } else { - delete(f.bools, "proto") - addFlags(&f, flags[:1]) - solution = solutionFilename(tc.source, &f) } - // The add_comment flag is not idempotent so only apply it on the first run. - delete(f.strings, "add_comment") // Second pprof invocation to read the profile from profile.proto // and generate a report. - o2 := setDefaults(nil) - o2.Flagset = f + // Pass in flag set hen setting defaults, because otherwise default + // transport will try to add flags to the default flag set. 
+ o2 := setDefaults(&plugin.Options{Flagset: f}) o2.Sym = testSymbolizeDemangler{} o2.Obj = new(mockObjTool) o2.UI = testUI @@ -250,7 +255,8 @@ func testSourceURL(port int) string { func solutionFilename(source string, f *testFlags) string { name := []string{"pprof", strings.TrimPrefix(source, testSourceURL(8000))} name = addString(name, f, []string{"flat", "cum"}) - name = addString(name, f, []string{"functions", "files", "lines", "addresses"}) + name = addString(name, f, []string{"functions", "filefunctions", "files", "lines", "addresses"}) + name = addString(name, f, []string{"noinlines"}) name = addString(name, f, []string{"inuse_space", "inuse_objects", "alloc_space", "alloc_objects"}) name = addString(name, f, []string{"relative_percentages"}) name = addString(name, f, []string{"seconds"}) @@ -293,6 +299,8 @@ type testFlags struct { func (testFlags) ExtraUsage() string { return "" } +func (testFlags) AddExtraUsage(eu string) {} + func (f testFlags) Bool(s string, d bool, c string) *bool { if b, ok := f.bools[s]; ok { return &b @@ -436,6 +444,8 @@ func (testFetcher) Fetch(s string, d, t time.Duration) (*profile.Profile, string p = contentionProfile() case "symbolz": p = symzProfile() + case "longNameFuncs": + p = longNameFuncsProfile() default: return nil, "", fmt.Errorf("unexpected source: %s", s) } @@ -519,6 +529,83 @@ func fakeDemangler(name string) string { } } +// Returns a profile with function names which should be shortened in +// graph and flame views. 
+func longNameFuncsProfile() *profile.Profile { + var longNameFuncsM = []*profile.Mapping{ + { + ID: 1, + Start: 0x1000, + Limit: 0x4000, + File: "/path/to/testbinary", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + } + + var longNameFuncsF = []*profile.Function{ + {ID: 1, Name: "path/to/package1.object.function1", SystemName: "path/to/package1.object.function1", Filename: "path/to/package1.go"}, + {ID: 2, Name: "(anonymous namespace)::Bar::Foo", SystemName: "(anonymous namespace)::Bar::Foo", Filename: "a/long/path/to/package2.cc"}, + {ID: 3, Name: "java.bar.foo.FooBar.run(java.lang.Runnable)", SystemName: "java.bar.foo.FooBar.run(java.lang.Runnable)", Filename: "FooBar.java"}, + } + + var longNameFuncsL = []*profile.Location{ + { + ID: 1000, + Mapping: longNameFuncsM[0], + Address: 0x1000, + Line: []profile.Line{ + {Function: longNameFuncsF[0], Line: 1}, + }, + }, + { + ID: 2000, + Mapping: longNameFuncsM[0], + Address: 0x2000, + Line: []profile.Line{ + {Function: longNameFuncsF[1], Line: 4}, + }, + }, + { + ID: 3000, + Mapping: longNameFuncsM[0], + Address: 0x3000, + Line: []profile.Line{ + {Function: longNameFuncsF[2], Line: 9}, + }, + }, + } + + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{longNameFuncsL[0], longNameFuncsL[1], longNameFuncsL[2]}, + Value: []int64{1000, 1000}, + }, + { + Location: []*profile.Location{longNameFuncsL[0], longNameFuncsL[1]}, + Value: []int64{100, 100}, + }, + { + Location: []*profile.Location{longNameFuncsL[2]}, + Value: []int64{10, 10}, + }, + }, + Location: longNameFuncsL, + Function: longNameFuncsF, + Mapping: longNameFuncsM, + } +} + func cpuProfile() *profile.Profile { var cpuM = []*profile.Mapping{ { diff --git 
a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go index 7a7a1a20f2a2e..b8a69e87fce75 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go @@ -16,7 +16,6 @@ package driver import ( "bytes" - "crypto/tls" "fmt" "io" "io/ioutil" @@ -57,7 +56,7 @@ func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) { }) } - p, pbase, m, mbase, save, err := grabSourcesAndBases(sources, bases, o.Fetch, o.Obj, o.UI) + p, pbase, m, mbase, save, err := grabSourcesAndBases(sources, bases, o.Fetch, o.Obj, o.UI, o.HTTPTransport) if err != nil { return nil, err } @@ -123,7 +122,7 @@ func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) { return p, nil } -func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (*profile.Profile, *profile.Profile, plugin.MappingSources, plugin.MappingSources, bool, error) { +func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, *profile.Profile, plugin.MappingSources, plugin.MappingSources, bool, error) { wg := sync.WaitGroup{} wg.Add(2) var psrc, pbase *profile.Profile @@ -133,11 +132,11 @@ func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, o var countsrc, countbase int go func() { defer wg.Done() - psrc, msrc, savesrc, countsrc, errsrc = chunkedGrab(sources, fetch, obj, ui) + psrc, msrc, savesrc, countsrc, errsrc = chunkedGrab(sources, fetch, obj, ui, tr) }() go func() { defer wg.Done() - pbase, mbase, savebase, countbase, errbase = chunkedGrab(bases, fetch, obj, ui) + pbase, mbase, savebase, countbase, errbase = chunkedGrab(bases, fetch, obj, ui, tr) }() wg.Wait() save := savesrc || savebase @@ -167,7 +166,7 @@ func grabSourcesAndBases(sources, bases 
[]profileSource, fetch plugin.Fetcher, o // chunkedGrab fetches the profiles described in source and merges them into // a single profile. It fetches a chunk of profiles concurrently, with a maximum // chunk size to limit its memory usage. -func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (*profile.Profile, plugin.MappingSources, bool, int, error) { +func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { const chunkSize = 64 var p *profile.Profile @@ -180,7 +179,7 @@ func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTo if end > len(sources) { end = len(sources) } - chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui) + chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui, tr) switch { case chunkErr != nil: return nil, nil, false, 0, chunkErr @@ -204,13 +203,13 @@ func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTo } // concurrentGrab fetches multiple profiles concurrently -func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (*profile.Profile, plugin.MappingSources, bool, int, error) { +func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { wg := sync.WaitGroup{} wg.Add(len(sources)) for i := range sources { go func(s *profileSource) { defer wg.Done() - s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, fetch, obj, ui) + s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, fetch, obj, ui, tr) }(&sources[i]) } wg.Wait() @@ -310,7 +309,7 @@ const testSourceAddress = "pproftest.local" // grabProfile fetches a profile. 
Returns the profile, sources for the // profile mappings, a bool indicating if the profile was fetched // remotely, and an error. -func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) { +func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) { var src string duration, timeout := time.Duration(s.Seconds)*time.Second, time.Duration(s.Timeout)*time.Second if fetcher != nil { @@ -321,7 +320,7 @@ func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.Ob } if err != nil || p == nil { // Fetch the profile over HTTP or from a file. - p, src, err = fetch(source, duration, timeout, ui) + p, src, err = fetch(source, duration, timeout, ui, tr) if err != nil { return } @@ -461,7 +460,7 @@ mapping: // fetch fetches a profile from source, within the timeout specified, // producing messages through the ui. It returns the profile and the // url of the actual source of the profile for remote profiles. -func fetch(source string, duration, timeout time.Duration, ui plugin.UI) (p *profile.Profile, src string, err error) { +func fetch(source string, duration, timeout time.Duration, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, src string, err error) { var f io.ReadCloser if sourceURL, timeout := adjustURL(source, duration, timeout); sourceURL != "" { @@ -469,7 +468,7 @@ func fetch(source string, duration, timeout time.Duration, ui plugin.UI) (p *pro if duration > 0 { ui.Print(fmt.Sprintf("Please wait... 
(%v)", duration)) } - f, err = fetchURL(sourceURL, timeout) + f, err = fetchURL(sourceURL, timeout, tr) src = sourceURL } else if isPerfFile(source) { f, err = convertPerfData(source, ui) @@ -484,8 +483,12 @@ func fetch(source string, duration, timeout time.Duration, ui plugin.UI) (p *pro } // fetchURL fetches a profile from a URL using HTTP. -func fetchURL(source string, timeout time.Duration) (io.ReadCloser, error) { - resp, err := httpGet(source, timeout) +func fetchURL(source string, timeout time.Duration, tr http.RoundTripper) (io.ReadCloser, error) { + client := &http.Client{ + Transport: tr, + Timeout: timeout + 5*time.Second, + } + resp, err := client.Get(source) if err != nil { return nil, fmt.Errorf("http fetch: %v", err) } @@ -582,30 +585,3 @@ func adjustURL(source string, duration, timeout time.Duration) (string, time.Dur u.RawQuery = values.Encode() return u.String(), timeout } - -// httpGet is a wrapper around http.Get; it is defined as a variable -// so it can be redefined during for testing. 
-var httpGet = func(source string, timeout time.Duration) (*http.Response, error) { - url, err := url.Parse(source) - if err != nil { - return nil, err - } - - var tlsConfig *tls.Config - if url.Scheme == "https+insecure" { - tlsConfig = &tls.Config{ - InsecureSkipVerify: true, - } - url.Scheme = "https" - source = url.String() - } - - client := &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSClientConfig: tlsConfig, - ResponseHeaderTimeout: timeout + 5*time.Second, - }, - } - return client.Get(source) -} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch_test.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch_test.go index e67b2e9f87108..b9e9dfe8f450b 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch_test.go @@ -24,8 +24,8 @@ import ( "fmt" "io/ioutil" "math/big" + "net" "net/http" - "net/url" "os" "path/filepath" "reflect" @@ -39,6 +39,7 @@ import ( "github.com/google/pprof/internal/plugin" "github.com/google/pprof/internal/proftest" "github.com/google/pprof/internal/symbolizer" + "github.com/google/pprof/internal/transport" "github.com/google/pprof/profile" ) @@ -173,12 +174,6 @@ func (testFile) Close() error { func TestFetch(t *testing.T) { const path = "testdata/" - - // Intercept http.Get calls from HTTPFetcher. 
- savedHTTPGet := httpGet - defer func() { httpGet = savedHTTPGet }() - httpGet = stubHTTPGet - type testcase struct { source, execName string } @@ -188,7 +183,7 @@ func TestFetch(t *testing.T) { {path + "go.nomappings.crash", "/bin/gotest.exe"}, {"http://localhost/profile?file=cppbench.cpu", ""}, } { - p, _, _, err := grabProfile(&source{ExecName: tc.execName}, tc.source, nil, testObj{}, &proftest.TestUI{T: t}) + p, _, _, err := grabProfile(&source{ExecName: tc.execName}, tc.source, nil, testObj{}, &proftest.TestUI{T: t}, &httpTransport{}) if err != nil { t.Fatalf("%s: %s", tc.source, err) } @@ -449,8 +444,9 @@ func TestFetchWithBase(t *testing.T) { f.args = tc.sources o := setDefaults(&plugin.Options{ - UI: &proftest.TestUI{T: t, AllowRx: "Local symbolization failed|Some binary filenames not available"}, - Flagset: f, + UI: &proftest.TestUI{T: t, AllowRx: "Local symbolization failed|Some binary filenames not available"}, + Flagset: f, + HTTPTransport: transport.New(nil), }) src, _, err := parseFlags(o) @@ -503,19 +499,14 @@ func mappingSources(key, source string, start uint64) plugin.MappingSources { } } -// stubHTTPGet intercepts a call to http.Get and rewrites it to use -// "file://" to get the profile directly from a file. 
-func stubHTTPGet(source string, _ time.Duration) (*http.Response, error) { - url, err := url.Parse(source) - if err != nil { - return nil, err - } +type httpTransport struct{} - values := url.Query() +func (tr *httpTransport) RoundTrip(req *http.Request) (*http.Response, error) { + values := req.URL.Query() file := values.Get("file") if file == "" { - return nil, fmt.Errorf("want .../file?profile, got %s", source) + return nil, fmt.Errorf("want .../file?profile, got %s", req.URL.String()) } t := &http.Transport{} @@ -532,7 +523,7 @@ func closedError() string { return "use of closed" } -func TestHttpsInsecure(t *testing.T) { +func TestHTTPSInsecure(t *testing.T) { if runtime.GOOS == "nacl" || runtime.GOOS == "js" { t.Skip("test assumes tcp available") } @@ -553,7 +544,8 @@ func TestHttpsInsecure(t *testing.T) { pprofVariables = baseVars.makeCopy() defer func() { pprofVariables = baseVars }() - tlsConfig := &tls.Config{Certificates: []tls.Certificate{selfSignedCert(t)}} + tlsCert, _, _ := selfSignedCert(t, "") + tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} l, err := tls.Listen("tcp", "localhost:0", tlsConfig) if err != nil { @@ -586,8 +578,9 @@ func TestHttpsInsecure(t *testing.T) { Symbolize: "remote", } o := &plugin.Options{ - Obj: &binutils.Binutils{}, - UI: &proftest.TestUI{T: t, AllowRx: "Saved profile in"}, + Obj: &binutils.Binutils{}, + UI: &proftest.TestUI{T: t, AllowRx: "Saved profile in"}, + HTTPTransport: transport.New(nil), } o.Sym = &symbolizer.Symbolizer{Obj: o.Obj, UI: o.UI} p, err := fetchProfiles(s, o) @@ -600,7 +593,122 @@ func TestHttpsInsecure(t *testing.T) { if len(p.Function) == 0 { t.Fatalf("fetchProfiles(%s) got non-symbolized profile: len(p.Function)==0", address) } - if err := checkProfileHasFunction(p, "TestHttpsInsecure"); err != nil { + if err := checkProfileHasFunction(p, "TestHTTPSInsecure"); err != nil { + t.Fatalf("fetchProfiles(%s) %v", address, err) + } +} + +func TestHTTPSWithServerCertFetch(t *testing.T) { + 
if runtime.GOOS == "nacl" || runtime.GOOS == "js" { + t.Skip("test assumes tcp available") + } + saveHome := os.Getenv(homeEnv()) + tempdir, err := ioutil.TempDir("", "home") + if err != nil { + t.Fatal("creating temp dir: ", err) + } + defer os.RemoveAll(tempdir) + + // pprof writes to $HOME/pprof by default which is not necessarily + // writeable (e.g. on a Debian buildd) so set $HOME to something we + // know we can write to for the duration of the test. + os.Setenv(homeEnv(), tempdir) + defer os.Setenv(homeEnv(), saveHome) + + baseVars := pprofVariables + pprofVariables = baseVars.makeCopy() + defer func() { pprofVariables = baseVars }() + + cert, certBytes, keyBytes := selfSignedCert(t, "localhost") + cas := x509.NewCertPool() + cas.AppendCertsFromPEM(certBytes) + + tlsConfig := &tls.Config{ + RootCAs: cas, + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: cas, + } + + l, err := tls.Listen("tcp", "localhost:0", tlsConfig) + if err != nil { + t.Fatalf("net.Listen: got error %v, want no error", err) + } + + donec := make(chan error, 1) + go func(donec chan<- error) { + donec <- http.Serve(l, nil) + }(donec) + defer func() { + if got, want := <-donec, closedError(); !strings.Contains(got.Error(), want) { + t.Fatalf("Serve got error %v, want %q", got, want) + } + }() + defer l.Close() + + outputTempFile, err := ioutil.TempFile("", "profile_output") + if err != nil { + t.Fatalf("Failed to create tempfile: %v", err) + } + defer os.Remove(outputTempFile.Name()) + defer outputTempFile.Close() + + // Get port from the address, so request to the server can be made using + // the host name specified in certificates. 
+ _, portStr, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + t.Fatalf("cannot get port from URL: %v", err) + } + address := "https://" + "localhost:" + portStr + "/debug/pprof/goroutine" + s := &source{ + Sources: []string{address}, + Seconds: 10, + Timeout: 10, + Symbolize: "remote", + } + + certTempFile, err := ioutil.TempFile("", "cert_output") + if err != nil { + t.Errorf("cannot create cert tempfile: %v", err) + } + defer os.Remove(certTempFile.Name()) + defer certTempFile.Close() + certTempFile.Write(certBytes) + + keyTempFile, err := ioutil.TempFile("", "key_output") + if err != nil { + t.Errorf("cannot create key tempfile: %v", err) + } + defer os.Remove(keyTempFile.Name()) + defer keyTempFile.Close() + keyTempFile.Write(keyBytes) + + f := &testFlags{ + strings: map[string]string{ + "tls_cert": certTempFile.Name(), + "tls_key": keyTempFile.Name(), + "tls_ca": certTempFile.Name(), + }, + } + o := &plugin.Options{ + Obj: &binutils.Binutils{}, + UI: &proftest.TestUI{T: t, AllowRx: "Saved profile in"}, + Flagset: f, + HTTPTransport: transport.New(f), + } + + o.Sym = &symbolizer.Symbolizer{Obj: o.Obj, UI: o.UI, Transport: o.HTTPTransport} + p, err := fetchProfiles(s, o) + if err != nil { + t.Fatal(err) + } + if len(p.SampleType) == 0 { + t.Fatalf("fetchProfiles(%s) got empty profile: len(p.SampleType)==0", address) + } + if len(p.Function) == 0 { + t.Fatalf("fetchProfiles(%s) got non-symbolized profile: len(p.Function)==0", address) + } + if err := checkProfileHasFunction(p, "TestHTTPSWithServerCertFetch"); err != nil { t.Fatalf("fetchProfiles(%s) %v", address, err) } } @@ -614,7 +722,10 @@ func checkProfileHasFunction(p *profile.Profile, fname string) error { return fmt.Errorf("got %s, want function %q", p.String(), fname) } -func selfSignedCert(t *testing.T) tls.Certificate { +// selfSignedCert generates a self-signed certificate, and returns the +// generated certificate, and byte arrays containing the certificate and +// key associated with 
the certificate. +func selfSignedCert(t *testing.T, host string) (tls.Certificate, []byte, []byte) { privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { t.Fatalf("failed to generate private key: %v", err) @@ -629,6 +740,8 @@ func selfSignedCert(t *testing.T) tls.Certificate { SerialNumber: big.NewInt(1), NotBefore: time.Now(), NotAfter: time.Now().Add(10 * time.Minute), + IsCA: true, + DNSNames: []string{host}, } b, err = x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, privKey.Public(), privKey) @@ -641,5 +754,5 @@ func selfSignedCert(t *testing.T) tls.Certificate { if err != nil { t.Fatalf("failed to create TLS key pair: %v", err) } - return cert + return cert, bc, bk } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go new file mode 100644 index 0000000000000..c48fb5cd2e337 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/flags.go @@ -0,0 +1,78 @@ +package driver + +import ( + "flag" + "strings" +) + +// GoFlags implements the plugin.FlagSet interface. +type GoFlags struct { + UsageMsgs []string +} + +// Bool implements the plugin.FlagSet interface. +func (*GoFlags) Bool(o string, d bool, c string) *bool { + return flag.Bool(o, d, c) +} + +// Int implements the plugin.FlagSet interface. +func (*GoFlags) Int(o string, d int, c string) *int { + return flag.Int(o, d, c) +} + +// Float64 implements the plugin.FlagSet interface. +func (*GoFlags) Float64(o string, d float64, c string) *float64 { + return flag.Float64(o, d, c) +} + +// String implements the plugin.FlagSet interface. +func (*GoFlags) String(o, d, c string) *string { + return flag.String(o, d, c) +} + +// BoolVar implements the plugin.FlagSet interface. +func (*GoFlags) BoolVar(b *bool, o string, d bool, c string) { + flag.BoolVar(b, o, d, c) +} + +// IntVar implements the plugin.FlagSet interface. 
+func (*GoFlags) IntVar(i *int, o string, d int, c string) { + flag.IntVar(i, o, d, c) +} + +// Float64Var implements the plugin.FlagSet interface. +// the value of the flag. +func (*GoFlags) Float64Var(f *float64, o string, d float64, c string) { + flag.Float64Var(f, o, d, c) +} + +// StringVar implements the plugin.FlagSet interface. +func (*GoFlags) StringVar(s *string, o, d, c string) { + flag.StringVar(s, o, d, c) +} + +// StringList implements the plugin.FlagSet interface. +func (*GoFlags) StringList(o, d, c string) *[]*string { + return &[]*string{flag.String(o, d, c)} +} + +// ExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) ExtraUsage() string { + return strings.Join(f.UsageMsgs, "\n") +} + +// AddExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) AddExtraUsage(eu string) { + f.UsageMsgs = append(f.UsageMsgs, eu) +} + +// Parse implements the plugin.FlagSet interface. +func (*GoFlags) Parse(usage func()) []string { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + usage() + } + return args +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go index c9b9a5398f4db..13613cff86f59 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go @@ -55,7 +55,7 @@ func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { v := n.CumValue() fullName := n.Info.PrintableName() node := &treeNode{ - Name: getNodeShortName(fullName), + Name: graph.ShortenFunctionName(fullName), FullName: fullName, Cum: v, CumFormat: config.FormatValue(v), @@ -101,19 +101,3 @@ func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { Nodes: nodeArr, }) } - -// getNodeShortName builds a short node name from fullName. 
-func getNodeShortName(name string) string { - chunks := strings.SplitN(name, "(", 2) - head := chunks[0] - pathSep := strings.LastIndexByte(head, '/') - if pathSep == -1 || pathSep+1 >= len(head) { - return name - } - // Check if name is a stdlib package, i.e. doesn't have "." before "/" - if dot := strings.IndexByte(head, '.'); dot == -1 || dot > pathSep { - return name - } - // Trim package path prefix from node name - return name[pathSep+1:] -} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph_test.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph_test.go deleted file mode 100644 index c1a887c830ca1..0000000000000 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package driver - -import "testing" - -func TestGetNodeShortName(t *testing.T) { - type testCase struct { - name string - want string - } - testcases := []testCase{ - { - "root", - "root", - }, - { - "syscall.Syscall", - "syscall.Syscall", - }, - { - "net/http.(*conn).serve", - "net/http.(*conn).serve", - }, - { - "github.com/blah/foo.Foo", - "foo.Foo", - }, - { - "github.com/blah/foo_bar.(*FooBar).Foo", - "foo_bar.(*FooBar).Foo", - }, - { - "encoding/json.(*structEncoder).(encoding/json.encode)-fm", - "encoding/json.(*structEncoder).(encoding/json.encode)-fm", - }, - { - "github.com/blah/blah/vendor/gopkg.in/redis.v3.(*baseClient).(github.com/blah/blah/vendor/gopkg.in/redis.v3.process)-fm", - "redis.v3.(*baseClient).(github.com/blah/blah/vendor/gopkg.in/redis.v3.process)-fm", - }, - } - for _, tc := range testcases { - name := getNodeShortName(tc.name) - if got, want := name, tc.want; got != want { - t.Errorf("for %s, got %q, want %q", tc.name, got, want) - } - } -} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive_test.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive_test.go index 8d775e16bdbc0..758adf9bdced1 100644 --- 
a/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/interactive_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/pprof/internal/plugin" "github.com/google/pprof/internal/proftest" "github.com/google/pprof/internal/report" + "github.com/google/pprof/internal/transport" "github.com/google/pprof/profile" ) @@ -41,7 +42,10 @@ func TestShell(t *testing.T) { // Random interleave of independent scripts pprofVariables = testVariables(savedVariables) - o := setDefaults(nil) + + // pass in HTTPTransport when setting defaults, because otherwise default + // transport will try to add flags to the default flag set. + o := setDefaults(&plugin.Options{HTTPTransport: transport.New(nil)}) o.UI = newUI(t, interleave(script, 0)) if err := interactive(p, o); err != nil { t.Error("first attempt:", err) @@ -259,12 +263,13 @@ func TestInteractiveCommands(t *testing.T) { { "weblist find -test", map[string]string{ - "functions": "false", - "addressnoinlines": "true", - "nodecount": "0", - "cum": "false", - "flat": "true", - "ignore": "test", + "functions": "false", + "addresses": "true", + "noinlines": "true", + "nodecount": "0", + "cum": "false", + "flat": "true", + "ignore": "test", }, }, { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go index 34167d4bf5793..6e8f9fca25493 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/options.go @@ -16,7 +16,6 @@ package driver import ( "bufio" - "flag" "fmt" "io" "os" @@ -25,6 +24,7 @@ import ( "github.com/google/pprof/internal/binutils" "github.com/google/pprof/internal/plugin" "github.com/google/pprof/internal/symbolizer" + "github.com/google/pprof/internal/transport" ) // setDefaults returns a new plugin.Options with zero fields sets to @@ -38,7 +38,7 @@ func setDefaults(o 
*plugin.Options) *plugin.Options { d.Writer = oswriter{} } if d.Flagset == nil { - d.Flagset = goFlags{} + d.Flagset = &GoFlags{} } if d.Obj == nil { d.Obj = &binutils.Binutils{} @@ -46,67 +46,15 @@ func setDefaults(o *plugin.Options) *plugin.Options { if d.UI == nil { d.UI = &stdUI{r: bufio.NewReader(os.Stdin)} } + if d.HTTPTransport == nil { + d.HTTPTransport = transport.New(d.Flagset) + } if d.Sym == nil { - d.Sym = &symbolizer.Symbolizer{Obj: d.Obj, UI: d.UI} + d.Sym = &symbolizer.Symbolizer{Obj: d.Obj, UI: d.UI, Transport: d.HTTPTransport} } return d } -// goFlags returns a flagset implementation based on the standard flag -// package from the Go distribution. It implements the plugin.FlagSet -// interface. -type goFlags struct{} - -func (goFlags) Bool(o string, d bool, c string) *bool { - return flag.Bool(o, d, c) -} - -func (goFlags) Int(o string, d int, c string) *int { - return flag.Int(o, d, c) -} - -func (goFlags) Float64(o string, d float64, c string) *float64 { - return flag.Float64(o, d, c) -} - -func (goFlags) String(o, d, c string) *string { - return flag.String(o, d, c) -} - -func (goFlags) BoolVar(b *bool, o string, d bool, c string) { - flag.BoolVar(b, o, d, c) -} - -func (goFlags) IntVar(i *int, o string, d int, c string) { - flag.IntVar(i, o, d, c) -} - -func (goFlags) Float64Var(f *float64, o string, d float64, c string) { - flag.Float64Var(f, o, d, c) -} - -func (goFlags) StringVar(s *string, o, d, c string) { - flag.StringVar(s, o, d, c) -} - -func (goFlags) StringList(o, d, c string) *[]*string { - return &[]*string{flag.String(o, d, c)} -} - -func (goFlags) ExtraUsage() string { - return "" -} - -func (goFlags) Parse(usage func()) []string { - flag.Usage = usage - flag.Parse() - args := flag.Args() - if len(args) == 0 { - usage() - } - return args -} - type stdUI struct { r *bufio.Reader } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text 
b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text new file mode 100644 index 0000000000000..d53c44dad9153 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text @@ -0,0 +1,7 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total +Dropped 1 node (cum <= 0.06s) + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% 0000000000001000 line1000 testdata/file1000.src:1 + 0.01s 0.89% 99.11% 1.01s 90.18% 0000000000002000 line2000 testdata/file2000.src:4 + 0.01s 0.89% 100% 1.01s 90.18% 0000000000003000 line3000 testdata/file3000.src:6 + 0 0% 100% 0.10s 8.93% 0000000000003001 line3000 testdata/file3000.src:9 diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text new file mode 100644 index 0000000000000..88fb760759c88 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src + 0.01s 0.89% 99.11% 1.01s 90.18% line2000 testdata/file2000.src + 0.01s 0.89% 100% 1.12s 100% line3000 testdata/file3000.src diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text new file mode 100644 index 0000000000000..493b4912de1c6 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 + 
0.01s 0.89% 99.11% 1.01s 90.18% line2000 + 0.01s 0.89% 100% 1.12s 100% line3000 diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.lines.topproto b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.lines.topproto new file mode 100644 index 0000000000000..33bf6814a463c --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.cpu.lines.topproto @@ -0,0 +1,3 @@ +Showing nodes accounting for 1s, 100% of 1s total + flat flat% sum% cum cum% + 1s 100% 100% 1s 100% mangled1000 testdata/file1000.src:1 diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.dot b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.dot new file mode 100644 index 0000000000000..474a5108ba1c3 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.dot @@ -0,0 +1,9 @@ +digraph "testbinary" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "File: testbinary" [shape=box fontsize=16 label="File: testbinary\lType: cpu\lDuration: 10s, Total samples = 1.11s (11.10%)\lShowing nodes accounting for 1.11s, 100% of 1.11s total\l" tooltip="testbinary"] } +N1 [label="package1\nobject\nfunction1\n1.10s (99.10%)" id="node1" fontsize=24 shape=box tooltip="path/to/package1.object.function1 (1.10s)" color="#b20000" fillcolor="#edd5d5"] +N2 [label="FooBar\nrun\n0.01s (0.9%)\nof 1.01s (90.99%)" id="node2" fontsize=10 shape=box tooltip="java.bar.foo.FooBar.run(java.lang.Runnable) (1.01s)" color="#b20400" fillcolor="#edd6d5"] +N3 [label="Bar\nFoo\n0 of 1.10s (99.10%)" id="node3" fontsize=8 shape=box tooltip="(anonymous namespace)::Bar::Foo (1.10s)" color="#b20000" fillcolor="#edd5d5"] +N3 -> N1 [label=" 1.10s" weight=100 penwidth=5 color="#b20000" tooltip="(anonymous namespace)::Bar::Foo -> path/to/package1.object.function1 (1.10s)" labeltooltip="(anonymous namespace)::Bar::Foo -> 
path/to/package1.object.function1 (1.10s)"] +N2 -> N3 [label=" 1s" weight=91 penwidth=5 color="#b20500" tooltip="java.bar.foo.FooBar.run(java.lang.Runnable) -> (anonymous namespace)::Bar::Foo (1s)" labeltooltip="java.bar.foo.FooBar.run(java.lang.Runnable) -> (anonymous namespace)::Bar::Foo (1s)"] +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.text b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.text new file mode 100644 index 0000000000000..39cb24ed6a202 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/testdata/pprof.longNameFuncs.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.11s, 100% of 1.11s total + flat flat% sum% cum cum% + 1.10s 99.10% 99.10% 1.10s 99.10% path/to/package1.object.function1 + 0.01s 0.9% 100% 1.01s 90.99% java.bar.foo.FooBar.run(java.lang.Runnable) + 0 0% 100% 1.10s 99.10% (anonymous namespace)::Bar::Foo diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go index c3f9c384f8a66..74104899ca0ae 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -249,6 +249,21 @@ table tr td { + {{$sampleLen := len .SampleTypes}} + {{if gt $sampleLen 1}} + + {{end}} +
    ").FindAllStringSubmatch(row, -1) name := values[cols["Certificate name"]][1] fingerprint := values[cols["Fingerprint (SHA-256)"]][1] - fingerprint = strings.Replace(fingerprint, "
    ", "", -1) - fingerprint = strings.Replace(fingerprint, "\n", "", -1) - fingerprint = strings.Replace(fingerprint, " ", "", -1) + fingerprint = strings.ReplaceAll(fingerprint, "
    ", "") + fingerprint = strings.ReplaceAll(fingerprint, "\n", "") + fingerprint = strings.ReplaceAll(fingerprint, " ", "") fingerprint = strings.ToLower(fingerprint) ids = append(ids, certID{ diff --git a/src/crypto/x509/root_darwin_test.go b/src/crypto/x509/root_darwin_test.go index 68300c7955714..1165a97e205b9 100644 --- a/src/crypto/x509/root_darwin_test.go +++ b/src/crypto/x509/root_darwin_test.go @@ -5,6 +5,10 @@ package x509 import ( + "crypto/rsa" + "os" + "os/exec" + "path/filepath" "runtime" "testing" "time" @@ -16,11 +20,6 @@ func TestSystemRoots(t *testing.T) { t.Skipf("skipping on %s/%s, no system root", runtime.GOOS, runtime.GOARCH) } - switch runtime.GOOS { - case "darwin": - t.Skipf("skipping on %s/%s until golang.org/issue/24652 has been resolved.", runtime.GOOS, runtime.GOARCH) - } - t0 := time.Now() sysRoots := systemRootsPool() // actual system roots sysRootsDuration := time.Since(t0) @@ -36,45 +35,97 @@ func TestSystemRoots(t *testing.T) { t.Logf(" cgo sys roots: %v", sysRootsDuration) t.Logf("non-cgo sys roots: %v", execSysRootsDuration) - for _, tt := range []*CertPool{sysRoots, execRoots} { - if tt == nil { - t.Fatal("no system roots") - } - // On Mavericks, there are 212 bundled certs, at least - // there was at one point in time on one machine. - // (Maybe it was a corp laptop with extra certs?) - // Other OS X users report - // 135, 142, 145... Let's try requiring at least 100, - // since this is just a sanity check. - t.Logf("got %d roots", len(tt.certs)) - if want, have := 100, len(tt.certs); have < want { - t.Fatalf("want at least %d system roots, have %d", want, have) - } + // On Mavericks, there are 212 bundled certs, at least there was at + // one point in time on one machine. (Maybe it was a corp laptop + // with extra certs?) Other OS X users report 135, 142, 145... + // Let's try requiring at least 100, since this is just a sanity + // check. 
+ if want, have := 100, len(sysRoots.certs); have < want { + t.Errorf("want at least %d system roots, have %d", want, have) } - // Check that the two cert pools are roughly the same; - // |A∩B| > max(|A|, |B|) / 2 should be a reasonably robust check. + // Fetch any intermediate certificate that verify-cert might be aware of. + out, err := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", + "/Library/Keychains/System.keychain", + filepath.Join(os.Getenv("HOME"), "/Library/Keychains/login.keychain"), + filepath.Join(os.Getenv("HOME"), "/Library/Keychains/login.keychain-db")).Output() + if err != nil { + t.Fatal(err) + } + allCerts := NewCertPool() + allCerts.AppendCertsFromPEM(out) - isect := make(map[string]bool, len(sysRoots.certs)) + // Check that the two cert pools are the same. + sysPool := make(map[string]*Certificate, len(sysRoots.certs)) for _, c := range sysRoots.certs { - isect[string(c.Raw)] = true + sysPool[string(c.Raw)] = c } - - have := 0 for _, c := range execRoots.certs { - if isect[string(c.Raw)] { - have++ + if _, ok := sysPool[string(c.Raw)]; ok { + delete(sysPool, string(c.Raw)) + } else { + // verify-cert lets in certificates that are not trusted roots, but + // are signed by trusted roots. This is not great, but unavoidable + // until we parse real policies without cgo, so confirm that's the + // case and skip them. + if _, err := c.Verify(VerifyOptions{ + Roots: sysRoots, + Intermediates: allCerts, + KeyUsages: []ExtKeyUsage{ExtKeyUsageAny}, + CurrentTime: c.NotBefore, // verify-cert does not check expiration + }); err != nil { + t.Errorf("certificate only present in non-cgo pool: %v (verify error: %v)", c.Subject, err) + } else { + t.Logf("signed certificate only present in non-cgo pool (acceptable): %v", c.Subject) + } } } + for _, c := range sysPool { + // The nocgo codepath uses verify-cert with the ssl policy, which also + // happens to check EKUs, so some certificates will appear only in the + // cgo pool. 
We can't easily make them consistent because the EKU check + // is only applied to the certificates passed to verify-cert. + var ekuOk bool + for _, eku := range c.ExtKeyUsage { + if eku == ExtKeyUsageServerAuth || eku == ExtKeyUsageNetscapeServerGatedCrypto || + eku == ExtKeyUsageMicrosoftServerGatedCrypto || eku == ExtKeyUsageAny { + ekuOk = true + } + } + if len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0 { + ekuOk = true + } + if !ekuOk { + t.Logf("off-EKU certificate only present in cgo pool (acceptable): %v", c.Subject) + continue + } + + // Same for expired certificates. We don't chain to them anyway. + now := time.Now() + if now.Before(c.NotBefore) || now.After(c.NotAfter) { + t.Logf("expired certificate only present in cgo pool (acceptable): %v", c.Subject) + continue + } + + // On 10.11 there are five unexplained roots that only show up from the + // C API. They have in common the fact that they are old, 1024-bit + // certificates. It's arguably better to ignore them anyway. 
+ if key, ok := c.PublicKey.(*rsa.PublicKey); ok && key.N.BitLen() == 1024 { + t.Logf("1024-bit certificate only present in cgo pool (acceptable): %v", c.Subject) + continue + } - var want int - if nsys, nexec := len(sysRoots.certs), len(execRoots.certs); nsys > nexec { - want = nsys / 2 - } else { - want = nexec / 2 + t.Errorf("certificate only present in cgo pool: %v", c.Subject) } - if have < want { - t.Errorf("insufficient overlap between cgo and non-cgo roots; want at least %d, have %d", want, have) + if t.Failed() && debugDarwinRoots { + cmd := exec.Command("security", "dump-trust-settings") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() + cmd = exec.Command("security", "dump-trust-settings", "-d") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Run() } } diff --git a/src/crypto/x509/root_unix.go b/src/crypto/x509/root_unix.go index 8e7036234d7d3..48de50b4ea65f 100644 --- a/src/crypto/x509/root_unix.go +++ b/src/crypto/x509/root_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package x509 @@ -19,6 +19,7 @@ var certDirectories = []string{ "/usr/local/share/certs", // FreeBSD "/etc/pki/tls/certs", // Fedora/RHEL "/etc/openssl/certs", // NetBSD + "/var/ssl/certs", // AIX } const ( diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 210db4c1d0eb3..56b7948c41552 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -222,10 +222,9 @@ type rfc2821Mailbox struct { } // parseRFC2821Mailbox parses an email address into local and domain parts, -// based on the ABNF for a “Mailbox” from RFC 2821. 
According to -// https://tools.ietf.org/html/rfc5280#section-4.2.1.6 that's correct for an -// rfc822Name from a certificate: “The format of an rfc822Name is a "Mailbox" -// as defined in https://tools.ietf.org/html/rfc2821#section-4.1.2”. +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { if len(in) == 0 { return mailbox, false @@ -242,9 +241,8 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // quoted-pair = ("\" text) / obs-qp // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text // - // (Names beginning with “obs-” are the obsolete syntax from - // https://tools.ietf.org/html/rfc2822#section-4. Since it has - // been 16 years, we no longer accept that.) + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) in = in[1:] QuotedString: for { @@ -298,7 +296,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { // Atom ("." 
Atom)* NextChar: for len(in) > 0 { - // atext from https://tools.ietf.org/html/rfc2822#section-3.2.4 + // atext from RFC 2822, Section 3.2.4 c := in[0] switch { @@ -334,7 +332,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { return mailbox, false } - // https://tools.ietf.org/html/rfc3696#section-3 + // From RFC 3696, Section 3: // “period (".") may also appear, but may not be used to start // or end the local part, nor may two or more consecutive // periods appear.” @@ -415,7 +413,7 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro } func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { - // https://tools.ietf.org/html/rfc5280#section-4.2.1.10 + // From RFC 5280, Section 4.2.1.10: // “a uniformResourceIdentifier that does not include an authority // component with a host name specified as a fully qualified domain // name (e.g., if the URI either does not include an authority @@ -765,7 +763,7 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e if opts.Roots.contains(c) { candidateChains = append(candidateChains, []*Certificate{c}) } else { - if candidateChains, err = c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts); err != nil { + if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil { return nil, err } } @@ -802,66 +800,82 @@ func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate return n } -func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) { - possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c) -nextRoot: - for _, rootNum := range possibleRoots { - root := opts.Roots.certs[rootNum] +// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls +// that an invocation of buildChains will (tranistively) make. 
Most chains are +// less than 15 certificates long, so this leaves space for multiple chains and +// for failed checks due to different intermediates having the same Subject. +const maxChainSignatureChecks = 100 +func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) { + var ( + hintErr error + hintCert *Certificate + ) + + considerCandidate := func(certType int, candidate *Certificate) { for _, cert := range currentChain { - if cert.Equal(root) { - continue nextRoot + if cert.Equal(candidate) { + return } } - err = root.isValid(rootCertificate, currentChain, opts) - if err != nil { - continue + if sigChecks == nil { + sigChecks = new(int) + } + *sigChecks++ + if *sigChecks > maxChainSignatureChecks { + err = errors.New("x509: signature check attempts limit reached while verifying certificate chain") + return } - chains = append(chains, appendToFreshChain(currentChain, root)) - } - possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c) -nextIntermediate: - for _, intermediateNum := range possibleIntermediates { - intermediate := opts.Intermediates.certs[intermediateNum] - for _, cert := range currentChain { - if cert.Equal(intermediate) { - continue nextIntermediate + if err := c.CheckSignatureFrom(candidate); err != nil { + if hintErr == nil { + hintErr = err + hintCert = candidate } + return } - err = intermediate.isValid(intermediateCertificate, currentChain, opts) + + err = candidate.isValid(certType, currentChain, opts) if err != nil { - continue + return } - var childChains [][]*Certificate - childChains, ok := cache[intermediateNum] - if !ok { - childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts) - cache[intermediateNum] = childChains + + switch certType { + case rootCertificate: + chains = append(chains, appendToFreshChain(currentChain, 
candidate)) + case intermediateCertificate: + if cache == nil { + cache = make(map[*Certificate][][]*Certificate) + } + childChains, ok := cache[candidate] + if !ok { + childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts) + cache[candidate] = childChains + } + chains = append(chains, childChains...) } - chains = append(chains, childChains...) + } + + for _, rootNum := range opts.Roots.findPotentialParents(c) { + considerCandidate(rootCertificate, opts.Roots.certs[rootNum]) + } + for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) { + considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum]) } if len(chains) > 0 { err = nil } - if len(chains) == 0 && err == nil { - hintErr := rootErr - hintCert := failedRoot - if hintErr == nil { - hintErr = intermediateErr - hintCert = failedIntermediate - } err = UnknownAuthorityError{c, hintErr, hintCert} } return } -// validHostname returns whether host is a valid hostname that can be matched or -// matched against according to RFC 6125 2.2, with some leniency to accomodate +// validHostname reports whether host is a valid hostname that can be matched or +// matched against according to RFC 6125 2.2, with some leniency to accommodate // legacy values. func validHostname(host string) bool { host = strings.TrimSuffix(host, ".") @@ -894,8 +908,8 @@ func validHostname(host string) bool { if c == '-' && j != 0 { continue } - if c == '_' { - // _ is not a valid character in hostnames, but it's commonly + if c == '_' || c == ':' { + // Not valid characters in hostnames, but commonly // found in deployments outside the WebPKI. continue } @@ -987,7 +1001,7 @@ func (c *Certificate) VerifyHostname(h string) error { } if ip := net.ParseIP(candidateIP); ip != nil { // We only match IP addresses against IP SANs. - // https://tools.ietf.org/html/rfc6125#appendix-B.2 + // See RFC 6125, Appendix B.2. 
for _, candidate := range c.IPAddresses { if ip.Equal(candidate) { return nil diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 768414583962f..86fe76a57d7f8 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -5,10 +5,15 @@ package x509 import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" "crypto/x509/pkix" "encoding/pem" "errors" "fmt" + "math/big" "runtime" "strings" "testing" @@ -381,6 +386,19 @@ var verifyTests = []verifyTest{ errorCallback: expectHostnameError("not valid for any names"), }, + { + // A certificate with an AKID should still chain to a parent without SKID. + // See Issue 30079. + leaf: leafWithAKID, + roots: []string{rootWithoutSKID}, + currentTime: 1550000000, + dnsName: "example", + systemSkip: true, + + expectedChains: [][]string{ + {"Acme LLC", "Acme Co"}, + }, + }, } func expectHostnameError(msg string) func(*testing.T, int, error) bool { @@ -1674,6 +1692,109 @@ h7olHCpY9yMRiz0= -----END CERTIFICATE----- ` +const ( + rootWithoutSKID = ` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 78:29:2a:dc:2f:12:39:7f:c9:33:93:ea:61:39:7d:70 + Signature Algorithm: ecdsa-with-SHA256 + Issuer: O = Acme Co + Validity + Not Before: Feb 4 22:56:34 2019 GMT + Not After : Feb 1 22:56:34 2029 GMT + Subject: O = Acme Co + Subject Public Key Info: + Public Key Algorithm: id-ecPublicKey + Public-Key: (256 bit) + pub: + 04:84:a6:8c:69:53:af:87:4b:39:64:fe:04:24:e6: + d8:fc:d6:46:39:35:0e:92:dc:48:08:7e:02:5f:1e: + 07:53:5c:d9:e0:56:c5:82:07:f6:a3:e2:ad:f6:ad: + be:a0:4e:03:87:39:67:0c:9c:46:91:68:6b:0e:8e: + f8:49:97:9d:5b + ASN1 OID: prime256v1 + NIST CURVE: P-256 + X509v3 extensions: + X509v3 Key Usage: critical + Digital Signature, Key Encipherment, Certificate Sign + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Basic Constraints: critical + CA:TRUE + X509v3 Subject Alternative Name: + DNS:example + Signature Algorithm: ecdsa-with-SHA256 + 
30:46:02:21:00:c6:81:61:61:42:8d:37:e7:d0:c3:72:43:44: + 17:bd:84:ff:88:81:68:9a:99:08:ab:3c:3a:c0:1e:ea:8c:ba: + c0:02:21:00:de:c9:fa:e5:5e:c6:e2:db:23:64:43:a9:37:42: + 72:92:7f:6e:89:38:ea:9e:2a:a7:fd:2f:ea:9a:ff:20:21:e7 +-----BEGIN CERTIFICATE----- +MIIBbzCCARSgAwIBAgIQeCkq3C8SOX/JM5PqYTl9cDAKBggqhkjOPQQDAjASMRAw +DgYDVQQKEwdBY21lIENvMB4XDTE5MDIwNDIyNTYzNFoXDTI5MDIwMTIyNTYzNFow +EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABISm +jGlTr4dLOWT+BCTm2PzWRjk1DpLcSAh+Al8eB1Nc2eBWxYIH9qPirfatvqBOA4c5 +ZwycRpFoaw6O+EmXnVujTDBKMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr +BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBIGA1UdEQQLMAmCB2V4YW1wbGUwCgYI +KoZIzj0EAwIDSQAwRgIhAMaBYWFCjTfn0MNyQ0QXvYT/iIFompkIqzw6wB7qjLrA +AiEA3sn65V7G4tsjZEOpN0Jykn9uiTjqniqn/S/qmv8gIec= +-----END CERTIFICATE----- +` + leafWithAKID = ` + Certificate: + Data: + Version: 3 (0x2) + Serial Number: + f0:8a:62:f0:03:84:a2:cf:69:63:ad:71:3b:b6:5d:8c + Signature Algorithm: ecdsa-with-SHA256 + Issuer: O = Acme Co + Validity + Not Before: Feb 4 23:06:52 2019 GMT + Not After : Feb 1 23:06:52 2029 GMT + Subject: O = Acme LLC + Subject Public Key Info: + Public Key Algorithm: id-ecPublicKey + Public-Key: (256 bit) + pub: + 04:5a:4e:4d:fb:ff:17:f7:b6:13:e8:29:45:34:81: + 39:ff:8c:9c:d9:8c:0a:9f:dd:b5:97:4c:2b:20:91: + 1c:4f:6b:be:53:27:66:ec:4a:ad:08:93:6d:66:36: + 0c:02:70:5d:01:ca:7f:c3:29:e9:4f:00:ba:b4:14: + ec:c5:c3:34:b3 + ASN1 OID: prime256v1 + NIST CURVE: P-256 + X509v3 extensions: + X509v3 Key Usage: critical + Digital Signature, Key Encipherment + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Basic Constraints: critical + CA:FALSE + X509v3 Authority Key Identifier: + keyid:C2:2B:5F:91:78:34:26:09:42:8D:6F:51:B2:C5:AF:4C:0B:DE:6A:42 + + X509v3 Subject Alternative Name: + DNS:example + Signature Algorithm: ecdsa-with-SHA256 + 30:44:02:20:64:e0:ba:56:89:63:ce:22:5e:4f:22:15:fd:3c: + 35:64:9a:3a:6b:7b:9a:32:a0:7f:f7:69:8c:06:f0:00:58:b8: + 
02:20:09:e4:9f:6d:8b:9e:38:e1:b6:01:d5:ee:32:a4:94:65: + 93:2a:78:94:bb:26:57:4b:c7:dd:6c:3d:40:2b:63:90 +-----BEGIN CERTIFICATE----- +MIIBjTCCATSgAwIBAgIRAPCKYvADhKLPaWOtcTu2XYwwCgYIKoZIzj0EAwIwEjEQ +MA4GA1UEChMHQWNtZSBDbzAeFw0xOTAyMDQyMzA2NTJaFw0yOTAyMDEyMzA2NTJa +MBMxETAPBgNVBAoTCEFjbWUgTExDMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE +Wk5N+/8X97YT6ClFNIE5/4yc2YwKn921l0wrIJEcT2u+Uydm7EqtCJNtZjYMAnBd +Acp/wynpTwC6tBTsxcM0s6NqMGgwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoG +CCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUwitfkXg0JglCjW9R +ssWvTAveakIwEgYDVR0RBAswCYIHZXhhbXBsZTAKBggqhkjOPQQDAgNHADBEAiBk +4LpWiWPOIl5PIhX9PDVkmjpre5oyoH/3aYwG8ABYuAIgCeSfbYueOOG2AdXuMqSU +ZZMqeJS7JldLx91sPUArY5A= +-----END CERTIFICATE----- +` +) + var unknownAuthorityErrorTests = []struct { cert string expected string @@ -1881,6 +2002,7 @@ func TestValidHostname(t *testing.T) { {"foo.*.example.com", false}, {"exa_mple.com", true}, {"foo,bar", false}, + {"project-dev:us-central1:main", true}, } for _, tt := range tests { if got := validHostname(tt.host); got != tt.want { @@ -1888,3 +2010,117 @@ func TestValidHostname(t *testing.T) { } } } + +func generateCert(cn string, isCA bool, issuer *Certificate, issuerKey crypto.PrivateKey) (*Certificate, crypto.PrivateKey, error) { + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, _ := rand.Int(rand.Reader, serialNumberLimit) + + template := &Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{CommonName: cn}, + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(24 * time.Hour), + + KeyUsage: KeyUsageKeyEncipherment | KeyUsageDigitalSignature | KeyUsageCertSign, + ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: isCA, + } + if issuer == nil { + issuer = template + issuerKey = priv + } + + derBytes, err := CreateCertificate(rand.Reader, template, issuer, 
priv.Public(), issuerKey) + if err != nil { + return nil, nil, err + } + cert, err := ParseCertificate(derBytes) + if err != nil { + return nil, nil, err + } + + return cert, priv, nil +} + +func TestPathologicalChain(t *testing.T) { + if testing.Short() { + t.Skip("skipping generation of a long chain of certificates in short mode") + } + + // Build a chain where all intermediates share the same subject, to hit the + // path building worst behavior. + roots, intermediates := NewCertPool(), NewCertPool() + + parent, parentKey, err := generateCert("Root CA", true, nil, nil) + if err != nil { + t.Fatal(err) + } + roots.AddCert(parent) + + for i := 1; i < 100; i++ { + parent, parentKey, err = generateCert("Intermediate CA", true, parent, parentKey) + if err != nil { + t.Fatal(err) + } + intermediates.AddCert(parent) + } + + leaf, _, err := generateCert("Leaf", false, parent, parentKey) + if err != nil { + t.Fatal(err) + } + + start := time.Now() + _, err = leaf.Verify(VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + }) + t.Logf("verification took %v", time.Since(start)) + + if err == nil || !strings.Contains(err.Error(), "signature check attempts limit") { + t.Errorf("expected verification to fail with a signature checks limit error; got %v", err) + } +} + +func TestLongChain(t *testing.T) { + if testing.Short() { + t.Skip("skipping generation of a long chain of certificates in short mode") + } + + roots, intermediates := NewCertPool(), NewCertPool() + + parent, parentKey, err := generateCert("Root CA", true, nil, nil) + if err != nil { + t.Fatal(err) + } + roots.AddCert(parent) + + for i := 1; i < 15; i++ { + name := fmt.Sprintf("Intermediate CA #%d", i) + parent, parentKey, err = generateCert(name, true, parent, parentKey) + if err != nil { + t.Fatal(err) + } + intermediates.AddCert(parent) + } + + leaf, _, err := generateCert("Leaf", false, parent, parentKey) + if err != nil { + t.Fatal(err) + } + + start := time.Now() + if _, err := 
leaf.Verify(VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + }); err != nil { + t.Error(err) + } + t.Logf("verification took %v", time.Since(start)) +} diff --git a/src/crypto/x509/x509.go b/src/crypto/x509/x509.go index 2e72471de2878..58098adc2d5a2 100644 --- a/src/crypto/x509/x509.go +++ b/src/crypto/x509/x509.go @@ -24,6 +24,8 @@ import ( "encoding/pem" "errors" "fmt" + "internal/x/crypto/cryptobyte" + cryptobyte_asn1 "internal/x/crypto/cryptobyte/asn1" "io" "math/big" "net" @@ -32,9 +34,6 @@ import ( "strings" "time" "unicode/utf8" - - "golang_org/x/crypto/cryptobyte" - cryptobyte_asn1 "golang_org/x/crypto/cryptobyte/asn1" ) // pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo @@ -78,7 +77,7 @@ func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorith } publicKeyAlgorithm.Algorithm = oidPublicKeyRSA // This is a NULL parameters value which is required by - // https://tools.ietf.org/html/rfc3279#section-2.3.1. + // RFC 3279, Section 2.3.1. publicKeyAlgorithm.Parameters = asn1.NullRawValue case *ecdsa.PublicKey: publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) @@ -334,7 +333,7 @@ var signatureAlgorithmDetails = []struct { } // pssParameters reflects the parameters in an AlgorithmIdentifier that -// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3 +// specifies RSA PSS. See RFC 3447, Appendix A.2.3. type pssParameters struct { // The following three fields are not marked as // optional because the default values specify SHA-1, @@ -413,13 +412,11 @@ func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm return UnknownSignatureAlgorithm } - // PSS is greatly overburdened with options. 
This code forces - // them into three buckets by requiring that the MGF1 hash - // function always match the message hash function (as - // recommended in - // https://tools.ietf.org/html/rfc3447#section-8.1), that the - // salt length matches the hash length, and that the trailer - // field has the default value. + // PSS is greatly overburdened with options. This code forces them into + // three buckets by requiring that the MGF1 hash function always match the + // message hash function (as recommended in RFC 3447, Section 8.1), that the + // salt length matches the hash length, and that the trailer field has the + // default value. if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) || !params.MGF.Algorithm.Equal(oidMGF1) || !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || @@ -987,8 +984,8 @@ func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{ asn1Data := keyData.PublicKey.RightAlign() switch algo { case RSA: - // RSA public keys must have a NULL in the parameters - // (https://tools.ietf.org/html/rfc3279#section-2.3.1). + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) { return nil, errors.New("x509: RSA key missing NULL parameters") } @@ -1150,7 +1147,7 @@ func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddre return } -// isValidIPMask returns true iff mask consists of zero or more 1 bits, followed by zero bits. +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. 
func isValidIPMask(mask []byte) bool { seenZero := false @@ -1203,7 +1200,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle } if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { - // https://tools.ietf.org/html/rfc5280#section-4.2.1.10: + // From RFC 5280, Section 4.2.1.10: // “either the permittedSubtrees field // or the excludedSubtrees MUST be // present” @@ -1644,7 +1641,7 @@ var ( oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2} ) -// oidNotInExtensions returns whether an extension with the given oid exists in +// oidNotInExtensions reports whether an extension with the given oid exists in // extensions. func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool { for _, e := range extensions { @@ -1798,7 +1795,7 @@ func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) { ret[n].Id = oidExtensionSubjectAltName - // https://tools.ietf.org/html/rfc5280#section-4.2.1.6 + // From RFC 5280, Section 4.2.1.6: // “If the subject field contains an empty sequence ... then // subjectAltName extension ... is marked as critical” ret[n].Critical = subjectIsEmpty @@ -2275,21 +2272,25 @@ type CertificateRequest struct { Subject pkix.Name - // Attributes is the dried husk of a bug and shouldn't be used. + // Attributes contains the CSR attributes that can parse as + // pkix.AttributeTypeAndValueSET. + // + // Deprecated: use Extensions and ExtraExtensions instead for parsing and + // generating the requestedExtensions attribute. Attributes []pkix.AttributeTypeAndValueSET - // Extensions contains raw X.509 extensions. 
When parsing CSRs, this - // can be used to extract extensions that are not parsed by this + // Extensions contains all requested extensions, in raw form. When parsing + // CSRs, this can be used to extract extensions that are not parsed by this // package. Extensions []pkix.Extension - // ExtraExtensions contains extensions to be copied, raw, into any - // marshaled CSR. Values override any extensions that would otherwise - // be produced based on the other fields but are overridden by any - // extensions specified in Attributes. + // ExtraExtensions contains extensions to be copied, raw, into any CSR + // marshaled by CreateCertificateRequest. Values override any extensions + // that would otherwise be produced based on the other fields but are + // overridden by any extensions specified in Attributes. // - // The ExtraExtensions field is not populated when parsing CSRs, see - // Extensions. + // The ExtraExtensions field is not populated by ParseCertificateRequest, + // see Extensions instead. ExtraExtensions []pkix.Extension // Subject Alternate Name values. @@ -2357,8 +2358,7 @@ func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndVa // parseCSRExtensions parses the attributes from a CSR and extracts any // requested extensions. func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) { - // pkcs10Attribute reflects the Attribute structure from section 4.1 of - // https://tools.ietf.org/html/rfc2986. + // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1. type pkcs10Attribute struct { Id asn1.ObjectIdentifier Values []asn1.RawValue `asn1:"set"` @@ -2389,21 +2389,21 @@ func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) // CreateCertificateRequest creates a new certificate request based on a // template. 
The following members of template are used: // -// - Attributes +// - SignatureAlgorithm +// - Subject // - DNSNames // - EmailAddresses -// - ExtraExtensions // - IPAddresses // - URIs -// - SignatureAlgorithm -// - Subject +// - ExtraExtensions +// - Attributes (deprecated) // -// The private key is the private key of the signer. +// priv is the private key to sign the CSR with, and the corresponding public +// key will be included in the CSR. It must implement crypto.Signer and its +// Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey. (A +// *rsa.PrivateKey or *ecdsa.PrivateKey satisfies this.) // // The returned slice is the certificate request in DER encoding. -// -// All keys types that are implemented via crypto.Signer are supported (This -// includes *rsa.PublicKey and *ecdsa.PublicKey.) func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) { key, ok := priv.(crypto.Signer) if !ok { diff --git a/src/database/sql/convert.go b/src/database/sql/convert.go index 92a2ebe0e9913..c450d987a4651 100644 --- a/src/database/sql/convert.go +++ b/src/database/sql/convert.go @@ -203,10 +203,18 @@ func driverArgsConnLocked(ci driver.Conn, ds *driverStmt, args []interface{}) ([ } -// convertAssign copies to dest the value in src, converting it if possible. -// An error is returned if the copy would result in loss of information. -// dest should be a pointer type. +// convertAssign is the same as convertAssignRows, but without the optional +// rows argument. func convertAssign(dest, src interface{}) error { + return convertAssignRows(dest, src, nil) +} + +// convertAssignRows copies to dest the value in src, converting it if possible. +// An error is returned if the copy would result in loss of information. +// dest should be a pointer type. If rows is passed in, the rows will +// be used as the parent for any cursor values converted from a +// driver.Rows to a *Rows. 
+func convertAssignRows(dest, src interface{}, rows *Rows) error { // Common cases, without reflect. switch s := src.(type) { case string: @@ -299,6 +307,35 @@ func convertAssign(dest, src interface{}) error { *d = nil return nil } + // The driver is returning a cursor the client may iterate over. + case driver.Rows: + switch d := dest.(type) { + case *Rows: + if d == nil { + return errNilPtr + } + if rows == nil { + return errors.New("invalid context to convert cursor rows, missing parent *Rows") + } + rows.closemu.Lock() + *d = Rows{ + dc: rows.dc, + releaseConn: func(error) {}, + rowsi: s, + } + // Chain the cancel function. + parentCancel := rows.cancel + rows.cancel = func() { + // When Rows.cancel is called, the closemu will be locked as well. + // So we can access rs.lasterr. + d.close(rows.lasterr) + if parentCancel != nil { + parentCancel() + } + } + rows.closemu.Unlock() + return nil + } } var sv reflect.Value @@ -381,7 +418,7 @@ func convertAssign(dest, src interface{}) error { return nil } dv.Set(reflect.New(dv.Type().Elem())) - return convertAssign(dv.Interface(), src) + return convertAssignRows(dv.Interface(), src, rows) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: s := asString(src) i64, err := strconv.ParseInt(s, 10, dv.Type().Bits()) diff --git a/src/database/sql/driver/driver.go b/src/database/sql/driver/driver.go index 1e54b4cf2cf17..5ff2bc97350e9 100644 --- a/src/database/sql/driver/driver.go +++ b/src/database/sql/driver/driver.go @@ -24,6 +24,11 @@ import ( // []byte // string // time.Time +// +// If the driver supports cursors, a returned Value may also implement the Rows interface +// in this package. This is used when, for example, when a user selects a cursor +// such as "select cursor(select * from my_table) from dual". If the Rows +// from the select is closed, the cursor Rows will also be closed. type Value interface{} // NamedValue holds both the value name and value. 
@@ -469,7 +474,7 @@ type RowsAffected int64 var _ Result = RowsAffected(0) func (RowsAffected) LastInsertId() (int64, error) { - return 0, errors.New("no LastInsertId available") + return 0, errors.New("LastInsertId is not supported by this driver") } func (v RowsAffected) RowsAffected() (int64, error) { diff --git a/src/database/sql/example_cli_test.go b/src/database/sql/example_cli_test.go new file mode 100644 index 0000000000000..8c61d755bb845 --- /dev/null +++ b/src/database/sql/example_cli_test.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sql_test + +import ( + "context" + "database/sql" + "flag" + "log" + "os" + "os/signal" + "time" +) + +var pool *sql.DB // Database connection pool. + +func Example_openDBCLI() { + id := flag.Int64("id", 0, "person ID to find") + dsn := flag.String("dsn", os.Getenv("DSN"), "connection data source name") + flag.Parse() + + if len(*dsn) == 0 { + log.Fatal("missing dsn flag") + } + if *id == 0 { + log.Fatal("missing person ID") + } + var err error + + // Opening a driver typically will not attempt to connect to the database. + pool, err = sql.Open("driver-name", *dsn) + if err != nil { + // This will not be a connection error, but a DSN parse error or + // another initialization error. + log.Fatal("unable to use data source name", err) + } + defer pool.Close() + + pool.SetConnMaxLifetime(0) + pool.SetMaxIdleConns(3) + pool.SetMaxOpenConns(3) + + ctx, stop := context.WithCancel(context.Background()) + defer stop() + + appSignal := make(chan os.Signal, 3) + signal.Notify(appSignal, os.Interrupt) + + go func() { + select { + case <-appSignal: + stop() + } + }() + + Ping(ctx) + + Query(ctx, *id) +} + +// Ping the database to verify DSN provided by the user is valid and the +// server accessible. If the ping fails exit the program with an error. 
+func Ping(ctx context.Context) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + + if err := pool.PingContext(ctx); err != nil { + log.Fatalf("unable to connect to database: %v", err) + } +} + +// Query the database for the information requested and prints the results. +// If the query fails exit the program with an error. +func Query(ctx context.Context, id int64) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var name string + err := pool.QueryRowContext(ctx, "select p.name from people as p where p.id = :id;", sql.Named("id", id)).Scan(&name) + if err != nil { + log.Fatal("unable to execute search query", err) + } + log.Println("name=", name) +} diff --git a/src/database/sql/example_service_test.go b/src/database/sql/example_service_test.go new file mode 100644 index 0000000000000..768307c1471a7 --- /dev/null +++ b/src/database/sql/example_service_test.go @@ -0,0 +1,158 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sql_test + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "time" +) + +func Example_openDBService() { + // Opening a driver typically will not attempt to connect to the database. + db, err := sql.Open("driver-name", "database=test1") + if err != nil { + // This will not be a connection error, but a DSN parse error or + // another initialization error. 
+ log.Fatal(err) + } + db.SetConnMaxLifetime(0) + db.SetMaxIdleConns(50) + db.SetMaxOpenConns(50) + + s := &Service{db: db} + + http.ListenAndServe(":8080", s) +} + +type Service struct { + db *sql.DB +} + +func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) { + db := s.db + switch r.URL.Path { + default: + http.Error(w, "not found", http.StatusNotFound) + return + case "/healthz": + ctx, cancel := context.WithTimeout(r.Context(), 1*time.Second) + defer cancel() + + err := s.db.PingContext(ctx) + if err != nil { + http.Error(w, fmt.Sprintf("db down: %v", err), http.StatusFailedDependency) + return + } + w.WriteHeader(http.StatusOK) + return + case "/quick-action": + // This is a short SELECT. Use the request context as the base of + // the context timeout. + ctx, cancel := context.WithTimeout(r.Context(), 3*time.Second) + defer cancel() + + id := 5 + org := 10 + var name string + err := db.QueryRowContext(ctx, ` +select + p.name +from + people as p + join organization as o on p.organization = o.id +where + p.id = :id + and o.id = :org +;`, + sql.Named("id", id), + sql.Named("org", org), + ).Scan(&name) + if err != nil { + if err == sql.ErrNoRows { + http.Error(w, "not found", http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + io.WriteString(w, name) + return + case "/long-action": + // This is a long SELECT. Use the request context as the base of + // the context timeout, but give it some time to finish. If + // the client cancels before the query is done the query will also + // be canceled. 
+ ctx, cancel := context.WithTimeout(r.Context(), 60*time.Second) + defer cancel() + + var names []string + rows, err := db.QueryContext(ctx, "select p.name from people as p where p.active = true;") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + for rows.Next() { + var name string + err = rows.Scan(&name) + if err != nil { + break + } + names = append(names, name) + } + // Check for errors during rows "Close". + // This may be more important if multiple statements are executed + // in a single batch and rows were written as well as read. + if closeErr := rows.Close(); closeErr != nil { + http.Error(w, closeErr.Error(), http.StatusInternalServerError) + return + } + + // Check for row scan error. + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // Check for errors during row iteration. + if err = rows.Err(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + json.NewEncoder(w).Encode(names) + return + case "/async-action": + // This action has side effects that we want to preserve + // even if the client cancels the HTTP request part way through. + // For this we do not use the http request context as a base for + // the timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + var orderRef = "ABC123" + tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable}) + _, err = tx.ExecContext(ctx, "stored_proc_name", orderRef) + + if err != nil { + tx.Rollback() + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + err = tx.Commit() + if err != nil { + http.Error(w, "action in unknown state, check state before attempting again", http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + return + } +} diff --git a/src/database/sql/example_test.go b/src/database/sql/example_test.go index da938b071a121..6f9bd91276edf 100644 --- a/src/database/sql/example_test.go +++ b/src/database/sql/example_test.go @@ -13,8 +13,10 @@ import ( "time" ) -var ctx = context.Background() -var db *sql.DB +var ( + ctx context.Context + db *sql.DB +) func ExampleDB_QueryContext() { age := 27 @@ -24,13 +26,25 @@ func ExampleDB_QueryContext() { } defer rows.Close() names := make([]string, 0) + for rows.Next() { var name string if err := rows.Scan(&name); err != nil { + // Check for a scan error. + // Query rows will be closed with defer. log.Fatal(err) } names = append(names, name) } + // If the database is being written to ensure to check for Close + // errors that may be returned from the driver. The query may + // encounter an auto-commit error and be forced to rollback changes. + rerr := rows.Close() + if rerr != nil { + log.Fatal(err) + } + + // Rows.Err will report the last error encountered by Rows.Scan. 
if err := rows.Err(); err != nil { log.Fatal(err) } @@ -44,11 +58,11 @@ func ExampleDB_QueryRowContext() { err := db.QueryRowContext(ctx, "SELECT username, created_at FROM users WHERE id=?", id).Scan(&username, &created) switch { case err == sql.ErrNoRows: - log.Printf("No user with id %d", id) + log.Printf("no user with id %d\n", id) case err != nil: - log.Fatal(err) + log.Fatalf("query error: %v\n", err) default: - fmt.Printf("Username is %s, account created on %s\n", username, created) + log.Printf("username is %q, account created on %s\n", username, created) } } @@ -63,7 +77,7 @@ func ExampleDB_ExecContext() { log.Fatal(err) } if rows != 1 { - panic(err) + log.Fatalf("expected to affect 1 row, affected %d", rows) } } @@ -104,10 +118,10 @@ from if err := rows.Scan(&id, &name); err != nil { log.Fatal(err) } - fmt.Printf("id %d name is %s\n", id, name) + log.Printf("id %d name is %s\n", id, name) } if !rows.NextResultSet() { - log.Fatal("expected more result sets", rows.Err()) + log.Fatalf("expected more result sets: %v", rows.Err()) } var roleMap = map[int64]string{ 1: "user", @@ -122,7 +136,7 @@ from if err := rows.Scan(&id, &role); err != nil { log.Fatal(err) } - fmt.Printf("id %d has role %s\n", id, roleMap[role]) + log.Printf("id %d has role %s\n", id, roleMap[role]) } if err := rows.Err(); err != nil { log.Fatal(err) @@ -130,11 +144,23 @@ from } func ExampleDB_PingContext() { + // Ping and PingContext may be used to determine if communication with + // the database server is still possible. + // + // When used in a command line application Ping may be used to establish + // that further queries are possible; that the provided DSN is valid. + // + // When used in long running service Ping may be part of the health + // checking system. 
+ ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() + + status := "up" if err := db.PingContext(ctx); err != nil { - log.Fatal(err) + status = "down" } + log.Println(status) } func ExampleConn_BeginTx() { @@ -162,7 +188,7 @@ func ExampleConn_ExecContext() { } defer conn.Close() // Return the connection to the pool. id := 41 - result, err := conn.ExecContext(ctx, `UPDATE balances SET balance = balance + 10 WHERE user_id = ?`, id) + result, err := conn.ExecContext(ctx, `UPDATE balances SET balance = balance + 10 WHERE user_id = ?;`, id) if err != nil { log.Fatal(err) } @@ -171,7 +197,7 @@ func ExampleConn_ExecContext() { log.Fatal(err) } if rows != 1 { - panic(err) + log.Fatalf("expected single row affected, got %d rows affected", rows) } } @@ -184,9 +210,9 @@ func ExampleTx_ExecContext() { _, execErr := tx.ExecContext(ctx, "UPDATE users SET status = ? WHERE id = ?", "paid", id) if execErr != nil { if rollbackErr := tx.Rollback(); rollbackErr != nil { - log.Printf("Could not roll back: %v\n", rollbackErr) + log.Fatalf("update failed: %v, unable to rollback: %v\n", execErr, rollbackErr) } - log.Fatal(execErr) + log.Fatalf("update failed: %v", execErr) } if err := tx.Commit(); err != nil { log.Fatal(err) @@ -199,17 +225,17 @@ func ExampleTx_Rollback() { log.Fatal(err) } id := 53 - _, err = tx.ExecContext(ctx, "UPDATE drivers SET status = ? WHERE id = ?", "assigned", id) + _, err = tx.ExecContext(ctx, "UPDATE drivers SET status = ? 
WHERE id = ?;", "assigned", id) if err != nil { if rollbackErr := tx.Rollback(); rollbackErr != nil { - log.Printf("Could not roll back: %v\n", rollbackErr) + log.Fatalf("update drivers: unable to rollback: %v", rollbackErr) } log.Fatal(err) } - _, err = tx.ExecContext(ctx, "UPDATE pickups SET driver_id = $1", id) + _, err = tx.ExecContext(ctx, "UPDATE pickups SET driver_id = $1;", id) if err != nil { if rollbackErr := tx.Rollback(); rollbackErr != nil { - log.Printf("Could not roll back: %v\n", rollbackErr) + log.Fatalf("update failed: %v, unable to back: %v", err, rollbackErr) } log.Fatal(err) } @@ -225,17 +251,18 @@ func ExampleStmt() { log.Fatal(err) } defer stmt.Close() + // Then reuse it each time you need to issue the query. id := 43 var username string err = stmt.QueryRowContext(ctx, id).Scan(&username) switch { case err == sql.ErrNoRows: - log.Printf("No user with that ID.") + log.Fatalf("no user with id %d", id) case err != nil: log.Fatal(err) default: - fmt.Printf("Username is %s\n", username) + log.Printf("username is %s\n", username) } } @@ -245,17 +272,19 @@ func ExampleStmt_QueryRowContext() { if err != nil { log.Fatal(err) } + defer stmt.Close() + // Then reuse it each time you need to issue the query. id := 43 var username string err = stmt.QueryRowContext(ctx, id).Scan(&username) switch { case err == sql.ErrNoRows: - log.Printf("No user with that ID.") + log.Fatalf("no user with id %d", id) case err != nil: log.Fatal(err) default: - fmt.Printf("Username is %s\n", username) + log.Printf("username is %s\n", username) } } @@ -266,6 +295,7 @@ func ExampleRows() { log.Fatal(err) } defer rows.Close() + names := make([]string, 0) for rows.Next() { var name string @@ -274,8 +304,9 @@ func ExampleRows() { } names = append(names, name) } + // Check for errors from iterating over rows. 
if err := rows.Err(); err != nil { log.Fatal(err) } - fmt.Printf("%s are %d years old", strings.Join(names, ", "), age) + log.Printf("%s are %d years old", strings.Join(names, ", "), age) } diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go index a21bae61bacca..dcdd264baa842 100644 --- a/src/database/sql/fakedb_test.go +++ b/src/database/sql/fakedb_test.go @@ -539,7 +539,7 @@ func (c *fakeConn) prepareCreate(stmt *fakeStmt, parts []string) (*fakeStmt, err } // parts are table|col=?,col2=val -func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (*fakeStmt, error) { +func (c *fakeConn) prepareInsert(ctx context.Context, stmt *fakeStmt, parts []string) (*fakeStmt, error) { if len(parts) != 2 { stmt.Close() return nil, errf("invalid INSERT syntax with %d parts; want 2", len(parts)) @@ -574,6 +574,20 @@ func (c *fakeConn) prepareInsert(stmt *fakeStmt, parts []string) (*fakeStmt, err return nil, errf("invalid conversion to int32 from %q", value) } subsetVal = int64(i) // int64 is a subset type, but not int32 + case "table": // For testing cursor reads. + c.skipDirtySession = true + vparts := strings.Split(value, "!") + + substmt, err := c.PrepareContext(ctx, fmt.Sprintf("SELECT|%s|%s|", vparts[0], strings.Join(vparts[1:], ","))) + if err != nil { + return nil, err + } + cursor, err := (substmt.(driver.StmtQueryContext)).QueryContext(ctx, []driver.NamedValue{}) + substmt.Close() + if err != nil { + return nil, err + } + subsetVal = cursor default: stmt.Close() return nil, errf("unsupported conversion for pre-bound parameter %q to type %q", value, ctype) @@ -658,11 +672,11 @@ func (c *fakeConn) PrepareContext(ctx context.Context, query string) (driver.Stm case "CREATE": stmt, err = c.prepareCreate(stmt, parts) case "INSERT": - stmt, err = c.prepareInsert(stmt, parts) + stmt, err = c.prepareInsert(ctx, stmt, parts) case "NOSERT": // Do all the prep-work like for an INSERT but don't actually insert the row. 
// Used for some of the concurrent tests. - stmt, err = c.prepareInsert(stmt, parts) + stmt, err = c.prepareInsert(ctx, stmt, parts) default: stmt.Close() return nil, errf("unsupported command type %q", cmd) diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index 36179855db2b6..8cdc903c6808d 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -133,6 +133,7 @@ const ( LevelLinearizable ) +// String returns the name of the transaction isolation level. func (i IsolationLevel) String() string { switch i { case LevelDefault: @@ -567,7 +568,6 @@ type finalCloser interface { // addDep notes that x now depends on dep, and x's finalClose won't be // called until all of x's dependencies are removed with removeDep. func (db *DB) addDep(x finalCloser, dep interface{}) { - //println(fmt.Sprintf("addDep(%T %p, %T %p)", x, x, dep, dep)) db.mu.Lock() defer db.mu.Unlock() db.addDepLocked(x, dep) @@ -597,7 +597,6 @@ func (db *DB) removeDep(x finalCloser, dep interface{}) error { } func (db *DB) removeDepLocked(x finalCloser, dep interface{}) func() error { - //println(fmt.Sprintf("removeDep(%T %p, %T %p)", x, x, dep, dep)) xdep, ok := db.dep[x] if !ok { @@ -1322,11 +1321,13 @@ func (db *DB) putConnDBLocked(dc *driverConn, err error) bool { err: err, } return true - } else if err == nil && !db.closed && db.maxIdleConnsLocked() > len(db.freeConn) { - db.freeConn = append(db.freeConn, dc) + } else if err == nil && !db.closed { + if db.maxIdleConnsLocked() > len(db.freeConn) { + db.freeConn = append(db.freeConn, dc) + db.startCleanerLocked() + return true + } db.maxIdleClosed++ - db.startCleanerLocked() - return true } return false } @@ -1697,7 +1698,7 @@ func (db *DB) Conn(ctx context.Context) (*Conn, error) { } } if err == driver.ErrBadConn { - dc, err = db.conn(ctx, cachedOrNewConn) + dc, err = db.conn(ctx, alwaysNewConn) } if err != nil { return nil, err @@ -2255,6 +2256,13 @@ var ( // Stmt is a prepared statement. 
// A Stmt is safe for concurrent use by multiple goroutines. +// +// If a Stmt is prepared on a Tx or Conn, it will be bound to a single +// underlying connection forever. If the Tx or Conn closes, the Stmt will +// become unusable and all operations will return an error. +// If a Stmt is prepared on a DB, it will remain usable for the lifetime of the +// DB. When the Stmt needs to execute on a new underlying connection, it will +// prepare itself on the new connection automatically. type Stmt struct { // Immutable: db *DB // where we came from @@ -2605,6 +2613,15 @@ type Rows struct { lastcols []driver.Value } +// lasterrOrErrLocked returns either lasterr or the provided err. +// rs.closemu must be read-locked. +func (rs *Rows) lasterrOrErrLocked(err error) error { + if rs.lasterr != nil && rs.lasterr != io.EOF { + return rs.lasterr + } + return err +} + func (rs *Rows) initContextClose(ctx, txctx context.Context) { if ctx.Done() == nil && (txctx == nil || txctx.Done() == nil) { return @@ -2681,7 +2698,7 @@ func (rs *Rows) nextLocked() (doClose, ok bool) { return false, true } -// NextResultSet prepares the next result set for reading. It returns true if +// NextResultSet prepares the next result set for reading. It reports whether // there is further result sets, or false if there is no further result set // or if there is an error advancing to it. The Err method should be consulted // to distinguish between the two cases. @@ -2728,23 +2745,22 @@ func (rs *Rows) NextResultSet() bool { func (rs *Rows) Err() error { rs.closemu.RLock() defer rs.closemu.RUnlock() - if rs.lasterr == io.EOF { - return nil - } - return rs.lasterr + return rs.lasterrOrErrLocked(nil) } +var errRowsClosed = errors.New("sql: Rows are closed") +var errNoRows = errors.New("sql: no Rows available") + // Columns returns the column names. -// Columns returns an error if the rows are closed, or if the rows -// are from QueryRow and there was a deferred error. 
+// Columns returns an error if the rows are closed. func (rs *Rows) Columns() ([]string, error) { rs.closemu.RLock() defer rs.closemu.RUnlock() if rs.closed { - return nil, errors.New("sql: Rows are closed") + return nil, rs.lasterrOrErrLocked(errRowsClosed) } if rs.rowsi == nil { - return nil, errors.New("sql: no Rows available") + return nil, rs.lasterrOrErrLocked(errNoRows) } rs.dc.Lock() defer rs.dc.Unlock() @@ -2758,10 +2774,10 @@ func (rs *Rows) ColumnTypes() ([]*ColumnType, error) { rs.closemu.RLock() defer rs.closemu.RUnlock() if rs.closed { - return nil, errors.New("sql: Rows are closed") + return nil, rs.lasterrOrErrLocked(errRowsClosed) } if rs.rowsi == nil { - return nil, errors.New("sql: no Rows available") + return nil, rs.lasterrOrErrLocked(errNoRows) } rs.dc.Lock() defer rs.dc.Unlock() @@ -2812,7 +2828,7 @@ func (ci *ColumnType) ScanType() reflect.Type { return ci.scanType } -// Nullable returns whether the column may be null. +// Nullable reports whether the column may be null. // If a driver does not support this property ok will be false. func (ci *ColumnType) Nullable() (nullable, ok bool) { return ci.nullable, ci.hasNullable @@ -2873,6 +2889,7 @@ func rowsColumnInfoSetupConnLocked(rowsi driver.Rows) []*ColumnType { // *float32, *float64 // *interface{} // *RawBytes +// *Rows (cursor value) // any type implementing Scanner (see Scanner docs) // // In the most simple case, if the type of the value from the source @@ -2909,6 +2926,11 @@ func rowsColumnInfoSetupConnLocked(rowsi driver.Rows) []*ColumnType { // // For scanning into *bool, the source may be true, false, 1, 0, or // string inputs parseable by strconv.ParseBool. +// +// Scan can also convert a cursor returned from a query, such as +// "select cursor(select * from my_table) from dual", into a +// *Rows value that can itself be scanned from. The parent +// select query will close any cursor *Rows if the parent *Rows is closed. 
func (rs *Rows) Scan(dest ...interface{}) error { rs.closemu.RLock() @@ -2917,8 +2939,9 @@ func (rs *Rows) Scan(dest ...interface{}) error { return rs.lasterr } if rs.closed { + err := rs.lasterrOrErrLocked(errRowsClosed) rs.closemu.RUnlock() - return errors.New("sql: Rows are closed") + return err } rs.closemu.RUnlock() @@ -2929,7 +2952,7 @@ func (rs *Rows) Scan(dest ...interface{}) error { return fmt.Errorf("sql: expected %d destination arguments in Scan, not %d", len(rs.lastcols), len(dest)) } for i, sv := range rs.lastcols { - err := convertAssign(dest[i], sv) + err := convertAssignRows(dest[i], sv, rs) if err != nil { return fmt.Errorf(`sql: Scan error on column index %d, name %q: %v`, i, rs.rowsi.Columns()[i], err) } diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index f194744aefe72..64b9dfea5c213 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -397,7 +397,7 @@ func TestQueryContextWait(t *testing.T) { prepares0 := numPrepares(t, db) // TODO(kardianos): convert this from using a timeout to using an explicit - // cancel when the query signals that is is "executing" the query. + // cancel when the query signals that it is "executing" the query. ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond) defer cancel() @@ -597,7 +597,7 @@ func TestPoolExhaustOnCancel(t *testing.T) { state := 0 // waiter will be called for all queries, including - // initial setup queries. The state is only assigned when no + // initial setup queries. The state is only assigned when // no queries are made. 
// // Only allow the first batch of queries to finish once the @@ -1338,6 +1338,52 @@ func TestConnQuery(t *testing.T) { } } +func TestCursorFake(t *testing.T) { + db := newTestDB(t, "people") + defer closeDB(t, db) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + exec(t, db, "CREATE|peoplecursor|list=table") + exec(t, db, "INSERT|peoplecursor|list=people!name!age") + + rows, err := db.QueryContext(ctx, `SELECT|peoplecursor|list|`) + if err != nil { + t.Fatal(err) + } + defer rows.Close() + + if !rows.Next() { + t.Fatal("no rows") + } + var cursor = &Rows{} + err = rows.Scan(cursor) + if err != nil { + t.Fatal(err) + } + defer cursor.Close() + + const expectedRows = 3 + var currentRow int64 + + var n int64 + var s string + for cursor.Next() { + currentRow++ + err = cursor.Scan(&s, &n) + if err != nil { + t.Fatal(err) + } + if n != currentRow { + t.Errorf("expected number(Age)=%d, got %d", currentRow, n) + } + } + if currentRow != expectedRows { + t.Errorf("expected %d rows, got %d rows", expectedRows, currentRow) + } +} + func TestInvalidNilValues(t *testing.T) { var date1 time.Time var date2 int @@ -3415,6 +3461,58 @@ func TestConnectionLeak(t *testing.T) { wg.Wait() } +func TestStatsMaxIdleClosedZero(t *testing.T) { + db := newTestDB(t, "people") + defer closeDB(t, db) + + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(1) + db.SetConnMaxLifetime(0) + + preMaxIdleClosed := db.Stats().MaxIdleClosed + + for i := 0; i < 10; i++ { + rows, err := db.Query("SELECT|people|name|") + if err != nil { + t.Fatal(err) + } + rows.Close() + } + + st := db.Stats() + maxIdleClosed := st.MaxIdleClosed - preMaxIdleClosed + t.Logf("MaxIdleClosed: %d", maxIdleClosed) + if maxIdleClosed != 0 { + t.Fatal("expected 0 max idle closed conns, got: ", maxIdleClosed) + } +} + +func TestStatsMaxIdleClosedTen(t *testing.T) { + db := newTestDB(t, "people") + defer closeDB(t, db) + + db.SetMaxOpenConns(1) + db.SetMaxIdleConns(0) + 
db.SetConnMaxLifetime(0) + + preMaxIdleClosed := db.Stats().MaxIdleClosed + + for i := 0; i < 10; i++ { + rows, err := db.Query("SELECT|people|name|") + if err != nil { + t.Fatal(err) + } + rows.Close() + } + + st := db.Stats() + maxIdleClosed := st.MaxIdleClosed - preMaxIdleClosed + t.Logf("MaxIdleClosed: %d", maxIdleClosed) + if maxIdleClosed != 10 { + t.Fatal("expected 0 max idle closed conns, got: ", maxIdleClosed) + } +} + type nvcDriver struct { fakeDriver skipNamedValueCheck bool diff --git a/src/debug/dwarf/line.go b/src/debug/dwarf/line.go index 4e6e1429d93b9..b862b49d62788 100644 --- a/src/debug/dwarf/line.go +++ b/src/debug/dwarf/line.go @@ -590,7 +590,7 @@ func (r *LineReader) SeekPC(pc uint64, entry *LineEntry) error { } } -// pathIsAbs returns whether path is an absolute path (or "full path +// pathIsAbs reports whether path is an absolute path (or "full path // name" in DWARF parlance). This is in "whatever form makes sense for // the host system", so this accepts both UNIX-style and DOS-style // absolute paths. 
We avoid the filepath package because we want this diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go index 07c03e79996a9..96a67ce732728 100644 --- a/src/debug/elf/elf.go +++ b/src/debug/elf/elf.go @@ -2424,6 +2424,7 @@ const ( R_RISCV_SET8 R_RISCV = 54 /* Local label subtraction */ R_RISCV_SET16 R_RISCV = 55 /* Local label subtraction */ R_RISCV_SET32 R_RISCV = 56 /* Local label subtraction */ + R_RISCV_32_PCREL R_RISCV = 57 /* 32-bit PC relative */ ) var rriscvStrings = []intName{ @@ -2480,6 +2481,7 @@ var rriscvStrings = []intName{ {54, "R_RISCV_SET8"}, {55, "R_RISCV_SET16"}, {56, "R_RISCV_SET32"}, + {57, "R_RISCV_32_PCREL"}, } func (i R_RISCV) String() string { return stringName(uint32(i), rriscvStrings, false) } diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go index 11d8992b71468..d7c1e9f800d86 100644 --- a/src/debug/elf/file_test.go +++ b/src/debug/elf/file_test.go @@ -784,7 +784,7 @@ func TestCompressedSection(t *testing.T) { func TestNoSectionOverlaps(t *testing.T) { // Ensure cmd/link outputs sections without overlaps. switch runtime.GOOS { - case "android", "darwin", "js", "nacl", "plan9", "windows": + case "aix", "android", "darwin", "js", "nacl", "plan9", "windows": t.Skipf("cmd/link doesn't produce ELF binaries on %s", runtime.GOOS) } _ = net.ResolveIPAddr // force dynamic linkage diff --git a/src/debug/gosym/pclntab_test.go b/src/debug/gosym/pclntab_test.go index 7e7cee6793474..d21f0e24a8324 100644 --- a/src/debug/gosym/pclntab_test.go +++ b/src/debug/gosym/pclntab_test.go @@ -5,7 +5,6 @@ package gosym import ( - "bytes" "debug/elf" "internal/testenv" "io/ioutil" @@ -33,33 +32,10 @@ func dotest(t *testing.T) { if err != nil { t.Fatal(err) } - // This command builds pclinetest from pclinetest.asm; - // the resulting binary looks like it was built from pclinetest.s, - // but we have renamed it to keep it away from the go tool. 
pclinetestBinary = filepath.Join(pclineTempDir, "pclinetest") - cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", pclinetestBinary+".o", "pclinetest.asm") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - t.Fatal(err) - } - - // stamp .o file as being 'package main' so that go tool link will accept it - data, err := ioutil.ReadFile(pclinetestBinary + ".o") - if err != nil { - t.Fatal(err) - } - i := bytes.IndexByte(data, '\n') - if i < 0 { - t.Fatal("bad binary") - } - data = append(append(data[:i:i], "\nmain"...), data[i:]...) - if err := ioutil.WriteFile(pclinetestBinary+".o", data, 0666); err != nil { - t.Fatal(err) - } - - cmd = exec.Command(testenv.GoToolPath(t), "tool", "link", "-H", "linux", - "-o", pclinetestBinary, pclinetestBinary+".o") + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", pclinetestBinary) + cmd.Dir = "testdata" + cmd.Env = append(os.Environ(), "GOOS=linux") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { @@ -232,7 +208,7 @@ func TestPCLine(t *testing.T) { } // Test PCToLine - sym := tab.LookupFunc("linefrompc") + sym := tab.LookupFunc("main.linefrompc") wantLine := 0 for pc := sym.Entry; pc < sym.End; pc++ { off := pc - text.Addr // TODO(rsc): should not need off; bug in 8g @@ -244,13 +220,13 @@ func TestPCLine(t *testing.T) { file, line, fn := tab.PCToLine(pc) if fn == nil { t.Errorf("failed to get line of PC %#x", pc) - } else if !strings.HasSuffix(file, "pclinetest.asm") || line != wantLine || fn != sym { - t.Errorf("PCToLine(%#x) = %s:%d (%s), want %s:%d (%s)", pc, file, line, fn.Name, "pclinetest.asm", wantLine, sym.Name) + } else if !strings.HasSuffix(file, "pclinetest.s") || line != wantLine || fn != sym { + t.Errorf("PCToLine(%#x) = %s:%d (%s), want %s:%d (%s)", pc, file, line, fn.Name, "pclinetest.s", wantLine, sym.Name) } } // Test LineToPC - sym = tab.LookupFunc("pcfromline") + sym = tab.LookupFunc("main.pcfromline") lookupline := -1 
wantLine = 0 off := uint64(0) // TODO(rsc): should not need off; bug in 8g diff --git a/src/debug/gosym/symtab.go b/src/debug/gosym/symtab.go index a995209934ffb..a84b7f6def1d9 100644 --- a/src/debug/gosym/symtab.go +++ b/src/debug/gosym/symtab.go @@ -7,11 +7,6 @@ // by the gc compilers. package gosym -// The table format is a variant of the format used in Plan 9's a.out -// format, documented at https://9p.io/magic/man2html/6/a.out. -// The best reference for the differences between the Plan 9 format -// and the Go format is the runtime source, specifically ../../runtime/symtab.c. - import ( "bytes" "encoding/binary" diff --git a/src/debug/gosym/testdata/main.go b/src/debug/gosym/testdata/main.go new file mode 100644 index 0000000000000..b7702184cdef4 --- /dev/null +++ b/src/debug/gosym/testdata/main.go @@ -0,0 +1,10 @@ +package main + +func linefrompc() +func pcfromline() + +func main() { + // Prevent GC of our test symbols + linefrompc() + pcfromline() +} diff --git a/src/debug/gosym/pclinetest.h b/src/debug/gosym/testdata/pclinetest.h similarity index 100% rename from src/debug/gosym/pclinetest.h rename to src/debug/gosym/testdata/pclinetest.h diff --git a/src/debug/gosym/pclinetest.asm b/src/debug/gosym/testdata/pclinetest.s similarity index 96% rename from src/debug/gosym/pclinetest.asm rename to src/debug/gosym/testdata/pclinetest.s index b9ee9c0a50413..53461cdfc1a1e 100644 --- a/src/debug/gosym/pclinetest.asm +++ b/src/debug/gosym/testdata/pclinetest.s @@ -1,4 +1,4 @@ -TEXT linefrompc(SB),4,$0 // Each byte stores its line delta +TEXT ·linefrompc(SB),4,$0 // Each byte stores its line delta BYTE $2; BYTE $1; BYTE $1; BYTE $0; @@ -28,7 +28,7 @@ BYTE $2; BYTE $2; BYTE $255; -TEXT pcfromline(SB),4,$0 // Each record stores its line delta, then n, then n more bytes +TEXT ·pcfromline(SB),4,$0 // Each record stores its line delta, then n, then n more bytes BYTE $32; BYTE $0; BYTE $1; BYTE $1; BYTE $0; BYTE $1; BYTE $0; @@ -46,13 +46,3 @@ BYTE $3; BYTE $3; BYTE $0; 
BYTE $0; BYTE $0; BYTE $4; BYTE $3; BYTE $0; BYTE $0; BYTE $0; BYTE $255; - -// Keep the linker happy -TEXT main·main(SB),4,$0 - RET - -TEXT main·init(SB),4,$0 - // Prevent GC of our test symbols - CALL linefrompc(SB) - CALL pcfromline(SB) - RET diff --git a/src/debug/pe/file.go b/src/debug/pe/file.go index 2f5efae4e6717..1c308b3dc3b0c 100644 --- a/src/debug/pe/file.go +++ b/src/debug/pe/file.go @@ -91,7 +91,7 @@ func NewFile(r io.ReaderAt) (*File, error) { return nil, err } switch f.FileHeader.Machine { - case IMAGE_FILE_MACHINE_UNKNOWN, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_I386: + case IMAGE_FILE_MACHINE_UNKNOWN, IMAGE_FILE_MACHINE_ARMNT, IMAGE_FILE_MACHINE_AMD64, IMAGE_FILE_MACHINE_I386: default: return nil, fmt.Errorf("Unrecognised COFF file header machine value of 0x%x.", f.FileHeader.Machine) } diff --git a/src/debug/pe/file_test.go b/src/debug/pe/file_test.go index 24cd673254e5f..9613af3a3c3c0 100644 --- a/src/debug/pe/file_test.go +++ b/src/debug/pe/file_test.go @@ -298,6 +298,17 @@ const ( linkCgoExternal ) +func getImageBase(f *File) uintptr { + switch oh := f.OptionalHeader.(type) { + case *OptionalHeader32: + return uintptr(oh.ImageBase) + case *OptionalHeader64: + return uintptr(oh.ImageBase) + default: + panic("unexpected optionalheader type") + } +} + func testDWARF(t *testing.T, linktype int) { if runtime.GOOS != "windows" { t.Skip("skipping windows only test") @@ -347,14 +358,15 @@ func testDWARF(t *testing.T, linktype int) { if err != nil { t.Fatalf("running test executable failed: %s %s", err, out) } + t.Logf("Testprog output:\n%s", string(out)) - matches := regexp.MustCompile("main=(.*)\n").FindStringSubmatch(string(out)) + matches := regexp.MustCompile("offset=(.*)\n").FindStringSubmatch(string(out)) if len(matches) < 2 { t.Fatalf("unexpected program output: %s", out) } - wantaddr, err := strconv.ParseUint(matches[1], 0, 64) + wantoffset, err := strconv.ParseUint(matches[1], 0, 64) if err != nil { - t.Fatalf("unexpected main address %q: 
%s", matches[1], err) + t.Fatalf("unexpected main offset %q: %s", matches[1], err) } f, err := Open(exe) @@ -363,6 +375,8 @@ func testDWARF(t *testing.T, linktype int) { } defer f.Close() + imageBase := getImageBase(f) + var foundDebugGDBScriptsSection bool for _, sect := range f.Sections { if sect.Name == ".debug_gdb_scripts" { @@ -389,10 +403,20 @@ func testDWARF(t *testing.T, linktype int) { break } if e.Tag == dwarf.TagSubprogram { - if name, ok := e.Val(dwarf.AttrName).(string); ok && name == "main.main" { - if addr, ok := e.Val(dwarf.AttrLowpc).(uint64); ok && addr == wantaddr { - return + name, ok := e.Val(dwarf.AttrName).(string) + if ok && name == "main.main" { + t.Logf("Found main.main") + addr, ok := e.Val(dwarf.AttrLowpc).(uint64) + if !ok { + t.Fatal("Failed to get AttrLowpc") + } + offset := uintptr(addr) - imageBase + if offset != uintptr(wantoffset) { + t.Fatal("Runtime offset (0x%x) did "+ + "not match dwarf offset "+ + "(0x%x)", wantoffset, offset) } + return } } } @@ -479,11 +503,52 @@ const testprog = ` package main import "fmt" +import "syscall" +import "unsafe" {{if .}}import "C" {{end}} +// struct MODULEINFO from the Windows SDK +type moduleinfo struct { + BaseOfDll uintptr + SizeOfImage uint32 + EntryPoint uintptr +} + +func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +func funcPC(f interface{}) uintptr { + var a uintptr + return **(**uintptr)(add(unsafe.Pointer(&f), unsafe.Sizeof(a))) +} + func main() { + kernel32 := syscall.MustLoadDLL("kernel32.dll") + psapi := syscall.MustLoadDLL("psapi.dll") + getModuleHandle := kernel32.MustFindProc("GetModuleHandleW") + getCurrentProcess := kernel32.MustFindProc("GetCurrentProcess") + getModuleInformation := psapi.MustFindProc("GetModuleInformation") + + procHandle, _, _ := getCurrentProcess.Call() + moduleHandle, _, err := getModuleHandle.Call(0) + if moduleHandle == 0 { + panic(fmt.Sprintf("GetModuleHandle() failed: %d", err)) + } + + var info 
moduleinfo + ret, _, err := getModuleInformation.Call(procHandle, moduleHandle, + uintptr(unsafe.Pointer(&info)), unsafe.Sizeof(info)) + + if ret == 0 { + panic(fmt.Sprintf("GetModuleInformation() failed: %d", err)) + } + + offset := funcPC(main) - info.BaseOfDll + fmt.Printf("base=0x%x\n", info.BaseOfDll) fmt.Printf("main=%p\n", main) + fmt.Printf("offset=0x%x\n", offset) } ` @@ -535,13 +600,15 @@ func TestBuildingWindowsGUI(t *testing.T) { func TestImportTableInUnknownSection(t *testing.T) { if runtime.GOOS != "windows" { - t.Skip("skipping windows only test") + t.Skip("skipping Windows-only test") } - // first we need to find this font driver - path, err := exec.LookPath("atmfd.dll") + // ws2_32.dll import table is located in ".rdata" section, + // so it is good enough to test issue #16103. + const filename = "ws2_32.dll" + path, err := exec.LookPath(filename) if err != nil { - t.Fatalf("unable to locate required file %q in search path: %s", "atmfd.dll", err) + t.Fatalf("unable to locate required file %q in search path: %s", filename, err) } f, err := Open(path) diff --git a/src/debug/pe/pe.go b/src/debug/pe/pe.go index e933ae1c2aa66..3f8099dfab18c 100644 --- a/src/debug/pe/pe.go +++ b/src/debug/pe/pe.go @@ -91,6 +91,7 @@ const ( IMAGE_FILE_MACHINE_AM33 = 0x1d3 IMAGE_FILE_MACHINE_AMD64 = 0x8664 IMAGE_FILE_MACHINE_ARM = 0x1c0 + IMAGE_FILE_MACHINE_ARMNT = 0x1c4 IMAGE_FILE_MACHINE_ARM64 = 0xaa64 IMAGE_FILE_MACHINE_EBC = 0xebc IMAGE_FILE_MACHINE_I386 = 0x14c diff --git a/src/debug/plan9obj/file.go b/src/debug/plan9obj/file.go index c78e35d00082f..314608da61083 100644 --- a/src/debug/plan9obj/file.go +++ b/src/debug/plan9obj/file.go @@ -274,7 +274,7 @@ func newTable(symtab []byte, ptrsz int) ([]Sym, error) { ts.Value = s.value switch s.typ { default: - ts.Name = string(s.name[:]) + ts.Name = string(s.name) case 'z', 'Z': for i := 0; i < len(s.name); i += 2 { eltIdx := binary.BigEndian.Uint16(s.name[i : i+2]) diff --git a/src/encoding/asn1/asn1.go 
b/src/encoding/asn1/asn1.go index 1ed357adfffcc..3cfd9d1276497 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -633,7 +633,7 @@ var ( bigIntType = reflect.TypeOf(new(big.Int)) ) -// invalidLength returns true iff offset + length > sliceLength, or if the +// invalidLength reports whether offset + length > sliceLength, or if the // addition would overflow. func invalidLength(offset, length, sliceLength int) bool { return offset+length < offset || offset+length > sliceLength diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go index c5506ed4de70d..b74054ba40227 100644 --- a/src/encoding/base32/base32_test.go +++ b/src/encoding/base32/base32_test.go @@ -425,7 +425,7 @@ IZJAOZSWY2LUEBSXG43FEBRWS3DMOVWSAZDPNRXXEZJAMV2SAZTVM5UWC5BANZ2WY3DBBJYGC4TJMF NZ2CYIDTOVXHIIDJNYFGG5LMOBQSA4LVNEQG6ZTGNFRWSYJAMRSXGZLSOVXHIIDNN5WGY2LUEBQW42 LNEBUWIIDFON2CA3DBMJXXE5LNFY== ====` - encodedShort := strings.Replace(encoded, "\n", "", -1) + encodedShort := strings.ReplaceAll(encoded, "\n", "") dec := NewDecoder(StdEncoding, strings.NewReader(encoded)) res1, err := ioutil.ReadAll(dec) @@ -465,7 +465,7 @@ func TestWithCustomPadding(t *testing.T) { for _, testcase := range pairs { defaultPadding := StdEncoding.EncodeToString([]byte(testcase.decoded)) customPadding := StdEncoding.WithPadding('@').EncodeToString([]byte(testcase.decoded)) - expected := strings.Replace(defaultPadding, "=", "@", -1) + expected := strings.ReplaceAll(defaultPadding, "=", "@") if expected != customPadding { t.Errorf("Expected custom %s, got %s", expected, customPadding) @@ -675,7 +675,7 @@ func TestWithoutPaddingClose(t *testing.T) { expected := testpair.encoded if encoding.padChar == NoPadding { - expected = strings.Replace(expected, "=", "", -1) + expected = strings.ReplaceAll(expected, "=", "") } res := buf.String() @@ -697,7 +697,7 @@ func TestDecodeReadAll(t *testing.T) { for encIndex, encoding := range encodings { encoded := pair.encoded if 
encoding.padChar == NoPadding { - encoded = strings.Replace(encoded, "=", "", -1) + encoded = strings.ReplaceAll(encoded, "=", "") } decReader, err := ioutil.ReadAll(NewDecoder(encoding, strings.NewReader(encoded))) @@ -723,7 +723,7 @@ func TestDecodeSmallBuffer(t *testing.T) { for encIndex, encoding := range encodings { encoded := pair.encoded if encoding.padChar == NoPadding { - encoded = strings.Replace(encoded, "=", "", -1) + encoded = strings.ReplaceAll(encoded, "=", "") } decoder := NewDecoder(encoding, strings.NewReader(encoded)) diff --git a/src/encoding/base64/base64.go b/src/encoding/base64/base64.go index 9a99370f1e530..0bb37b311a099 100644 --- a/src/encoding/base64/base64.go +++ b/src/encoding/base64/base64.go @@ -270,7 +270,7 @@ func (e CorruptInputError) Error() string { return "illegal base64 data at input byte " + strconv.FormatInt(int64(e), 10) } -// decodeQuantum decodes up to 4 base64 bytes. It takes for parameters +// decodeQuantum decodes up to 4 base64 bytes. The received parameters are // the destination buffer dst, the source buffer src and an index in the // source buffer si. 
// It returns the number of bytes read from src, the number of bytes written @@ -465,10 +465,9 @@ func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { } si := 0 - ilen := len(src) - olen := len(dst) - for strconv.IntSize >= 64 && ilen-si >= 8 && olen-n >= 8 { - if ok := enc.decode64(dst[n:], src[si:]); ok { + for strconv.IntSize >= 64 && len(src)-si >= 8 && len(dst)-n >= 8 { + if dn, ok := enc.decode64(src[si:]); ok { + binary.BigEndian.PutUint64(dst[n:], dn) n += 6 si += 8 } else { @@ -481,8 +480,9 @@ func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { } } - for ilen-si >= 4 && olen-n >= 4 { - if ok := enc.decode32(dst[n:], src[si:]); ok { + for len(src)-si >= 4 && len(dst)-n >= 4 { + if dn, ok := enc.decode32(src[si:]); ok { + binary.BigEndian.PutUint32(dst[n:], dn) n += 3 si += 4 } else { @@ -506,72 +506,70 @@ func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { return n, err } -// decode32 tries to decode 4 base64 char into 3 bytes. -// len(dst) and len(src) must both be >= 4. -// Returns true if decode succeeded. -func (enc *Encoding) decode32(dst, src []byte) bool { - var dn, n uint32 +// decode32 tries to decode 4 base64 characters into 3 bytes, and returns those +// bytes. len(src) must be >= 4. +// Returns (0, false) if decoding failed. +func (enc *Encoding) decode32(src []byte) (dn uint32, ok bool) { + var n uint32 + _ = src[3] if n = uint32(enc.decodeMap[src[0]]); n == 0xff { - return false + return 0, false } dn |= n << 26 if n = uint32(enc.decodeMap[src[1]]); n == 0xff { - return false + return 0, false } dn |= n << 20 if n = uint32(enc.decodeMap[src[2]]); n == 0xff { - return false + return 0, false } dn |= n << 14 if n = uint32(enc.decodeMap[src[3]]); n == 0xff { - return false + return 0, false } dn |= n << 8 - - binary.BigEndian.PutUint32(dst, dn) - return true + return dn, true } -// decode64 tries to decode 8 base64 char into 6 bytes. -// len(dst) and len(src) must both be >= 8. 
-// Returns true if decode succeeded. -func (enc *Encoding) decode64(dst, src []byte) bool { - var dn, n uint64 +// decode64 tries to decode 8 base64 characters into 6 bytes, and returns those +// bytes. len(src) must be >= 8. +// Returns (0, false) if decoding failed. +func (enc *Encoding) decode64(src []byte) (dn uint64, ok bool) { + var n uint64 + _ = src[7] if n = uint64(enc.decodeMap[src[0]]); n == 0xff { - return false + return 0, false } dn |= n << 58 if n = uint64(enc.decodeMap[src[1]]); n == 0xff { - return false + return 0, false } dn |= n << 52 if n = uint64(enc.decodeMap[src[2]]); n == 0xff { - return false + return 0, false } dn |= n << 46 if n = uint64(enc.decodeMap[src[3]]); n == 0xff { - return false + return 0, false } dn |= n << 40 if n = uint64(enc.decodeMap[src[4]]); n == 0xff { - return false + return 0, false } dn |= n << 34 if n = uint64(enc.decodeMap[src[5]]); n == 0xff { - return false + return 0, false } dn |= n << 28 if n = uint64(enc.decodeMap[src[6]]); n == 0xff { - return false + return 0, false } dn |= n << 22 if n = uint64(enc.decodeMap[src[7]]); n == 0xff { - return false + return 0, false } dn |= n << 16 - - binary.BigEndian.PutUint64(dst, dn) - return true + return dn, true } type newlineFilteringReader struct { diff --git a/src/encoding/base64/base64_test.go b/src/encoding/base64/base64_test.go index f019654f5b57e..f7f312ca39324 100644 --- a/src/encoding/base64/base64_test.go +++ b/src/encoding/base64/base64_test.go @@ -53,8 +53,8 @@ func stdRef(ref string) string { // Convert a reference string to URL-encoding func urlRef(ref string) string { - ref = strings.Replace(ref, "+", "-", -1) - ref = strings.Replace(ref, "/", "_", -1) + ref = strings.ReplaceAll(ref, "+", "-") + ref = strings.ReplaceAll(ref, "/", "_") return ref } @@ -72,7 +72,7 @@ func rawURLRef(ref string) string { var funnyEncoding = NewEncoding(encodeStd).WithPadding(rune('@')) func funnyRef(ref string) string { - return strings.Replace(ref, "=", "@", -1) + return 
strings.ReplaceAll(ref, "=", "@") } type encodingTest struct { @@ -418,7 +418,7 @@ j+mSARB/17pKVXYWHXjsj7yIex0PadzXMO1zT5KHoNA3HT8ietoGhgjsfA+CSnvvqh/jJtqsrwOv 2b6NGNzXfTYexzJ+nU7/ALkf4P8Awv6P9KvTQQ4AgyDqCF85Pho3CTB7eHwXoH+LT65uZbX9X+o2 bqbPb06551Y4 ` - encodedShort := strings.Replace(encoded, "\n", "", -1) + encodedShort := strings.ReplaceAll(encoded, "\n", "") dec := NewDecoder(StdEncoding, strings.NewReader(encoded)) res1, err := ioutil.ReadAll(dec) diff --git a/src/encoding/binary/binary.go b/src/encoding/binary/binary.go index 85b3bc2295dc6..8c2d1d9da4719 100644 --- a/src/encoding/binary/binary.go +++ b/src/encoding/binary/binary.go @@ -161,23 +161,17 @@ func (bigEndian) GoString() string { return "binary.BigEndian" } func Read(r io.Reader, order ByteOrder, data interface{}) error { // Fast path for basic types and slices. if n := intDataSize(data); n != 0 { - var b [8]byte - var bs []byte - if n > len(b) { - bs = make([]byte, n) - } else { - bs = b[:n] - } + bs := make([]byte, n) if _, err := io.ReadFull(r, bs); err != nil { return err } switch data := data.(type) { case *bool: - *data = b[0] != 0 + *data = bs[0] != 0 case *int8: - *data = int8(b[0]) + *data = int8(bs[0]) case *uint8: - *data = b[0] + *data = bs[0] case *int16: *data = int16(order.Uint16(bs)) case *uint16: @@ -260,25 +254,19 @@ func Read(r io.Reader, order ByteOrder, data interface{}) error { func Write(w io.Writer, order ByteOrder, data interface{}) error { // Fast path for basic types and slices. 
if n := intDataSize(data); n != 0 { - var b [8]byte - var bs []byte - if n > len(b) { - bs = make([]byte, n) - } else { - bs = b[:n] - } + bs := make([]byte, n) switch v := data.(type) { case *bool: if *v { - b[0] = 1 + bs[0] = 1 } else { - b[0] = 0 + bs[0] = 0 } case bool: if v { - b[0] = 1 + bs[0] = 1 } else { - b[0] = 0 + bs[0] = 0 } case []bool: for i, x := range v { @@ -289,19 +277,19 @@ func Write(w io.Writer, order ByteOrder, data interface{}) error { } } case *int8: - b[0] = byte(*v) + bs[0] = byte(*v) case int8: - b[0] = byte(v) + bs[0] = byte(v) case []int8: for i, x := range v { bs[i] = byte(x) } case *uint8: - b[0] = *v + bs[0] = *v case uint8: - b[0] = v + bs[0] = v case []uint8: - bs = v + bs = v // TODO(josharian): avoid allocating bs in this case? case *int16: order.PutUint16(bs, uint16(*v)) case int16: diff --git a/src/encoding/gob/decoder.go b/src/encoding/gob/decoder.go index 5ef0388862114..b52aabe54b532 100644 --- a/src/encoding/gob/decoder.go +++ b/src/encoding/gob/decoder.go @@ -12,13 +12,14 @@ import ( "sync" ) -// tooBig provides a sanity check for sizes; used in several places. -// Upper limit of 1GB, allowing room to grow a little without overflow. -// TODO: make this adjustable? -const tooBig = 1 << 30 +// tooBig provides a sanity check for sizes; used in several places. Upper limit +// of is 1GB on 32-bit systems, 8GB on 64-bit, allowing room to grow a little +// without overflow. +const tooBig = (1 << 30) << (^uint(0) >> 62) // A Decoder manages the receipt of type and data information read from the -// remote side of a connection. +// remote side of a connection. It is safe for concurrent use by multiple +// goroutines. // // The Decoder does only basic sanity checking on decoded input sizes, // and its limits are not configurable. 
Take caution when decoding gob data diff --git a/src/encoding/gob/encoder.go b/src/encoding/gob/encoder.go index 40ec81b6e6936..53e2cace16674 100644 --- a/src/encoding/gob/encoder.go +++ b/src/encoding/gob/encoder.go @@ -12,7 +12,8 @@ import ( ) // An Encoder manages the transmission of type and data information to the -// other side of a connection. +// other side of a connection. It is safe for concurrent use by multiple +// goroutines. type Encoder struct { mutex sync.Mutex // each item must be sent atomically w []io.Writer // where to send the data diff --git a/src/encoding/gob/encoder_test.go b/src/encoding/gob/encoder_test.go index dc9bbcf35d8b5..825f0d6f03eff 100644 --- a/src/encoding/gob/encoder_test.go +++ b/src/encoding/gob/encoder_test.go @@ -10,7 +10,6 @@ import ( "fmt" "io/ioutil" "reflect" - "runtime" "strings" "testing" ) @@ -1128,24 +1127,3 @@ func TestBadData(t *testing.T) { } } } - -// TestHugeWriteFails tests that enormous messages trigger an error. -func TestHugeWriteFails(t *testing.T) { - if runtime.GOARCH == "wasm" { - t.Skip("out of memory on wasm") - } - if testing.Short() { - // Requires allocating a monster, so don't do this from all.bash. - t.Skip("skipping huge allocation in short mode") - } - huge := make([]byte, tooBig) - huge[0] = 7 // Make sure it's not all zeros. - buf := new(bytes.Buffer) - err := NewEncoder(buf).Encode(huge) - if err == nil { - t.Fatalf("expected error for huge slice") - } - if !strings.Contains(err.Error(), "message too big") { - t.Fatalf("expected 'too big' error; got %s\n", err.Error()) - } -} diff --git a/src/encoding/hex/hex.go b/src/encoding/hex/hex.go index aee5aecb1a757..2bb2b57df9cd9 100644 --- a/src/encoding/hex/hex.go +++ b/src/encoding/hex/hex.go @@ -6,10 +6,10 @@ package hex import ( - "bytes" "errors" "fmt" "io" + "strings" ) const hextable = "0123456789abcdef" @@ -116,7 +116,16 @@ func DecodeString(s string) ([]byte, error) { // Dump returns a string that contains a hex dump of the given data. 
The format // of the hex dump matches the output of `hexdump -C` on the command line. func Dump(data []byte) string { - var buf bytes.Buffer + if len(data) == 0 { + return "" + } + + var buf strings.Builder + // Dumper will write 79 bytes per complete 16 byte chunk, and at least + // 64 bytes for whatever remains. Round the allocation up, since only a + // maximum of 15 bytes will be wasted. + buf.Grow((1 + ((len(data) - 1) / 16)) * 79) + dumper := Dumper(&buf) dumper.Write(data) dumper.Close() diff --git a/src/encoding/hex/hex_test.go b/src/encoding/hex/hex_test.go index 6ba054ef9a0df..e9f4b3a53adef 100644 --- a/src/encoding/hex/hex_test.go +++ b/src/encoding/hex/hex_test.go @@ -248,3 +248,16 @@ func BenchmarkEncode(b *testing.B) { }) } } + +func BenchmarkDump(b *testing.B) { + for _, size := range []int{256, 1024, 4096, 16384} { + src := bytes.Repeat([]byte{2, 3, 5, 7, 9, 11, 13, 17}, size/8) + sink = make([]byte, 2*size) + + b.Run(fmt.Sprintf("%v", size), func(b *testing.B) { + for i := 0; i < b.N; i++ { + Dump(src) + } + }) + } +} diff --git a/src/encoding/json/bench_test.go b/src/encoding/json/bench_test.go index bd322db2e6ffe..72cb349062c4a 100644 --- a/src/encoding/json/bench_test.go +++ b/src/encoding/json/bench_test.go @@ -114,6 +114,34 @@ func BenchmarkCodeMarshal(b *testing.B) { b.SetBytes(int64(len(codeJSON))) } +func benchMarshalBytes(n int) func(*testing.B) { + sample := []byte("hello world") + // Use a struct pointer, to avoid an allocation when passing it as an + // interface parameter to Marshal. + v := &struct { + Bytes []byte + }{ + bytes.Repeat(sample, (n/len(sample))+1)[:n], + } + return func(b *testing.B) { + for i := 0; i < b.N; i++ { + if _, err := Marshal(v); err != nil { + b.Fatal("Marshal:", err) + } + } + } +} + +func BenchmarkMarshalBytes(b *testing.B) { + // 32 fits within encodeState.scratch. 
+ b.Run("32", benchMarshalBytes(32)) + // 256 doesn't fit in encodeState.scratch, but is small enough to + // allocate and avoid the slower base64.NewEncoder. + b.Run("256", benchMarshalBytes(256)) + // 4096 is large enough that we want to avoid allocating for it. + b.Run("4096", benchMarshalBytes(4096)) +} + func BenchmarkCodeDecoder(b *testing.B) { if codeJSON == nil { b.StopTimer() diff --git a/src/encoding/json/decode.go b/src/encoding/json/decode.go index 0b29249218a32..731553dca6b48 100644 --- a/src/encoding/json/decode.go +++ b/src/encoding/json/decode.go @@ -11,7 +11,6 @@ import ( "bytes" "encoding" "encoding/base64" - "errors" "fmt" "reflect" "strconv" @@ -179,7 +178,7 @@ func (d *decodeState) unmarshal(v interface{}) error { // test must be applied at the top level of the value. err := d.value(rv) if err != nil { - return err + return d.addErrorContext(err) } return d.savedError } @@ -267,7 +266,7 @@ type decodeState struct { opcode int // last read result scan scanner errorContext struct { // provides context for type errors - Struct string + Struct reflect.Type Field string } savedError error @@ -280,16 +279,16 @@ func (d *decodeState) readIndex() int { return d.off - 1 } -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" 
func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil - d.errorContext.Struct = "" + d.errorContext.Struct = nil d.errorContext.Field = "" return d } @@ -304,10 +303,10 @@ func (d *decodeState) saveError(err error) { // addErrorContext returns a new error enhanced with information from d.errorContext func (d *decodeState) addErrorContext(err error) error { - if d.errorContext.Struct != "" || d.errorContext.Field != "" { + if d.errorContext.Struct != nil || d.errorContext.Field != "" { switch err := err.(type) { case *UnmarshalTypeError: - err.Struct = d.errorContext.Struct + err.Struct = d.errorContext.Struct.Name() err.Field = d.errorContext.Field return err } @@ -332,13 +331,12 @@ func (d *decodeState) skip() { // scanNext processes the byte at d.data[d.off]. func (d *decodeState) scanNext() { - s, data, i := &d.scan, d.data, d.off - if i < len(data) { - d.opcode = s.step(s, data[i]) - d.off = i + 1 + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ } else { - d.opcode = s.eof() - d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 } } @@ -346,7 +344,7 @@ func (d *decodeState) scanNext() { // receives a scan code not equal to op. 
func (d *decodeState) scanWhile(op int) { s, data, i := &d.scan, d.data, d.off - for i < len(d.data) { + for i < len(data) { newOp := s.step(s, data[i]) i++ if newOp != op { @@ -356,7 +354,7 @@ func (d *decodeState) scanWhile(op int) { } } - d.off = len(d.data) + 1 // mark processed EOF with len+1 + d.off = len(data) + 1 // mark processed EOF with len+1 d.opcode = d.scan.eof() } @@ -366,7 +364,7 @@ func (d *decodeState) scanWhile(op int) { func (d *decodeState) value(v reflect.Value) error { switch d.opcode { default: - return errPhase + panic(phasePanicMsg) case scanBeginArray: if v.IsValid() { @@ -408,30 +406,23 @@ type unquotedValue struct{} // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() (interface{}, error) { +func (d *decodeState) valueQuoted() interface{} { switch d.opcode { default: - return nil, errPhase + panic(phasePanicMsg) - case scanBeginArray: - d.skip() - d.scanNext() - - case scanBeginObject: + case scanBeginArray, scanBeginObject: d.skip() d.scanNext() case scanBeginLiteral: - v, err := d.literalInterface() - if err != nil { - return nil, err - } + v := d.literalInterface() switch v.(type) { case nil, string: - return v, nil + return v } } - return unquotedValue{}, nil + return unquotedValue{} } // indirect walks down v allocating pointers as needed, @@ -482,7 +473,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm if v.IsNil() { v.Set(reflect.New(v.Type().Elem())) } - if v.Type().NumMethod() > 0 { + if v.Type().NumMethod() > 0 && v.CanInterface() { if u, ok := v.Interface().(Unmarshaler); ok { return u, nil, reflect.Value{} } @@ -525,10 +516,7 @@ func (d *decodeState) array(v reflect.Value) error { case reflect.Interface: if v.NumMethod() == 0 { // Decoding into nil interface? Switch to non-reflect code. 
- ai, err := d.arrayInterface() - if err != nil { - return err - } + ai := d.arrayInterface() v.Set(reflect.ValueOf(ai)) return nil } @@ -538,8 +526,7 @@ func (d *decodeState) array(v reflect.Value) error { d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) d.skip() return nil - case reflect.Array: - case reflect.Slice: + case reflect.Array, reflect.Slice: break } @@ -589,7 +576,7 @@ func (d *decodeState) array(v reflect.Value) error { break } if d.opcode != scanArrayValue { - return errPhase + panic(phasePanicMsg) } } @@ -611,7 +598,7 @@ func (d *decodeState) array(v reflect.Value) error { } var nullLiteral = []byte("null") -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() // object consumes an object from d.data[d.off-1:], decoding into v. // The first byte ('{') of the object has been read already. @@ -629,17 +616,17 @@ func (d *decodeState) object(v reflect.Value) error { return nil } v = pv + t := v.Type() // Decoding into nil interface? Switch to non-reflect code. if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - oi, err := d.objectInterface() - if err != nil { - return err - } + oi := d.objectInterface() v.Set(reflect.ValueOf(oi)) return nil } + var fields []field + // Check type of target: // struct or // map[T1]T2 where T1 is string, an integer type, @@ -648,14 +635,13 @@ func (d *decodeState) object(v reflect.Value) error { case reflect.Map: // Map key must either have string kind, have an integer kind, // or be an encoding.TextUnmarshaler. 
- t := v.Type() switch t.Key().Kind() { case reflect.String, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { - d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil } @@ -664,14 +650,16 @@ func (d *decodeState) object(v reflect.Value) error { v.Set(reflect.MakeMap(t)) } case reflect.Struct: + fields = cachedTypeFields(t) // ok default: - d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil } var mapElem reflect.Value + originalErrorContext := d.errorContext for { // Read opening " of string key or closing }. @@ -681,7 +669,7 @@ func (d *decodeState) object(v reflect.Value) error { break } if d.opcode != scanBeginLiteral { - return errPhase + panic(phasePanicMsg) } // Read key. @@ -690,7 +678,7 @@ func (d *decodeState) object(v reflect.Value) error { item := d.data[start:d.readIndex()] key, ok := unquoteBytes(item) if !ok { - return errPhase + panic(phasePanicMsg) } // Figure out field corresponding to key. 
@@ -698,7 +686,7 @@ func (d *decodeState) object(v reflect.Value) error { destring := false // whether the value is wrapped in a string to be decoded first if v.Kind() == reflect.Map { - elemType := v.Type().Elem() + elemType := t.Elem() if !mapElem.IsValid() { mapElem = reflect.New(elemType).Elem() } else { @@ -707,7 +695,6 @@ func (d *decodeState) object(v reflect.Value) error { subv = mapElem } else { var f *field - fields := cachedTypeFields(v.Type()) for i := range fields { ff := &fields[i] if bytes.Equal(ff.nameBytes, key) { @@ -744,7 +731,7 @@ func (d *decodeState) object(v reflect.Value) error { subv = subv.Field(i) } d.errorContext.Field = f.name - d.errorContext.Struct = v.Type().Name() + d.errorContext.Struct = t } else if d.disallowUnknownFields { d.saveError(fmt.Errorf("json: unknown field %q", key)) } @@ -755,16 +742,12 @@ func (d *decodeState) object(v reflect.Value) error { d.scanWhile(scanSkipSpace) } if d.opcode != scanObjectKey { - return errPhase + panic(phasePanicMsg) } d.scanWhile(scanSkipSpace) if destring { - q, err := d.valueQuoted() - if err != nil { - return err - } - switch qv := q.(type) { + switch qv := d.valueQuoted().(type) { case nil: if err := d.literalStore(nullLiteral, subv, false); err != nil { return err @@ -785,13 +768,13 @@ func (d *decodeState) object(v reflect.Value) error { // Write value back to map; // if using struct, subv points into struct already. 
if v.Kind() == reflect.Map { - kt := v.Type().Key() + kt := t.Key() var kv reflect.Value switch { case kt.Kind() == reflect.String: kv = reflect.ValueOf(key).Convert(kt) case reflect.PtrTo(kt).Implements(textUnmarshalerType): - kv = reflect.New(v.Type().Key()) + kv = reflect.New(kt) if err := d.literalStore(item, kv, true); err != nil { return err } @@ -803,7 +786,7 @@ func (d *decodeState) object(v reflect.Value) error { n, err := strconv.ParseInt(s, 10, 64) if err != nil || reflect.Zero(kt).OverflowInt(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) - return nil + break } kv = reflect.ValueOf(n).Convert(kt) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: @@ -811,14 +794,16 @@ func (d *decodeState) object(v reflect.Value) error { n, err := strconv.ParseUint(s, 10, 64) if err != nil || reflect.Zero(kt).OverflowUint(n) { d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) - return nil + break } kv = reflect.ValueOf(n).Convert(kt) default: panic("json: Unexpected key type") // should never occur } } - v.SetMapIndex(kv, subv) + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } } // Next token must be , or }. 
@@ -829,11 +814,10 @@ func (d *decodeState) object(v reflect.Value) error { break } if d.opcode != scanObjectValue { - return errPhase + panic(phasePanicMsg) } - d.errorContext.Struct = "" - d.errorContext.Field = "" + d.errorContext = originalErrorContext } return nil } @@ -874,18 +858,16 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool if item[0] != '"' { if fromQuoted { d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - var val string - switch item[0] { - case 'n': - val = "null" - case 't', 'f': - val = "bool" - default: - val = "number" - } - d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) return nil } s, ok := unquoteBytes(item) @@ -893,7 +875,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } - return errPhase + panic(phasePanicMsg) } return ut.UnmarshalText(s) } @@ -944,7 +926,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } - return errPhase + panic(phasePanicMsg) } switch v.Kind() { default: @@ -976,7 +958,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } - return errPhase + panic(phasePanicMsg) } s := string(item) switch v.Kind() { @@ -991,7 +973,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, 
fromQuoted bool if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) } - return &UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())} + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) case reflect.Interface: n, err := d.convertNumber(s) if err != nil { @@ -1037,24 +1019,24 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool // but they avoid the weight of reflection in this common case. // valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() (val interface{}, err error) { +func (d *decodeState) valueInterface() (val interface{}) { switch d.opcode { default: - err = errPhase + panic(phasePanicMsg) case scanBeginArray: - val, err = d.arrayInterface() + val = d.arrayInterface() d.scanNext() case scanBeginObject: - val, err = d.objectInterface() + val = d.objectInterface() d.scanNext() case scanBeginLiteral: - val, err = d.literalInterface() + val = d.literalInterface() } return } // arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() ([]interface{}, error) { +func (d *decodeState) arrayInterface() []interface{} { var v = make([]interface{}, 0) for { // Look ahead for ] - can only happen on first iteration. @@ -1063,11 +1045,7 @@ func (d *decodeState) arrayInterface() ([]interface{}, error) { break } - vi, err := d.valueInterface() - if err != nil { - return nil, err - } - v = append(v, vi) + v = append(v, d.valueInterface()) // Next token must be , or ]. if d.opcode == scanSkipSpace { @@ -1077,14 +1055,14 @@ func (d *decodeState) arrayInterface() ([]interface{}, error) { break } if d.opcode != scanArrayValue { - return nil, errPhase + panic(phasePanicMsg) } } - return v, nil + return v } // objectInterface is like object but returns map[string]interface{}. 
-func (d *decodeState) objectInterface() (map[string]interface{}, error) { +func (d *decodeState) objectInterface() map[string]interface{} { m := make(map[string]interface{}) for { // Read opening " of string key or closing }. @@ -1094,7 +1072,7 @@ func (d *decodeState) objectInterface() (map[string]interface{}, error) { break } if d.opcode != scanBeginLiteral { - return nil, errPhase + panic(phasePanicMsg) } // Read string key. @@ -1103,7 +1081,7 @@ func (d *decodeState) objectInterface() (map[string]interface{}, error) { item := d.data[start:d.readIndex()] key, ok := unquote(item) if !ok { - return nil, errPhase + panic(phasePanicMsg) } // Read : before value. @@ -1111,16 +1089,12 @@ func (d *decodeState) objectInterface() (map[string]interface{}, error) { d.scanWhile(scanSkipSpace) } if d.opcode != scanObjectKey { - return nil, errPhase + panic(phasePanicMsg) } d.scanWhile(scanSkipSpace) // Read value. - vi, err := d.valueInterface() - if err != nil { - return nil, err - } - m[key] = vi + m[key] = d.valueInterface() // Next token must be , or }. if d.opcode == scanSkipSpace { @@ -1130,16 +1104,16 @@ func (d *decodeState) objectInterface() (map[string]interface{}, error) { break } if d.opcode != scanObjectValue { - return nil, errPhase + panic(phasePanicMsg) } } - return m, nil + return m } // literalInterface consumes and returns a literal from d.data[d.off-1:] and // it reads the following byte ahead. The first byte of the literal has been // read already (that's how the caller knows it's a literal). -func (d *decodeState) literalInterface() (interface{}, error) { +func (d *decodeState) literalInterface() interface{} { // All bytes inside literal return scanContinue op code. 
start := d.readIndex() d.scanWhile(scanContinue) @@ -1148,27 +1122,27 @@ func (d *decodeState) literalInterface() (interface{}, error) { switch c := item[0]; c { case 'n': // null - return nil, nil + return nil case 't', 'f': // true, false - return c == 't', nil + return c == 't' case '"': // string s, ok := unquote(item) if !ok { - return nil, errPhase + panic(phasePanicMsg) } - return s, nil + return s default: // number if c != '-' && (c < '0' || c > '9') { - return nil, errPhase + panic(phasePanicMsg) } n, err := d.convertNumber(string(item)) if err != nil { d.saveError(err) } - return n, nil + return n } } diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go index ab83b81bb3958..54432600a533a 100644 --- a/src/encoding/json/decode_test.go +++ b/src/encoding/json/decode_test.go @@ -41,6 +41,16 @@ type VOuter struct { V V } +type W struct { + S SS +} + +type SS string + +func (*SS) UnmarshalJSON(data []byte) error { + return &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(SS(""))} +} + // ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and // without UseNumber var ifaceNumAsFloat64 = map[string]interface{}{ @@ -142,7 +152,7 @@ var ( umstructXY = ustructText{unmarshalerText{"x", "y"}} ummapType = map[unmarshalerText]bool{} - ummapXY = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true} + ummapXY = map[unmarshalerText]bool{{"x", "y"}: true} ) // Test data structures for anonymous fields. 
@@ -256,6 +266,10 @@ type XYZ struct { Z interface{} } +type unexportedWithMethods struct{} + +func (unexportedWithMethods) F() {} + func sliceAddr(x []int) *[]int { return &x } func mapAddr(x map[string]int) *map[string]int { return &x } @@ -371,6 +385,10 @@ func (b *intWithPtrMarshalText) UnmarshalText(data []byte) error { return (*intWithMarshalText)(b).UnmarshalText(data) } +type mapStringToStringData struct { + Data map[string]string `json:"data"` +} + type unmarshalTest struct { in string ptr interface{} @@ -401,8 +419,10 @@ var unmarshalTests = []unmarshalTest{ {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, {in: "null", ptr: new(interface{}), out: nil}, {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7, "T", "X"}}, + {in: `{"X": 23}`, ptr: new(T), out: T{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(""), 8, "T", "X"}}, {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, {in: `{"x": 1}`, ptr: new(tx), err: fmt.Errorf("json: unknown field \"x\""), disallowUnknownFields: true}, + {in: `{"S": 23}`, ptr: new(W), out: W{}, err: &UnmarshalTypeError{"number", reflect.TypeOf(SS("")), 0, "W", "S"}}, {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, @@ -429,6 +449,7 @@ var unmarshalTests = []unmarshalTest{ {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, + {in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of 
JSON input", Offset: 5}}, // raw value errors {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, @@ -444,6 +465,7 @@ var unmarshalTests = []unmarshalTest{ {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + {in: `[1, 2, 3]`, ptr: new(MustNotUnmarshalJSON), err: errors.New("MustNotUnmarshalJSON was used")}, // empty array to interface test {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, @@ -536,6 +558,16 @@ var unmarshalTests = []unmarshalTest{ ptr: new(map[uint8]string), err: &UnmarshalTypeError{Value: "number -1", Type: reflect.TypeOf(uint8(0)), Offset: 2}, }, + { + in: `{"F":{"a":2,"3":4}}`, + ptr: new(map[string]map[int]int), + err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(int(0)), Offset: 7}, + }, + { + in: `{"F":{"a":2,"3":4}}`, + ptr: new(map[string]map[uint]int), + err: &UnmarshalTypeError{Value: "number a", Type: reflect.TypeOf(uint(0)), Offset: 7}, + }, // Map keys can be encoding.TextUnmarshalers. 
{in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY}, @@ -810,6 +842,7 @@ var unmarshalTests = []unmarshalTest{ {in: `{"B": "False"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "False" into bool`)}, {in: `{"B": "null"}`, ptr: new(B), out: B{false}}, {in: `{"B": "nul"}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal "nul" into bool`)}, + {in: `{"B": [2, 3]}`, ptr: new(B), err: errors.New(`json: invalid use of ,string struct tag, trying to unmarshal unquoted value into bool`)}, // additional tests for disallowUnknownFields { @@ -866,6 +899,30 @@ var unmarshalTests = []unmarshalTest{ err: fmt.Errorf("json: unknown field \"extra\""), disallowUnknownFields: true, }, + // issue 26444 + // UnmarshalTypeError without field & struct values + { + in: `{"data":{"test1": "bob", "test2": 123}}`, + ptr: new(mapStringToStringData), + err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 37, Struct: "mapStringToStringData", Field: "data"}, + }, + { + in: `{"data":{"test1": 123, "test2": "bob"}}`, + ptr: new(mapStringToStringData), + err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeOf(""), Offset: 21, Struct: "mapStringToStringData", Field: "data"}, + }, + + // trying to decode JSON arrays or objects via TextUnmarshaler + { + in: `[1, 2, 3]`, + ptr: new(MustNotUnmarshalText), + err: &UnmarshalTypeError{Value: "array", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1}, + }, + { + in: `{"foo": "bar"}`, + ptr: new(MustNotUnmarshalText), + err: &UnmarshalTypeError{Value: "object", Type: reflect.TypeOf(&MustNotUnmarshalText{}), Offset: 1}, + }, } func TestMarshal(t *testing.T) { @@ -1927,10 +1984,12 @@ type unexportedFields struct { Name string m map[string]interface{} `json:"-"` m2 map[string]interface{} `json:"abcd"` + + s []int `json:"-"` } func TestUnmarshalUnexported(t *testing.T) { - input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, 
"abcd": {"z": 789}}` + input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}, "s": [2, 3]}` want := &unexportedFields{Name: "Bob"} out := &unexportedFields{} @@ -2096,6 +2155,9 @@ func TestInvalidStringOption(t *testing.T) { // // (Issue 24152) If the embedded struct is given an explicit name, // ensure that the normal unmarshal logic does not panic in reflect. +// +// (Issue 28145) If the embedded struct is given an explicit name and has +// exported methods, don't cause a panic trying to get its value. func TestUnmarshalEmbeddedUnexported(t *testing.T) { type ( embed1 struct{ Q int } @@ -2135,6 +2197,9 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) { embed2 `json:"embed2"` Q int } + S9 struct { + unexportedWithMethods `json:"embed"` + } ) tests := []struct { @@ -2196,6 +2261,11 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) { in: `{"embed1": {"Q": 1}, "embed2": {"Q": 2}, "Q": 3}`, ptr: new(S8), out: &S8{embed1{1}, embed2{2}, 3}, + }, { + // Issue 228145, similar to the cases above. + in: `{"embed": {}}`, + ptr: new(S9), + out: &S9{}, }} for i, tt := range tests { diff --git a/src/encoding/json/encode.go b/src/encoding/json/encode.go index 28ca5fe9e009f..dea63f1850fef 100644 --- a/src/encoding/json/encode.go +++ b/src/encoding/json/encode.go @@ -259,6 +259,7 @@ func (e *InvalidUTF8Error) Error() string { return "json: invalid UTF-8 in string: " + strconv.Quote(e.S) } +// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method. type MarshalerError struct { Type reflect.Type Err error @@ -381,8 +382,8 @@ func typeEncoder(t reflect.Type) encoderFunc { } var ( - marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() ) // newTypeEncoder constructs an encoderFunc for a type. 
@@ -624,40 +625,49 @@ func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) { } type structEncoder struct { - fields []field - fieldEncs []encoderFunc + fields []field } -func (se *structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { - e.WriteByte('{') - first := true - for i, f := range se.fields { - fv := fieldByIndex(v, f.index) - if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { +func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { + next := byte('{') +FieldLoop: + for i := range se.fields { + f := &se.fields[i] + + // Find the nested struct field by following f.index. + fv := v + for _, i := range f.index { + if fv.Kind() == reflect.Ptr { + if fv.IsNil() { + continue FieldLoop + } + fv = fv.Elem() + } + fv = fv.Field(i) + } + + if f.omitEmpty && isEmptyValue(fv) { continue } - if first { - first = false + e.WriteByte(next) + next = ',' + if opts.escapeHTML { + e.WriteString(f.nameEscHTML) } else { - e.WriteByte(',') + e.WriteString(f.nameNonEsc) } - e.string(f.name, opts.escapeHTML) - e.WriteByte(':') opts.quoted = f.quoted - se.fieldEncs[i](e, fv, opts) + f.encoder(e, fv, opts) + } + if next == '{' { + e.WriteString("{}") + } else { + e.WriteByte('}') } - e.WriteByte('}') } func newStructEncoder(t reflect.Type) encoderFunc { - fields := cachedTypeFields(t) - se := &structEncoder{ - fields: fields, - fieldEncs: make([]encoderFunc, len(fields)), - } - for i, f := range fields { - se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) - } + se := structEncoder{fields: cachedTypeFields(t)} return se.encode } @@ -665,7 +675,7 @@ type mapEncoder struct { elemEnc encoderFunc } -func (me *mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { +func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return @@ -704,7 +714,7 @@ func newMapEncoder(t reflect.Type) encoderFunc { return unsupportedTypeEncoder } } - me := 
&mapEncoder{typeEncoder(t.Elem())} + me := mapEncoder{typeEncoder(t.Elem())} return me.encode } @@ -715,14 +725,22 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) { } s := v.Bytes() e.WriteByte('"') - if len(s) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(s))) + encodedLen := base64.StdEncoding.EncodedLen(len(s)) + if encodedLen <= len(e.scratch) { + // If the encoded bytes fit in e.scratch, avoid an extra + // allocation and use the cheaper Encoding.Encode. + dst := e.scratch[:encodedLen] + base64.StdEncoding.Encode(dst, s) + e.Write(dst) + } else if encodedLen <= 1024 { + // The encoded bytes are short enough to allocate for, and + // Encoding.Encode is still cheaper. + dst := make([]byte, encodedLen) base64.StdEncoding.Encode(dst, s) e.Write(dst) } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. + // The encoded bytes are too long to cheaply allocate, and + // Encoding.Encode is no longer noticeably cheaper. 
enc := base64.NewEncoder(base64.StdEncoding, e) enc.Write(s) enc.Close() @@ -735,7 +753,7 @@ type sliceEncoder struct { arrayEnc encoderFunc } -func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { +func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return @@ -751,7 +769,7 @@ func newSliceEncoder(t reflect.Type) encoderFunc { return encodeByteSlice } } - enc := &sliceEncoder{newArrayEncoder(t)} + enc := sliceEncoder{newArrayEncoder(t)} return enc.encode } @@ -759,7 +777,7 @@ type arrayEncoder struct { elemEnc encoderFunc } -func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { +func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteByte('[') n := v.Len() for i := 0; i < n; i++ { @@ -772,7 +790,7 @@ func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { } func newArrayEncoder(t reflect.Type) encoderFunc { - enc := &arrayEncoder{typeEncoder(t.Elem())} + enc := arrayEncoder{typeEncoder(t.Elem())} return enc.encode } @@ -780,7 +798,7 @@ type ptrEncoder struct { elemEnc encoderFunc } -func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { +func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.IsNil() { e.WriteString("null") return @@ -789,7 +807,7 @@ func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { } func newPtrEncoder(t reflect.Type) encoderFunc { - enc := &ptrEncoder{typeEncoder(t.Elem())} + enc := ptrEncoder{typeEncoder(t.Elem())} return enc.encode } @@ -797,7 +815,7 @@ type condAddrEncoder struct { canAddrEnc, elseEnc encoderFunc } -func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { +func (ce condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if v.CanAddr() { ce.canAddrEnc(e, v, opts) } else { @@ -808,7 +826,7 @@ func (ce *condAddrEncoder) encode(e *encodeState, v 
reflect.Value, opts encOpts) // newCondAddrEncoder returns an encoder that checks whether its value // CanAddr and delegates to canAddrEnc if so, else to elseEnc. func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { - enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + enc := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} return enc.encode } @@ -822,28 +840,13 @@ func isValidTag(s string) bool { // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false } } return true } -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for _, i := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - } - v = v.Field(i) - } - return v -} - func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { @@ -893,18 +896,15 @@ func (e *encodeState) string(s string, escapeHTML bool) { if start < i { e.WriteString(s[start:i]) } + e.WriteByte('\\') switch b { case '\\', '"': - e.WriteByte('\\') e.WriteByte(b) case '\n': - e.WriteByte('\\') e.WriteByte('n') case '\r': - e.WriteByte('\\') e.WriteByte('r') case '\t': - e.WriteByte('\\') e.WriteByte('t') default: // This encodes bytes < 0x20 except for \t, \n and \r. @@ -912,7 +912,7 @@ func (e *encodeState) string(s string, escapeHTML bool) { // because they can lead to security holes when // user-controlled strings are rendered into JSON // and served to some browsers. 
- e.WriteString(`\u00`) + e.WriteString(`u00`) e.WriteByte(hex[b>>4]) e.WriteByte(hex[b&0xF]) } @@ -968,18 +968,15 @@ func (e *encodeState) stringBytes(s []byte, escapeHTML bool) { if start < i { e.Write(s[start:i]) } + e.WriteByte('\\') switch b { case '\\', '"': - e.WriteByte('\\') e.WriteByte(b) case '\n': - e.WriteByte('\\') e.WriteByte('n') case '\r': - e.WriteByte('\\') e.WriteByte('r') case '\t': - e.WriteByte('\\') e.WriteByte('t') default: // This encodes bytes < 0x20 except for \t, \n and \r. @@ -987,7 +984,7 @@ func (e *encodeState) stringBytes(s []byte, escapeHTML bool) { // because they can lead to security holes when // user-controlled strings are rendered into JSON // and served to some browsers. - e.WriteString(`\u00`) + e.WriteString(`u00`) e.WriteByte(hex[b>>4]) e.WriteByte(hex[b&0xF]) } @@ -1036,17 +1033,16 @@ type field struct { nameBytes []byte // []byte(name) equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + nameNonEsc string // `"` + name + `":` + nameEscHTML string // `"` + HTMLEscape(name) + `":` + tag bool index []int typ reflect.Type omitEmpty bool quoted bool -} -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f + encoder encoderFunc } // byIndex sorts field by index sequence. @@ -1086,6 +1082,9 @@ func typeFields(t reflect.Type) []field { // Fields found. var fields []field + // Buffer to run HTMLEscape on field names. 
+ var nameEscBuf bytes.Buffer + for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} @@ -1152,14 +1151,26 @@ func typeFields(t reflect.Type) []field { if name == "" { name = sf.Name } - fields = append(fields, fillField(field{ + field := field{ name: name, tag: tagged, index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), quoted: quoted, - })) + } + field.nameBytes = []byte(field.name) + field.equalFold = foldFunc(field.nameBytes) + + // Build nameEscHTML and nameNonEsc ahead of time. + nameEscBuf.Reset() + nameEscBuf.WriteString(`"`) + HTMLEscape(&nameEscBuf, field.nameBytes) + nameEscBuf.WriteString(`":`) + field.nameEscHTML = nameEscBuf.String() + field.nameNonEsc = `"` + field.name + `":` + + fields = append(fields, field) if count[f.typ] > 1 { // If there were multiple instances, add a second, // so that the annihilation code will see a duplicate. @@ -1173,7 +1184,7 @@ func typeFields(t reflect.Type) []field { // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + next = append(next, field{name: ft.Name(), index: index, typ: ft}) } } } @@ -1227,6 +1238,10 @@ func typeFields(t reflect.Type) []field { fields = out sort.Sort(byIndex(fields)) + for i := range fields { + f := &fields[i] + f.encoder = typeEncoder(typeByIndex(t, f.index)) + } return fields } diff --git a/src/encoding/json/encode_test.go b/src/encoding/json/encode_test.go index b90483cf35f91..cd5eadf3c1cd2 100644 --- a/src/encoding/json/encode_test.go +++ b/src/encoding/json/encode_test.go @@ -405,6 +405,19 @@ func TestAnonymousFields(t *testing.T) { return S{s1{1, 2, s2{3, 4}}, 6} }, want: `{"MyInt1":1,"MyInt2":3}`, + }, { + // If an anonymous struct pointer field is nil, we should ignore + // the embedded fields behind it. Not properly doing so may + // result in the wrong output or reflect panics. 
+ label: "EmbeddedFieldBehindNilPointer", + makeInput: func() interface{} { + type ( + S2 struct{ Field string } + S struct{ *S2 } + ) + return S{} + }, + want: `{}`, }} for _, tt := range tests { @@ -995,3 +1008,18 @@ func TestMarshalPanic(t *testing.T) { Marshal(&marshalPanic{}) t.Error("Marshal should have panicked") } + +func TestMarshalUncommonFieldNames(t *testing.T) { + v := struct { + A0, À, Aβ int + }{} + b, err := Marshal(v) + if err != nil { + t.Fatal("Marshal:", err) + } + want := `{"A0":0,"À":0,"Aβ":0}` + got := string(b) + if got != want { + t.Fatalf("Marshal: got %s want %s", got, want) + } +} diff --git a/src/encoding/json/example_test.go b/src/encoding/json/example_test.go index 39b3231850a95..2031cba793892 100644 --- a/src/encoding/json/example_test.go +++ b/src/encoding/json/example_test.go @@ -292,3 +292,12 @@ func ExampleMarshalIndent() { // "b": 2 // } } + +func ExampleValid() { + goodJSON := `{"example": 1}` + badJSON := `{"example":2:]}}` + + fmt.Println(json.Valid([]byte(goodJSON)), json.Valid([]byte(badJSON))) + // Output: + // true false +} diff --git a/src/encoding/json/example_text_marshaling_test.go b/src/encoding/json/example_text_marshaling_test.go new file mode 100644 index 0000000000000..04c7813b26783 --- /dev/null +++ b/src/encoding/json/example_text_marshaling_test.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package json_test + +import ( + "encoding/json" + "fmt" + "log" + "strings" +) + +type Size int + +const ( + Unrecognized Size = iota + Small + Large +) + +func (s *Size) UnmarshalText(text []byte) error { + switch strings.ToLower(string(text)) { + default: + *s = Unrecognized + case "small": + *s = Small + case "large": + *s = Large + } + return nil +} + +func (s Size) MarshalText() ([]byte, error) { + var name string + switch s { + default: + name = "unrecognized" + case Small: + name = "small" + case Large: + name = "large" + } + return []byte(name), nil +} + +func Example_textMarshalJSON() { + blob := `["small","regular","large","unrecognized","small","normal","small","large"]` + var inventory []Size + if err := json.Unmarshal([]byte(blob), &inventory); err != nil { + log.Fatal(err) + } + + counts := make(map[Size]int) + for _, size := range inventory { + counts[size] += 1 + } + + fmt.Printf("Inventory Counts:\n* Small: %d\n* Large: %d\n* Unrecognized: %d\n", + counts[Small], counts[Large], counts[Unrecognized]) + + // Output: + // Inventory Counts: + // * Small: 3 + // * Large: 2 + // * Unrecognized: 3 +} diff --git a/src/encoding/json/scanner.go b/src/encoding/json/scanner.go index 9e6d482e168d3..88572245fc710 100644 --- a/src/encoding/json/scanner.go +++ b/src/encoding/json/scanner.go @@ -289,7 +289,7 @@ func stateEndValue(s *scanner, c byte) int { // such as after reading `{}` or `[1,2,3]`. // Only space characters should be seen now. func stateEndTop(s *scanner, c byte) int { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + if !isSpace(c) { // Complain about non-space byte on next call. s.error(c, "after top-level value") } diff --git a/src/encoding/json/stream.go b/src/encoding/json/stream.go index 75a4270df7de1..7d5137fbc716f 100644 --- a/src/encoding/json/stream.go +++ b/src/encoding/json/stream.go @@ -96,19 +96,19 @@ Input: // Look in the buffer for a new value. 
for i, c := range dec.buf[scanp:] { dec.scan.bytes++ - v := dec.scan.step(&dec.scan, c) - if v == scanEnd { + switch dec.scan.step(&dec.scan, c) { + case scanEnd: scanp += i break Input - } - // scanEnd is delayed one byte. - // We might block trying to get that byte from src, - // so instead invent a space byte. - if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd { - scanp += i + 1 - break Input - } - if v == scanError { + case scanEndObject, scanEndArray: + // scanEnd is delayed one byte. + // We might block trying to get that byte from src, + // so instead invent a space byte. + if stateEndValue(&dec.scan, ' ') == scanEnd { + scanp += i + 1 + break Input + } + case scanError: dec.err = dec.scan.err return 0, dec.scan.err } @@ -471,7 +471,7 @@ func (dec *Decoder) tokenError(c byte) (Token, error) { case tokenObjectComma: context = " after object key:value pair" } - return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, dec.offset()} + return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.offset()} } // More reports whether there is another element in the diff --git a/src/encoding/json/stream_test.go b/src/encoding/json/stream_test.go index 83c01d170c0b3..aaf32e0a24ce5 100644 --- a/src/encoding/json/stream_test.go +++ b/src/encoding/json/stream_test.go @@ -93,6 +93,10 @@ func TestEncoderIndent(t *testing.T) { func TestEncoderSetEscapeHTML(t *testing.T) { var c C var ct CText + var tagStruct struct { + Valid int `json:"<>&#! "` + Invalid int `json:"\\"` + } for _, tt := range []struct { name string v interface{} @@ -102,6 +106,11 @@ func TestEncoderSetEscapeHTML(t *testing.T) { {"c", c, `"\u003c\u0026\u003e"`, `"<&>"`}, {"ct", ct, `"\"\u003c\u0026\u003e\""`, `"\"<&>\""`}, {`"<&>"`, "<&>", `"\u003c\u0026\u003e"`, `"<&>"`}, + { + "tagStruct", tagStruct, + `{"\u003c\u003e\u0026#! ":0,"Invalid":0}`, + `{"<>&#! 
":0,"Invalid":0}`, + }, } { var buf bytes.Buffer enc := NewEncoder(&buf) @@ -192,10 +201,9 @@ func nlines(s string, n int) string { } func TestRawMessage(t *testing.T) { - // TODO(rsc): Should not need the * in *RawMessage var data struct { X float64 - Id *RawMessage + Id RawMessage Y float32 } const raw = `["\u0056",null]` @@ -204,8 +212,8 @@ func TestRawMessage(t *testing.T) { if err != nil { t.Fatalf("Unmarshal: %v", err) } - if string([]byte(*data.Id)) != raw { - t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw) + if string([]byte(data.Id)) != raw { + t.Fatalf("Raw mismatch: have %#q want %#q", []byte(data.Id), raw) } b, err := Marshal(&data) if err != nil { @@ -217,20 +225,22 @@ func TestRawMessage(t *testing.T) { } func TestNullRawMessage(t *testing.T) { - // TODO(rsc): Should not need the * in *RawMessage var data struct { - X float64 - Id *RawMessage - Y float32 + X float64 + Id RawMessage + IdPtr *RawMessage + Y float32 } - data.Id = new(RawMessage) - const msg = `{"X":0.1,"Id":null,"Y":0.2}` + const msg = `{"X":0.1,"Id":null,"IdPtr":null,"Y":0.2}` err := Unmarshal([]byte(msg), &data) if err != nil { t.Fatalf("Unmarshal: %v", err) } - if data.Id != nil { - t.Fatalf("Raw mismatch: have non-nil, want nil") + if want, got := "null", string(data.Id); want != got { + t.Fatalf("Raw mismatch: have %q, want %q", got, want) + } + if data.IdPtr != nil { + t.Fatalf("Raw pointer mismatch: have non-nil, want nil") } b, err := Marshal(&data) if err != nil { diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go index 6a1751621835f..204611bda02d7 100644 --- a/src/encoding/pem/pem_test.go +++ b/src/encoding/pem/pem_test.go @@ -26,6 +26,10 @@ var getLineTests = []GetLineTest{ {"abc\r\nd", "abc", "d"}, {"\nabc", "", "abc"}, {"\r\nabc", "", "abc"}, + {"abc\t \nd", "abc", "d"}, + {"\t abc\nd", "\t abc", "d"}, + {"abc\n\t d", "abc", "\t d"}, + {"abc\nd\t ", "abc", "d\t "}, } func TestGetLine(t *testing.T) { @@ -213,7 +217,9 @@ func TestFuzz(t 
*testing.T) { } testRoundtrip := func(block Block) bool { - if isBad(block.Type) { + // Reject bad Type + // Type with colons will proceed as key/val pair and cause an error. + if isBad(block.Type) || strings.Contains(block.Type, ":") { return true } for key, val := range block.Headers { diff --git a/src/encoding/xml/example_marshaling_test.go b/src/encoding/xml/example_marshaling_test.go new file mode 100644 index 0000000000000..9f9e801e74eca --- /dev/null +++ b/src/encoding/xml/example_marshaling_test.go @@ -0,0 +1,84 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "log" + "strings" +) + +type Animal int + +const ( + Unknown Animal = iota + Gopher + Zebra +) + +func (a *Animal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var s string + if err := d.DecodeElement(&s, &start); err != nil { + return err + } + switch strings.ToLower(s) { + default: + *a = Unknown + case "gopher": + *a = Gopher + case "zebra": + *a = Zebra + } + + return nil +} + +func (a Animal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + var s string + switch a { + default: + s = "unknown" + case Gopher: + s = "gopher" + case Zebra: + s = "zebra" + } + return e.EncodeElement(s, start) +} + +func Example_customMarshalXML() { + blob := ` + + gopher + armadillo + zebra + unknown + gopher + bee + gopher + zebra + ` + var zoo struct { + Animals []Animal `xml:"animal"` + } + if err := xml.Unmarshal([]byte(blob), &zoo); err != nil { + log.Fatal(err) + } + + census := make(map[Animal]int) + for _, animal := range zoo.Animals { + census[animal] += 1 + } + + fmt.Printf("Zoo Census:\n* Gophers: %d\n* Zebras: %d\n* Unknown: %d\n", + census[Gopher], census[Zebra], census[Unknown]) + + // Output: + // Zoo Census: + // * Gophers: 3 + // * Zebras: 2 + // * Unknown: 3 +} diff --git 
a/src/encoding/xml/example_text_marshaling_test.go b/src/encoding/xml/example_text_marshaling_test.go new file mode 100644 index 0000000000000..2549cb16ae54f --- /dev/null +++ b/src/encoding/xml/example_text_marshaling_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "log" + "strings" +) + +type Size int + +const ( + Unrecognized Size = iota + Small + Large +) + +func (s *Size) UnmarshalText(text []byte) error { + switch strings.ToLower(string(text)) { + default: + *s = Unrecognized + case "small": + *s = Small + case "large": + *s = Large + } + return nil +} + +func (s Size) MarshalText() ([]byte, error) { + var name string + switch s { + default: + name = "unrecognized" + case Small: + name = "small" + case Large: + name = "large" + } + return []byte(name), nil +} + +func Example_textMarshalXML() { + blob := ` + + small + regular + large + unrecognized + small + normal + small + large + ` + var inventory struct { + Sizes []Size `xml:"size"` + } + if err := xml.Unmarshal([]byte(blob), &inventory); err != nil { + log.Fatal(err) + } + + counts := make(map[Size]int) + for _, size := range inventory.Sizes { + counts[size] += 1 + } + + fmt.Printf("Inventory Counts:\n* Small: %d\n* Large: %d\n* Unrecognized: %d\n", + counts[Small], counts[Large], counts[Unrecognized]) + + // Output: + // Inventory Counts: + // * Small: 3 + // * Large: 2 + // * Unrecognized: 3 +} diff --git a/src/encoding/xml/marshal.go b/src/encoding/xml/marshal.go index d393d0610bf60..add5ece78211f 100644 --- a/src/encoding/xml/marshal.go +++ b/src/encoding/xml/marshal.go @@ -61,6 +61,10 @@ const ( // string of length zero. // - an anonymous struct field is handled as if the fields of its // value were part of the outer struct. 
+// - a field implementing Marshaler is written by calling its MarshalXML +// method. +// - a field implementing encoding.TextMarshaler is written by encoding the +// result of its MarshalText method as text. // // If a field uses a tag "a>b>c", then the element c will be nested inside // parent elements a and b. Fields that appear next to each other that name diff --git a/src/encoding/xml/read.go b/src/encoding/xml/read.go index 36c7ba6311108..12102bc804e4e 100644 --- a/src/encoding/xml/read.go +++ b/src/encoding/xml/read.go @@ -92,6 +92,11 @@ import ( // // * A struct field with tag "-" is never unmarshaled into. // +// If Unmarshal encounters a field type that implements the Unmarshaler +// interface, Unmarshal calls its UnmarshalXML method to produce the value from +// the XML element. Otherwise, if the value implements +// encoding.TextUnmarshaler, Unmarshal calls that value's UnmarshalText method. +// // Unmarshal maps an XML element to a string or []byte by saving the // concatenation of that element's character data in the string or // []byte. The saved []byte is never nil. diff --git a/src/expvar/expvar.go b/src/expvar/expvar.go index 174873a7d47c4..976b300d63779 100644 --- a/src/expvar/expvar.go +++ b/src/expvar/expvar.go @@ -137,7 +137,7 @@ func (v *Map) Init() *Map { return v } -// updateKeys updates the sorted list of keys in v.keys. +// addKey updates the sorted list of keys in v.keys. func (v *Map) addKey(key string) { v.keysMu.Lock() defer v.keysMu.Unlock() @@ -199,6 +199,17 @@ func (v *Map) AddFloat(key string, delta float64) { } } +// Deletes the given key from the map. +func (v *Map) Delete(key string) { + v.keysMu.Lock() + defer v.keysMu.Unlock() + i := sort.SearchStrings(v.keys, key) + if i < len(v.keys) && key == v.keys[i] { + v.keys = append(v.keys[:i], v.keys[i+1:]...) + v.m.Delete(key) + } +} + // Do calls f for each entry in the map. // The map is locked during the iteration, // but existing entries may be concurrently updated. 
@@ -221,7 +232,7 @@ func (v *String) Value() string { return p } -// String implements the Val interface. To get the unquoted string +// String implements the Var interface. To get the unquoted string // use Value. func (v *String) String() string { s := v.Value() diff --git a/src/expvar/expvar_test.go b/src/expvar/expvar_test.go index 728e763896ceb..804b56c1aaaa1 100644 --- a/src/expvar/expvar_test.go +++ b/src/expvar/expvar_test.go @@ -183,6 +183,43 @@ func TestMapInit(t *testing.T) { } } +func TestMapDelete(t *testing.T) { + RemoveAll() + colors := NewMap("bike-shed-colors") + + colors.Add("red", 1) + colors.Add("red", 2) + colors.Add("blue", 4) + + n := 0 + colors.Do(func(KeyValue) { n++ }) + if n != 2 { + t.Errorf("after two Add calls with distinct keys, Do should invoke f 2 times; got %v", n) + } + + colors.Delete("red") + n = 0 + colors.Do(func(KeyValue) { n++ }) + if n != 1 { + t.Errorf("removed red, Do should invoke f 1 times; got %v", n) + } + + colors.Delete("notfound") + n = 0 + colors.Do(func(KeyValue) { n++ }) + if n != 1 { + t.Errorf("attempted to remove notfound, Do should invoke f 1 times; got %v", n) + } + + colors.Delete("blue") + colors.Delete("blue") + n = 0 + colors.Do(func(KeyValue) { n++ }) + if n != 0 { + t.Errorf("all keys removed, Do should invoke f 0 times; got %v", n) + } +} + func TestMapCounter(t *testing.T) { RemoveAll() colors := NewMap("bike-shed-colors") diff --git a/src/flag/flag.go b/src/flag/flag.go index 2cd7829c1a69b..c312c62a58215 100644 --- a/src/flag/flag.go +++ b/src/flag/flag.go @@ -83,6 +83,28 @@ import ( // but no such flag is defined. var ErrHelp = errors.New("flag: help requested") +// errParse is returned by Set if a flag's value fails to parse, such as with an invalid integer for Int. +// It then gets wrapped through failf to provide more information. +var errParse = errors.New("parse error") + +// errRange is returned by Set if a flag's value is out of range. 
+// It then gets wrapped through failf to provide more information. +var errRange = errors.New("value out of range") + +func numError(err error) error { + ne, ok := err.(*strconv.NumError) + if !ok { + return err + } + if ne.Err == strconv.ErrSyntax { + return errParse + } + if ne.Err == strconv.ErrRange { + return errRange + } + return err +} + // -- bool Value type boolValue bool @@ -93,6 +115,9 @@ func newBoolValue(val bool, p *bool) *boolValue { func (b *boolValue) Set(s string) error { v, err := strconv.ParseBool(s) + if err != nil { + err = errParse + } *b = boolValue(v) return err } @@ -120,6 +145,9 @@ func newIntValue(val int, p *int) *intValue { func (i *intValue) Set(s string) error { v, err := strconv.ParseInt(s, 0, strconv.IntSize) + if err != nil { + err = numError(err) + } *i = intValue(v) return err } @@ -138,6 +166,9 @@ func newInt64Value(val int64, p *int64) *int64Value { func (i *int64Value) Set(s string) error { v, err := strconv.ParseInt(s, 0, 64) + if err != nil { + err = numError(err) + } *i = int64Value(v) return err } @@ -156,6 +187,9 @@ func newUintValue(val uint, p *uint) *uintValue { func (i *uintValue) Set(s string) error { v, err := strconv.ParseUint(s, 0, strconv.IntSize) + if err != nil { + err = numError(err) + } *i = uintValue(v) return err } @@ -174,6 +208,9 @@ func newUint64Value(val uint64, p *uint64) *uint64Value { func (i *uint64Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 64) + if err != nil { + err = numError(err) + } *i = uint64Value(v) return err } @@ -209,6 +246,9 @@ func newFloat64Value(val float64, p *float64) *float64Value { func (f *float64Value) Set(s string) error { v, err := strconv.ParseFloat(s, 64) + if err != nil { + err = numError(err) + } *f = float64Value(v) return err } @@ -227,6 +267,9 @@ func newDurationValue(val time.Duration, p *time.Duration) *durationValue { func (d *durationValue) Set(s string) error { v, err := time.ParseDuration(s) + if err != nil { + err = errParse + } *d = 
durationValue(v) return err } @@ -472,7 +515,7 @@ func (f *FlagSet) PrintDefaults() { // for both 4- and 8-space tab stops. s += "\n \t" } - s += strings.Replace(usage, "\n", "\n \t", -1) + s += strings.ReplaceAll(usage, "\n", "\n \t") if !isZeroValue(flag, flag.DefValue) { if _, ok := flag.Value.(*stringValue); ok { @@ -505,6 +548,8 @@ func (f *FlagSet) PrintDefaults() { // the output will be // -I directory // search directory for include files. +// +// To change the destination for flag messages, call CommandLine.SetOutput. func PrintDefaults() { CommandLine.PrintDefaults() } diff --git a/src/flag/flag_test.go b/src/flag/flag_test.go index c7f0c07d4489b..0d9491c02089d 100644 --- a/src/flag/flag_test.go +++ b/src/flag/flag_test.go @@ -9,6 +9,7 @@ import ( . "flag" "fmt" "io" + "io/ioutil" "os" "sort" "strconv" @@ -491,3 +492,55 @@ func TestGetters(t *testing.T) { t.Errorf("unexpected output: got %v, expected %v", fs.Output(), expectedOutput) } } + +func TestParseError(t *testing.T) { + for _, typ := range []string{"bool", "int", "int64", "uint", "uint64", "float64", "duration"} { + fs := NewFlagSet("parse error test", ContinueOnError) + fs.SetOutput(ioutil.Discard) + _ = fs.Bool("bool", false, "") + _ = fs.Int("int", 0, "") + _ = fs.Int64("int64", 0, "") + _ = fs.Uint("uint", 0, "") + _ = fs.Uint64("uint64", 0, "") + _ = fs.Float64("float64", 0, "") + _ = fs.Duration("duration", 0, "") + // Strings cannot give errors. + args := []string{"-" + typ + "=x"} + err := fs.Parse(args) // x is not a valid setting for any flag. 
+ if err == nil { + t.Errorf("Parse(%q)=%v; expected parse error", args, err) + continue + } + if !strings.Contains(err.Error(), "invalid") || !strings.Contains(err.Error(), "parse error") { + t.Errorf("Parse(%q)=%v; expected parse error", args, err) + } + } +} + +func TestRangeError(t *testing.T) { + bad := []string{ + "-int=123456789012345678901", + "-int64=123456789012345678901", + "-uint=123456789012345678901", + "-uint64=123456789012345678901", + "-float64=1e1000", + } + for _, arg := range bad { + fs := NewFlagSet("parse error test", ContinueOnError) + fs.SetOutput(ioutil.Discard) + _ = fs.Int("int", 0, "") + _ = fs.Int64("int64", 0, "") + _ = fs.Uint("uint", 0, "") + _ = fs.Uint64("uint64", 0, "") + _ = fs.Float64("float64", 0, "") + // Strings cannot give errors, and bools and durations do not return strconv.NumError. + err := fs.Parse([]string{arg}) + if err == nil { + t.Errorf("Parse(%q)=%v; expected range error", arg, err) + continue + } + if !strings.Contains(err.Error(), "invalid") || !strings.Contains(err.Error(), "value out of range") { + t.Errorf("Parse(%q)=%v; expected range error", arg, err) + } + } +} diff --git a/src/fmt/example_test.go b/src/fmt/example_test.go index c77e78809cc56..56ce47f83639a 100644 --- a/src/fmt/example_test.go +++ b/src/fmt/example_test.go @@ -6,24 +6,360 @@ package fmt_test import ( "fmt" + "io" + "math" + "os" + "strings" + "time" ) -// Animal has a Name and an Age to represent an animal. -type Animal struct { - Name string - Age uint +// The Errorf function lets us use formatting features +// to create descriptive error messages. 
+func ExampleErrorf() { + const name, id = "bueller", 17 + err := fmt.Errorf("user %q (id %d) not found", name, id) + fmt.Println(err.Error()) + + // Output: user "bueller" (id 17) not found +} + +func ExampleFscanf() { + var ( + i int + b bool + s string + ) + r := strings.NewReader("5 true gophers") + n, err := fmt.Fscanf(r, "%d %t %s", &i, &b, &s) + if err != nil { + fmt.Fprintf(os.Stderr, "Fscanf: %v\n", err) + } + fmt.Println(i, b, s) + fmt.Println(n) + // Output: + // 5 true gophers + // 3 +} + +func ExampleFscanln() { + s := `dmr 1771 1.61803398875 + ken 271828 3.14159` + r := strings.NewReader(s) + var a string + var b int + var c float64 + for { + n, err := fmt.Fscanln(r, &a, &b, &c) + if err == io.EOF { + break + } + if err != nil { + panic(err) + } + fmt.Printf("%d: %s, %d, %f\n", n, a, b, c) + } + // Output: + // 3: dmr, 1771, 1.618034 + // 3: ken, 271828, 3.141590 +} + +func ExampleSscanf() { + var name string + var age int + n, err := fmt.Sscanf("Kim is 22 years old", "%s is %d years old", &name, &age) + if err != nil { + panic(err) + } + fmt.Printf("%d: %s, %d\n", n, name, age) + + // Output: + // 2: Kim, 22 +} + +func ExamplePrint() { + const name, age = "Kim", 22 + fmt.Print(name, " is ", age, " years old.\n") + + // It is conventional not to worry about any + // error returned by Print. + + // Output: + // Kim is 22 years old. +} + +func ExamplePrintln() { + const name, age = "Kim", 22 + fmt.Println(name, "is", age, "years old.") + + // It is conventional not to worry about any + // error returned by Println. + + // Output: + // Kim is 22 years old. +} + +func ExamplePrintf() { + const name, age = "Kim", 22 + fmt.Printf("%s is %d years old.\n", name, age) + + // It is conventional not to worry about any + // error returned by Printf. + + // Output: + // Kim is 22 years old. 
+} + +func ExampleSprint() { + const name, age = "Kim", 22 + s := fmt.Sprint(name, " is ", age, " years old.\n") + + io.WriteString(os.Stdout, s) // Ignoring error for simplicity. + + // Output: + // Kim is 22 years old. +} + +func ExampleSprintln() { + const name, age = "Kim", 22 + s := fmt.Sprintln(name, "is", age, "years old.") + + io.WriteString(os.Stdout, s) // Ignoring error for simplicity. + + // Output: + // Kim is 22 years old. +} + +func ExampleSprintf() { + const name, age = "Kim", 22 + s := fmt.Sprintf("%s is %d years old.\n", name, age) + + io.WriteString(os.Stdout, s) // Ignoring error for simplicity. + + // Output: + // Kim is 22 years old. } -// String makes Animal satisfy the Stringer interface. -func (a Animal) String() string { - return fmt.Sprintf("%v (%d)", a.Name, a.Age) +func ExampleFprint() { + const name, age = "Kim", 22 + n, err := fmt.Fprint(os.Stdout, name, " is ", age, " years old.\n") + + // The n and err return values from Fprint are + // those returned by the underlying io.Writer. + if err != nil { + fmt.Fprintf(os.Stderr, "Fprint: %v\n", err) + } + fmt.Print(n, " bytes written.\n") + + // Output: + // Kim is 22 years old. + // 21 bytes written. } -func ExampleStringer() { - a := Animal{ - Name: "Gopher", - Age: 2, +func ExampleFprintln() { + const name, age = "Kim", 22 + n, err := fmt.Fprintln(os.Stdout, name, "is", age, "years old.") + + // The n and err return values from Fprintln are + // those returned by the underlying io.Writer. + if err != nil { + fmt.Fprintf(os.Stderr, "Fprintln: %v\n", err) + } + fmt.Println(n, "bytes written.") + + // Output: + // Kim is 22 years old. + // 21 bytes written. +} + +func ExampleFprintf() { + const name, age = "Kim", 22 + n, err := fmt.Fprintf(os.Stdout, "%s is %d years old.\n", name, age) + + // The n and err return values from Fprintf are + // those returned by the underlying io.Writer. 
+ if err != nil { + fmt.Fprintf(os.Stderr, "Fprintf: %v\n", err) } - fmt.Println(a) - // Output: Gopher (2) + fmt.Printf("%d bytes written.\n", n) + + // Output: + // Kim is 22 years old. + // 21 bytes written. +} + +// Print, Println, and Printf lay out their arguments differently. In this example +// we can compare their behaviors. Println always adds blanks between the items it +// prints, while Print adds blanks only between non-string arguments and Printf +// does exactly what it is told. +// Sprint, Sprintln, Sprintf, Fprint, Fprintln, and Fprintf behave the same as +// their corresponding Print, Println, and Printf functions shown here. +func Example_printers() { + a, b := 3.0, 4.0 + h := math.Hypot(a, b) + + // Print inserts blanks between arguments when neither is a string. + // It does not add a newline to the output, so we add one explicitly. + fmt.Print("The vector (", a, b, ") has length ", h, ".\n") + + // Println always inserts spaces between its arguments, + // so it cannot be used to produce the same output as Print in this case; + // its output has extra spaces. + // Also, Println always adds a newline to the output. + fmt.Println("The vector (", a, b, ") has length", h, ".") + + // Printf provides complete control but is more complex to use. + // It does not add a newline to the output, so we add one explicitly + // at the end of the format specifier string. + fmt.Printf("The vector (%g %g) has length %g.\n", a, b, h) + + // Output: + // The vector (3 4) has length 5. + // The vector ( 3 4 ) has length 5 . + // The vector (3 4) has length 5. +} + +// These examples demonstrate the basics of printing using a format string. Printf, +// Sprintf, and Fprintf all take a format string that specifies how to format the +// subsequent arguments. For example, %d (we call that a 'verb') says to print the +// corresponding argument, which must be an integer (or something containing an +// integer, such as a slice of ints) in decimal. 
The verb %v ('v' for 'value') +// always formats the argument in its default form, just how Print or Println would +// show it. The special verb %T ('T' for 'Type') prints the type of the argument +// rather than its value. The examples are not exhaustive; see the package comment +// for all the details. +func Example_formats() { + // A basic set of examples showing that %v is the default format, in this + // case decimal for integers, which can be explicitly requested with %d; + // the output is just what Println generates. + integer := 23 + // Each of these prints "23" (without the quotes). + fmt.Println(integer) + fmt.Printf("%v\n", integer) + fmt.Printf("%d\n", integer) + + // The special verb %T shows the type of an item rather than its value. + fmt.Printf("%T %T\n", integer, &integer) + // Result: int *int + + // Println(x) is the same as Printf("%v\n", x) so we will use only Printf + // in the following examples. Each one demonstrates how to format values of + // a particular type, such as integers or strings. We start each format + // string with %v to show the default output and follow that with one or + // more custom formats. + + // Booleans print as "true" or "false" with %v or %t. + truth := true + fmt.Printf("%v %t\n", truth, truth) + // Result: true true + + // Integers print as decimals with %v and %d, + // or in hex with %x, octal with %o, or binary with %b. + answer := 42 + fmt.Printf("%v %d %x %o %b\n", answer, answer, answer, answer, answer) + // Result: 42 42 2a 52 101010 + + // Floats have multiple formats: %v and %g print a compact representation, + // while %f prints a decimal point and %e uses exponential notation. The + // format %6.2f used here shows how to set the width and precision to + // control the appearance of a floating-point value. In this instance, 6 is + // the total width of the printed text for the value (note the extra spaces + // in the output) and 2 is the number of decimal places to show. 
+ pi := math.Pi + fmt.Printf("%v %g %.2f (%6.2f) %e\n", pi, pi, pi, pi, pi) + // Result: 3.141592653589793 3.141592653589793 3.14 ( 3.14) 3.141593e+00 + + // Complex numbers format as parenthesized pairs of floats, with an 'i' + // after the imaginary part. + point := 110.7 + 22.5i + fmt.Printf("%v %g %.2f %.2e\n", point, point, point, point) + // Result: (110.7+22.5i) (110.7+22.5i) (110.70+22.50i) (1.11e+02+2.25e+01i) + + // Runes are integers but when printed with %c show the character with that + // Unicode value. The %q verb shows them as quoted characters, %U as a + // hex Unicode code point, and %#U as both a code point and a quoted + // printable form if the rune is printable. + smile := '😀' + fmt.Printf("%v %d %c %q %U %#U\n", smile, smile, smile, smile, smile, smile) + // Result: 128512 128512 😀 '😀' U+1F600 U+1F600 '😀' + + // Strings are formatted with %v and %s as-is, with %q as quoted strings, + // and %#q as backquoted strings. + placeholders := `foo "bar"` + fmt.Printf("%v %s %q %#q\n", placeholders, placeholders, placeholders, placeholders) + // Result: foo "bar" foo "bar" "foo \"bar\"" `foo "bar"` + + // Maps formatted with %v show keys and values in their default formats. + // The %#v form (the # is called a "flag" in this context) shows the map in + // the Go source format. Maps are printed in a consistent order, sorted + // by the values of the keys. + isLegume := map[string]bool{ + "peanut": true, + "dachshund": false, + } + fmt.Printf("%v %#v\n", isLegume, isLegume) + // Result: map[dachshund:false peanut:true] map[string]bool{"dachshund":false, "peanut":true} + + // Structs formatted with %v show field values in their default formats. + // The %+v form shows the fields by name, while %#v formats the struct in + // Go source format. 
+ person := struct { + Name string + Age int + }{"Kim", 22} + fmt.Printf("%v %+v %#v\n", person, person, person) + // Result: {Kim 22} {Name:Kim Age:22} struct { Name string; Age int }{Name:"Kim", Age:22} + + // The default format for a pointer shows the underlying value preceded by + // an ampersand. The %p verb prints the pointer value in hex. We use a + // typed nil for the argument to %p here because the value of any non-nil + // pointer would change from run to run; run the commented-out Printf + // call yourself to see. + pointer := &person + fmt.Printf("%v %p\n", pointer, (*int)(nil)) + // Result: &{Kim 22} 0x0 + // fmt.Printf("%v %p\n", pointer, pointer) + // Result: &{Kim 22} 0x010203 // See comment above. + + // Arrays and slices are formatted by applying the format to each element. + greats := [5]string{"Katano", "Kobayashi", "Kurosawa", "Miyazaki", "Ozu"} + fmt.Printf("%v %q\n", greats, greats) + // Result: [Katano Kobayashi Kurosawa Miyazaki Ozu] ["Katano" "Kobayashi" "Kurosawa" "Miyazaki" "Ozu"] + + kGreats := greats[:3] + fmt.Printf("%v %q %#v\n", kGreats, kGreats, kGreats) + // Result: [Katano Kobayashi Kurosawa] ["Katano" "Kobayashi" "Kurosawa"] []string{"Katano", "Kobayashi", "Kurosawa"} + + // Byte slices are special. Integer verbs like %d print the elements in + // that format. The %s and %q forms treat the slice like a string. The %x + // verb has a special form with the space flag that puts a space between + // the bytes. + cmd := []byte("a⌘") + fmt.Printf("%v %d %s %q %x % x\n", cmd, cmd, cmd, cmd, cmd, cmd) + // Result: [97 226 140 152] [97 226 140 152] a⌘ "a⌘" 61e28c98 61 e2 8c 98 + + // Types that implement Stringer are printed the same as strings. Because + // Stringers return a string, we can print them using a string-specific + // verb such as %q. + now := time.Unix(123456789, 0).UTC() // time.Time implements fmt.Stringer. 
+ fmt.Printf("%v %q\n", now, now) + // Result: 1973-11-29 21:33:09 +0000 UTC "1973-11-29 21:33:09 +0000 UTC" + + // Output: + // 23 + // 23 + // 23 + // int *int + // true true + // 42 42 2a 52 101010 + // 3.141592653589793 3.141592653589793 3.14 ( 3.14) 3.141593e+00 + // (110.7+22.5i) (110.7+22.5i) (110.70+22.50i) (1.11e+02+2.25e+01i) + // 128512 128512 😀 '😀' U+1F600 U+1F600 '😀' + // foo "bar" foo "bar" "foo \"bar\"" `foo "bar"` + // map[dachshund:false peanut:true] map[string]bool{"dachshund":false, "peanut":true} + // {Kim 22} {Name:Kim Age:22} struct { Name string; Age int }{Name:"Kim", Age:22} + // &{Kim 22} 0x0 + // [Katano Kobayashi Kurosawa Miyazaki Ozu] ["Katano" "Kobayashi" "Kurosawa" "Miyazaki" "Ozu"] + // [Katano Kobayashi Kurosawa] ["Katano" "Kobayashi" "Kurosawa"] []string{"Katano", "Kobayashi", "Kurosawa"} + // [97 226 140 152] [97 226 140 152] a⌘ "a⌘" 61e28c98 61 e2 8c 98 + // 1973-11-29 21:33:09 +0000 UTC "1973-11-29 21:33:09 +0000 UTC" } diff --git a/src/fmt/fmt_test.go b/src/fmt/fmt_test.go index 08e46b4e93542..068c2620a8a20 100644 --- a/src/fmt/fmt_test.go +++ b/src/fmt/fmt_test.go @@ -131,15 +131,10 @@ func (byteFormatter) Format(f State, _ rune) { var byteFormatterSlice = []byteFormatter{'h', 'e', 'l', 'l', 'o'} -// Copy of io.stringWriter interface used by writeStringFormatter for type assertion. 
-type stringWriter interface { - WriteString(s string) (n int, err error) -} - type writeStringFormatter string func (sf writeStringFormatter) Format(f State, c rune) { - if sw, ok := f.(stringWriter); ok { + if sw, ok := f.(io.StringWriter); ok { sw.WriteString("***" + string(sf) + "***") } } @@ -303,20 +298,30 @@ var fmtTests = []struct { // width {"%5s", "abc", " abc"}, + {"%5s", []byte("abc"), " abc"}, {"%2s", "\u263a", " ☺"}, + {"%2s", []byte("\u263a"), " ☺"}, {"%-5s", "abc", "abc "}, - {"%-8q", "abc", `"abc" `}, + {"%-5s", []byte("abc"), "abc "}, {"%05s", "abc", "00abc"}, - {"%08q", "abc", `000"abc"`}, + {"%05s", []byte("abc"), "00abc"}, {"%5s", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"}, + {"%5s", []byte("abcdefghijklmnopqrstuvwxyz"), "abcdefghijklmnopqrstuvwxyz"}, {"%.5s", "abcdefghijklmnopqrstuvwxyz", "abcde"}, + {"%.5s", []byte("abcdefghijklmnopqrstuvwxyz"), "abcde"}, {"%.0s", "日本語日本語", ""}, + {"%.0s", []byte("日本語日本語"), ""}, {"%.5s", "日本語日本語", "日本語日本"}, - {"%.10s", "日本語日本語", "日本語日本語"}, {"%.5s", []byte("日本語日本語"), "日本語日本"}, + {"%.10s", "日本語日本語", "日本語日本語"}, + {"%.10s", []byte("日本語日本語"), "日本語日本語"}, + {"%08q", "abc", `000"abc"`}, + {"%08q", []byte("abc"), `000"abc"`}, + {"%-8q", "abc", `"abc" `}, + {"%-8q", []byte("abc"), `"abc" `}, {"%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`}, - {"%.5x", "abcdefghijklmnopqrstuvwxyz", "6162636465"}, {"%.5q", []byte("abcdefghijklmnopqrstuvwxyz"), `"abcde"`}, + {"%.5x", "abcdefghijklmnopqrstuvwxyz", "6162636465"}, {"%.5x", []byte("abcdefghijklmnopqrstuvwxyz"), "6162636465"}, {"%.3q", "日本語日本語", `"日本語"`}, {"%.3q", []byte("日本語日本語"), `"日本語"`}, @@ -325,6 +330,7 @@ var fmtTests = []struct { {"%.1x", "日本語", "e6"}, {"%.1X", []byte("日本語"), "E6"}, {"%10.1q", "日本語日本語", ` "日"`}, + {"%10.1q", []byte("日本語日本語"), ` "日"`}, {"%10v", nil, " "}, {"%-10v", nil, " "}, @@ -690,6 +696,13 @@ var fmtTests = []struct { {"%#v", []int32(nil), "[]int32(nil)"}, {"%#v", 1.2345678, "1.2345678"}, {"%#v", float32(1.2345678), "1.2345678"}, + 
+ // Whole number floats are printed without decimals. See Issue 27634. + {"%#v", 1.0, "1"}, + {"%#v", 1000000.0, "1e+06"}, + {"%#v", float32(1.0), "1"}, + {"%#v", float32(1000000.0), "1e+06"}, + // Only print []byte and []uint8 as type []byte if they appear at the top level. {"%#v", []byte(nil), "[]byte(nil)"}, {"%#v", []uint8(nil), "[]byte(nil)"}, @@ -861,13 +874,8 @@ var fmtTests = []struct { // Extra argument errors should format without flags set. {"%010.2", "12345", "%!(NOVERB)%!(EXTRA string=12345)"}, - // The "" show up because maps are printed by - // first obtaining a list of keys and then looking up - // each key. Since NaNs can be map keys but cannot - // be fetched directly, the lookup fails and returns a - // zero reflect.Value, which formats as . - // This test is just to check that it shows the two NaNs at all. - {"%v", map[float64]int{NaN: 1, NaN: 2}, "map[NaN: NaN:]"}, + // Test that maps with non-reflexive keys print all keys and values. + {"%v", map[float64]int{NaN: 1, NaN: 1}, "map[NaN:1 NaN:1]"}, // Comparison of padding rules with C printf. /* @@ -1033,7 +1041,7 @@ var fmtTests = []struct { {"%☠", &[]interface{}{I(1), G(2)}, "&[%!☠(fmt_test.I=1) %!☠(fmt_test.G=2)]"}, {"%☠", SI{&[]interface{}{I(1), G(2)}}, "{%!☠(*[]interface {}=&[1 2])}"}, {"%☠", reflect.Value{}, ""}, - {"%☠", map[float64]int{NaN: 1}, "map[%!☠(float64=NaN):%!☠()]"}, + {"%☠", map[float64]int{NaN: 1}, "map[%!☠(float64=NaN):%!☠(int=1)]"}, } // zeroFill generates zero-filled strings of the specified width. 
The length @@ -1214,7 +1222,16 @@ func BenchmarkSprintfString(b *testing.B) { func BenchmarkSprintfTruncateString(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - Sprintf("%.3s", "日本語日本語日本語") + Sprintf("%.3s", "日本語日本語日本語日本語") + } + }) +} + +func BenchmarkSprintfTruncateBytes(b *testing.B) { + var bytes interface{} = []byte("日本語日本語日本語日本語") + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + Sprintf("%.3s", bytes) } }) } @@ -1666,18 +1683,18 @@ var panictests = []struct { }{ // String {"%s", (*PanicS)(nil), ""}, // nil pointer special case - {"%s", PanicS{io.ErrUnexpectedEOF}, "%!s(PANIC=unexpected EOF)"}, - {"%s", PanicS{3}, "%!s(PANIC=3)"}, + {"%s", PanicS{io.ErrUnexpectedEOF}, "%!s(PANIC=String method: unexpected EOF)"}, + {"%s", PanicS{3}, "%!s(PANIC=String method: 3)"}, // GoString {"%#v", (*PanicGo)(nil), ""}, // nil pointer special case - {"%#v", PanicGo{io.ErrUnexpectedEOF}, "%!v(PANIC=unexpected EOF)"}, - {"%#v", PanicGo{3}, "%!v(PANIC=3)"}, + {"%#v", PanicGo{io.ErrUnexpectedEOF}, "%!v(PANIC=GoString method: unexpected EOF)"}, + {"%#v", PanicGo{3}, "%!v(PANIC=GoString method: 3)"}, // Issue 18282. catchPanic should not clear fmtFlags permanently. 
- {"%#v", []interface{}{PanicGo{3}, PanicGo{3}}, "[]interface {}{%!v(PANIC=3), %!v(PANIC=3)}"}, + {"%#v", []interface{}{PanicGo{3}, PanicGo{3}}, "[]interface {}{%!v(PANIC=GoString method: 3), %!v(PANIC=GoString method: 3)}"}, // Format {"%s", (*PanicF)(nil), ""}, // nil pointer special case - {"%s", PanicF{io.ErrUnexpectedEOF}, "%!s(PANIC=unexpected EOF)"}, - {"%s", PanicF{3}, "%!s(PANIC=3)"}, + {"%s", PanicF{io.ErrUnexpectedEOF}, "%!s(PANIC=Format method: unexpected EOF)"}, + {"%s", PanicF{3}, "%!s(PANIC=Format method: 3)"}, } func TestPanics(t *testing.T) { diff --git a/src/fmt/format.go b/src/fmt/format.go index 91103f2c07f18..d6da8aed1e3aa 100644 --- a/src/fmt/format.go +++ b/src/fmt/format.go @@ -308,8 +308,8 @@ func (f *fmt) fmtInteger(u uint64, base int, isSigned bool, digits string) { f.zero = oldZero } -// truncate truncates the string to the specified precision, if present. -func (f *fmt) truncate(s string) string { +// truncate truncates the string s to the specified precision, if present. +func (f *fmt) truncateString(s string) string { if f.precPresent { n := f.prec for i := range s { @@ -322,12 +322,37 @@ func (f *fmt) truncate(s string) string { return s } +// truncate truncates the byte slice b as a string of the specified precision, if present. +func (f *fmt) truncate(b []byte) []byte { + if f.precPresent { + n := f.prec + for i := 0; i < len(b); { + n-- + if n < 0 { + return b[:i] + } + wid := 1 + if b[i] >= utf8.RuneSelf { + _, wid = utf8.DecodeRune(b[i:]) + } + i += wid + } + } + return b +} + // fmtS formats a string. func (f *fmt) fmtS(s string) { - s = f.truncate(s) + s = f.truncateString(s) f.padString(s) } +// fmtBs formats the byte slice b as if it was formatted as string with fmtS. +func (f *fmt) fmtBs(b []byte) { + b = f.truncate(b) + f.pad(b) +} + // fmtSbx formats a string or byte slice as a hexadecimal encoding of its bytes. 
func (f *fmt) fmtSbx(s string, b []byte, digits string) { length := len(b) @@ -408,7 +433,7 @@ func (f *fmt) fmtBx(b []byte, digits string) { // If f.sharp is set a raw (backquoted) string may be returned instead // if the string does not contain any control characters other than tab. func (f *fmt) fmtQ(s string) { - s = f.truncate(s) + s = f.truncateString(s) if f.sharp && strconv.CanBackquote(s) { f.padString("`" + s + "`") return diff --git a/src/fmt/gostringer_example_test.go b/src/fmt/gostringer_example_test.go new file mode 100644 index 0000000000000..ab19ee3b94d2e --- /dev/null +++ b/src/fmt/gostringer_example_test.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fmt_test + +import ( + "fmt" +) + +// Address has a City, State and a Country. +type Address struct { + City string + State string + Country string +} + +// Person has a Name, Age and Address. +type Person struct { + Name string + Age uint + Addr *Address +} + +// GoString makes Person satisfy the GoStringer interface. +// The return value is valid Go code that can be used to reproduce the Person struct. 
+func (p Person) GoString() string { + if p.Addr != nil { + return fmt.Sprintf("Person{Name: %q, Age: %d, Addr: &Address{City: %q, State: %q, Country: %q}}", p.Name, int(p.Age), p.Addr.City, p.Addr.State, p.Addr.Country) + } + return fmt.Sprintf("Person{Name: %q, Age: %d}", p.Name, int(p.Age)) +} + +func ExampleGoStringer() { + p1 := Person{ + Name: "Warren", + Age: 31, + Addr: &Address{ + City: "Denver", + State: "CO", + Country: "U.S.A.", + }, + } + // If GoString() wasn't implemented, the output of `fmt.Printf("%#v", p1)` would be similar to + // Person{Name:"Warren", Age:0x1f, Addr:(*main.Address)(0x10448240)} + fmt.Printf("%#v\n", p1) + + p2 := Person{ + Name: "Theia", + Age: 4, + } + // If GoString() wasn't implemented, the output of `fmt.Printf("%#v", p2)` would be similar to + // Person{Name:"Theia", Age:0x4, Addr:(*main.Address)(nil)} + fmt.Printf("%#v\n", p2) + + // Output: + // Person{Name: "Warren", Age: 31, Addr: &Address{City: "Denver", State: "CO", Country: "U.S.A."}} + // Person{Name: "Theia", Age: 4} +} diff --git a/src/fmt/print.go b/src/fmt/print.go index f67f80560371a..42fcd8b979b01 100644 --- a/src/fmt/print.go +++ b/src/fmt/print.go @@ -6,6 +6,7 @@ package fmt import ( "errors" + "internal/fmtsort" "io" "os" "reflect" @@ -139,6 +140,16 @@ func newPrinter() *pp { // free saves used pp structs in ppFree; avoids an allocation per invocation. func (p *pp) free() { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. 
+ // + // See https://golang.org/issue/23199 + if cap(p.buf) > 64<<10 { + return + } + p.buf = p.buf[:0] p.arg = nil p.value = reflect.Value{} @@ -477,7 +488,7 @@ func (p *pp) fmtBytes(v []byte, verb rune, typeString string) { p.buf.WriteByte(']') } case 's': - p.fmt.fmtS(string(v)) + p.fmt.fmtBs(v) case 'x': p.fmt.fmtBx(v, ldigits) case 'X': @@ -527,7 +538,7 @@ func (p *pp) fmtPointer(value reflect.Value, verb rune) { } } -func (p *pp) catchPanic(arg interface{}, verb rune) { +func (p *pp) catchPanic(arg interface{}, verb rune, method string) { if err := recover(); err != nil { // If it's a nil pointer, just say "". The likeliest causes are a // Stringer that fails to guard against nil or a nil pointer for a @@ -550,6 +561,8 @@ func (p *pp) catchPanic(arg interface{}, verb rune) { p.buf.WriteString(percentBangString) p.buf.WriteRune(verb) p.buf.WriteString(panicString) + p.buf.WriteString(method) + p.buf.WriteString(" method: ") p.panicking = true p.printArg(err, 'v') p.panicking = false @@ -566,7 +579,7 @@ func (p *pp) handleMethods(verb rune) (handled bool) { // Is it a Formatter? if formatter, ok := p.arg.(Formatter); ok { handled = true - defer p.catchPanic(p.arg, verb) + defer p.catchPanic(p.arg, verb, "Format") formatter.Format(p, verb) return } @@ -575,7 +588,7 @@ func (p *pp) handleMethods(verb rune) (handled bool) { if p.fmt.sharpV { if stringer, ok := p.arg.(GoStringer); ok { handled = true - defer p.catchPanic(p.arg, verb) + defer p.catchPanic(p.arg, verb, "GoString") // Print the result of GoString unadorned. 
p.fmt.fmtS(stringer.GoString()) return @@ -593,13 +606,13 @@ func (p *pp) handleMethods(verb rune) (handled bool) { switch v := p.arg.(type) { case error: handled = true - defer p.catchPanic(p.arg, verb) + defer p.catchPanic(p.arg, verb, "Error") p.fmtString(v.Error(), verb) return case Stringer: handled = true - defer p.catchPanic(p.arg, verb) + defer p.catchPanic(p.arg, verb, "String") p.fmtString(v.String(), verb) return } @@ -743,8 +756,8 @@ func (p *pp) printValue(value reflect.Value, verb rune, depth int) { } else { p.buf.WriteString(mapString) } - keys := f.MapKeys() - for i, key := range keys { + sorted := fmtsort.Sort(f) + for i, key := range sorted.Key { if i > 0 { if p.fmt.sharpV { p.buf.WriteString(commaSpaceString) @@ -754,7 +767,7 @@ func (p *pp) printValue(value reflect.Value, verb rune, depth int) { } p.printValue(key, verb, depth+1) p.buf.WriteByte(':') - p.printValue(f.MapIndex(key), verb, depth+1) + p.printValue(sorted.Value[i], verb, depth+1) } if p.fmt.sharpV { p.buf.WriteByte('}') diff --git a/src/fmt/stringer_example_test.go b/src/fmt/stringer_example_test.go new file mode 100644 index 0000000000000..c77e78809cc56 --- /dev/null +++ b/src/fmt/stringer_example_test.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fmt_test + +import ( + "fmt" +) + +// Animal has a Name and an Age to represent an animal. +type Animal struct { + Name string + Age uint +} + +// String makes Animal satisfy the Stringer interface. 
+func (a Animal) String() string { + return fmt.Sprintf("%v (%d)", a.Name, a.Age) +} + +func ExampleStringer() { + a := Animal{ + Name: "Gopher", + Age: 2, + } + fmt.Println(a) + // Output: Gopher (2) +} diff --git a/src/go/build/build.go b/src/go/build/build.go index b68a712a7daba..0fa67201f88dc 100644 --- a/src/go/build/build.go +++ b/src/go/build/build.go @@ -12,6 +12,7 @@ import ( "go/doc" "go/parser" "go/token" + "internal/goroot" "io" "io/ioutil" "log" @@ -33,7 +34,7 @@ type Context struct { GOOS string // target operating system GOROOT string // Go root GOPATH string // Go path - CgoEnabled bool // whether cgo can be used + CgoEnabled bool // whether cgo files are included UseAllFiles bool // use files regardless of +build lines, file names Compiler string // compiler to assume when computing target paths @@ -42,8 +43,10 @@ type Context struct { // Clients creating a new context may customize BuildTags, which // defaults to empty, but it is usually an error to customize ReleaseTags, // which defaults to the list of Go releases the current release is compatible with. + // BuildTags is not set for the Default build Context. // In addition to the BuildTags and ReleaseTags, build constraints // consider the values of GOARCH and GOOS as satisfied tags. + // The last element in ReleaseTags is assumed to be the current release. BuildTags []string ReleaseTags []string @@ -295,7 +298,8 @@ func defaultContext() Context { // say "+build go1.x", and code that should only be built before Go 1.x // (perhaps it is the stub to use in that case) should say "+build !go1.x". // NOTE: If you add to this list, also update the doc comment in doc.go. - const version = 11 // go1.11 + // NOTE: The last element in ReleaseTags should be the current release. 
+ const version = 12 // go1.12 for i := 1; i <= version; i++ { c.ReleaseTags = append(c.ReleaseTags, "go1."+strconv.Itoa(i)) } @@ -543,7 +547,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa inTestdata := func(sub string) bool { return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata" } - if ctxt.GOROOT != "" && ctxt.Compiler != "gccgo" { + if ctxt.GOROOT != "" { root := ctxt.joinPath(ctxt.GOROOT, "src") if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) { p.Goroot = true @@ -656,7 +660,7 @@ func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Packa } tried.goroot = dir } - if ctxt.Compiler == "gccgo" && isStandardPackage(path) { + if ctxt.Compiler == "gccgo" && goroot.IsStandardPackage(ctxt.GOROOT, ctxt.Compiler, path) { p.Dir = ctxt.joinPath(ctxt.GOROOT, "src", path) p.Goroot = true p.Root = ctxt.GOROOT @@ -714,6 +718,11 @@ Found: // non-nil *Package returned when an error occurs. // We need to do this before we return early on FindOnly flag. if IsLocalImport(path) && !ctxt.isDir(p.Dir) { + if ctxt.Compiler == "gccgo" && p.Goroot { + // gccgo has no sources for GOROOT packages. 
+ return p, nil + } + // package was not found return p, fmt.Errorf("cannot find package %q in:\n\t%s", path, p.Dir) } diff --git a/src/go/build/build_test.go b/src/go/build/build_test.go index 091443f646f2f..db8b12eabf83c 100644 --- a/src/go/build/build_test.go +++ b/src/go/build/build_test.go @@ -351,12 +351,16 @@ func TestImportDirNotExist(t *testing.T) { func TestImportVendor(t *testing.T) { testenv.MustHaveGoBuild(t) // really must just have source ctxt := Default - ctxt.GOPATH = "" - p, err := ctxt.Import("golang_org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + ctxt.GOPATH = filepath.Join(wd, "testdata/withvendor") + p, err := ctxt.Import("c/d", filepath.Join(ctxt.GOPATH, "src/a/b"), 0) if err != nil { - t.Fatalf("cannot find vendored golang_org/x/net/http2/hpack from net/http directory: %v", err) + t.Fatalf("cannot find vendored c/d from testdata src/a/b directory: %v", err) } - want := "vendor/golang_org/x/net/http2/hpack" + want := "a/vendor/c/d" if p.ImportPath != want { t.Fatalf("Import succeeded but found %q, want %q", p.ImportPath, want) } @@ -365,8 +369,12 @@ func TestImportVendor(t *testing.T) { func TestImportVendorFailure(t *testing.T) { testenv.MustHaveGoBuild(t) // really must just have source ctxt := Default - ctxt.GOPATH = "" - p, err := ctxt.Import("x.com/y/z", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + ctxt.GOPATH = filepath.Join(wd, "testdata/withvendor") + p, err := ctxt.Import("x.com/y/z", filepath.Join(ctxt.GOPATH, "src/a/b"), 0) if err == nil { t.Fatalf("found made-up package x.com/y/z in %s", p.Dir) } @@ -380,9 +388,13 @@ func TestImportVendorFailure(t *testing.T) { func TestImportVendorParentFailure(t *testing.T) { testenv.MustHaveGoBuild(t) // really must just have source ctxt := Default - ctxt.GOPATH = "" - // This import should fail because the vendor/golang.org/x/net/http2 directory 
has no source code. - p, err := ctxt.Import("golang_org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + wd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + ctxt.GOPATH = filepath.Join(wd, "testdata/withvendor") + // This import should fail because the vendor/c directory has no source code. + p, err := ctxt.Import("c", filepath.Join(ctxt.GOPATH, "src/a/b"), 0) if err == nil { t.Fatalf("found empty parent in %s", p.Dir) } diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 29dbe47d29d47..2c29a3e6018f7 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -36,9 +36,10 @@ var pkgDeps = map[string][]string{ // L0 is the lowest level, core, nearly unavoidable packages. "errors": {}, "io": {"errors", "sync", "sync/atomic"}, - "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "internal/cpu", "internal/bytealg"}, + "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys", "runtime/internal/math", "internal/cpu", "internal/bytealg"}, "runtime/internal/sys": {}, - "runtime/internal/atomic": {"unsafe", "runtime/internal/sys"}, + "runtime/internal/atomic": {"unsafe", "internal/cpu"}, + "runtime/internal/math": {"runtime/internal/sys"}, "internal/race": {"runtime", "unsafe"}, "sync": {"internal/race", "runtime", "sync/atomic", "unsafe"}, "sync/atomic": {"unsafe"}, @@ -60,8 +61,8 @@ var pkgDeps = map[string][]string{ // L1 adds simple functions and strings processing, // but not Unicode tables. - "math": {"internal/cpu", "unsafe"}, - "math/bits": {}, + "math": {"internal/cpu", "unsafe", "math/bits"}, + "math/bits": {"unsafe"}, "math/cmplx": {"math"}, "math/rand": {"L0", "math"}, "strconv": {"L0", "unicode/utf8", "math", "math/bits"}, @@ -100,7 +101,7 @@ var pkgDeps = map[string][]string{ // and interface definitions, but nothing that makes // system calls. 
"crypto": {"L2", "hash"}, // interfaces - "crypto/cipher": {"L2", "crypto/subtle", "crypto/internal/subtle"}, + "crypto/cipher": {"L2", "crypto/subtle", "crypto/internal/subtle", "encoding/binary"}, "crypto/internal/subtle": {"unsafe", "reflect"}, // reflect behind a appengine tag "crypto/subtle": {}, "encoding/base32": {"L2"}, @@ -114,6 +115,7 @@ var pkgDeps = map[string][]string{ "image": {"L2", "image/color"}, // interfaces "image/color": {"L2"}, // interfaces "image/color/palette": {"L2", "image/color"}, + "internal/fmtsort": {"reflect", "sort"}, "reflect": {"L2"}, "sort": {"reflect"}, @@ -134,6 +136,7 @@ var pkgDeps = map[string][]string{ "image", "image/color", "image/color/palette", + "internal/fmtsort", "reflect", }, @@ -156,6 +159,7 @@ var pkgDeps = map[string][]string{ // Other time dependencies: "internal/syscall/windows/registry", "syscall", + "syscall/js", }, "internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"}, @@ -176,8 +180,8 @@ var pkgDeps = map[string][]string{ "time", }, - // Formatted I/O: few dependencies (L1) but we must add reflect. - "fmt": {"L1", "os", "reflect"}, + // Formatted I/O: few dependencies (L1) but we must add reflect and internal/fmtsort. + "fmt": {"L1", "os", "reflect", "internal/fmtsort"}, "log": {"L1", "os", "fmt", "time"}, // Packages used by testing must be low-level (L2+fmt). @@ -204,7 +208,7 @@ var pkgDeps = map[string][]string{ // Go parser. 
"go/ast": {"L4", "OS", "go/scanner", "go/token"}, - "go/doc": {"L4", "go/ast", "go/token", "regexp", "text/template"}, + "go/doc": {"L4", "OS", "go/ast", "go/token", "regexp", "text/template"}, "go/parser": {"L4", "OS", "go/ast", "go/scanner", "go/token"}, "go/printer": {"L4", "OS", "go/ast", "go/scanner", "go/token", "text/tabwriter"}, "go/scanner": {"L4", "OS", "go/token"}, @@ -226,54 +230,56 @@ var pkgDeps = map[string][]string{ "go/constant": {"L4", "go/token", "math/big"}, "go/importer": {"L4", "go/build", "go/internal/gccgoimporter", "go/internal/gcimporter", "go/internal/srcimporter", "go/token", "go/types"}, "go/internal/gcimporter": {"L4", "OS", "go/build", "go/constant", "go/token", "go/types", "text/scanner"}, - "go/internal/gccgoimporter": {"L4", "OS", "debug/elf", "go/constant", "go/token", "go/types", "text/scanner"}, + "go/internal/gccgoimporter": {"L4", "OS", "debug/elf", "go/constant", "go/token", "go/types", "internal/xcoff", "text/scanner"}, "go/internal/srcimporter": {"L4", "OS", "fmt", "go/ast", "go/build", "go/parser", "go/token", "go/types", "path/filepath"}, "go/types": {"L4", "GOPARSER", "container/heap", "go/constant"}, // One of a kind. 
- "archive/tar": {"L4", "OS", "syscall", "os/user"}, - "archive/zip": {"L4", "OS", "compress/flate"}, - "container/heap": {"sort"}, - "compress/bzip2": {"L4"}, - "compress/flate": {"L4"}, - "compress/gzip": {"L4", "compress/flate"}, - "compress/lzw": {"L4"}, - "compress/zlib": {"L4", "compress/flate"}, - "context": {"errors", "fmt", "reflect", "sync", "time"}, - "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"}, - "database/sql/driver": {"L4", "context", "time", "database/sql/internal"}, - "debug/dwarf": {"L4"}, - "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"}, - "debug/gosym": {"L4"}, - "debug/macho": {"L4", "OS", "debug/dwarf", "compress/zlib"}, - "debug/pe": {"L4", "OS", "debug/dwarf", "compress/zlib"}, - "debug/plan9obj": {"L4", "OS"}, - "encoding": {"L4"}, - "encoding/ascii85": {"L4"}, - "encoding/asn1": {"L4", "math/big"}, - "encoding/csv": {"L4"}, - "encoding/gob": {"L4", "OS", "encoding"}, - "encoding/hex": {"L4"}, - "encoding/json": {"L4", "encoding"}, - "encoding/pem": {"L4"}, - "encoding/xml": {"L4", "encoding"}, - "flag": {"L4", "OS"}, - "go/build": {"L4", "OS", "GOPARSER"}, - "html": {"L4"}, - "image/draw": {"L4", "image/internal/imageutil"}, - "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"}, - "image/internal/imageutil": {"L4"}, - "image/jpeg": {"L4", "image/internal/imageutil"}, - "image/png": {"L4", "compress/zlib"}, - "index/suffixarray": {"L4", "regexp"}, - "internal/singleflight": {"sync"}, - "internal/trace": {"L4", "OS"}, - "math/big": {"L4"}, - "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"}, - "mime/quotedprintable": {"L4"}, - "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"}, - "net/url": {"L4"}, - "plugin": {"L0", "OS", "CGO"}, + "archive/tar": {"L4", "OS", "syscall", "os/user"}, + "archive/zip": {"L4", "OS", "compress/flate"}, + "container/heap": {"sort"}, + "compress/bzip2": {"L4"}, + "compress/flate": 
{"L4"}, + "compress/gzip": {"L4", "compress/flate"}, + "compress/lzw": {"L4"}, + "compress/zlib": {"L4", "compress/flate"}, + "context": {"errors", "fmt", "reflect", "sync", "time"}, + "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"}, + "database/sql/driver": {"L4", "context", "time", "database/sql/internal"}, + "debug/dwarf": {"L4"}, + "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"}, + "debug/gosym": {"L4"}, + "debug/macho": {"L4", "OS", "debug/dwarf", "compress/zlib"}, + "debug/pe": {"L4", "OS", "debug/dwarf", "compress/zlib"}, + "debug/plan9obj": {"L4", "OS"}, + "encoding": {"L4"}, + "encoding/ascii85": {"L4"}, + "encoding/asn1": {"L4", "math/big"}, + "encoding/csv": {"L4"}, + "encoding/gob": {"L4", "OS", "encoding"}, + "encoding/hex": {"L4"}, + "encoding/json": {"L4", "encoding"}, + "encoding/pem": {"L4"}, + "encoding/xml": {"L4", "encoding"}, + "flag": {"L4", "OS"}, + "go/build": {"L4", "OS", "GOPARSER", "internal/goroot"}, + "html": {"L4"}, + "image/draw": {"L4", "image/internal/imageutil"}, + "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"}, + "image/internal/imageutil": {"L4"}, + "image/jpeg": {"L4", "image/internal/imageutil"}, + "image/png": {"L4", "compress/zlib"}, + "index/suffixarray": {"L4", "regexp"}, + "internal/goroot": {"L4", "OS"}, + "internal/singleflight": {"sync"}, + "internal/trace": {"L4", "OS", "container/heap"}, + "internal/xcoff": {"L4", "OS", "debug/dwarf"}, + "math/big": {"L4"}, + "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"}, + "mime/quotedprintable": {"L4"}, + "net/internal/socktest": {"L4", "OS", "syscall", "internal/syscall/windows"}, + "net/url": {"L4"}, + "plugin": {"L0", "OS", "CGO"}, "runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"}, "testing/internal/testdeps": {"L4", "internal/testlog", "runtime/pprof", "regexp"}, "text/scanner": {"L4", "OS"}, @@ -313,9 +319,9 @@ var pkgDeps = map[string][]string{ 
"net": { "L0", "CGO", "context", "math/rand", "os", "reflect", "sort", "syscall", "time", - "internal/nettrace", "internal/poll", + "internal/nettrace", "internal/poll", "internal/syscall/unix", "internal/syscall/windows", "internal/singleflight", "internal/race", - "golang_org/x/net/dns/dnsmessage", "golang_org/x/net/lif", "golang_org/x/net/route", + "internal/x/net/dns/dnsmessage", "internal/x/net/lif", "internal/x/net/route", }, // NET enables use of basic network-related packages. @@ -352,9 +358,9 @@ var pkgDeps = map[string][]string{ "crypto/sha1", "crypto/sha256", "crypto/sha512", - "golang_org/x/crypto/chacha20poly1305", - "golang_org/x/crypto/curve25519", - "golang_org/x/crypto/poly1305", + "internal/x/crypto/chacha20poly1305", + "internal/x/crypto/curve25519", + "internal/x/crypto/poly1305", }, // Random byte, number generation. @@ -382,13 +388,13 @@ var pkgDeps = map[string][]string{ // SSL/TLS. "crypto/tls": { - "L4", "CRYPTO-MATH", "OS", + "L4", "CRYPTO-MATH", "OS", "internal/x/crypto/cryptobyte", "internal/x/crypto/hkdf", "container/list", "crypto/x509", "encoding/pem", "net", "syscall", }, "crypto/x509": { "L4", "CRYPTO-MATH", "OS", "CGO", "crypto/x509/pkix", "encoding/pem", "encoding/hex", "net", "os/user", "syscall", "net/url", - "golang_org/x/crypto/cryptobyte", "golang_org/x/crypto/cryptobyte/asn1", + "internal/x/crypto/cryptobyte", "internal/x/crypto/cryptobyte/asn1", }, "crypto/x509/pkix": {"L4", "CRYPTO-MATH", "encoding/hex"}, @@ -404,12 +410,12 @@ var pkgDeps = map[string][]string{ "context", "crypto/rand", "crypto/tls", - "golang_org/x/net/http/httpguts", - "golang_org/x/net/http/httpproxy", - "golang_org/x/net/http2/hpack", - "golang_org/x/net/idna", - "golang_org/x/text/unicode/norm", - "golang_org/x/text/width", + "internal/x/net/http/httpguts", + "internal/x/net/http/httpproxy", + "internal/x/net/http2/hpack", + "internal/x/net/idna", + "internal/x/text/unicode/norm", + "internal/x/text/width", "internal/nettrace", "mime/multipart", 
"net/http/httptrace", @@ -427,9 +433,9 @@ var pkgDeps = map[string][]string{ "net/http/fcgi": {"L4", "NET", "OS", "context", "net/http", "net/http/cgi"}, "net/http/httptest": { "L4", "NET", "OS", "crypto/tls", "flag", "net/http", "net/http/internal", "crypto/x509", - "golang_org/x/net/http/httpguts", + "internal/x/net/http/httpguts", }, - "net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal"}, + "net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal", "internal/x/net/http/httpguts"}, "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"}, "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"}, "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"}, @@ -480,7 +486,7 @@ func listStdPkgs(goroot string) ([]string, error) { } name := filepath.ToSlash(path[len(src):]) - if name == "builtin" || name == "cmd" || strings.Contains(name, "golang_org") { + if name == "builtin" || name == "cmd" || strings.Contains(name, "internal/x/") { return filepath.SkipDir } diff --git a/src/go/build/doc.go b/src/go/build/doc.go index 69613e359c24c..8e3858feea926 100644 --- a/src/go/build/doc.go +++ b/src/go/build/doc.go @@ -108,8 +108,11 @@ // - "go1.9", from Go version 1.9 onward // - "go1.10", from Go version 1.10 onward // - "go1.11", from Go version 1.11 onward +// - "go1.12", from Go version 1.12 onward // - any additional words listed in ctxt.BuildTags // +// There are no build tags for beta or minor releases. +// // If a file's name, after stripping the extension and a possible _test suffix, // matches any of the following patterns: // *_GOOS diff --git a/src/go/build/gc.go b/src/go/build/gc.go index e2be2cbb1d18f..3025cd5681591 100644 --- a/src/go/build/gc.go +++ b/src/go/build/gc.go @@ -7,131 +7,11 @@ package build import ( - "os" - "os/exec" "path/filepath" "runtime" - "strings" - "sync" ) // getToolDir returns the default value of ToolDir. 
func getToolDir() string { return filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) } - -// isStandardPackage is not used for the gc toolchain. -// However, this function may be called when using `go build -compiler=gccgo`. -func isStandardPackage(path string) bool { - return gccgoSearch.isStandard(path) -} - -// gccgoSearch holds the gccgo search directories. -type gccgoDirs struct { - once sync.Once - dirs []string -} - -// gccgoSearch is used to check whether a gccgo package exists in the -// standard library. -var gccgoSearch gccgoDirs - -// init finds the gccgo search directories. If this fails it leaves dirs == nil. -func (gd *gccgoDirs) init() { - gccgo := os.Getenv("GCCGO") - if gccgo == "" { - gccgo = "gccgo" - } - bin, err := exec.LookPath(gccgo) - if err != nil { - return - } - - allDirs, err := exec.Command(bin, "-print-search-dirs").Output() - if err != nil { - return - } - versionB, err := exec.Command(bin, "-dumpversion").Output() - if err != nil { - return - } - version := strings.TrimSpace(string(versionB)) - machineB, err := exec.Command(bin, "-dumpmachine").Output() - if err != nil { - return - } - machine := strings.TrimSpace(string(machineB)) - - dirsEntries := strings.Split(string(allDirs), "\n") - const prefix = "libraries: =" - var dirs []string - for _, dirEntry := range dirsEntries { - if strings.HasPrefix(dirEntry, prefix) { - dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix)) - break - } - } - if len(dirs) == 0 { - return - } - - var lastDirs []string - for _, dir := range dirs { - goDir := filepath.Join(dir, "go", version) - if fi, err := os.Stat(goDir); err == nil && fi.IsDir() { - gd.dirs = append(gd.dirs, goDir) - goDir = filepath.Join(goDir, machine) - if fi, err = os.Stat(goDir); err == nil && fi.IsDir() { - gd.dirs = append(gd.dirs, goDir) - } - } - if fi, err := os.Stat(dir); err == nil && fi.IsDir() { - lastDirs = append(lastDirs, dir) - } - } - gd.dirs = append(gd.dirs, lastDirs...) 
-} - -// isStandard returns whether path is a standard library for gccgo. -func (gd *gccgoDirs) isStandard(path string) bool { - // Quick check: if the first path component has a '.', it's not - // in the standard library. This skips most GOPATH directories. - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - if strings.Contains(path[:i], ".") { - return false - } - - if path == "unsafe" { - // Special case. - return true - } - - gd.once.Do(gd.init) - if gd.dirs == nil { - // We couldn't find the gccgo search directories. - // Best guess, since the first component did not contain - // '.', is that this is a standard library package. - return true - } - - for _, dir := range gd.dirs { - full := filepath.Join(dir, path) - pkgdir, pkg := filepath.Split(full) - for _, p := range [...]string{ - full, - full + ".gox", - pkgdir + "lib" + pkg + ".so", - pkgdir + "lib" + pkg + ".a", - full + ".o", - } { - if fi, err := os.Stat(p); err == nil && !fi.IsDir() { - return true - } - } - } - - return false -} diff --git a/src/go/build/gccgo.go b/src/go/build/gccgo.go index 59e089d69db2c..c6aac9aa1bc2d 100644 --- a/src/go/build/gccgo.go +++ b/src/go/build/gccgo.go @@ -12,9 +12,3 @@ import "runtime" func getToolDir() string { return envOr("GCCGOTOOLDIR", runtime.GCCGOTOOLDIR) } - -// isStandardPackage returns whether path names a standard library package. -// This uses a list generated at build time. 
-func isStandardPackage(path string) bool { - return stdpkg[path] -} diff --git a/src/go/build/read_test.go b/src/go/build/read_test.go index 9cef657e13fc1..8636533f6978b 100644 --- a/src/go/build/read_test.go +++ b/src/go/build/read_test.go @@ -110,15 +110,12 @@ func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, erro if err != nil { if tt.err == "" { t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf)) - continue - } - if !strings.Contains(err.Error(), tt.err) { + } else if !strings.Contains(err.Error(), tt.err) { t.Errorf("#%d: err=%q, expected %q", i, err, tt.err) - continue } continue } - if err == nil && tt.err != "" { + if tt.err != "" { t.Errorf("#%d: success, expected %q", i, tt.err) continue } diff --git a/src/go/build/syslist.go b/src/go/build/syslist.go index d7938fad54f96..d13fe9c4f997f 100644 --- a/src/go/build/syslist.go +++ b/src/go/build/syslist.go @@ -4,5 +4,5 @@ package build -const goosList = "android darwin dragonfly freebsd js linux nacl netbsd openbsd plan9 solaris windows zos " +const goosList = "aix android darwin dragonfly freebsd hurd js linux nacl netbsd openbsd plan9 solaris windows zos " const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc riscv riscv64 s390 s390x sparc sparc64 wasm " diff --git a/src/go/build/testdata/withvendor/src/a/b/b.go b/src/go/build/testdata/withvendor/src/a/b/b.go new file mode 100644 index 0000000000000..4405d547a6288 --- /dev/null +++ b/src/go/build/testdata/withvendor/src/a/b/b.go @@ -0,0 +1,3 @@ +package b + +import _ "c/d" diff --git a/src/go/build/testdata/withvendor/src/a/vendor/c/d/d.go b/src/go/build/testdata/withvendor/src/a/vendor/c/d/d.go new file mode 100644 index 0000000000000..142fb423f66d1 --- /dev/null +++ b/src/go/build/testdata/withvendor/src/a/vendor/c/d/d.go @@ -0,0 +1 @@ +package d diff --git a/src/go/constant/value.go b/src/go/constant/value.go index 
64f8eb660a881..0982243edbf68 100644 --- a/src/go/constant/value.go +++ b/src/go/constant/value.go @@ -170,7 +170,7 @@ func (x int64Val) String() string { return strconv.FormatInt(int64(x), 10) } func (x intVal) String() string { return x.val.String() } func (x ratVal) String() string { return rtof(x).String() } -// String returns returns a decimal approximation of the Float value. +// String returns a decimal approximation of the Float value. func (x floatVal) String() string { f := x.val diff --git a/src/go/constant/value_test.go b/src/go/constant/value_test.go index e6fca76e182bc..68b87eaa5571d 100644 --- a/src/go/constant/value_test.go +++ b/src/go/constant/value_test.go @@ -296,7 +296,7 @@ func val(lit string) Value { switch first, last := lit[0], lit[len(lit)-1]; { case first == '"' || first == '`': tok = token.STRING - lit = strings.Replace(lit, "_", " ", -1) + lit = strings.ReplaceAll(lit, "_", " ") case first == '\'': tok = token.CHAR case last == 'i': diff --git a/src/go/doc/comment.go b/src/go/doc/comment.go index d068d8960c602..0ec42643fdf82 100644 --- a/src/go/doc/comment.go +++ b/src/go/doc/comment.go @@ -7,40 +7,46 @@ package doc import ( + "bytes" "io" - "regexp" "strings" "text/template" // for HTMLEscape "unicode" "unicode/utf8" ) +const ( + ldquo = "“" + rdquo = "”" + ulquo = "“" + urquo = "”" +) + var ( - ldquo = []byte("“") - rdquo = []byte("”") + htmlQuoteReplacer = strings.NewReplacer(ulquo, ldquo, urquo, rdquo) + unicodeQuoteReplacer = strings.NewReplacer("``", ulquo, "''", urquo) ) // Escape comment text for HTML. If nice is set, // also turn `` into “ and '' into ”. 
func commentEscape(w io.Writer, text string, nice bool) { - last := 0 if nice { - for i := 0; i < len(text)-1; i++ { - ch := text[i] - if ch == text[i+1] && (ch == '`' || ch == '\'') { - template.HTMLEscape(w, []byte(text[last:i])) - last = i + 2 - switch ch { - case '`': - w.Write(ldquo) - case '\'': - w.Write(rdquo) - } - i++ // loop will add one more - } - } + // In the first pass, we convert `` and '' into their unicode equivalents. + // This prevents them from being escaped in HTMLEscape. + text = convertQuotes(text) + var buf bytes.Buffer + template.HTMLEscape(&buf, []byte(text)) + // Now we convert the unicode quotes to their HTML escaped entities to maintain old behavior. + // We need to use a temp buffer to read the string back and do the conversion, + // otherwise HTMLEscape will escape & to & + htmlQuoteReplacer.WriteString(w, buf.String()) + return } - template.HTMLEscape(w, []byte(text[last:])) + template.HTMLEscape(w, []byte(text)) +} + +func convertQuotes(text string) string { + return unicodeQuoteReplacer.Replace(text) } const ( @@ -48,7 +54,7 @@ const ( identRx = `[\pL_][\pL_0-9]*` // Regexp for URLs - // Match parens, and check in pairedParensPrefixLen for balance - see #5043 + // Match parens, and check later for balance - see #5043, #22285 // Match .,:;?! within path, but not at end - see #18139, #16565 // This excludes some rare yet valid urls ending in common punctuation // in order to allow sentences ending in URLs. @@ -63,7 +69,7 @@ const ( urlRx = protoPart + `://` + hostPart + pathPart ) -var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`) +var matchRx = newLazyRE(`(` + urlRx + `)|(` + identRx + `)`) var ( html_a = []byte(`\n") ) -// pairedParensPrefixLen returns the length of the longest prefix of s containing paired parentheses. 
-func pairedParensPrefixLen(s string) int { - parens := 0 - l := len(s) - for i, ch := range s { - switch ch { - case '(': - if parens == 0 { - l = i - } - parens++ - case ')': - parens-- - if parens == 0 { - l = len(s) - } else if parens < 0 { - return i - } - } - } - return l -} - // Emphasize and escape a line of text for HTML. URLs are converted into links; // if the URL also appears in the words map, the link is taken from the map (if // the corresponding map value is the empty string, the URL is not converted @@ -122,13 +105,27 @@ func emphasize(w io.Writer, line string, words map[string]string, nice bool) { // write text before match commentEscape(w, line[0:m[0]], nice) - // adjust match if necessary + // adjust match for URLs match := line[m[0]:m[1]] - if n := pairedParensPrefixLen(match); n < len(match) { - // match contains unpaired parentheses (rare); - // redo matching with shortened line for correct indices - m = matchRx.FindStringSubmatchIndex(line[:m[0]+n]) - match = match[:n] + if strings.Contains(match, "://") { + m0, m1 := m[0], m[1] + for _, s := range []string{"()", "{}", "[]"} { + open, close := s[:1], s[1:] // E.g., "(" and ")" + // require opening parentheses before closing parentheses (#22285) + if i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) { + m1 = m0 + i + match = line[m0:m1] + } + // require balanced pairs of parentheses (#5043) + for i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ { + m1 = strings.LastIndexAny(line[:m1], s) + match = line[m0:m1] + } + } + if m1 != m[1] { + // redo matching with shortened line for correct indices + m = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)]) + } } // analyze match @@ -249,7 +246,7 @@ func heading(line string) string { } // allow "." 
when followed by non-space - for b := line;; { + for b := line; ; { i := strings.IndexRune(b, '.') if i < 0 { break @@ -276,7 +273,7 @@ type block struct { lines []string } -var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`) +var nonAlphaNumRx = newLazyRE(`[^a-zA-Z0-9]`) func anchorID(line string) string { // Add a "hdr-" prefix to avoid conflicting with IDs used for package symbols. @@ -430,12 +427,14 @@ func ToText(w io.Writer, text string, indent, preIndent string, width int) { case opPara: // l.write will add leading newline if required for _, line := range b.lines { + line = convertQuotes(line) l.write(line) } l.flush() case opHead: w.Write(nl) for _, line := range b.lines { + line = convertQuotes(line) l.write(line + "\n") } l.flush() @@ -446,6 +445,7 @@ func ToText(w io.Writer, text string, indent, preIndent string, width int) { w.Write([]byte("\n")) } else { w.Write([]byte(preIndent)) + line = convertQuotes(line) w.Write([]byte(line)) } } diff --git a/src/go/doc/comment_test.go b/src/go/doc/comment_test.go index 0523ab899ee34..e0adeb2f5cf10 100644 --- a/src/go/doc/comment_test.go +++ b/src/go/doc/comment_test.go @@ -7,6 +7,7 @@ package doc import ( "bytes" "reflect" + "strings" "testing" ) @@ -150,6 +151,7 @@ func TestToText(t *testing.T) { var emphasizeTests = []struct { in, out string }{ + {"", ""}, {"http://[::1]:8080/foo.txt", `http://[::1]:8080/foo.txt`}, {"before (https://www.google.com) after", `before (https://www.google.com) after`}, {"before https://www.google.com:30/x/y/z:b::c. After", `before https://www.google.com:30/x/y/z:b::c. 
After`}, @@ -168,7 +170,13 @@ var emphasizeTests = []struct { {"Hello http://example.com/%2f/ /world.", `Hello http://example.com/%2f/ /world.`}, {"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"}, {"javascript://is/not/linked", "javascript://is/not/linked"}, + {"http://foo", `http://foo`}, + {"art by [[https://www.example.com/person/][Person Name]]", `art by [[https://www.example.com/person/][Person Name]]`}, + {"please visit (http://golang.org/)", `please visit (http://golang.org/)`}, + {"please visit http://golang.org/hello())", `please visit http://golang.org/hello())`}, {"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD`}, + {"https://foo.bar/bal/x(])", `https://foo.bar/bal/x(])`}, // inner ] causes (]) to be cut off from URL + {"foo [ http://bar(])", `foo [ http://bar(])`}, // outer [ causes ]) to be cut off from URL } func TestEmphasize(t *testing.T) { @@ -182,32 +190,18 @@ func TestEmphasize(t *testing.T) { } } -var pairedParensPrefixLenTests = []struct { - in, out string -}{ - {"", ""}, - {"foo", "foo"}, - {"()", "()"}, - {"foo()", "foo()"}, - {"foo()()()", "foo()()()"}, - {"foo()((()()))", "foo()((()()))"}, - {"foo()((()()))bar", "foo()((()()))bar"}, - {"foo)", "foo"}, - {"foo))", "foo"}, - {"foo)))))", "foo"}, - {"(foo", ""}, - {"((foo", ""}, - {"(((((foo", ""}, - {"(foo)", "(foo)"}, - {"((((foo))))", "((((foo))))"}, - {"foo()())", "foo()()"}, - {"foo((()())", "foo"}, - {"foo((()())) (() foo ", "foo((()())) "}, -} - -func TestPairedParensPrefixLen(t *testing.T) { - for i, tt := range pairedParensPrefixLenTests { - if out := tt.in[:pairedParensPrefixLen(tt.in)]; out != tt.out { +func TestCommentEscape(t *testing.T) { + commentTests := []struct { + in, out string + }{ + {"typically invoked as ``go tool asm'',", "typically invoked as " + ldquo + "go tool asm" + rdquo + ","}, + {"For more detail, run ``go help test'' and ``go help testflag''", "For more 
detail, run " + ldquo + "go help test" + rdquo + " and " + ldquo + "go help testflag" + rdquo}, + } + for i, tt := range commentTests { + var buf strings.Builder + commentEscape(&buf, tt.in, true) + out := buf.String() + if out != tt.out { t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out) } } diff --git a/src/go/doc/doc.go b/src/go/doc/doc.go index 3c3e28d48fb88..d0d4d3265b61e 100644 --- a/src/go/doc/doc.go +++ b/src/go/doc/doc.go @@ -79,13 +79,18 @@ type Note struct { type Mode int const ( - // extract documentation for all package-level declarations, - // not just exported ones + // AllDecls says to extract documentation for all package-level + // declarations, not just exported ones. AllDecls Mode = 1 << iota - // show all embedded methods, not just the ones of - // invisible (unexported) anonymous fields + // AllMethods says to show all embedded methods, not just the ones of + // invisible (unexported) anonymous fields. AllMethods + + // PreserveAST says to leave the AST unmodified. Originally, pieces of + // the AST such as function bodies were nil-ed out to save memory in + // godoc, but not all programs want that behavior. + PreserveAST ) // New computes the package documentation for the given package AST. diff --git a/src/go/doc/doc_test.go b/src/go/doc/doc_test.go index ad8ba5378f3f1..0b2d2b63ccc09 100644 --- a/src/go/doc/doc_test.go +++ b/src/go/doc/doc_test.go @@ -40,7 +40,7 @@ func readTemplate(filename string) *template.Template { func nodeFmt(node interface{}, fset *token.FileSet) string { var buf bytes.Buffer printer.Fprint(&buf, fset, node) - return strings.Replace(strings.TrimSpace(buf.String()), "\n", "\n\t", -1) + return strings.ReplaceAll(strings.TrimSpace(buf.String()), "\n", "\n\t") } func synopsisFmt(s string) string { @@ -53,7 +53,7 @@ func synopsisFmt(s string) string { } s = strings.TrimSpace(s) + " ..." 
} - return "// " + strings.Replace(s, "\n", " ", -1) + return "// " + strings.ReplaceAll(s, "\n", " ") } func indentFmt(indent, s string) string { @@ -62,7 +62,7 @@ func indentFmt(indent, s string) string { end = "\n" s = s[:len(s)-1] } - return indent + strings.Replace(s, "\n", "\n"+indent, -1) + end + return indent + strings.ReplaceAll(s, "\n", "\n"+indent) + end } func isGoFile(fi os.FileInfo) bool { @@ -144,3 +144,12 @@ func Test(t *testing.T) { test(t, AllDecls) test(t, AllMethods) } + +func TestAnchorID(t *testing.T) { + const in = "Important Things 2 Know & Stuff" + const want = "hdr-Important_Things_2_Know___Stuff" + got := anchorID(in) + if got != want { + t.Errorf("anchorID(%q) = %q; want %q", in, got, want) + } +} diff --git a/src/go/doc/example.go b/src/go/doc/example.go index 5b40bb0fb25fb..81956f2fdbfe0 100644 --- a/src/go/doc/example.go +++ b/src/go/doc/example.go @@ -68,6 +68,9 @@ func Examples(files ...*ast.File) []*Example { if !isTest(name, "Example") { continue } + if f.Body == nil { // ast.File.Body nil dereference (see issue 28044) + continue + } var doc string if f.Doc != nil { doc = f.Doc.Text() @@ -216,6 +219,18 @@ func playExample(file *ast.File, f *ast.FuncDecl) *ast.File { for i := 0; i < len(depDecls); i++ { switch d := depDecls[i].(type) { case *ast.FuncDecl: + // Inspect types of parameters and results. See #28492. + if d.Type.Params != nil { + for _, p := range d.Type.Params.List { + ast.Inspect(p.Type, inspectFunc) + } + } + if d.Type.Results != nil { + for _, r := range d.Type.Results.List { + ast.Inspect(r.Type, inspectFunc) + } + } + ast.Inspect(d.Body, inspectFunc) case *ast.GenDecl: for _, spec := range d.Specs { @@ -253,6 +268,11 @@ func playExample(file *ast.File, f *ast.FuncDecl) *ast.File { if err != nil { continue } + if p == "syscall/js" { + // We don't support examples that import syscall/js, + // because the package syscall/js is not available in the playground. 
+ return nil + } n := path.Base(p) if s.Name != nil { n = s.Name.Name @@ -406,6 +426,9 @@ func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast // lastComment returns the last comment inside the provided block. func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) { + if b == nil { + return + } pos, end := b.Pos(), b.End() for j, cg := range c { if cg.Pos() < pos { diff --git a/src/go/doc/example_test.go b/src/go/doc/example_test.go index 552a51bf74219..74fd10626d277 100644 --- a/src/go/doc/example_test.go +++ b/src/go/doc/example_test.go @@ -351,6 +351,103 @@ func TestExamplesWholeFile(t *testing.T) { } } +const exampleInspectSignature = `package foo_test + +import ( + "bytes" + "io" +) + +func getReader() io.Reader { return nil } + +func do(b bytes.Reader) {} + +func Example() { + getReader() + do() + // Output: +} + +func ExampleIgnored() { +} +` + +const exampleInspectSignatureOutput = `package main + +import ( + "bytes" + "io" +) + +func getReader() io.Reader { return nil } + +func do(b bytes.Reader) {} + +func main() { + getReader() + do() +} +` + +func TestExampleInspectSignature(t *testing.T) { + // Verify that "bytes" and "io" are imported. See issue #28492. + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleInspectSignature), parser.ParseComments) + if err != nil { + t.Fatal(err) + } + es := doc.Examples(file) + if len(es) != 2 { + t.Fatalf("wrong number of examples; got %d want 2", len(es)) + } + // We are interested in the first example only. 
+ e := es[0] + if e.Name != "" { + t.Errorf("got Name == %q, want %q", e.Name, "") + } + if g, w := formatFile(t, fset, e.Play), exampleInspectSignatureOutput; g != w { + t.Errorf("got Play == %q, want %q", g, w) + } + if g, w := e.Output, ""; g != w { + t.Errorf("got Output == %q, want %q", g, w) + } +} + +const exampleEmpty = ` +package p +func Example() {} +func Example_a() +` + +const exampleEmptyOutput = `package main + +func main() {} +func main() +` + +func TestExampleEmpty(t *testing.T) { + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleEmpty), parser.ParseComments) + if err != nil { + t.Fatal(err) + } + + es := doc.Examples(file) + if len(es) != 1 { + t.Fatalf("wrong number of examples; got %d want 1", len(es)) + } + e := es[0] + if e.Name != "" { + t.Errorf("got Name == %q, want %q", e.Name, "") + } + if g, w := formatFile(t, fset, e.Play), exampleEmptyOutput; g != w { + t.Errorf("got Play == %q, want %q", g, w) + } + if g, w := e.Output, ""; g != w { + t.Errorf("got Output == %q, want %q", g, w) + } +} + func formatFile(t *testing.T, fset *token.FileSet, n *ast.File) string { if n == nil { return "" diff --git a/src/go/doc/lazyre.go b/src/go/doc/lazyre.go new file mode 100644 index 0000000000000..3fd97d42de688 --- /dev/null +++ b/src/go/doc/lazyre.go @@ -0,0 +1,51 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package doc + +import ( + "os" + "regexp" + "strings" + "sync" +) + +type lazyRE struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (r *lazyRE) re() *regexp.Regexp { + r.once.Do(r.build) + return r.rx +} + +func (r *lazyRE) build() { + r.rx = regexp.MustCompile(r.str) + r.str = "" +} + +func (r *lazyRE) FindStringSubmatchIndex(s string) []int { + return r.re().FindStringSubmatchIndex(s) +} + +func (r *lazyRE) ReplaceAllString(src, repl string) string { + return r.re().ReplaceAllString(src, repl) +} + +func (r *lazyRE) MatchString(s string) bool { + return r.re().MatchString(s) +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +func newLazyRE(str string) *lazyRE { + lr := &lazyRE{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/src/go/doc/reader.go b/src/go/doc/reader.go index 21c02920ababf..6db5c21c4a222 100644 --- a/src/go/doc/reader.go +++ b/src/go/doc/reader.go @@ -7,7 +7,6 @@ package doc import ( "go/ast" "go/token" - "regexp" "sort" "strconv" ) @@ -37,9 +36,10 @@ func recvString(recv ast.Expr) string { // set creates the corresponding Func for f and adds it to mset. // If there are multiple f's with the same name, set keeps the first -// one with documentation; conflicts are ignored. +// one with documentation; conflicts are ignored. The boolean +// specifies whether to leave the AST untouched. 
// -func (mset methodSet) set(f *ast.FuncDecl) { +func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) { name := f.Name.Name if g := mset[name]; g != nil && g.Doc != "" { // A function with the same name has already been registered; @@ -66,7 +66,9 @@ func (mset methodSet) set(f *ast.FuncDecl) { Recv: recv, Orig: recv, } - f.Doc = nil // doc consumed - remove from AST + if !preserveAST { + f.Doc = nil // doc consumed - remove from AST + } } // add adds method m to the method set; m is ignored if the method set @@ -79,7 +81,7 @@ func (mset methodSet) add(m *Func) { mset[m.Name] = m return } - if old != nil && m.Level == old.Level { + if m.Level == old.Level { // conflict - mark it using a method with nil Decl mset[m.Name] = &Func{ Name: m.Name, @@ -300,8 +302,9 @@ func (r *reader) readValue(decl *ast.GenDecl) { Decl: decl, order: r.order, }) - decl.Doc = nil // doc consumed - remove from AST - + if r.mode&PreserveAST == 0 { + decl.Doc = nil // doc consumed - remove from AST + } // Note: It's important that the order used here is global because the cleanupTypes // methods may move values associated with types back into the global list. If the // order is list-specific, sorting is not deterministic because the same order value @@ -340,12 +343,14 @@ func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) { // compute documentation doc := spec.Doc - spec.Doc = nil // doc consumed - remove from AST if doc == nil { // no doc associated with the spec, use the declaration doc, if any doc = decl.Doc } - decl.Doc = nil // doc consumed - remove from AST + if r.mode&PreserveAST == 0 { + spec.Doc = nil // doc consumed - remove from AST + decl.Doc = nil // doc consumed - remove from AST + } typ.doc = doc.Text() // record anonymous fields (they may contribute methods) @@ -360,11 +365,19 @@ func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) { } } +// isPredeclared reports whether n denotes a predeclared type. 
+// +func (r *reader) isPredeclared(n string) bool { + return predeclaredTypes[n] && r.types[n] == nil +} + // readFunc processes a func or method declaration. // func (r *reader) readFunc(fun *ast.FuncDecl) { - // strip function body - fun.Body = nil + // strip function body if requested. + if r.mode&PreserveAST == 0 { + fun.Body = nil + } // associate methods with the receiver type, if any if fun.Recv != nil { @@ -381,7 +394,7 @@ func (r *reader) readFunc(fun *ast.FuncDecl) { return } if typ := r.lookupType(recvTypeName); typ != nil { - typ.methods.set(fun) + typ.methods.set(fun, r.mode&PreserveAST != 0) } // otherwise ignore the method // TODO(gri): There may be exported methods of non-exported types @@ -391,43 +404,44 @@ func (r *reader) readFunc(fun *ast.FuncDecl) { return } - // Associate factory functions with the first visible result type, if that - // is the only type returned. + // Associate factory functions with the first visible result type, as long as + // others are predeclared types. if fun.Type.Results.NumFields() >= 1 { var typ *namedType // type to associate the function with numResultTypes := 0 for _, res := range fun.Type.Results.List { - // exactly one (named or anonymous) result associated - // with the first type in result signature (there may - // be more than one result) factoryType := res.Type if t, ok := factoryType.(*ast.ArrayType); ok { // We consider functions that return slices or arrays of type // T (or pointers to T) as factory functions of T. factoryType = t.Elt } - if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) { + if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) { if t := r.lookupType(n); t != nil { typ = t numResultTypes++ + if numResultTypes > 1 { + break + } } } } - // If there is exactly one result type, associate the function with that type. + // If there is exactly one result type, + // associate the function with that type. 
if numResultTypes == 1 { - typ.funcs.set(fun) + typ.funcs.set(fun, r.mode&PreserveAST != 0) return } } // just an ordinary function - r.funcs.set(fun) + r.funcs.set(fun, r.mode&PreserveAST != 0) } var ( - noteMarker = `([A-Z][A-Z]+)\(([^)]+)\):?` // MARKER(uid), MARKER at least 2 chars, uid at least 1 char - noteMarkerRx = regexp.MustCompile(`^[ \t]*` + noteMarker) // MARKER(uid) at text start - noteCommentRx = regexp.MustCompile(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start + noteMarker = `([A-Z][A-Z]+)\(([^)]+)\):?` // MARKER(uid), MARKER at least 2 chars, uid at least 1 char + noteMarkerRx = newLazyRE(`^[ \t]*` + noteMarker) // MARKER(uid) at text start + noteCommentRx = newLazyRE(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start ) // readNote collects a single note from a sequence of comments. @@ -482,10 +496,12 @@ func (r *reader) readFile(src *ast.File) { // add package documentation if src.Doc != nil { r.readDoc(src.Doc) - src.Doc = nil // doc consumed - remove from AST + if r.mode&PreserveAST == 0 { + src.Doc = nil // doc consumed - remove from AST + } } - // add all declarations + // add all declarations but for functions which are processed in a separate pass for _, decl := range src.Decls { switch d := decl.(type) { case *ast.GenDecl: @@ -539,14 +555,14 @@ func (r *reader) readFile(src *ast.File) { } } } - case *ast.FuncDecl: - r.readFunc(d) } } // collect MARKER(...): annotations r.readNotes(src.Comments) - src.Comments = nil // consumed unassociated comments - remove from AST + if r.mode&PreserveAST == 0 { + src.Comments = nil // consumed unassociated comments - remove from AST + } } func (r *reader) readPackage(pkg *ast.Package, mode Mode) { @@ -575,6 +591,15 @@ func (r *reader) readPackage(pkg *ast.Package, mode Mode) { } r.readFile(f) } + + // process functions now that we have better type information + for _, f := range pkg.Files { + for _, decl := range f.Decls { + if d, ok := decl.(*ast.FuncDecl); ok { + r.readFunc(d) + 
} + } + } } // ---------------------------------------------------------------------------- diff --git a/src/go/doc/synopsis.go b/src/go/doc/synopsis.go index c90080b7cc175..3fa1616cd147b 100644 --- a/src/go/doc/synopsis.go +++ b/src/go/doc/synopsis.go @@ -72,6 +72,7 @@ func Synopsis(s string) string { return "" } } + s = convertQuotes(s) return s } diff --git a/src/go/doc/synopsis_test.go b/src/go/doc/synopsis_test.go index 59b253cb8dcea..3f443dc757883 100644 --- a/src/go/doc/synopsis_test.go +++ b/src/go/doc/synopsis_test.go @@ -35,6 +35,7 @@ var tests = []struct { {"All Rights reserved. Package foo does bar.", 20, ""}, {"All rights reserved. Package foo does bar.", 20, ""}, {"Authors: foo@bar.com. Package foo does bar.", 21, ""}, + {"typically invoked as ``go tool asm'',", 37, "typically invoked as " + ulquo + "go tool asm" + urquo + ","}, } func TestSynopsis(t *testing.T) { diff --git a/src/go/doc/testdata/issue12839.0.golden b/src/go/doc/testdata/issue12839.0.golden index 76c285556028c..6b59774fb9350 100644 --- a/src/go/doc/testdata/issue12839.0.golden +++ b/src/go/doc/testdata/issue12839.0.golden @@ -14,9 +14,15 @@ FUNCTIONS // F1 should not be associated with T1 func F1() (*T1, *T2) + // F10 should not be associated with T1. + func F10() (T1, T2, error) + // F4 should not be associated with a type (same as F1) func F4() (a T1, b T2) + // F9 should not be associated with T1. + func F9() (int, T1, T2) + TYPES // @@ -28,6 +34,18 @@ TYPES // F3 should be associated with T1 because b.T3 is from a ... func F3() (a T1, b p.T3) + // F5 should be associated with T1. + func F5() (T1, error) + + // F6 should be associated with T1. + func F6() (*T1, error) + + // F7 should be associated with T1. + func F7() (T1, string) + + // F8 should be associated with T1. 
+ func F8() (int, T1, string) + // type T2 struct{} diff --git a/src/go/doc/testdata/issue12839.1.golden b/src/go/doc/testdata/issue12839.1.golden index b0a327ffd6416..4b9b9f6477080 100644 --- a/src/go/doc/testdata/issue12839.1.golden +++ b/src/go/doc/testdata/issue12839.1.golden @@ -14,9 +14,15 @@ FUNCTIONS // F1 should not be associated with T1 func F1() (*T1, *T2) + // F10 should not be associated with T1. + func F10() (T1, T2, error) + // F4 should not be associated with a type (same as F1) func F4() (a T1, b T2) + // F9 should not be associated with T1. + func F9() (int, T1, T2) + TYPES // @@ -28,6 +34,18 @@ TYPES // F3 should be associated with T1 because b.T3 is from a ... func F3() (a T1, b p.T3) + // F5 should be associated with T1. + func F5() (T1, error) + + // F6 should be associated with T1. + func F6() (*T1, error) + + // F7 should be associated with T1. + func F7() (T1, string) + + // F8 should be associated with T1. + func F8() (int, T1, string) + // func (t T1) hello() string diff --git a/src/go/doc/testdata/issue12839.2.golden b/src/go/doc/testdata/issue12839.2.golden index 76c285556028c..6b59774fb9350 100644 --- a/src/go/doc/testdata/issue12839.2.golden +++ b/src/go/doc/testdata/issue12839.2.golden @@ -14,9 +14,15 @@ FUNCTIONS // F1 should not be associated with T1 func F1() (*T1, *T2) + // F10 should not be associated with T1. + func F10() (T1, T2, error) + // F4 should not be associated with a type (same as F1) func F4() (a T1, b T2) + // F9 should not be associated with T1. + func F9() (int, T1, T2) + TYPES // @@ -28,6 +34,18 @@ TYPES // F3 should be associated with T1 because b.T3 is from a ... func F3() (a T1, b p.T3) + // F5 should be associated with T1. + func F5() (T1, error) + + // F6 should be associated with T1. + func F6() (*T1, error) + + // F7 should be associated with T1. + func F7() (T1, string) + + // F8 should be associated with T1. 
+ func F8() (int, T1, string) + // type T2 struct{} diff --git a/src/go/doc/testdata/issue12839.go b/src/go/doc/testdata/issue12839.go index 500d49511b5c5..51c7ac1268189 100644 --- a/src/go/doc/testdata/issue12839.go +++ b/src/go/doc/testdata/issue12839.go @@ -5,6 +5,7 @@ // Package issue12839 is a go/doc test to test association of a function // that returns multiple types. // See golang.org/issue/12839. +// (See also golang.org/issue/27928.) package issue12839 import "p" @@ -36,3 +37,33 @@ func F3() (a T1, b p.T3) { func F4() (a T1, b T2) { return T1{}, T2{} } + +// F5 should be associated with T1. +func F5() (T1, error) { + return T1{}, nil +} + +// F6 should be associated with T1. +func F6() (*T1, error) { + return &T1{}, nil +} + +// F7 should be associated with T1. +func F7() (T1, string) { + return T1{}, nil +} + +// F8 should be associated with T1. +func F8() (int, T1, string) { + return 0, T1{}, nil +} + +// F9 should not be associated with T1. +func F9() (int, T1, T2) { + return 0, T1{}, T2{} +} + +// F10 should not be associated with T1. +func F10() (T1, T2, error) { + return T1{}, T2{}, nil +} diff --git a/src/go/format/benchmark_test.go b/src/go/format/benchmark_test.go new file mode 100644 index 0000000000000..7bd45c0e95b24 --- /dev/null +++ b/src/go/format/benchmark_test.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file provides a simple framework to add benchmarks +// based on generated input (source) files. 
+ +package format_test + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "testing" +) + +var debug = flag.Bool("debug", false, "write .src files containing formatting input; for debugging") + +// array1 generates an array literal with n elements of the form: +// +// var _ = [...]byte{ +// // 0 +// 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, +// 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, +// 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, +// 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, +// 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, +// // 40 +// 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, +// 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, +// ... +// +func array1(buf *bytes.Buffer, n int) { + buf.WriteString("var _ = [...]byte{\n") + for i := 0; i < n; { + if i%10 == 0 { + fmt.Fprintf(buf, "\t// %d\n", i) + } + buf.WriteByte('\t') + for j := 0; j < 8; j++ { + fmt.Fprintf(buf, "0x%02x, ", byte(i)) + i++ + } + buf.WriteString("\n") + } + buf.WriteString("}\n") +} + +var tests = []struct { + name string + gen func(*bytes.Buffer, int) + n int +}{ + {"array1", array1, 10000}, + // add new test cases here as needed +} + +func BenchmarkFormat(b *testing.B) { + var src bytes.Buffer + for _, t := range tests { + src.Reset() + src.WriteString("package p\n") + t.gen(&src, t.n) + data := src.Bytes() + + if *debug { + filename := t.name + ".src" + err := ioutil.WriteFile(filename, data, 0660) + if err != nil { + b.Fatalf("couldn't write %s: %v", filename, err) + } + } + + b.Run(fmt.Sprintf("%s-%d", t.name, t.n), func(b *testing.B) { + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var err error + sink, err = format.Source(data) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +var sink []byte diff --git a/src/go/importer/importer.go b/src/go/importer/importer.go index f0a1ca2b76a18..c809c9ab8678c 100644 --- a/src/go/importer/importer.go +++ b/src/go/importer/importer.go @@ -20,7 
+20,7 @@ import ( // a given import path, or an error if no matching package is found. type Lookup func(path string) (io.ReadCloser, error) -// For returns an Importer for importing from installed packages +// ForCompiler returns an Importer for importing from installed packages // for the compilers "gc" and "gccgo", or for importing directly // from the source if the compiler argument is "source". In this // latter case, importing may fail under circumstances where the @@ -39,10 +39,11 @@ type Lookup func(path string) (io.ReadCloser, error) // (not relative or absolute ones); it is assumed that the translation // to canonical import paths is being done by the client of the // importer. -func For(compiler string, lookup Lookup) types.Importer { +func ForCompiler(fset *token.FileSet, compiler string, lookup Lookup) types.Importer { switch compiler { case "gc": return &gcimports{ + fset: fset, packages: make(map[string]*types.Package), lookup: lookup, } @@ -63,13 +64,21 @@ func For(compiler string, lookup Lookup) types.Importer { panic("source importer for custom import path lookup not supported (issue #13847).") } - return srcimporter.New(&build.Default, token.NewFileSet(), make(map[string]*types.Package)) + return srcimporter.New(&build.Default, fset, make(map[string]*types.Package)) } // compiler not supported return nil } +// For calls ForCompiler with a new FileSet. +// +// Deprecated: use ForCompiler, which populates a FileSet +// with the positions of objects created by the importer. +func For(compiler string, lookup Lookup) types.Importer { + return ForCompiler(token.NewFileSet(), compiler, lookup) +} + // Default returns an Importer for the compiler that built the running binary. // If available, the result implements types.ImporterFrom. 
func Default() types.Importer { @@ -79,6 +88,7 @@ func Default() types.Importer { // gc importer type gcimports struct { + fset *token.FileSet packages map[string]*types.Package lookup Lookup } @@ -91,7 +101,7 @@ func (m *gcimports) ImportFrom(path, srcDir string, mode types.ImportMode) (*typ if mode != 0 { panic("mode must be 0") } - return gcimporter.Import(m.packages, path, srcDir, m.lookup) + return gcimporter.Import(m.fset, m.packages, path, srcDir, m.lookup) } // gccgo importer diff --git a/src/go/importer/importer_test.go b/src/go/importer/importer_test.go index 56e83136fb1a0..ff6e12c0da523 100644 --- a/src/go/importer/importer_test.go +++ b/src/go/importer/importer_test.go @@ -5,15 +5,18 @@ package importer import ( + "go/token" "internal/testenv" "io" + "io/ioutil" "os" "os/exec" + "runtime" "strings" "testing" ) -func TestFor(t *testing.T) { +func TestForCompiler(t *testing.T) { testenv.MustHaveGoBuild(t) const thePackage = "math/big" @@ -32,8 +35,10 @@ func TestFor(t *testing.T) { t.Skip("golang.org/issue/22500") } + fset := token.NewFileSet() + t.Run("LookupDefault", func(t *testing.T) { - imp := For(compiler, nil) + imp := ForCompiler(fset, compiler, nil) pkg, err := imp.Import(thePackage) if err != nil { t.Fatal(err) @@ -41,6 +46,21 @@ func TestFor(t *testing.T) { if pkg.Path() != thePackage { t.Fatalf("Path() = %q, want %q", pkg.Path(), thePackage) } + + // Check that the fileset positions are accurate. 
+ // https://github.com/golang/go#28995 + mathBigInt := pkg.Scope().Lookup("Int") + posn := fset.Position(mathBigInt.Pos()) // "$GOROOT/src/math/big/int.go:25:1" + filename := strings.Replace(posn.Filename, "$GOROOT", runtime.GOROOT(), 1) + data, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("can't read file containing declaration of math/big.Int: %v", err) + } + lines := strings.Split(string(data), "\n") + if posn.Line > len(lines) || !strings.HasPrefix(lines[posn.Line-1], "type Int") { + t.Fatalf("Object %v position %s does not contain its declaration", + mathBigInt, posn) + } }) t.Run("LookupCustom", func(t *testing.T) { @@ -54,7 +74,7 @@ func TestFor(t *testing.T) { } return f, nil } - imp := For(compiler, lookup) + imp := ForCompiler(fset, compiler, lookup) pkg, err := imp.Import("math/bigger") if err != nil { t.Fatal(err) diff --git a/src/go/internal/gccgoimporter/ar.go b/src/go/internal/gccgoimporter/ar.go index ebd08b8f35957..443aa26a0cdf5 100644 --- a/src/go/internal/gccgoimporter/ar.go +++ b/src/go/internal/gccgoimporter/ar.go @@ -9,6 +9,7 @@ import ( "debug/elf" "errors" "fmt" + "internal/xcoff" "io" "strconv" "strings" @@ -65,13 +66,13 @@ func arExportData(archive io.ReadSeeker) (io.ReadSeeker, error) { case armagt: return nil, errors.New("unsupported thin archive") case armagb: - return nil, errors.New("unsupported AIX big archive") + return aixBigArExportData(archive) default: return nil, fmt.Errorf("unrecognized archive file format %q", buf[:]) } } -// standardArExportData returns export data form a standard archive. +// standardArExportData returns export data from a standard archive. func standardArExportData(archive io.ReadSeeker) (io.ReadSeeker, error) { off := int64(len(armag)) for { @@ -126,6 +127,28 @@ func elfFromAr(member *io.SectionReader) (io.ReadSeeker, error) { return sec.Open(), nil } +// aixBigArExportData returns export data from an AIX big archive. 
+func aixBigArExportData(archive io.ReadSeeker) (io.ReadSeeker, error) { + archiveAt := readerAtFromSeeker(archive) + arch, err := xcoff.NewArchive(archiveAt) + if err != nil { + return nil, err + } + + for _, mem := range arch.Members { + f, err := arch.GetFile(mem.Name) + if err != nil { + return nil, err + } + sdat := f.CSect(".go_export") + if sdat != nil { + return bytes.NewReader(sdat), nil + } + } + + return nil, fmt.Errorf(".go_export not found in this archive") +} + // readerAtFromSeeker turns an io.ReadSeeker into an io.ReaderAt. // This is only safe because there won't be any concurrent seeks // while this code is executing. diff --git a/src/go/internal/gccgoimporter/gccgoinstallation.go b/src/go/internal/gccgoimporter/gccgoinstallation.go index 622dfc8b69642..8fc7ce3232190 100644 --- a/src/go/internal/gccgoimporter/gccgoinstallation.go +++ b/src/go/internal/gccgoimporter/gccgoinstallation.go @@ -26,8 +26,10 @@ type GccgoInstallation struct { } // Ask the driver at the given path for information for this GccgoInstallation. -func (inst *GccgoInstallation) InitFromDriver(gccgoPath string) (err error) { - cmd := exec.Command(gccgoPath, "-###", "-S", "-x", "go", "-") +// The given arguments are passed directly to the call of the driver. +func (inst *GccgoInstallation) InitFromDriver(gccgoPath string, args ...string) (err error) { + argv := append([]string{"-###", "-S", "-x", "go", "-"}, args...) + cmd := exec.Command(gccgoPath, argv...) stderr, err := cmd.StderrPipe() if err != nil { return @@ -55,7 +57,8 @@ func (inst *GccgoInstallation) InitFromDriver(gccgoPath string) (err error) { } } - stdout, err := exec.Command(gccgoPath, "-dumpversion").Output() + argv = append([]string{"-dumpversion"}, args...) 
+ stdout, err := exec.Command(gccgoPath, argv...).Output() if err != nil { return } diff --git a/src/go/internal/gccgoimporter/gccgoinstallation_test.go b/src/go/internal/gccgoimporter/gccgoinstallation_test.go index da4931ef1e722..b332babc7b685 100644 --- a/src/go/internal/gccgoimporter/gccgoinstallation_test.go +++ b/src/go/internal/gccgoimporter/gccgoinstallation_test.go @@ -6,10 +6,14 @@ package gccgoimporter import ( "go/types" - "runtime" "testing" ) +// importablePackages is a list of packages that we verify that we can +// import. This should be all standard library packages in all relevant +// versions of gccgo. Note that since gccgo follows a different release +// cycle, and since different systems have different versions installed, +// we can't use the last-two-versions rule of the gc toolchain. var importablePackages = [...]string{ "archive/tar", "archive/zip", @@ -56,7 +60,7 @@ var importablePackages = [...]string{ "encoding/binary", "encoding/csv", "encoding/gob", - "encoding", + // "encoding", // Added in GCC 4.9. "encoding/hex", "encoding/json", "encoding/pem", @@ -68,7 +72,7 @@ var importablePackages = [...]string{ "go/ast", "go/build", "go/doc", - "go/format", + // "go/format", // Added in GCC 4.8. "go/parser", "go/printer", "go/scanner", @@ -81,7 +85,7 @@ var importablePackages = [...]string{ "html", "html/template", "image/color", - "image/color/palette", + // "image/color/palette", // Added in GCC 4.9. "image/draw", "image/gif", "image", @@ -100,7 +104,7 @@ var importablePackages = [...]string{ "mime/multipart", "net", "net/http/cgi", - "net/http/cookiejar", + // "net/http/cookiejar", // Added in GCC 4.8. "net/http/fcgi", "net/http", "net/http/httptest", @@ -144,14 +148,14 @@ var importablePackages = [...]string{ } func TestInstallationImporter(t *testing.T) { - // This test relies on gccgo being around, which it most likely will be if we - // were compiled with gccgo. 
- if runtime.Compiler != "gccgo" { + // This test relies on gccgo being around. + gpath := gccgoPath() + if gpath == "" { t.Skip("This test needs gccgo") } var inst GccgoInstallation - err := inst.InitFromDriver("gccgo") + err := inst.InitFromDriver(gpath) if err != nil { t.Fatal(err) } @@ -176,12 +180,12 @@ func TestInstallationImporter(t *testing.T) { // Test for certain specific entities in the imported data. for _, test := range [...]importerTest{ - {pkgpath: "io", name: "Reader", want: "type Reader interface{Read(p []uint8) (n int, err error)}"}, + {pkgpath: "io", name: "Reader", want: "type Reader interface{Read(p []byte) (n int, err error)}"}, {pkgpath: "io", name: "ReadWriter", want: "type ReadWriter interface{Reader; Writer}"}, {pkgpath: "math", name: "Pi", want: "const Pi untyped float"}, {pkgpath: "math", name: "Sin", want: "func Sin(x float64) float64"}, {pkgpath: "sort", name: "Ints", want: "func Ints(a []int)"}, - {pkgpath: "unsafe", name: "Pointer", want: "type Pointer unsafe.Pointer"}, + {pkgpath: "unsafe", name: "Pointer", want: "type Pointer"}, } { runImporterTest(t, imp, nil, &test) } diff --git a/src/go/internal/gccgoimporter/importer.go b/src/go/internal/gccgoimporter/importer.go index 159cc50719f50..6856611026f32 100644 --- a/src/go/internal/gccgoimporter/importer.go +++ b/src/go/internal/gccgoimporter/importer.go @@ -6,9 +6,11 @@ package gccgoimporter // import "go/internal/gccgoimporter" import ( + "bytes" "debug/elf" "fmt" "go/types" + "internal/xcoff" "io" "os" "path/filepath" @@ -62,8 +64,10 @@ func findExportFile(searchpaths []string, pkgpath string) (string, error) { const ( gccgov1Magic = "v1;\n" gccgov2Magic = "v2;\n" + gccgov3Magic = "v3;\n" goimporterMagic = "\n$$ " archiveMagic = "! 
package object - typeMap map[int]types.Type // type number -> type + typeList []types.Type // type number -> type + typeData []string // unparsed type data (v3 and later) + fixups []fixupRecord // fixups to apply at end of parsing initdata InitData // package init priority data } +// When reading export data it's possible to encounter a defined type +// N1 with an underlying defined type N2 while we are still reading in +// that defined type N2; see issues #29006 and #29198 for instances +// of this. Example: +// +// type N1 N2 +// type N2 struct { +// ... +// p *N1 +// } +// +// To handle such cases, the parser generates a fixup record (below) and +// delays setting of N1's underlying type until parsing is complete, at +// which point fixups are applied. + +type fixupRecord struct { + toUpdate *types.Named // type to modify when fixup is processed + target types.Type // type that was incomplete when fixup was created +} + func (p *parser) init(filename string, src io.Reader, imports map[string]*types.Package) { + p.scanner = new(scanner.Scanner) + p.initScanner(filename, src) + p.imports = imports + p.typeList = make([]types.Type, 1 /* type numbers start at 1 */, 16) +} + +func (p *parser) initScanner(filename string, src io.Reader) { p.scanner.Init(src) p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } - p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments - p.scanner.Whitespace = 1<<'\t' | 1<<'\n' | 1<<' ' + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings + p.scanner.Whitespace = 1<<'\t' | 1<<' ' p.scanner.Filename = filename // for good error messages p.next() - p.imports = imports - p.typeMap = make(map[int]types.Type) } type importError struct { @@ -71,6 +99,13 @@ func (p *parser) expect(tok rune) string { return lit } +func (p *parser) expectEOL() { + if p.version == "v1" || p.version == "v2" { + 
p.expect(';') + } + p.expect('\n') +} + func (p *parser) expectKeyword(keyword string) { lit := p.expect(scanner.Ident) if lit != keyword { @@ -96,7 +131,7 @@ func (p *parser) parseUnquotedString() string { buf.WriteString(p.scanner.TokenText()) // This loop needs to examine each character before deciding whether to consume it. If we see a semicolon, // we need to let it be consumed by p.next(). - for ch := p.scanner.Peek(); ch != ';' && ch != scanner.EOF && p.scanner.Whitespace&(1<" . -func (p *parser) parseType(pkg *types.Package) (t types.Type) { +// Type = "<" "type" ( "-" int | int [ TypeSpec ] ) ">" . +// +// parseType updates the type map to t for all type numbers n. +// +func (p *parser) parseType(pkg *types.Package, n ...int) types.Type { p.expect('<') + return p.parseTypeAfterAngle(pkg, n...) +} + +// (*parser).Type after reading the "<". +func (p *parser) parseTypeAfterAngle(pkg *types.Package, n ...int) (t types.Type) { p.expectKeyword("type") switch p.tok { case scanner.Int: - n := p.parseInt() - + n1 := p.parseInt() if p.tok == '>' { - t = p.typeMap[int(n)] + if len(p.typeData) > 0 && p.typeList[n1] == nil { + p.parseSavedType(pkg, n1, n) + } + t = p.typeList[n1] + if len(p.typeData) == 0 && t == reserved { + p.errorf("invalid type cycle, type %d not yet defined (nlist=%v)", n1, n) + } + p.update(t, n) } else { - t = p.parseTypeDefinition(pkg, int(n)) + p.reserve(n1) + t = p.parseTypeSpec(pkg, append(n, n1)) } case '-': p.next() - n := p.parseInt() - t = lookupBuiltinType(int(n)) + n1 := p.parseInt() + t = lookupBuiltinType(n1) + p.update(t, n) default: p.errorf("expected type number, got %s (%q)", scanner.TokenString(p.tok), p.lit) return nil } + if t == nil || t == reserved { + p.errorf("internal error: bad return from parseType(%v)", n) + } + p.expect('>') return } +// InlineBody = "" .{NN} +// Reports whether a body was skipped. 
+func (p *parser) skipInlineBody() { + // We may or may not have seen the '<' already, depending on + // whether the function had a result type or not. + if p.tok == '<' { + p.next() + p.expectKeyword("inl") + } else if p.tok != scanner.Ident || p.lit != "inl" { + return + } else { + p.next() + } + + p.expect(':') + want := p.parseInt() + p.expect('>') + + defer func(w uint64) { + p.scanner.Whitespace = w + }(p.scanner.Whitespace) + p.scanner.Whitespace = 0 + + got := 0 + for got < want { + r := p.scanner.Next() + if r == scanner.EOF { + p.error("unexpected EOF") + } + got += utf8.RuneLen(r) + } +} + +// Types = "types" maxp1 exportedp1 (offset length)* . +func (p *parser) parseTypes(pkg *types.Package) { + maxp1 := p.parseInt() + exportedp1 := p.parseInt() + p.typeList = make([]types.Type, maxp1, maxp1) + + type typeOffset struct { + offset int + length int + } + var typeOffsets []typeOffset + + total := 0 + for i := 1; i < maxp1; i++ { + len := p.parseInt() + typeOffsets = append(typeOffsets, typeOffset{total, len}) + total += len + } + + defer func(w uint64) { + p.scanner.Whitespace = w + }(p.scanner.Whitespace) + p.scanner.Whitespace = 0 + + // We should now have p.tok pointing to the final newline. + // The next runes from the scanner should be the type data. + + var sb strings.Builder + for sb.Len() < total { + r := p.scanner.Next() + if r == scanner.EOF { + p.error("unexpected EOF") + } + sb.WriteRune(r) + } + allTypeData := sb.String() + + p.typeData = []string{""} // type 0, unused + for _, to := range typeOffsets { + p.typeData = append(p.typeData, allTypeData[to.offset:to.offset+to.length]) + } + + for i := 1; i < int(exportedp1); i++ { + p.parseSavedType(pkg, i, []int{}) + } +} + +// parseSavedType parses one saved type definition. 
+func (p *parser) parseSavedType(pkg *types.Package, i int, nlist []int) { + defer func(s *scanner.Scanner, tok rune, lit string) { + p.scanner = s + p.tok = tok + p.lit = lit + }(p.scanner, p.tok, p.lit) + + p.scanner = new(scanner.Scanner) + p.initScanner(p.scanner.Filename, strings.NewReader(p.typeData[i])) + p.expectKeyword("type") + id := p.parseInt() + if id != i { + p.errorf("type ID mismatch: got %d, want %d", id, i) + } + if p.typeList[i] == reserved { + p.errorf("internal error: %d already reserved in parseSavedType", i) + } + if p.typeList[i] == nil { + p.reserve(i) + p.parseTypeSpec(pkg, append(nlist, i)) + } + if p.typeList[i] == nil || p.typeList[i] == reserved { + p.errorf("internal error: parseSavedType(%d,%v) reserved/nil", i, nlist) + } +} + // PackageInit = unquotedString unquotedString int . func (p *parser) parsePackageInit() PackageInit { name := p.parseUnquotedString() initfunc := p.parseUnquotedString() priority := -1 if p.version == "v1" { - priority = int(p.parseInt()) + priority = p.parseInt() } return PackageInit{Name: name, InitFunc: initfunc, Priority: priority} } @@ -744,7 +1041,7 @@ func (p *parser) parsePackageInit() PackageInit { func (p *parser) discardDirectiveWhileParsingTypes(pkg *types.Package) { for { switch p.tok { - case ';': + case '\n', ';': return case '<': p.parseType(pkg) @@ -763,7 +1060,7 @@ func (p *parser) maybeCreatePackage() { } } -// InitDataDirective = ( "v1" | "v2" ) ";" | +// InitDataDirective = ( "v1" | "v2" | "v3" ) ";" | // "priority" int ";" | // "init" { PackageInit } ";" | // "checksum" unquotedString ";" . 
@@ -774,31 +1071,32 @@ func (p *parser) parseInitDataDirective() { } switch p.lit { - case "v1", "v2": + case "v1", "v2", "v3": p.version = p.lit p.next() p.expect(';') + p.expect('\n') case "priority": p.next() - p.initdata.Priority = int(p.parseInt()) - p.expect(';') + p.initdata.Priority = p.parseInt() + p.expectEOL() case "init": p.next() - for p.tok != ';' && p.tok != scanner.EOF { + for p.tok != '\n' && p.tok != ';' && p.tok != scanner.EOF { p.initdata.Inits = append(p.initdata.Inits, p.parsePackageInit()) } - p.expect(';') + p.expectEOL() case "init_graph": p.next() // The graph data is thrown away for now. - for p.tok != ';' && p.tok != scanner.EOF { - p.parseInt() - p.parseInt() + for p.tok != '\n' && p.tok != ';' && p.tok != scanner.EOF { + p.parseInt64() + p.parseInt64() } - p.expect(';') + p.expectEOL() case "checksum": // Don't let the scanner try to parse the checksum as a number. @@ -808,7 +1106,7 @@ func (p *parser) parseInitDataDirective() { p.scanner.Mode &^= scanner.ScanInts | scanner.ScanFloats p.next() p.parseUnquotedString() - p.expect(';') + p.expectEOL() default: p.errorf("unexpected identifier: %q", p.lit) @@ -820,6 +1118,7 @@ func (p *parser) parseInitDataDirective() { // "pkgpath" unquotedString ";" | // "prefix" unquotedString ";" | // "import" unquotedString unquotedString string ";" | +// "indirectimport" unquotedString unquotedstring ";" | // "func" Func ";" | // "type" Type ";" | // "var" Var ";" | @@ -831,29 +1130,29 @@ func (p *parser) parseDirective() { } switch p.lit { - case "v1", "v2", "priority", "init", "init_graph", "checksum": + case "v1", "v2", "v3", "priority", "init", "init_graph", "checksum": p.parseInitDataDirective() case "package": p.next() p.pkgname = p.parseUnquotedString() p.maybeCreatePackage() - if p.version == "v2" && p.tok != ';' { + if p.version != "v1" && p.tok != '\n' && p.tok != ';' { p.parseUnquotedString() p.parseUnquotedString() } - p.expect(';') + p.expectEOL() case "pkgpath": p.next() p.pkgpath = 
p.parseUnquotedString() p.maybeCreatePackage() - p.expect(';') + p.expectEOL() case "prefix": p.next() p.pkgpath = p.parseUnquotedString() - p.expect(';') + p.expectEOL() case "import": p.next() @@ -861,7 +1160,19 @@ func (p *parser) parseDirective() { pkgpath := p.parseUnquotedString() p.getPkg(pkgpath, pkgname) p.parseString() - p.expect(';') + p.expectEOL() + + case "indirectimport": + p.next() + pkgname := p.parseUnquotedString() + pkgpath := p.parseUnquotedString() + p.getPkg(pkgpath, pkgname) + p.expectEOL() + + case "types": + p.next() + p.parseTypes(p.pkg) + p.expectEOL() case "func": p.next() @@ -869,24 +1180,24 @@ func (p *parser) parseDirective() { if fun != nil { p.pkg.Scope().Insert(fun) } - p.expect(';') + p.expectEOL() case "type": p.next() p.parseType(p.pkg) - p.expect(';') + p.expectEOL() case "var": p.next() v := p.parseVar(p.pkg) p.pkg.Scope().Insert(v) - p.expect(';') + p.expectEOL() case "const": p.next() c := p.parseConst(p.pkg) p.pkg.Scope().Insert(c) - p.expect(';') + p.expectEOL() default: p.errorf("unexpected identifier: %q", p.lit) @@ -898,7 +1209,14 @@ func (p *parser) parsePackage() *types.Package { for p.tok != scanner.EOF { p.parseDirective() } - for _, typ := range p.typeMap { + for _, f := range p.fixups { + if f.target.Underlying() == nil { + p.errorf("internal error: fixup can't be applied, loop required") + } + f.toUpdate.SetUnderlying(f.target.Underlying()) + } + p.fixups = nil + for _, typ := range p.typeList { if it, ok := typ.(*types.Interface); ok { it.Complete() } diff --git a/src/go/internal/gccgoimporter/parser_test.go b/src/go/internal/gccgoimporter/parser_test.go index 4a103dc462af8..00128b44d2687 100644 --- a/src/go/internal/gccgoimporter/parser_test.go +++ b/src/go/internal/gccgoimporter/parser_test.go @@ -19,7 +19,7 @@ var typeParserTests = []struct { {id: "foo", typ: ">", want: "*error"}, {id: "foo", typ: "", want: "unsafe.Pointer"}, {id: "foo", typ: ">>", want: "foo.Bar", underlying: "*foo.Bar"}, - {id: "foo", typ: 
" func (? ) M (); >", want: "bar.Foo", underlying: "int8", methods: "func (bar.Foo).M()"}, + {id: "foo", typ: "\nfunc (? ) M ();\n>", want: "bar.Foo", underlying: "int8", methods: "func (bar.Foo).M()"}, {id: "foo", typ: ">", want: "bar.foo", underlying: "int8"}, {id: "foo", typ: ">", want: "[]int8"}, {id: "foo", typ: ">", want: "[42]int8"}, @@ -36,6 +36,7 @@ func TestTypeParser(t *testing.T) { for _, test := range typeParserTests { var p parser p.init("test.gox", strings.NewReader(test.typ), make(map[string]*types.Package)) + p.version = "v2" p.pkgname = test.id p.pkgpath = test.id p.maybeCreatePackage() diff --git a/src/go/internal/gccgoimporter/testdata/alias.gox b/src/go/internal/gccgoimporter/testdata/alias.gox deleted file mode 100644 index ced7d84c4f615..0000000000000 --- a/src/go/internal/gccgoimporter/testdata/alias.gox +++ /dev/null @@ -1,4 +0,0 @@ -v1; -package alias; -pkgpath alias; -type >>>) < type 114>; M2 () ; }>>; diff --git a/src/go/internal/gccgoimporter/testdata/aliases.go b/src/go/internal/gccgoimporter/testdata/aliases.go new file mode 100644 index 0000000000000..cfb59b3e315ba --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/aliases.go @@ -0,0 +1,65 @@ +package aliases + +type ( + T0 [10]int + T1 []byte + T2 struct { + x int + } + T3 interface { + m() T2 + } + T4 func(int, T0) chan T2 +) + +// basic aliases +type ( + Ai = int + A0 = T0 + A1 = T1 + A2 = T2 + A3 = T3 + A4 = T4 + + A10 = [10]int + A11 = []byte + A12 = struct { + x int + } + A13 = interface { + m() A2 + } + A14 = func(int, A0) chan A2 +) + +// alias receiver types +func (T0) m1() {} +func (A0) m2() {} + +// alias receiver types (long type declaration chains) +type ( + V0 = V1 + V1 = (V2) + V2 = (V3) + V3 = T0 +) + +func (V1) n() {} + +// cycles +type C0 struct { + f1 C1 + f2 C2 +} + +type ( + C1 *C0 + C2 = C1 +) + +type ( + C5 struct { + f *C6 + } + C6 = C5 +) diff --git a/src/go/internal/gccgoimporter/testdata/aliases.gox 
b/src/go/internal/gccgoimporter/testdata/aliases.gox new file mode 100644 index 0000000000000..2428c06874368 --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/aliases.gox @@ -0,0 +1,33 @@ +v2; +package aliases; +prefix go; +package aliases go.aliases go.aliases; +type > + func (? ) .go.aliases.m1 (); + func (? ) .go.aliases.m2 (); + func (? >>>) .go.aliases.n (); +>>; +type >>>; +type >>; +type >>; +type ; }>>; +type ; }>>>; }>>; +type , ? ) >>>; +type ; +type ; }>>>; +type , ? ) >>>>; +type >; +type >>; .go.aliases.f2 >; }>>; +type ; +type ; +type >>; }>>; +type ; +type ; +type ; +type ; +type ; +type ; +type >; +type ; +type ; +type ; diff --git a/src/go/internal/gccgoimporter/testdata/issue27856.go b/src/go/internal/gccgoimporter/testdata/issue27856.go new file mode 100644 index 0000000000000..bf361e1cd803b --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/issue27856.go @@ -0,0 +1,9 @@ +package lib + +type M struct { + E E +} +type F struct { + _ *M +} +type E = F diff --git a/src/go/internal/gccgoimporter/testdata/issue27856.gox b/src/go/internal/gccgoimporter/testdata/issue27856.gox new file mode 100644 index 0000000000000..6665e64021b95 --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/issue27856.gox @@ -0,0 +1,9 @@ +v2; +package main; +pkgpath main; +import runtime runtime "runtime"; +init runtime runtime..import sys runtime_internal_sys..import; +init_graph 0 1; +type ; }>>>; }>>>; +type ; +type ; diff --git a/src/go/internal/gccgoimporter/testdata/issue29198.go b/src/go/internal/gccgoimporter/testdata/issue29198.go new file mode 100644 index 0000000000000..75c2162d20454 --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/issue29198.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package server + +import ( + "context" + "errors" +) + +type A struct { + x int +} + +func (a *A) AMethod(y int) *Server { + return nil +} + +// FooServer is a server that provides Foo services +type FooServer Server + +func (f *FooServer) WriteEvents(ctx context.Context, x int) error { + return errors.New("hey!") +} + +type Server struct { + FooServer *FooServer + user string + ctx context.Context +} + +func New(sctx context.Context, u string) (*Server, error) { + s := &Server{user: u, ctx: sctx} + s.FooServer = (*FooServer)(s) + return s, nil +} diff --git a/src/go/internal/gccgoimporter/testdata/issue29198.gox b/src/go/internal/gccgoimporter/testdata/issue29198.gox new file mode 100644 index 0000000000000..905c86637ebf3 --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/issue29198.gox @@ -0,0 +1,86 @@ +v2; +package server; +pkgpath issue29198; +import context context "context"; +import errors errors "errors"; +init context context..import fmt fmt..import poll internal_poll..import testlog internal_testlog..import io io..import os os..import reflect reflect..import runtime runtime..import sys runtime_internal_sys..import strconv strconv..import sync sync..import syscall syscall..import time time..import unicode unicode..import; +init_graph 0 1 0 2 0 3 0 4 0 5 0 6 0 7 0 8 0 9 0 10 0 11 0 12 0 13 1 2 1 3 1 4 1 5 1 6 1 7 1 8 1 9 1 10 1 11 1 12 1 13 2 4 2 7 2 8 2 10 2 11 2 12 4 7 4 8 4 10 5 2 5 3 5 4 5 7 5 8 5 10 5 11 5 12 6 7 6 8 6 9 6 10 6 13 7 8 9 7 9 8 10 7 10 8 11 7 11 8 11 10 12 7 12 8 12 10 12 11; +type ; }> + func (a >) AMethod (y ) + func (f >) WriteEvents (ctx ; .time.ext ; .time.loc ; .time.zone ; .time.offset ; .time.isDST ; }>>>; .time.tx ; .time.index ; .time.isstd ; .time.isutc ; }>>>; .time.cacheStart ; .time.cacheEnd ; .time.cacheZone >; }> + func (l >) String () ; + func (l ) .time.lookupFirstZone () ; + func (l ) .time.get () ; + func (l ) .time.lookup (sec ) (name , offset , isDST , start , end ); + func (l ) .time.lookupName (name , 
unix ) (offset , ok ); + func (l ) .time.firstZoneUsed () ; +>>; }> + func (t ) In (loc ) ; + func (t ) .time.date (full ) (year , month + func (m ) String () ; +>, day , yday ); + func (t ) Sub (u ) + func (d ) Truncate (m ) ; + func (d ) String () ; + func (d ) Round (m ) ; + func (d ) Seconds () ; + func (d ) Nanoseconds () ; + func (d ) Minutes () ; + func (d ) Hours () ; +>; + func (t ) Add (d ) ; + func (t ) UTC () ; + func (t ) AddDate (years , months , days ) ; + func (t ) MarshalBinary () (? >, ? ); + func (t ) Nanosecond () ; + func (t ) Round (d ) ; + func (t ) Minute () ; + func (t ) Clock () (hour , min , sec ); + func (t ) ISOWeek () (year , week ); + func (t ) Day () ; + func (t >) .time.mono () ; + func (t ) UnixNano () ; + func (t ) .time.sec () ; + func (t ) Second () ; + func (t ) Before (u ) ; + func (t ) UnmarshalBinary (data >) ; + func (t ) Month () ; + func (t ) YearDay () ; + func (t ) Location () ; + func (t ) Zone () (name , offset ); + func (t ) Local () ; + func (t ) .time.setLoc (loc ); + func (t ) Truncate (d ) ; + func (t ) MarshalJSON () (? >, ? ); + func (t ) AppendFormat (b >, layout ) >; + func (t ) GobDecode (data >) ; + func (t ) UnmarshalJSON (data >) ; + func (t ) MarshalText () (? >, ? ); + func (t ) GobEncode () (? >, ? ); + func (t ) .time.stripMono (); + func (t ) After (u ) ; + func (t ) Hour () ; + func (t ) UnmarshalText (data >) ; + func (t ) Equal (u ) ; + func (t ) .time.setMono (m ); + func (t ) Year () ; + func (t ) IsZero () ; + func (t ) .time.addSec (d ); + func (t ) Weekday () + func (d ) String () ; +>; + func (t ) String () ; + func (t ) .time.nsec () ; + func (t ) Format (layout ) ; + func (t ) .time.unixSec () ; + func (t ) Unix () ; + func (t ) .time.abs () ; + func (t ) .time.locabs () (name , offset , abs ); + func (t ) Date () (year , month , day ); +>, ok ); Done () >; Err () ; Value (key ) ; }>>, x ) ; +>>; .issue29198.user ; .issue29198.ctx ; }>>>; +>; +type ; +func New (sctx , u ) (? >, ? 
); +type ; +checksum 86C8D76B2582F55A8BD2CA9E00060358EC1CE214; diff --git a/src/go/internal/gccgoimporter/testdata/nointerface.go b/src/go/internal/gccgoimporter/testdata/nointerface.go new file mode 100644 index 0000000000000..6a545f24933fd --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/nointerface.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nointerface + +type I int + +//go:nointerface +func (p *I) Get() int { return int(*p) } + +func (p *I) Set(v int) { *p = I(v) } diff --git a/src/go/internal/gccgoimporter/testdata/nointerface.gox b/src/go/internal/gccgoimporter/testdata/nointerface.gox new file mode 100644 index 0000000000000..7b73d179e393c --- /dev/null +++ b/src/go/internal/gccgoimporter/testdata/nointerface.gox @@ -0,0 +1,8 @@ +v3; +package nointerface +pkgpath nointerface +types 3 2 133 17 +type 1 "I" + func /*nointerface*/ (p ) Get () + func (p ) Set (v ) +type 2 * diff --git a/src/go/internal/gccgoimporter/testdata/v1reflect.gox b/src/go/internal/gccgoimporter/testdata/v1reflect.gox new file mode 100644 index 0000000000000..ea468414d9fa8 Binary files /dev/null and b/src/go/internal/gccgoimporter/testdata/v1reflect.gox differ diff --git a/src/go/internal/gcimporter/gcimporter.go b/src/go/internal/gcimporter/gcimporter.go index d117f6fe4d3f4..3aed6de6ae0b3 100644 --- a/src/go/internal/gcimporter/gcimporter.go +++ b/src/go/internal/gcimporter/gcimporter.go @@ -85,7 +85,7 @@ func FindPkg(path, srcDir string) (filename, id string) { // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. 
// -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var id string if lookup != nil { @@ -152,10 +152,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func break } - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older // binary export format starts with a 'c', 'd', or 'v' // (from "version"). Select appropriate importer. diff --git a/src/go/internal/gcimporter/gcimporter_test.go b/src/go/internal/gcimporter/gcimporter_test.go index d496f2e57d384..3b7636806e7e5 100644 --- a/src/go/internal/gcimporter/gcimporter_test.go +++ b/src/go/internal/gcimporter/gcimporter_test.go @@ -17,6 +17,7 @@ import ( "testing" "time" + "go/token" "go/types" ) @@ -34,21 +35,29 @@ func skipSpecialPlatforms(t *testing.T) { } } -func compile(t *testing.T, dirname, filename string) string { - cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", filename) +// compile runs the compiler on filename, with dirname as the working directory, +// and writes the output file to outdirname. 
+func compile(t *testing.T, dirname, filename, outdirname string) string { + // filename must end with ".go" + if !strings.HasSuffix(filename, ".go") { + t.Fatalf("filename doesn't end in .go: %s", filename) + } + basename := filepath.Base(filename) + outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o") + cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", outname, filename) cmd.Dir = dirname out, err := cmd.CombinedOutput() if err != nil { t.Logf("%s", out) t.Fatalf("go tool compile %s failed: %s", filename, err) } - // filename should end with ".go" - return filepath.Join(dirname, filename[:len(filename)-2]+"o") + return outname } func testPath(t *testing.T, path, srcDir string) *types.Package { t0 := time.Now() - pkg, err := Import(make(map[string]*types.Package), path, srcDir, nil) + fset := token.NewFileSet() + pkg, err := Import(fset, make(map[string]*types.Package), path, srcDir, nil) if err != nil { t.Errorf("testPath(%s): %s", path, err) return nil @@ -88,17 +97,30 @@ func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) { return } +func mktmpdir(t *testing.T) string { + tmpdir, err := ioutil.TempDir("", "gcimporter_test") + if err != nil { + t.Fatal("mktmpdir:", err) + } + if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil { + os.RemoveAll(tmpdir) + t.Fatal("mktmpdir:", err) + } + return tmpdir +} + func TestImportTestdata(t *testing.T) { // This package only handles gc export data. 
if runtime.Compiler != "gc" { t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) } - if outFn := compile(t, "testdata", "exports.go"); outFn != "" { - defer os.Remove(outFn) - } + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) - if pkg := testPath(t, "./testdata/exports", "."); pkg != nil { + compile(t, "testdata", "exports.go", filepath.Join(tmpdir, "testdata")) + + if pkg := testPath(t, "./testdata/exports", tmpdir); pkg != nil { // The package's Imports list must include all packages // explicitly imported by exports.go, plus all packages // referenced indirectly via exported objects in exports.go. @@ -131,6 +153,15 @@ func TestVersionHandling(t *testing.T) { t.Fatal(err) } + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + corruptdir := filepath.Join(tmpdir, "testdata", "versions") + if err := os.Mkdir(corruptdir, 0700); err != nil { + t.Fatal(err) + } + + fset := token.NewFileSet() + for _, f := range list { name := f.Name() if !strings.HasSuffix(name, ".a") { @@ -146,7 +177,7 @@ func TestVersionHandling(t *testing.T) { } // test that export data can be imported - _, err := Import(make(map[string]*types.Package), pkgpath, dir, nil) + _, err := Import(fset, make(map[string]*types.Package), pkgpath, dir, nil) if err != nil { // ok to fail if it fails with a newer version error for select files if strings.Contains(err.Error(), "newer version") { @@ -178,12 +209,11 @@ func TestVersionHandling(t *testing.T) { } // 4) write the file pkgpath += "_corrupted" - filename := filepath.Join(dir, pkgpath) + ".a" + filename := filepath.Join(corruptdir, pkgpath) + ".a" ioutil.WriteFile(filename, data, 0666) - defer os.Remove(filename) // test that importing the corrupted file results in an error - _, err = Import(make(map[string]*types.Package), pkgpath, dir, nil) + _, err = Import(fset, make(map[string]*types.Package), pkgpath, corruptdir, nil) if err == nil { t.Errorf("import corrupted %q succeeded", pkgpath) } else if msg := err.Error(); 
!strings.Contains(msg, "version skew") { @@ -240,6 +270,7 @@ func TestImportedTypes(t *testing.T) { t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) } + fset := token.NewFileSet() for _, test := range importedObjectTests { s := strings.Split(test.name, ".") if len(s) != 2 { @@ -248,7 +279,7 @@ func TestImportedTypes(t *testing.T) { importPath := s[0] objName := s[1] - pkg, err := Import(make(map[string]*types.Package), importPath, ".", nil) + pkg, err := Import(fset, make(map[string]*types.Package), importPath, ".", nil) if err != nil { t.Error(err) continue @@ -315,7 +346,7 @@ func TestIssue5815(t *testing.T) { t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler) } - pkg := importPkg(t, "strings") + pkg := importPkg(t, "strings", ".") scope := pkg.Scope() for _, name := range scope.Names() { @@ -345,7 +376,8 @@ func TestCorrectMethodPackage(t *testing.T) { } imports := make(map[string]*types.Package) - _, err := Import(imports, "net/http", ".", nil) + fset := token.NewFileSet() + _, err := Import(fset, imports, "net/http", ".", nil) if err != nil { t.Fatal(err) } @@ -373,15 +405,22 @@ func TestIssue13566(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "a.go"); f != "" { - defer os.Remove(f) - } - if f := compile(t, "testdata", "b.go"); f != "" { - defer os.Remove(f) + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + testoutdir := filepath.Join(tmpdir, "testdata") + + // b.go needs to be compiled from the output directory so that the compiler can + // find the compiled package a. We pass the full path to compile() so that we + // don't have to copy the file to that directory. 
+ bpath, err := filepath.Abs(filepath.Join("testdata", "b.go")) + if err != nil { + t.Fatal(err) } + compile(t, "testdata", "a.go", testoutdir) + compile(t, testoutdir, bpath, testoutdir) // import must succeed (test for issue at hand) - pkg := importPkg(t, "./testdata/b") + pkg := importPkg(t, "./testdata/b", tmpdir) // make sure all indirectly imported packages have names for _, imp := range pkg.Imports() { @@ -400,8 +439,9 @@ func TestIssue13898(t *testing.T) { } // import go/internal/gcimporter which imports go/types partially + fset := token.NewFileSet() imports := make(map[string]*types.Package) - _, err := Import(imports, "go/internal/gcimporter", ".", nil) + _, err := Import(fset, imports, "go/internal/gcimporter", ".", nil) if err != nil { t.Fatal(err) } @@ -451,9 +491,10 @@ func TestIssue15517(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "p.go"); f != "" { - defer os.Remove(f) - } + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + + compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata")) // Multiple imports of p must succeed without redeclaration errors. // We use an import path that's not cleaned up so that the eventual @@ -468,8 +509,9 @@ func TestIssue15517(t *testing.T) { // file and package path are different, exposing the problem if present. // The same issue occurs with vendoring.) 
imports := make(map[string]*types.Package) + fset := token.NewFileSet() for i := 0; i < 3; i++ { - if _, err := Import(imports, "./././testdata/p", ".", nil); err != nil { + if _, err := Import(fset, imports, "./././testdata/p", tmpdir, nil); err != nil { t.Fatal(err) } } @@ -489,11 +531,7 @@ func TestIssue15920(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "issue15920.go"); f != "" { - defer os.Remove(f) - } - - importPkg(t, "./testdata/issue15920") + compileAndImportPkg(t, "issue15920") } func TestIssue20046(t *testing.T) { @@ -510,12 +548,8 @@ func TestIssue20046(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "issue20046.go"); f != "" { - defer os.Remove(f) - } - // "./issue20046".V.M must exist - pkg := importPkg(t, "./testdata/issue20046") + pkg := compileAndImportPkg(t, "issue20046") obj := lookupObj(t, pkg.Scope(), "V") if m, index, indirect := types.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil { t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect) @@ -535,11 +569,7 @@ func TestIssue25301(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "issue25301.go"); f != "" { - defer os.Remove(f) - } - - importPkg(t, "./testdata/issue25301") + compileAndImportPkg(t, "issue25301") } func TestIssue25596(t *testing.T) { @@ -556,21 +586,25 @@ func TestIssue25596(t *testing.T) { t.Skip("avoid dealing with relative paths/drive letters on windows") } - if f := compile(t, "testdata", "issue25596.go"); f != "" { - defer os.Remove(f) - } - - importPkg(t, "./testdata/issue25596") + compileAndImportPkg(t, "issue25596") } -func importPkg(t *testing.T, path string) *types.Package { - pkg, err := Import(make(map[string]*types.Package), path, ".", nil) +func importPkg(t *testing.T, path, srcDir string) *types.Package { + fset := 
token.NewFileSet() + pkg, err := Import(fset, make(map[string]*types.Package), path, srcDir, nil) if err != nil { t.Fatal(err) } return pkg } +func compileAndImportPkg(t *testing.T, name string) *types.Package { + tmpdir := mktmpdir(t) + defer os.RemoveAll(tmpdir) + compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata")) + return importPkg(t, "./testdata/"+name, tmpdir) +} + func lookupObj(t *testing.T, scope *types.Scope, name string) types.Object { if obj := scope.Lookup(name); obj != nil { return obj diff --git a/src/go/internal/srcimporter/srcimporter_test.go b/src/go/internal/srcimporter/srcimporter_test.go index b9caa90fc5888..b84672610c6f8 100644 --- a/src/go/internal/srcimporter/srcimporter_test.go +++ b/src/go/internal/srcimporter/srcimporter_test.go @@ -99,7 +99,7 @@ var importedObjectTests = []struct { {"math.Pi", "const Pi untyped float"}, {"math.Sin", "func Sin(x float64) float64"}, {"math/big.Int", "type Int struct{neg bool; abs nat}"}, - {"golang_org/x/text/unicode/norm.MaxSegmentSize", "const MaxSegmentSize untyped int"}, + {"internal/x/text/unicode/norm.MaxSegmentSize", "const MaxSegmentSize untyped int"}, } func TestImportedTypes(t *testing.T) { diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index 189bfb42236ec..ba16b652246f1 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -300,7 +300,7 @@ func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline // Advance to the next non-comment token. In the process, collect // any comment groups encountered, and remember the last lead and -// and line comments. +// line comments. 
// // A lead comment is a comment group that starts and ends in a // line without any other tokens and that is followed by a non-comment diff --git a/src/go/printer/example_test.go b/src/go/printer/example_test.go index e570040ba1f6d..30816931a8b2f 100644 --- a/src/go/printer/example_test.go +++ b/src/go/printer/example_test.go @@ -48,7 +48,7 @@ func ExampleFprint() { // and trim leading and trailing white space. s := buf.String() s = s[1 : len(s)-1] - s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1)) + s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n")) // Print the cleaned-up body text to stdout. fmt.Println(s) @@ -61,7 +61,7 @@ func ExampleFprint() { // // s := buf.String() // s = s[1 : len(s)-1] - // s = strings.TrimSpace(strings.Replace(s, "\n\t", "\n", -1)) + // s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n")) // // fmt.Println(s) } diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go index 1de7cd81b2cd0..0f2029cadaa7d 100644 --- a/src/go/printer/nodes.go +++ b/src/go/printer/nodes.go @@ -976,7 +976,7 @@ func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { return false } -// selectorExpr handles an *ast.SelectorExpr node and returns whether x spans +// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans // multiple lines. func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool { p.expr1(x.X, token.HighestPrec, depth) @@ -1134,7 +1134,7 @@ func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, po // than starting at the first line break). // func (p *printer) indentList(list []ast.Expr) bool { - // Heuristic: indentList returns true if there are more than one multi- + // Heuristic: indentList reports whether there are more than one multi- // line element in the list, or if there is any element that is not // starting on the same line as the previous one ends. 
if len(list) >= 2 { @@ -1537,7 +1537,7 @@ func (p *printer) genDecl(d *ast.GenDecl) { p.setComment(d.Doc) p.print(d.Pos(), d.Tok, blank) - if d.Lparen.IsValid() { + if d.Lparen.IsValid() || len(d.Specs) > 1 { // group of parenthesized declarations p.print(d.Lparen, token.LPAREN) if n := len(d.Specs); n > 0 { diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go index 27d46df6b186b..91eca585c0956 100644 --- a/src/go/printer/printer_test.go +++ b/src/go/printer/printer_test.go @@ -736,3 +736,35 @@ func TestIssue11151(t *testing.T) { t.Errorf("%v\norig: %q\ngot : %q", err, src, got) } } + +// If a declaration has multiple specifications, a parenthesized +// declaration must be printed even if Lparen is token.NoPos. +func TestParenthesizedDecl(t *testing.T) { + // a package with multiple specs in a single declaration + const src = "package p; var ( a float64; b int )" + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", src, 0) + + // print the original package + var buf bytes.Buffer + err = Fprint(&buf, fset, f) + if err != nil { + t.Fatal(err) + } + original := buf.String() + + // now remove parentheses from the declaration + for i := 0; i != len(f.Decls); i++ { + f.Decls[i].(*ast.GenDecl).Lparen = token.NoPos + } + buf.Reset() + err = Fprint(&buf, fset, f) + if err != nil { + t.Fatal(err) + } + noparen := buf.String() + + if noparen != original { + t.Errorf("got %q, want %q", noparen, original) + } +} diff --git a/src/go/printer/testdata/parser.go b/src/go/printer/testdata/parser.go index 44dfa19ff350e..80b476cf2ee60 100644 --- a/src/go/printer/testdata/parser.go +++ b/src/go/printer/testdata/parser.go @@ -290,7 +290,7 @@ func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) // Advance to the next non-comment token. In the process, collect // any comment groups encountered, and remember the last lead and -// and line comments. +// line comments. 
// // A lead comment is a comment group that starts and ends in a // line without any other tokens and that is followed by a non-comment diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go index 23bbb2885fb27..e78abf12a27cf 100644 --- a/src/go/scanner/scanner.go +++ b/src/go/scanner/scanner.go @@ -85,6 +85,15 @@ func (s *Scanner) next() { } } +// peek returns the byte following the most recently read character without +// advancing the scanner. If the scanner is at EOF, peek returns 0. +func (s *Scanner) peek() byte { + if s.rdOffset < len(s.src) { + return s.src[s.rdOffset] + } + return 0 +} + // A mode value is a set of flags (or 0). // They control scanner behavior. // @@ -735,14 +744,13 @@ scanAgain: if '0' <= s.ch && s.ch <= '9' { insertSemi = true tok, lit = s.scanNumber(true) - } else if s.ch == '.' { - s.next() - if s.ch == '.' { + } else { + tok = token.PERIOD + if s.ch == '.' && s.peek() == '.' { s.next() + s.next() // consume last '.' tok = token.ELLIPSIS } - } else { - tok = token.PERIOD } case ',': tok = token.COMMA diff --git a/src/go/scanner/scanner_test.go b/src/go/scanner/scanner_test.go index 0aad3680990d9..36c962209ce1d 100644 --- a/src/go/scanner/scanner_test.go +++ b/src/go/scanner/scanner_test.go @@ -757,6 +757,7 @@ var errors = []struct { {"\a", token.ILLEGAL, 0, "", "illegal character U+0007"}, {`#`, token.ILLEGAL, 0, "", "illegal character U+0023 '#'"}, {`…`, token.ILLEGAL, 0, "", "illegal character U+2026 '…'"}, + {"..", token.PERIOD, 0, "", ""}, // two periods, not invalid token (issue #28112) {`' '`, token.CHAR, 0, `' '`, ""}, {`''`, token.CHAR, 0, `''`, "illegal rune literal"}, {`'12'`, token.CHAR, 0, `'12'`, "illegal rune literal"}, @@ -822,7 +823,7 @@ func TestScanErrors(t *testing.T) { // Verify that no comments show up as literal values when skipping comments. 
func TestIssue10213(t *testing.T) { - var src = ` + const src = ` var ( A = 1 // foo ) @@ -855,6 +856,23 @@ func TestIssue10213(t *testing.T) { } } +func TestIssue28112(t *testing.T) { + const src = "... .. 0.. .." // make sure to have stand-alone ".." immediately before EOF to test EOF behavior + tokens := []token.Token{token.ELLIPSIS, token.PERIOD, token.PERIOD, token.FLOAT, token.PERIOD, token.PERIOD, token.PERIOD, token.EOF} + var s Scanner + s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), nil, 0) + for _, want := range tokens { + pos, got, lit := s.Scan() + if got != want { + t.Errorf("%s: got %s, want %s", fset.Position(pos), got, want) + } + // literals expect to have a (non-empty) literal string and we don't care about other tokens for this test + if tokenclass(got) == literal && lit == "" { + t.Errorf("%s: for %s got empty literal string", fset.Position(pos), got) + } + } +} + func BenchmarkScan(b *testing.B) { b.StopTimer() fset := token.NewFileSet() diff --git a/src/go/token/position.go b/src/go/token/position.go index 241133fe263b3..3f5a390078d0d 100644 --- a/src/go/token/position.go +++ b/src/go/token/position.go @@ -146,7 +146,7 @@ func (f *File) AddLine(offset int) { // MergeLine will panic if given an invalid line number. // func (f *File) MergeLine(line int) { - if line <= 0 { + if line < 1 { panic("illegal line number (line numbering starts at 1)") } f.mutex.Lock() @@ -209,6 +209,21 @@ func (f *File) SetLinesForContent(content []byte) { f.mutex.Unlock() } +// LineStart returns the Pos value of the start of the specified line. +// It ignores any alternative positions set using AddLineColumnInfo. +// LineStart panics if the 1-based line number is invalid. 
+func (f *File) LineStart(line int) Pos { + if line < 1 { + panic("illegal line number (line numbering starts at 1)") + } + f.mutex.Lock() + defer f.mutex.Unlock() + if line > len(f.lines) { + panic("illegal line number") + } + return Pos(f.base + f.lines[line-1]) +} + // A lineInfo object describes alternative file, line, and column // number information (such as provided via a //line directive) // for a given file offset. diff --git a/src/go/token/position_test.go b/src/go/token/position_test.go index 63984bc872c82..7d465dffa621f 100644 --- a/src/go/token/position_test.go +++ b/src/go/token/position_test.go @@ -324,3 +324,18 @@ done checkPos(t, "3. Position", got3, want) } } + +func TestLineStart(t *testing.T) { + const src = "one\ntwo\nthree\n" + fset := NewFileSet() + f := fset.AddFile("input", -1, len(src)) + f.SetLinesForContent([]byte(src)) + + for line := 1; line <= 3; line++ { + pos := f.LineStart(line) + position := fset.Position(pos) + if position.Line != line || position.Column != 1 { + t.Errorf("LineStart(%d) returned wrong pos %d: %s", line, pos, position) + } + } +} diff --git a/src/go/types/api.go b/src/go/types/api.go index fcefddf48835c..1252aade35f9a 100644 --- a/src/go/types/api.go +++ b/src/go/types/api.go @@ -180,7 +180,7 @@ type Info struct { // // *ast.ImportSpec *PkgName for imports without renames // *ast.CaseClause type-specific *Var for each type switch case clause (incl. default) - // *ast.Field anonymous parameter *Var + // *ast.Field anonymous parameter *Var (incl. unnamed results) // Implicits map[ast.Node]Object @@ -240,7 +240,7 @@ func (info *Info) TypeOf(e ast.Expr) Type { // or nil if not found. // // If id is an embedded struct field, ObjectOf returns the field (*Var) -// it uses, not the type (*TypeName) it defines. +// it defines, not the type (*TypeName) it uses. // // Precondition: the Uses and Defs maps are populated. 
// @@ -353,20 +353,20 @@ func (conf *Config) Check(path string, fset *token.FileSet, files []*ast.File, i // AssertableTo reports whether a value of type V can be asserted to have type T. func AssertableTo(V *Interface, T Type) bool { - m, _ := assertableTo(V, T) + m, _ := (*Checker)(nil).assertableTo(V, T) return m == nil } // AssignableTo reports whether a value of type V is assignable to a variable of type T. func AssignableTo(V, T Type) bool { x := operand{mode: value, typ: V} - return x.assignableTo(nil, T, nil) // config not needed for non-constant x + return x.assignableTo(nil, T, nil) // check not needed for non-constant x } // ConvertibleTo reports whether a value of type V is convertible to a value of type T. func ConvertibleTo(V, T Type) bool { x := operand{mode: value, typ: V} - return x.convertibleTo(nil, T) // config not needed for non-constant x + return x.convertibleTo(nil, T) // check not needed for non-constant x } // Implements reports whether type V implements interface T. 
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go index 1fe20794ea934..fe3950a52d87c 100644 --- a/src/go/types/api_test.go +++ b/src/go/types/api_test.go @@ -257,10 +257,23 @@ func TestTypesInfo(t *testing.T) { `(string, bool)`, }, + // issue 28277 + {`package issue28277_a; func f(...int)`, + `...int`, + `[]int`, + }, + {`package issue28277_b; func f(a, b int, c ...[]struct{})`, + `...[]struct{}`, + `[][]struct{}`, + }, + // tests for broken code that doesn't parse or type-check {`package x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`}, {`package x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`}, {`package x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a; f: b;}}`, `b`, `string`}, + {`package x3; var x = panic("");`, `panic`, `func(interface{})`}, + {`package x4; func _() { panic("") }`, `panic`, `func(interface{})`}, + {`package x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string][-1]int`}, } for _, test := range tests { @@ -386,6 +399,8 @@ func TestPredicatesInfo(t *testing.T) { {`package t0; type _ int`, `int`, `type`}, {`package t1; type _ []int`, `[]int`, `type`}, {`package t2; type _ func()`, `func()`, `type`}, + {`package t3; type _ func(int)`, `int`, `type`}, + {`package t3; type _ func(...int)`, `...int`, `type`}, // built-ins {`package b0; var _ = len("")`, `len`, `builtin`}, diff --git a/src/go/types/assignments.go b/src/go/types/assignments.go index 27002f6699403..efa0cbba50270 100644 --- a/src/go/types/assignments.go +++ b/src/go/types/assignments.go @@ -57,7 +57,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) { return } - if reason := ""; !x.assignableTo(check.conf, T, &reason) { + if reason := ""; !x.assignableTo(check, T, &reason) { if reason != "" { check.errorf(x.pos(), "cannot use %s as %s value in %s: %s", x, T, context, reason) } else { diff --git a/src/go/types/builtins.go 
b/src/go/types/builtins.go index 05e032423ca2c..ece6d4f530cfe 100644 --- a/src/go/types/builtins.go +++ b/src/go/types/builtins.go @@ -13,7 +13,7 @@ import ( ) // builtin type-checks a call to the built-in specified by id and -// returns true if the call is valid, with *x holding the result; +// reports whether the call is valid, with *x holding the result; // but x.expr is not set. If the call is invalid, the result is // false, and *x is undefined. // @@ -95,7 +95,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // spec: "As a special case, append also accepts a first argument assignable // to type []byte with a second argument of string type followed by ... . // This form appends the bytes of the string. - if nargs == 2 && call.Ellipsis.IsValid() && x.assignableTo(check.conf, NewSlice(universeByte), nil) { + if nargs == 2 && call.Ellipsis.IsValid() && x.assignableTo(check, NewSlice(universeByte), nil) { arg(x, 1) if x.mode == invalid { return @@ -345,7 +345,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b return } - if !x.assignableTo(check.conf, m.key, nil) { + if !x.assignableTo(check, m.key, nil) { check.invalidArg(x.pos(), "%s is not assignable to %s", x, m.key) return } @@ -476,7 +476,7 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b // panic(x) // record panic call if inside a function with result parameters // (for use in Checker.isTerminating) - if check.sig.results.Len() > 0 { + if check.sig != nil && check.sig.results.Len() > 0 { // function has result parameters p := check.isPanic if p == nil { diff --git a/src/go/types/call.go b/src/go/types/call.go index d5c196afe8463..1abc1d8a5e47d 100644 --- a/src/go/types/call.go +++ b/src/go/types/call.go @@ -233,6 +233,7 @@ func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature, } // evaluate arguments + context := check.sprintf("argument to %s", call.Fun) for i := 0; i < n; i++ 
{ arg(x, i) if x.mode != invalid { @@ -240,7 +241,7 @@ func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature, if i == n-1 && call.Ellipsis.IsValid() { ellipsis = call.Ellipsis } - check.argument(call.Fun, sig, i, x, ellipsis) + check.argument(call.Fun, sig, i, x, ellipsis, context) } } @@ -258,7 +259,7 @@ func (check *Checker) arguments(x *operand, call *ast.CallExpr, sig *Signature, // argument checks passing of argument x to the i'th parameter of the given signature. // If ellipsis is valid, the argument is followed by ... at that position in the call. -func (check *Checker) argument(fun ast.Expr, sig *Signature, i int, x *operand, ellipsis token.Pos) { +func (check *Checker) argument(fun ast.Expr, sig *Signature, i int, x *operand, ellipsis token.Pos, context string) { check.singleValue(x) if x.mode == invalid { return @@ -298,7 +299,7 @@ func (check *Checker) argument(fun ast.Expr, sig *Signature, i int, x *operand, typ = typ.(*Slice).elem } - check.assignment(x, typ, check.sprintf("argument to %s", fun)) + check.assignment(x, typ, context) } func (check *Checker) selector(x *operand, e *ast.SelectorExpr) { @@ -374,20 +375,28 @@ func (check *Checker) selector(x *operand, e *ast.SelectorExpr) { switch { case index != nil: // TODO(gri) should provide actual type where the conflict happens - check.invalidOp(e.Sel.Pos(), "ambiguous selector %s", sel) + check.errorf(e.Sel.Pos(), "ambiguous selector %s", sel) case indirect: - check.invalidOp(e.Sel.Pos(), "%s is not in method set of %s", sel, x.typ) + // TODO(gri) be more specific with this error message + check.errorf(e.Sel.Pos(), "%s is not in method set of %s", sel, x.typ) default: - check.invalidOp(e.Sel.Pos(), "%s has no field or method %s", x, sel) + // TODO(gri) should check if capitalization of sel matters and provide better error message in that case + check.errorf(e.Sel.Pos(), "%s.%s undefined (type %s has no field or method %s)", x.expr, sel, x.typ, sel) } goto Error } + // methods 
may not have a fully set up signature yet + if m, _ := obj.(*Func); m != nil { + check.objDecl(m, nil) + } + if x.mode == typexpr { // method expression m, _ := obj.(*Func) if m == nil { - check.invalidOp(e.Sel.Pos(), "%s has no method %s", x, sel) + // TODO(gri) should check if capitalization of sel matters and provide better error message in that case + check.errorf(e.Sel.Pos(), "%s.%s undefined (type %s has no method %s)", x.expr, sel, x.typ, sel) goto Error } diff --git a/src/go/types/check.go b/src/go/types/check.go index 76d9c8917cb76..b48d09de22a07 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -76,7 +76,7 @@ type Checker struct { fset *token.FileSet pkg *Package *Info - objMap map[Object]*declInfo // maps package-level object to declaration info + objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package // information collected during type-checking of a set of package files @@ -85,8 +85,9 @@ type Checker struct { files []*ast.File // package files unusedDotImports map[*Scope]map[*Package]token.Pos // positions of unused dot-imported packages for each file scope - firstErr error // first error encountered - methods map[*TypeName][]*Func // maps package scope type names to associated non-blank, non-interface methods + firstErr error // first error encountered + methods map[*TypeName][]*Func // maps package scope type names to associated non-blank, non-interface methods + // TODO(gri) move interfaces up to the group of fields persistent across check.Files invocations (see also comment in Checker.initFiles) interfaces map[*TypeName]*ifaceInfo // maps interface type names to corresponding interface infos untyped map[ast.Expr]exprInfo // map of expressions without final type delayed []func() // stack of delayed actions @@ -160,11 +161,6 @@ func (check *Checker) pop() Object { return obj } -// 
pathString returns a string of the form a->b-> ... ->g for an object path [a, b, ... g]. -func (check *Checker) pathString() string { - return objPathString(check.objPath) -} - // NewChecker returns a new Checker instance for a given package. // Package files may be added incrementally via checker.Files. func NewChecker(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker { @@ -197,7 +193,15 @@ func (check *Checker) initFiles(files []*ast.File) { check.firstErr = nil check.methods = nil - check.interfaces = nil + // Don't clear the interfaces cache! It's important that we don't recompute + // ifaceInfos repeatedly (due to multiple check.Files calls) because when + // they are recomputed, they are not used in the context of their original + // declaration (because those types are already type-checked, typically) and + // then they will get the wrong receiver types, which matters for go/types + // clients. It is also safe to not reset the interfaces cache because files + // added to a package cannot change (add methods to) existing interface types; + // they can only add new interfaces. See also the respective comment in + // checker.infoFromTypeName (interfaces.go). Was bug - see issue #29029. check.untyped = nil check.delayed = nil diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go index 2bdfc150f4191..45e1fcb60561f 100644 --- a/src/go/types/check_test.go +++ b/src/go/types/check_test.go @@ -92,6 +92,9 @@ var tests = [][]string{ {"testdata/blank.src"}, {"testdata/issue25008b.src", "testdata/issue25008a.src"}, // order (b before a) is crucial! 
{"testdata/issue26390.src"}, // stand-alone test to ensure case is triggered + {"testdata/issue23203a.src"}, + {"testdata/issue23203b.src"}, + {"testdata/issue28251.src"}, } var fset = token.NewFileSet() diff --git a/src/go/types/conversions.go b/src/go/types/conversions.go index 81a65838fe091..fecb7b617ffd7 100644 --- a/src/go/types/conversions.go +++ b/src/go/types/conversions.go @@ -18,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) { case constArg && isConstType(T): // constant conversion switch t := T.Underlying().(*Basic); { - case representableConst(x.val, check.conf, t, &x.val): + case representableConst(x.val, check, t, &x.val): ok = true case isInteger(x.typ) && isString(t): codepoint := int64(-1) @@ -31,7 +31,7 @@ func (check *Checker) conversion(x *operand, T Type) { x.val = constant.MakeString(string(codepoint)) ok = true } - case x.convertibleTo(check.conf, T): + case x.convertibleTo(check, T): // non-constant conversion x.mode = value ok = true @@ -76,9 +76,12 @@ func (check *Checker) conversion(x *operand, T Type) { // is tricky because we'd have to run updateExprType on the argument first. // (Issue #21982.) -func (x *operand) convertibleTo(conf *Config, T Type) bool { +// convertibleTo reports whether T(x) is valid. +// The check parameter may be nil if convertibleTo is invoked through an +// exported API call, i.e., when all methods have been type-checked. +func (x *operand) convertibleTo(check *Checker, T Type) bool { // "x is assignable to T" - if x.assignableTo(conf, T, nil) { + if x.assignableTo(check, T, nil) { return true } diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 11b68583e38c0..1e2790a1711cf 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -64,18 +64,11 @@ func objPathString(path []Object) string { return s } -// useCycleMarking enables the new coloring-based cycle marking scheme -// for package-level objects. 
Set this flag to false to disable this -// code quickly and revert to the existing mechanism (and comment out -// some of the new tests in cycles5.src that will fail again). -// TODO(gri) remove this for Go 1.12 -const useCycleMarking = true - // objDecl type-checks the declaration of obj in its respective (file) context. -// See check.typ for the details on def and path. -func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) { +// For the meaning of def, see Checker.definedType, in typexpr.go. +func (check *Checker) objDecl(obj Object, def *Named) { if trace { - check.trace(obj.Pos(), "-- checking %s %s (path = %s, objPath = %s)", obj.color(), obj, pathString(path), check.pathString()) + check.trace(obj.Pos(), "-- checking %s %s (objPath = %s)", obj.color(), obj, objPathString(check.objPath)) check.indent++ defer func() { check.indent-- @@ -147,50 +140,17 @@ func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) { // order code. switch obj := obj.(type) { case *Const: - if useCycleMarking && check.typeCycle(obj) { - obj.typ = Typ[Invalid] - break - } - if obj.typ == nil { + if check.typeCycle(obj) || obj.typ == nil { obj.typ = Typ[Invalid] } case *Var: - if useCycleMarking && check.typeCycle(obj) { - obj.typ = Typ[Invalid] - break - } - if obj.typ == nil { + if check.typeCycle(obj) || obj.typ == nil { obj.typ = Typ[Invalid] } case *TypeName: - // fixFor26390 enables a temporary work-around to handle alias type names - // that have not been given a type yet even though the underlying type - // is already known. See testdata/issue26390.src for a simple example. - // Set this flag to false to disable this code quickly (and comment - // out the new test in decls4.src that will fail again). 
- // TODO(gri) remove this for Go 1.12 in favor of a more comprehensive fix - const fixFor26390 = true - if fixFor26390 { - // If we have a package-level alias type name that has not been - // given a type yet but the underlying type is a type name that - // has been given a type already, don't report a cycle but use - // the underlying type name's type instead. The cycle shouldn't - // exist in the first place in this case and is due to the way - // methods are type-checked at the moment. See also the comment - // at the end of Checker.typeDecl below. - if d := check.objMap[obj]; d != nil && d.alias && obj.typ == Typ[Invalid] { - // If we can find the underlying type name syntactically - // and it has a type, use that type. - if tname := check.resolveBaseTypeName(ast.NewIdent(obj.name)); tname != nil && tname.typ != nil { - obj.typ = tname.typ - break - } - } - } - - if useCycleMarking && check.typeCycle(obj) { + if check.typeCycle(obj) { // break cycle // (without this, calling underlying() // below may lead to an endless loop @@ -200,7 +160,7 @@ func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) { } case *Func: - if useCycleMarking && check.typeCycle(obj) { + if check.typeCycle(obj) { // Don't set obj.typ to Typ[Invalid] here // because plenty of code type-asserts that // functions have a *Signature type. Grey @@ -244,7 +204,7 @@ func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) { check.varDecl(obj, d.lhs, d.typ, d.init) case *TypeName: // invalid recursive types are detected via path - check.typeDecl(obj, d.typ, def, path, d.alias) + check.typeDecl(obj, d.typ, def, d.alias) case *Func: // functions may be recursive - no need to track dependencies check.funcDecl(obj, d) @@ -260,23 +220,20 @@ func (check *Checker) objDecl(obj Object, def *Named, path []*TypeName) { // Indirections are used to break type cycles. 
var indir = NewTypeName(token.NoPos, nil, "*", nil) -// cutCycle is a sentinel type name that is pushed onto the object path -// to indicate that a cycle doesn't actually exist. This is currently -// needed to break cycles formed via method declarations because they -// are type-checked together with their receiver base types. Once methods -// are type-checked separately (see also TODO in Checker.typeDecl), we -// can get rid of this. -var cutCycle = NewTypeName(token.NoPos, nil, "!", nil) - // typeCycle checks if the cycle starting with obj is valid and // reports an error if it is not. // TODO(gri) rename s/typeCycle/cycle/ once we don't need the other // cycle method anymore. func (check *Checker) typeCycle(obj Object) (isCycle bool) { - d := check.objMap[obj] - if d == nil { - check.dump("%v: %s should have been declared", obj.Pos(), obj) - unreachable() + // The object map contains the package scope objects and the non-interface methods. + if debug { + info := check.objMap[obj] + inObjMap := info != nil && (info.fdecl == nil || info.fdecl.Recv == nil) // exclude methods + isPkgObj := obj.Parent() == check.pkg.scope + if isPkgObj != inObjMap { + check.dump("%v: inconsistent object map for %s (isPkgObj = %v, inObjMap = %v)", obj.Pos(), obj, isPkgObj, inObjMap) + unreachable() + } } // Given the number of constants and variables (nval) in the cycle @@ -303,17 +260,28 @@ func (check *Checker) typeCycle(obj Object) (isCycle bool) { case *Const, *Var: nval++ case *TypeName: - switch { - case obj == indir: + if obj == indir { ncycle-- // don't count (indirections are not objects) hasIndir = true - case obj == cutCycle: - // The cycle is not real and only caused by the fact - // that we type-check methods when we type-check their - // receiver base types. - return false - case !check.objMap[obj].alias: - hasTDef = true + } else { + // Determine if the type name is an alias or not. 
For + // package-level objects, use the object map which + // provides syntactic information (which doesn't rely + // on the order in which the objects are set up). For + // local objects, we can rely on the order, so use + // the object's predicate. + // TODO(gri) It would be less fragile to always access + // the syntactic information. We should consider storing + // this information explicitly in the object. + var alias bool + if d := check.objMap[obj]; d != nil { + alias = d.alias // package-level object + } else { + alias = obj.IsAlias() // function local object + } + if !alias { + hasTDef = true + } } case *Func: // ignored for now @@ -373,7 +341,7 @@ func (check *Checker) constDecl(obj *Const, typ, init ast.Expr) { // determine type, if any if typ != nil { - t := check.typExpr(typ, nil, nil) + t := check.typ(typ) if !isConstType(t) { // don't report an error if the type is an invalid C (defined) type // (issue #22090) @@ -399,7 +367,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init ast.Expr) { // determine type, if any if typ != nil { - obj.typ = check.typExpr(typ, nil, nil) + obj.typ = check.typ(typ) // We cannot spread the type to all lhs variables if there // are more than one since that would mark them as checked // (see Checker.objDecl) and the assignment of init exprs, @@ -474,13 +442,13 @@ func (n *Named) setUnderlying(typ Type) { } } -func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, path []*TypeName, alias bool) { +func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, alias bool) { assert(obj.typ == nil) if alias { obj.typ = Typ[Invalid] - obj.typ = check.typExpr(typ, nil, append(path, obj)) + obj.typ = check.typ(typ) } else { @@ -489,7 +457,7 @@ func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, path []* obj.typ = named // make sure recursive type declarations terminate // determine underlying type of named - check.typExpr(typ, named, append(path, obj)) + 
check.definedType(typ, named) // The underlying type of named may be itself a named type that is // incomplete: @@ -508,11 +476,6 @@ func (check *Checker) typeDecl(obj *TypeName, typ ast.Expr, def *Named, path []* } - // check and add associated methods - // TODO(gri) It's easy to create pathological cases where the - // current approach is incorrect: In general we need to know - // and add all methods _before_ type-checking the type. - // See https://play.golang.org/p/WMpE0q2wK8 check.addMethodDecls(obj) } @@ -552,17 +515,7 @@ func (check *Checker) addMethodDecls(obj *TypeName) { } } - if useCycleMarking { - // Suppress detection of type cycles occurring through method - // declarations - they wouldn't exist if methods were type- - // checked separately from their receiver base types. See also - // comment at the end of Checker.typeDecl. - // TODO(gri) Remove this once methods are type-checked separately. - check.push(cutCycle) - defer check.pop() - } - - // type-check methods + // add valid methods for _, m := range methods { // spec: "For a base type, the non-blank names of methods bound // to it must be unique." @@ -580,9 +533,6 @@ func (check *Checker) addMethodDecls(obj *TypeName) { continue } - // type-check - check.objDecl(m, nil, nil) - if base != nil { base.methods = append(base.methods, m) } @@ -730,8 +680,10 @@ func (check *Checker) declStmt(decl ast.Decl) { // the innermost containing block." 
scopePos := s.Name.Pos() check.declare(check.scope, s.Name, obj, scopePos) - check.typeDecl(obj, s.Type, nil, nil, s.Assign.IsValid()) - + // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) + obj.setColor(grey + color(check.push(obj))) + check.typeDecl(obj, s.Type, nil, s.Assign.IsValid()) + check.pop().setColor(black) default: check.invalidAST(s.Pos(), "const, type, or var declaration expected") } diff --git a/src/go/types/errors.go b/src/go/types/errors.go index 4c8d8537ee6a6..68c96c037eacf 100644 --- a/src/go/types/errors.go +++ b/src/go/types/errors.go @@ -10,6 +10,7 @@ import ( "fmt" "go/ast" "go/token" + "path" "strings" ) @@ -25,7 +26,7 @@ func unreachable() { func (check *Checker) qualifier(pkg *Package) string { if pkg != check.pkg { - return pkg.path + return path.Base(pkg.path) // avoid excessively long path names in error messages } return "" } diff --git a/src/go/types/example_test.go b/src/go/types/example_test.go index 2a2fb3fc591d3..492127bbab576 100644 --- a/src/go/types/example_test.go +++ b/src/go/types/example_test.go @@ -51,6 +51,7 @@ type Celsius float64 func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) } func FToC(f float64) Celsius { return Celsius(f - 32 / 9 * 5) } const Boiling Celsius = 100 +func Unused() { {}; {{ var x int; _ = x }} } // make sure empty block scopes get printed `}, } { f, err := parser.ParseFile(fset, file.name, file.input, 0) @@ -81,23 +82,33 @@ const Boiling Celsius = 100 // . const temperature.Boiling temperature.Celsius // . type temperature.Celsius float64 // . func temperature.FToC(f float64) temperature.Celsius + // . func temperature.Unused() // . func temperature.main() - // // . main.go scope { // . . package fmt - // // . . function scope { // . . . var freezing temperature.Celsius - // . . }. } + // . . } + // . } // . celsius.go scope { // . . package fmt - // // . . function scope { // . . . var c temperature.Celsius // . . } // . . 
function scope { // . . . var f float64 - // . . }. }} + // . . } + // . . function scope { + // . . . block scope { + // . . . } + // . . . block scope { + // . . . . block scope { + // . . . . . var x int + // . . . . } + // . . . } + // . . } + // . } + // } } // ExampleMethodSet prints the method sets of various types. diff --git a/src/go/types/expr.go b/src/go/types/expr.go index c1deaf8325aaf..0dc007069fadf 100644 --- a/src/go/types/expr.go +++ b/src/go/types/expr.go @@ -187,11 +187,20 @@ func roundFloat64(x constant.Value) constant.Value { // representable floating-point and complex values, and to an Int // value for integer values; it is left alone otherwise. // It is ok to provide the addressof the first argument for rounded. -func representableConst(x constant.Value, conf *Config, typ *Basic, rounded *constant.Value) bool { +// +// The check parameter may be nil if representableConst is invoked +// (indirectly) through an exported API call (AssignableTo, ConvertibleTo) +// because we don't need the Checker's config for those calls. +func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *constant.Value) bool { if x.Kind() == constant.Unknown { return true // avoid follow-up errors } + var conf *Config + if check != nil { + conf = check.conf + } + switch { case isInteger(typ): x := constant.ToInt(x) @@ -323,7 +332,7 @@ func representableConst(x constant.Value, conf *Config, typ *Basic, rounded *con // representable checks that a constant operand is representable in the given basic type. 
func (check *Checker) representable(x *operand, typ *Basic) { assert(x.mode == constant_) - if !representableConst(x.val, check.conf, typ, &x.val) { + if !representableConst(x.val, check, typ, &x.val) { var msg string if isNumeric(x.typ) && isNumeric(typ) { // numeric conversion : error msg @@ -452,7 +461,11 @@ func (check *Checker) updateExprType(x ast.Expr, typ Type, final bool) { check.invalidOp(x.Pos(), "shifted operand %s (type %s) must be integer", x, typ) return } - } else if old.val != nil { + // Even if we have an integer, if the value is a constant we + // still must check that it is representable as the specific + // int type requested (was issue #22969). Fall through here. + } + if old.val != nil { // If x is a constant, it must be representable as a value of typ. c := operand{old.mode, x, old.typ, old.val, 0} check.convertUntyped(&c, typ) @@ -576,15 +589,15 @@ func (check *Checker) comparison(x, y *operand, op token.Token) { // spec: "In any comparison, the first operand must be assignable // to the type of the second operand, or vice versa." err := "" - if x.assignableTo(check.conf, y.typ, nil) || y.assignableTo(check.conf, x.typ, nil) { + if x.assignableTo(check, y.typ, nil) || y.assignableTo(check, x.typ, nil) { defined := false switch op { case token.EQL, token.NEQ: // spec: "The equality operators == and != apply to operands that are comparable." - defined = Comparable(x.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ) + defined = Comparable(x.typ) && Comparable(y.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ) case token.LSS, token.LEQ, token.GTR, token.GEQ: // spec: The ordering operators <, <=, >, and >= apply to operands that are ordered." 
- defined = isOrdered(x.typ) + defined = isOrdered(x.typ) && isOrdered(y.typ) default: unreachable() } @@ -1010,7 +1023,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { goto Error // error was reported before case *ast.Ident: - check.ident(x, e, nil, nil) + check.ident(x, e, nil, false) case *ast.Ellipsis: // ellipses are handled explicitly where they are legal @@ -1064,7 +1077,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { break } } - typ = check.typExpr(e.Type, nil, nil) + typ = check.typ(e.Type) base = typ case hint != nil: @@ -1156,12 +1169,23 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { goto Error } n := check.indexedElts(e.Elts, utyp.elem, utyp.len) - // If we have an "open" [...]T array, set the length now that we know it - // and record the type for [...] (usually done by check.typExpr which is - // not called for [...]). + // If we have an array of unknown length (usually [...]T arrays, but also + // arrays [n]T where n is invalid) set the length now that we know it and + // record the type for the array (usually done by check.typ which is not + // called for [...]T). We handle [...]T arrays and arrays with invalid + // length the same here because it makes sense to "guess" the length for + // the latter if we have a composite literal; e.g. for [n]int{1, 2, 3} + // where n is invalid for some reason, it seems fair to assume it should + // be 3 (see also Checked.arrayLength and issue #27346). if utyp.len < 0 { utyp.len = n - check.recordTypeAndValue(e.Type, typexpr, utyp, nil) + // e.Type is missing if we have a composite literal element + // that is itself a composite literal with omitted type. In + // that case there is nothing to record (there is no type in + // the source at that point). 
+ if e.Type != nil { + check.recordTypeAndValue(e.Type, typexpr, utyp, nil) + } } case *Slice: @@ -1433,7 +1457,7 @@ func (check *Checker) exprInternal(x *operand, e ast.Expr, hint Type) exprKind { check.invalidAST(e.Pos(), "use of .(type) outside type switch") goto Error } - T := check.typExpr(e.Type, nil, nil) + T := check.typ(e.Type) if T == Typ[Invalid] { goto Error } @@ -1536,7 +1560,7 @@ func keyVal(x constant.Value) interface{} { // typeAssertion checks that x.(T) is legal; xtyp must be the type of x. func (check *Checker) typeAssertion(pos token.Pos, x *operand, xtyp *Interface, T Type) { - method, wrongType := assertableTo(xtyp, T) + method, wrongType := check.assertableTo(xtyp, T) if method == nil { return } diff --git a/src/go/types/gotype.go b/src/go/types/gotype.go index cde373f3556c7..19dd702c45192 100644 --- a/src/go/types/gotype.go +++ b/src/go/types/gotype.go @@ -297,7 +297,7 @@ func checkPkgFiles(files []*ast.File) { } report(err) }, - Importer: importer.For(*compiler, nil), + Importer: importer.ForCompiler(fset, *compiler, nil), Sizes: types.SizesFor(build.Default.Compiler, build.Default.GOARCH), } diff --git a/src/go/types/interfaces.go b/src/go/types/interfaces.go index e4b42dc5a36b9..e0ef83adcc507 100644 --- a/src/go/types/interfaces.go +++ b/src/go/types/interfaces.go @@ -144,7 +144,7 @@ func (check *Checker) infoFromTypeLit(scope *Scope, iface *ast.InterfaceType, tn } if trace { - check.trace(iface.Pos(), "-- collect methods for %v (path = %s, objPath = %s)", iface, pathString(path), check.pathString()) + check.trace(iface.Pos(), "-- collect methods for %v (path = %s, objPath = %s)", iface, pathString(path), objPathString(check.objPath)) check.indent++ defer func() { check.indent-- @@ -336,6 +336,14 @@ typenameLoop: return check.infoFromQualifiedTypeName(decl.file, typ) case *ast.InterfaceType: // type tname interface{...} + // If tname is fully type-checked at this point (tname.color() == black) + // we could use infoFromType here. 
But in this case, the interface must + // be in the check.interfaces cache as well, which will be hit when we + // call infoFromTypeLit below, and which will be faster. It is important + // that we use that previously computed interface because its methods + // have the correct receiver type (for go/types clients). Thus, the + // check.interfaces cache must be up-to-date even across multiple + // check.Files calls (was bug - see issue #29029). return check.infoFromTypeLit(decl.file, typ, tname, path) } // type tname X // and X is not an interface type diff --git a/src/go/types/issues_test.go b/src/go/types/issues_test.go index 8560bb9b7dc8d..c9f5413920614 100644 --- a/src/go/types/issues_test.go +++ b/src/go/types/issues_test.go @@ -7,6 +7,7 @@ package types_test import ( + "bytes" "fmt" "go/ast" "go/importer" @@ -19,15 +20,17 @@ import ( . "go/types" ) -func TestIssue5770(t *testing.T) { - src := `package p; type S struct{T}` +func mustParse(t *testing.T, src string) *ast.File { f, err := parser.ParseFile(fset, "", src, 0) if err != nil { t.Fatal(err) } - + return f +} +func TestIssue5770(t *testing.T) { + f := mustParse(t, `package p; type S struct{T}`) conf := Config{Importer: importer.Default()} - _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) // do not crash + _, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, nil) // do not crash want := "undeclared name: T" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("got: %v; want: %s", err, want) } @@ -46,14 +49,11 @@ var ( _ = (interface{})("foo") _ = (interface{})(nil) )` - f, err := parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, src) var conf Config types := make(map[ast.Expr]TypeAndValue) - _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types}) + _, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types}) if err != nil { t.Fatal(err) } @@ -94,14 +94,11 @@ func f() int { return 0 } ` - f, err := 
parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, src) var conf Config types := make(map[ast.Expr]TypeAndValue) - _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types}) + _, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Types: types}) if err != nil { t.Fatal(err) } @@ -128,14 +125,11 @@ package p func (T) m() (res bool) { return } type T struct{} // receiver type after method declaration ` - f, err := parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, src) var conf Config defs := make(map[*ast.Ident]Object) - _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs}) + _, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs}) if err != nil { t.Fatal(err) } @@ -162,6 +156,8 @@ func _() { _, _, _ = x, y, z // uses x, y, z } ` + f := mustParse(t, src) + const want = `L3 defs func p._() L4 defs const w untyped int L5 defs var x int @@ -173,16 +169,11 @@ L7 uses var x int L7 uses var y int L7 uses var z int` - f, err := parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } - // don't abort at the first error conf := Config{Error: func(err error) { t.Log(err) }} defs := make(map[*ast.Ident]Object) uses := make(map[*ast.Ident]Object) - _, err = conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs, Uses: uses}) + _, err := conf.Check(f.Name.Name, fset, []*ast.File{f}, &Info{Defs: defs, Uses: uses}) if s := fmt.Sprint(err); !strings.HasSuffix(s, "cannot assign to w") { t.Errorf("Check: unexpected error: %s", s) } @@ -261,13 +252,10 @@ func main() { } ` f := func(test, src string) { - f, err := parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, src) cfg := Config{Importer: importer.Default()} info := Info{Uses: make(map[*ast.Ident]Object)} - _, err = cfg.Check("main", fset, []*ast.File{f}, &info) + _, err := cfg.Check("main", fset, []*ast.File{f}, &info) if err 
!= nil { t.Fatal(err) } @@ -294,11 +282,7 @@ func main() { } func TestIssue22525(t *testing.T) { - src := `package p; func f() { var a, b, c, d, e int }` - f, err := parser.ParseFile(fset, "", src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, `package p; func f() { var a, b, c, d, e int }`) got := "\n" conf := Config{Error: func(err error) { got += err.Error() + "\n" }} @@ -328,14 +312,11 @@ func TestIssue25627(t *testing.T) { `struct { *I }`, `struct { a int; b Missing; *Missing }`, } { - f, err := parser.ParseFile(fset, "", prefix+src, 0) - if err != nil { - t.Fatal(err) - } + f := mustParse(t, prefix+src) cfg := Config{Importer: importer.Default(), Error: func(err error) {}} info := &Info{Types: make(map[ast.Expr]TypeAndValue)} - _, err = cfg.Check(f.Name.Name, fset, []*ast.File{f}, info) + _, err := cfg.Check(f.Name.Name, fset, []*ast.File{f}, info) if err != nil { if _, ok := err.(Error); !ok { t.Fatal(err) @@ -355,3 +336,132 @@ func TestIssue25627(t *testing.T) { }) } } + +func TestIssue28005(t *testing.T) { + // method names must match defining interface name for this test + // (see last comment in this function) + sources := [...]string{ + "package p; type A interface{ A() }", + "package p; type B interface{ B() }", + "package p; type X interface{ A; B }", + } + + // compute original file ASTs + var orig [len(sources)]*ast.File + for i, src := range sources { + orig[i] = mustParse(t, src) + } + + // run the test for all order permutations of the incoming files + for _, perm := range [][len(sources)]int{ + {0, 1, 2}, + {0, 2, 1}, + {1, 0, 2}, + {1, 2, 0}, + {2, 0, 1}, + {2, 1, 0}, + } { + // create file order permutation + files := make([]*ast.File, len(sources)) + for i := range perm { + files[i] = orig[perm[i]] + } + + // type-check package with given file order permutation + var conf Config + info := &Info{Defs: make(map[*ast.Ident]Object)} + _, err := conf.Check("", fset, files, info) + if err != nil { + t.Fatal(err) + } + + // look for 
interface object X + var obj Object + for name, def := range info.Defs { + if name.Name == "X" { + obj = def + break + } + } + if obj == nil { + t.Fatal("interface not found") + } + iface := obj.Type().Underlying().(*Interface) // I must be an interface + + // Each iface method m is embedded; and m's receiver base type name + // must match the method's name per the choice in the source file. + for i := 0; i < iface.NumMethods(); i++ { + m := iface.Method(i) + recvName := m.Type().(*Signature).Recv().Type().(*Named).Obj().Name() + if recvName != m.Name() { + t.Errorf("perm %v: got recv %s; want %s", perm, recvName, m.Name()) + } + } + } +} + +func TestIssue28282(t *testing.T) { + // create type interface { error } + et := Universe.Lookup("error").Type() + it := NewInterfaceType(nil, []Type{et}) + it.Complete() + // verify that after completing the interface, the embedded method remains unchanged + want := et.Underlying().(*Interface).Method(0) + got := it.Method(0) + if got != want { + t.Fatalf("%s.Method(0): got %q (%p); want %q (%p)", it, got, got, want, want) + } + // verify that lookup finds the same method in both interfaces (redundant check) + obj, _, _ := LookupFieldOrMethod(et, false, nil, "Error") + if obj != want { + t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", et, obj, obj, want, want) + } + obj, _, _ = LookupFieldOrMethod(it, false, nil, "Error") + if obj != want { + t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", it, obj, obj, want, want) + } +} + +func TestIssue29029(t *testing.T) { + f1 := mustParse(t, `package p; type A interface { M() }`) + f2 := mustParse(t, `package p; var B interface { A }`) + + // printInfo prints the *Func definitions recorded in info, one *Func per line. 
+ printInfo := func(info *Info) string { + var buf bytes.Buffer + for _, obj := range info.Defs { + if fn, ok := obj.(*Func); ok { + fmt.Fprintln(&buf, fn) + } + } + return buf.String() + } + + // The *Func (method) definitions for package p must be the same + // independent on whether f1 and f2 are type-checked together, or + // incrementally. + + // type-check together + var conf Config + info := &Info{Defs: make(map[*ast.Ident]Object)} + check := NewChecker(&conf, fset, NewPackage("", "p"), info) + if err := check.Files([]*ast.File{f1, f2}); err != nil { + t.Fatal(err) + } + want := printInfo(info) + + // type-check incrementally + info = &Info{Defs: make(map[*ast.Ident]Object)} + check = NewChecker(&conf, fset, NewPackage("", "p"), info) + if err := check.Files([]*ast.File{f1}); err != nil { + t.Fatal(err) + } + if err := check.Files([]*ast.File{f2}); err != nil { + t.Fatal(err) + } + got := printInfo(info) + + if got != want { + t.Errorf("\ngot : %swant: %s", got, want) + } +} diff --git a/src/go/types/lookup.go b/src/go/types/lookup.go index f31ef9cfe94b9..e6764f45a0e61 100644 --- a/src/go/types/lookup.go +++ b/src/go/types/lookup.go @@ -6,6 +6,11 @@ package types +// Internal use of LookupFieldOrMethod: If the obj result is a method +// associated with a concrete (non-interface) type, the method's signature +// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing +// the method's type. 
+ // LookupFieldOrMethod looks up a field or method with given package and name // in T and returns the corresponding *Var or *Func, an index sequence, and a // bool indicating if there were any pointer indirections on the path to the @@ -112,7 +117,7 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o // look for a matching attached method if i, m := lookupMethod(named.methods, pkg, name); m != nil { // potential match - assert(m.typ != nil) + // caution: method may not have a proper signature yet index = concat(e.index, i) if obj != nil || e.multiples { return nil, index, false // collision @@ -248,6 +253,14 @@ func lookupType(m map[Type]int, typ Type) (int, bool) { // x is of interface type V). // func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) { + return (*Checker)(nil).missingMethod(V, T, static) +} + +// missingMethod is like MissingMethod but accepts a receiver. +// The receiver may be nil if missingMethod is invoked through +// an exported API call (such as MissingMethod), i.e., when all +// methods have been type-checked. +func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) { // fast path for common case if T.Empty() { return @@ -275,11 +288,17 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b for _, m := range T.allMethods { obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name) + // we must have a method (not a field of matching function type) f, _ := obj.(*Func) if f == nil { return m, false } + // methods may not have a fully set up signature yet + if check != nil { + check.objDecl(f, nil) + } + if !Identical(f.typ, m.typ) { return m, true } @@ -291,14 +310,16 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b // assertableTo reports whether a value of type V can be asserted to have type T. // It returns (nil, false) as affirmative answer. 
Otherwise it returns a missing // method required by V and whether it is missing or just has the wrong type. -func assertableTo(V *Interface, T Type) (method *Func, wrongType bool) { +// The receiver may be nil if assertableTo is invoked through an exported API call +// (such as AssertableTo), i.e., when all methods have been type-checked. +func (check *Checker) assertableTo(V *Interface, T Type) (method *Func, wrongType bool) { // no static check is required if T is an interface // spec: "If T is an interface type, x.(T) asserts that the // dynamic type of x implements the interface T." if _, ok := T.Underlying().(*Interface); ok && !strict { return } - return MissingMethod(T, V, false) + return check.missingMethod(T, V, false) } // deref dereferences typ if it is a *Pointer and returns its base and true. diff --git a/src/go/types/methodset.go b/src/go/types/methodset.go index 2b810da728340..619c4484923f2 100644 --- a/src/go/types/methodset.go +++ b/src/go/types/methodset.go @@ -255,8 +255,20 @@ func (s methodSet) add(list []*Func, index []int, indirect bool, multiples bool) } // ptrRecv reports whether the receiver is of the form *T. -// The receiver must exist. func ptrRecv(f *Func) bool { - _, isPtr := deref(f.typ.(*Signature).recv.typ) - return isPtr + // If a method's receiver type is set, use that as the source of truth for the receiver. + // Caution: Checker.funcDecl (decl.go) marks a function by setting its type to an empty + // signature. We may reach here before the signature is fully set up: we must explicitly + // check if the receiver is set (we cannot just look for non-nil f.typ). + if sig, _ := f.typ.(*Signature); sig != nil && sig.recv != nil { + _, isPtr := deref(sig.recv.typ) + return isPtr + } + + // If a method's type is not set it may be a method/function that is: + // 1) client-supplied (via NewFunc with no signature), or + // 2) internally created but not yet type-checked. 
+ // For case 1) we can't do anything; the client must know what they are doing. + // For case 2) we can use the information gathered by the resolver. + return f.hasPtrRecv } diff --git a/src/go/types/object.go b/src/go/types/object.go index 07adfbc34c390..cf773238a0df6 100644 --- a/src/go/types/object.go +++ b/src/go/types/object.go @@ -294,6 +294,7 @@ func (*Var) isDependency() {} // a variable may be a dependency of an initializa // An abstract method may belong to many interfaces due to embedding. type Func struct { object + hasPtrRecv bool // only valid for methods that don't have a type yet } // NewFunc returns a new function with the given signature, representing @@ -304,7 +305,7 @@ func NewFunc(pos token.Pos, pkg *Package, name string, sig *Signature) *Func { if sig != nil { typ = sig } - return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), token.NoPos}} + return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), token.NoPos}, false} } // FullName returns the package- or receiver-type-qualified name of diff --git a/src/go/types/operand.go b/src/go/types/operand.go index 07247bd6f5883..97ca6c622fd97 100644 --- a/src/go/types/operand.go +++ b/src/go/types/operand.go @@ -201,7 +201,9 @@ func (x *operand) isNil() bool { // assignableTo reports whether x is assignable to a variable of type T. // If the result is false and a non-nil reason is provided, it may be set // to a more detailed explanation of the failure (result != ""). -func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool { +// The check parameter may be nil if assignableTo is invoked through +// an exported API call, i.e., when all methods have been type-checked. 
+func (x *operand) assignableTo(check *Checker, T Type, reason *string) bool { if x.mode == invalid || T == Typ[Invalid] { return true // avoid spurious errors } @@ -226,7 +228,7 @@ func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool { return true } if x.mode == constant_ { - return representableConst(x.val, conf, t, nil) + return representableConst(x.val, check, t, nil) } // The result of a comparison is an untyped boolean, // but may not be a constant. @@ -249,7 +251,7 @@ func (x *operand) assignableTo(conf *Config, T Type, reason *string) bool { // T is an interface type and x implements T if Ti, ok := Tu.(*Interface); ok { - if m, wrongType := MissingMethod(x.typ, Ti, true); m != nil /* Implements(x.typ, Ti) */ { + if m, wrongType := check.missingMethod(x.typ, Ti, true); m != nil /* Implements(x.typ, Ti) */ { if reason != nil { if wrongType { *reason = "wrong type for method " + m.Name() diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index 5cbaba187b964..41741e5882de5 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -420,6 +420,10 @@ func (check *Checker) collectObjects() { check.recordDef(d.Name, obj) } info := &declInfo{file: fileScope, fdecl: d} + // Methods are not package-level objects but we still track them in the + // object map so that we can handle them like regular functions (if the + // receiver is invalid); also we need their fdecl info when associating + // them with their receiver base type, below. 
check.objMap[obj] = info obj.setOrder(uint32(len(check.objMap))) @@ -456,67 +460,98 @@ func (check *Checker) collectObjects() { for _, f := range methods { fdecl := check.objMap[f].fdecl if list := fdecl.Recv.List; len(list) > 0 { - // f is a method - // receiver may be of the form T or *T, possibly with parentheses - typ := unparen(list[0].Type) - if ptr, _ := typ.(*ast.StarExpr); ptr != nil { - typ = unparen(ptr.X) - } - if base, _ := typ.(*ast.Ident); base != nil { - // base is a potential base type name; determine - // "underlying" defined type and associate f with it - if tname := check.resolveBaseTypeName(base); tname != nil { - check.methods[tname] = append(check.methods[tname], f) - } + // f is a method. + // Determine the receiver base type and associate f with it. + ptr, base := check.resolveBaseTypeName(list[0].Type) + if base != nil { + f.hasPtrRecv = ptr + check.methods[base] = append(check.methods[base], f) } } } } -// resolveBaseTypeName returns the non-alias receiver base type name, -// explicitly declared in the package scope, for the given receiver -// type name; or nil. -func (check *Checker) resolveBaseTypeName(name *ast.Ident) *TypeName { +// resolveBaseTypeName returns the non-alias base type name for typ, and whether +// there was a pointer indirection to get to it. The base type name must be declared +// in package scope, and there can be at most one pointer indirection. If no such type +// name exists, the returned base is nil. +func (check *Checker) resolveBaseTypeName(typ ast.Expr) (ptr bool, base *TypeName) { + // Algorithm: Starting from a type expression, which may be a name, + // we follow that type through alias declarations until we reach a + // non-alias type name. If we encounter anything but pointer types or + // parentheses we're done. If we encounter more than one pointer type + // we're done. 
var path []*TypeName for { + typ = unparen(typ) + + // check if we have a pointer type + if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil { + // if we've already seen a pointer, we're done + if ptr { + return false, nil + } + ptr = true + typ = unparen(pexpr.X) // continue with pointer base type + } + + // typ must be the name + name, _ := typ.(*ast.Ident) + if name == nil { + return false, nil + } + // name must denote an object found in the current package scope // (note that dot-imported objects are not in the package scope!) obj := check.pkg.scope.Lookup(name.Name) if obj == nil { - return nil + return false, nil } + // the object must be a type name... tname, _ := obj.(*TypeName) if tname == nil { - return nil + return false, nil } // ... which we have not seen before if check.cycle(tname, path, false) { - return nil + return false, nil } // we're done if tdecl defined tname as a new type // (rather than an alias) tdecl := check.objMap[tname] // must exist for objects in package scope if !tdecl.alias { - return tname + return ptr, tname } - // Otherwise, if tdecl defined an alias for a (possibly parenthesized) - // type which is not an (unqualified) named type, we're done because - // receiver base types must be named types declared in this package. - typ := unparen(tdecl.typ) // a type may be parenthesized - name, _ = typ.(*ast.Ident) - if name == nil { - return nil - } - - // continue resolving name + // otherwise, continue resolving + typ = tdecl.typ path = append(path, tname) } } +// cycle reports whether obj appears in path or not. +// If it does, and report is set, it also reports a cycle error. 
+func (check *Checker) cycle(obj *TypeName, path []*TypeName, report bool) bool { + // (it's ok to iterate forward because each named type appears at most once in path) + for i, prev := range path { + if prev == obj { + if report { + check.errorf(obj.pos, "illegal cycle in declaration of %s", obj.name) + // print cycle + for _, obj := range path[i:] { + check.errorf(obj.Pos(), "\t%s refers to", obj.Name()) // secondary error, \t indented + } + check.errorf(obj.Pos(), "\t%s", obj.Name()) + } + return true + } + } + return false +} + // packageObjects typechecks all package objects, but not function bodies. func (check *Checker) packageObjects() { // process package objects in source order for reproducible results @@ -535,11 +570,26 @@ func (check *Checker) packageObjects() { } } - // pre-allocate space for type declaration paths so that the underlying array is reused - typePath := make([]*TypeName, 0, 8) - + // We process non-alias declarations first, in order to avoid situations where + // the type of an alias declaration is needed before it is available. In general + // this is still not enough, as it is possible to create sufficiently convoluted + // recursive type definitions that will cause a type alias to be needed before it + // is available (see issue #25838 for examples). + // As an aside, the cmd/compiler suffers from the same problem (#25838). + var aliasList []*TypeName + // phase 1 for _, obj := range objList { - check.objDecl(obj, nil, typePath) + // If we have a type alias, collect it for the 2nd phase. 
+ if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].alias { + aliasList = append(aliasList, tname) + continue + } + + check.objDecl(obj, nil) + } + // phase 2 + for _, obj := range aliasList { + check.objDecl(obj, nil) } // At this point we may have a non-empty check.methods map; this means that not all diff --git a/src/go/types/scope.go b/src/go/types/scope.go index 39e42d758add8..b50ee2fd5f38d 100644 --- a/src/go/types/scope.go +++ b/src/go/types/scope.go @@ -15,9 +15,6 @@ import ( "strings" ) -// TODO(gri) Provide scopes with a name or other mechanism so that -// objects can use that information for better printing. - // A Scope maintains a set of objects and links to its containing // (parent) and contained (children) scopes. Objects may be inserted // and looked up by name. The zero value for Scope is a ready-to-use @@ -118,7 +115,7 @@ func (s *Scope) Insert(obj Object) Object { func (s *Scope) Pos() token.Pos { return s.pos } func (s *Scope) End() token.Pos { return s.end } -// Contains returns true if pos is within the scope's extent. +// Contains reports whether pos is within the scope's extent. // The result is guaranteed to be valid only if the type-checked // AST has complete position information. func (s *Scope) Contains(pos token.Pos) bool { @@ -161,13 +158,8 @@ func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) { const ind = ". 
" indn := strings.Repeat(ind, n) - fmt.Fprintf(w, "%s%s scope %p {", indn, s.comment, s) - if len(s.elems) == 0 { - fmt.Fprintf(w, "}\n") - return - } + fmt.Fprintf(w, "%s%s scope %p {\n", indn, s.comment, s) - fmt.Fprintln(w) indn1 := indn + ind for _, name := range s.Names() { fmt.Fprintf(w, "%s%s\n", indn1, s.elems[name]) @@ -175,12 +167,11 @@ func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) { if recurse { for _, s := range s.children { - fmt.Fprintln(w) s.WriteTo(w, n+1, recurse) } } - fmt.Fprintf(w, "%s}", indn) + fmt.Fprintf(w, "%s}\n", indn) } // String returns a string representation of the scope, for debugging. diff --git a/src/go/types/sizes.go b/src/go/types/sizes.go index 7b5410167f99b..f890c303774ae 100644 --- a/src/go/types/sizes.go +++ b/src/go/types/sizes.go @@ -169,6 +169,7 @@ var gcArchSizes = map[string]*StdSizes{ "ppc64le": {8, 8}, "riscv64": {8, 8}, "s390x": {8, 8}, + "sparc64": {8, 8}, "wasm": {8, 8}, // When adding more architectures here, // update the doc string of SizesFor below. @@ -179,7 +180,7 @@ var gcArchSizes = map[string]*StdSizes{ // // Supported architectures for compiler "gc": // "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", -// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "wasm". +// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm". func SizesFor(compiler, arch string) Sizes { if compiler != "gc" { return nil diff --git a/src/go/types/stdlib_test.go b/src/go/types/stdlib_test.go index 229d2030995b6..84908fd190c4e 100644 --- a/src/go/types/stdlib_test.go +++ b/src/go/types/stdlib_test.go @@ -46,8 +46,11 @@ func TestStdlib(t *testing.T) { } } -// firstComment returns the contents of the first comment in -// the given file, assuming there's one within the first KB. +// firstComment returns the contents of the first non-empty comment in +// the given file, "skip", or the empty string. 
No matter the present +// comments, if any of them contains a build tag, the result is always +// "skip". Only comments before the "package" token and within the first +// 4K of the file are considered. func firstComment(filename string) string { f, err := os.Open(filename) if err != nil { @@ -55,11 +58,12 @@ func firstComment(filename string) string { } defer f.Close() - var src [1 << 10]byte // read at most 1KB + var src [4 << 10]byte // read at most 4KB n, _ := f.Read(src[:]) + var first string var s scanner.Scanner - s.Init(fset.AddFile("", fset.Base(), n), src[:n], nil, scanner.ScanComments) + s.Init(fset.AddFile("", fset.Base(), n), src[:n], nil /* ignore errors */, scanner.ScanComments) for { _, tok, lit := s.Scan() switch tok { @@ -68,9 +72,17 @@ func firstComment(filename string) string { if lit[1] == '*' { lit = lit[:len(lit)-2] } - return strings.TrimSpace(lit[2:]) - case token.EOF: - return "" + contents := strings.TrimSpace(lit[2:]) + if strings.HasPrefix(contents, "+build ") { + return "skip" + } + if first == "" { + first = contents // contents may be "" but that's ok + } + // continue as we may still see build tags + + case token.PACKAGE, token.EOF: + return first } } } @@ -142,15 +154,8 @@ func TestStdTest(t *testing.T) { t.Skip("skipping in short mode") } - // test/recover4.go is only built for Linux and Darwin. - // TODO(gri) Remove once tests consider +build tags (issue 10370). 
- if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { - return - } - testTestDir(t, filepath.Join(runtime.GOROOT(), "test"), "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore - "sigchld.go", // don't work on Windows; testTestDir should consult build tags ) } @@ -166,7 +171,6 @@ func TestStdFixed(t *testing.T) { "issue6889.go", // gc-specific test "issue7746.go", // large constants - consumes too much memory "issue11362.go", // canonical import path check - "issue15002.go", // uses Mmap; testTestDir should consult build tags "issue16369.go", // go/types handles this correctly - not an issue "issue18459.go", // go/types doesn't check validity of //go:xxx directives "issue18882.go", // go/types doesn't check validity of //go:xxx directives diff --git a/src/go/types/testdata/cycles.src b/src/go/types/testdata/cycles.src index 59f112dba1db7..a9af46a9337b1 100644 --- a/src/go/types/testdata/cycles.src +++ b/src/go/types/testdata/cycles.src @@ -158,6 +158,8 @@ type T20 chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte type T22 = chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte func _() { - type T1 chan [unsafe.Sizeof(func(ch T1){ _ = <-ch })]byte - type T2 = chan [unsafe.Sizeof(func(ch T2){ _ = <-ch })]byte + type T0 func(T0) + type T1 /* ERROR cycle */ = func(T1) + type T2 chan [unsafe.Sizeof(func(ch T2){ _ = <-ch })]byte + type T3 /* ERROR cycle */ = chan [unsafe.Sizeof(func(ch T3){ _ = <-ch })]byte } diff --git a/src/go/types/testdata/cycles2.src b/src/go/types/testdata/cycles2.src index a7f4bc60f5b23..fd0df4bf2729a 100644 --- a/src/go/types/testdata/cycles2.src +++ b/src/go/types/testdata/cycles2.src @@ -88,22 +88,10 @@ type T3 /* ERROR cycle */ interface { var x3 T3 type T4 /* ERROR cycle */ interface { - m() [unsafe.Sizeof(cast4(x4.m))]int + m() [unsafe.Sizeof(cast4(x4.m))]int // cast is invalid but we have a cycle, so all bets are off } var x4 T4 var _ = cast4(x4.m) type cast4 func() - -// This test is symmetric to the T4 case: Here the cast is 
-// "correct", but it doesn't work inside the T5 interface. - -type T5 /* ERROR cycle */ interface { - m() [unsafe.Sizeof(cast5(x5.m))]int -} - -var x5 T5 -var _ = cast5(x5.m) - -type cast5 func() [0]int diff --git a/src/go/types/testdata/cycles5.src b/src/go/types/testdata/cycles5.src index 9c2822e738ac1..aa6528a6312e1 100644 --- a/src/go/types/testdata/cycles5.src +++ b/src/go/types/testdata/cycles5.src @@ -162,20 +162,29 @@ func makeArray() (res T12) { return } var r /* ERROR cycle */ = newReader() func newReader() r -// variations of the theme of #8699 amd #20770 +// variations of the theme of #8699 and #20770 var arr /* ERROR cycle */ = f() func f() [len(arr)]int -// TODO(gri) here we should only get one error -func ff /* ERROR cycle */ (ff /* ERROR not a type */ ) +// issue #25790 +func ff(ff /* ERROR not a type */ ) +func gg((gg /* ERROR not a type */ )) type T13 /* ERROR cycle */ [len(b13)]int var b13 T13 -func g /* ERROR cycle */ () [unsafe.Sizeof(x)]int -var x = g +func g1() [unsafe.Sizeof(g1)]int +func g2() [unsafe.Sizeof(x2)]int +var x2 = g2 -func h /* ERROR cycle */ () [h /* ERROR no value */ ()[0]]int { panic(0) } +// verify that we get the correct sizes for the functions above +// (note: assert is statically evaluated in go/types test mode) +func init() { + assert(unsafe.Sizeof(g1) == 8) + assert(unsafe.Sizeof(x2) == 8) +} + +func h() [h /* ERROR no value */ ()[0]]int { panic(0) } var c14 /* ERROR cycle */ T14 type T14 [uintptr(unsafe.Sizeof(&c14))]byte diff --git a/src/go/types/testdata/decls0.src b/src/go/types/testdata/decls0.src index 162dfeda04e3d..56adbbfaae671 100644 --- a/src/go/types/testdata/decls0.src +++ b/src/go/types/testdata/decls0.src @@ -183,16 +183,16 @@ type ( ) // cycles in function/method declarations -// (test cases for issue 5217 and variants) -func f1 /* ERROR cycle */ (x f1 /* ERROR "not a type" */ ) {} -func f2 /* ERROR cycle */ (x *f2 /* ERROR "not a type" */ ) {} -func f3 /* ERROR cycle */ () (x f3 /* ERROR "not a type" */ 
) { return } -func f4 /* ERROR cycle */ () (x *f4 /* ERROR "not a type" */ ) { return } - -func (S0) m1(x S0.m1 /* ERROR "field or method" */ ) {} -func (S0) m2(x *S0.m2 /* ERROR "field or method" */ ) {} -func (S0) m3() (x S0.m3 /* ERROR "field or method" */ ) { return } -func (S0) m4() (x *S0.m4 /* ERROR "field or method" */ ) { return } +// (test cases for issues #5217, #25790 and variants) +func f1(x f1 /* ERROR "not a type" */ ) {} +func f2(x *f2 /* ERROR "not a type" */ ) {} +func f3() (x f3 /* ERROR "not a type" */ ) { return } +func f4() (x *f4 /* ERROR "not a type" */ ) { return } + +func (S0) m1 /* ERROR illegal cycle */ (x S0 /* ERROR value .* is not a type */ .m1) {} +func (S0) m2 /* ERROR illegal cycle */ (x *S0 /* ERROR value .* is not a type */ .m2) {} +func (S0) m3 /* ERROR illegal cycle */ () (x S0 /* ERROR value .* is not a type */ .m3) { return } +func (S0) m4 /* ERROR illegal cycle */ () (x *S0 /* ERROR value .* is not a type */ .m4) { return } // interfaces may not have any blank methods type BlankI interface { diff --git a/src/go/types/testdata/expr2.src b/src/go/types/testdata/expr2.src index 31dc5f021c029..0c959e8011944 100644 --- a/src/go/types/testdata/expr2.src +++ b/src/go/types/testdata/expr2.src @@ -208,6 +208,19 @@ func interfaces() { _ = i /* ERROR mismatched types */ == s2 _ = i /* ERROR mismatched types */ == &s2 + + // issue #28164 + // testcase from issue + _ = interface /* ERROR cannot compare */ {}(nil) == []int(nil) + + // related cases + var e interface{} + var s []int + var x int + _ = e /* ERROR cannot compare */ == s + _ = s /* ERROR cannot compare */ == e + _ = e /* ERROR cannot compare */ < x + _ = x /* ERROR cannot compare */ < e } func slices() { diff --git a/src/go/types/testdata/expr3.src b/src/go/types/testdata/expr3.src index b4c816332462e..d562f0b16b668 100644 --- a/src/go/types/testdata/expr3.src +++ b/src/go/types/testdata/expr3.src @@ -497,7 +497,7 @@ func _calls() { f1(x ... /* ERROR "cannot use ..." 
*/ ) f1(g0 /* ERROR "used as value" */ ()) f1(g1()) - // f1(g2()) // TODO(gri) missing position in error message + f1(g2 /* ERROR "cannot use g2" */ /* ERROR "too many arguments" */ ()) f2() /* ERROR "too few arguments" */ f2(3.14) /* ERROR "too few arguments" */ diff --git a/src/go/types/testdata/issue23203a.src b/src/go/types/testdata/issue23203a.src new file mode 100644 index 0000000000000..48cb5889cd90e --- /dev/null +++ b/src/go/types/testdata/issue23203a.src @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +type T struct{} + +func (T) m1() {} +func (T) m2([unsafe.Sizeof(T.m1)]int) {} + +func main() {} diff --git a/src/go/types/testdata/issue23203b.src b/src/go/types/testdata/issue23203b.src new file mode 100644 index 0000000000000..638ec6c5ce34c --- /dev/null +++ b/src/go/types/testdata/issue23203b.src @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "unsafe" + +type T struct{} + +func (T) m2([unsafe.Sizeof(T.m1)]int) {} +func (T) m1() {} + +func main() {} diff --git a/src/go/types/testdata/issue28251.src b/src/go/types/testdata/issue28251.src new file mode 100644 index 0000000000000..a456f5c27edc2 --- /dev/null +++ b/src/go/types/testdata/issue28251.src @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains test cases for various forms of +// method receiver declarations, per the spec clarification +// https://golang.org/cl/142757. 
+ +package issue28251 + +// test case from issue28251 +type T struct{} + +type T0 = *T + +func (T0) m() {} + +func _() { (&T{}).m() } + +// various alternative forms +type ( + T1 = (((T))) +) + +func ((*(T1))) m1() {} +func _() { (T{}).m2() } +func _() { (&T{}).m2() } + +type ( + T2 = (((T3))) + T3 = T +) + +func (T2) m2() {} +func _() { (T{}).m2() } +func _() { (&T{}).m2() } + +type ( + T4 = ((*(T5))) + T5 = T +) + +func (T4) m4() {} +func _() { (T{}).m4 /* ERROR m4 is not in method set of T */ () } +func _() { (&T{}).m4() } + +type ( + T6 = (((T7))) + T7 = (*(T8)) + T8 = T +) + +func (T6) m6() {} +func _() { (T{}).m6 /* ERROR m6 is not in method set of T */ () } +func _() { (&T{}).m6() } + +type ( + T9 = *T10 + T10 = *T11 + T11 = T +) + +func (T9 /* ERROR invalid receiver \*\*T */ ) m9() {} +func _() { (T{}).m9 /* ERROR has no field or method m9 */ () } +func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () } diff --git a/src/go/types/testdata/issues.src b/src/go/types/testdata/issues.src index d85e04e68cbfa..d02030110935e 100644 --- a/src/go/types/testdata/issues.src +++ b/src/go/types/testdata/issues.src @@ -5,6 +5,7 @@ package issues import "fmt" +import syn "cmd/compile/internal/syntax" func issue7035() { type T struct{ X int } @@ -294,3 +295,46 @@ type registry struct { type allocator struct { _ [int(preloadLimit)]int } + +// Test that we don't crash when type-checking composite literals +// containing errors in the type. +var issue27346 = [][n /* ERROR undeclared */ ]int{ + 0: {}, +} + +var issue22467 = map[int][... /* ERROR invalid use of ... */ ]int{0: {}} + +// Test that invalid use of ... in parameter lists is recognized +// (issue #28281). +func issue28281a(int, int, ...int) +func issue28281b(a, b int, c ...int) +func issue28281c(a, b, c ... /* ERROR can only use ... with final parameter */ int) +func issue28281d(... /* ERROR can only use ... with final parameter */ int, int) +func issue28281e(a, b, c ... /* ERROR can only use ... 
with final parameter */ int, d int) +func issue28281f(... /* ERROR can only use ... with final parameter */ int, ... /* ERROR can only use ... with final parameter */ int, int) +func (... /* ERROR expected type */ TT) f() +func issue28281g() (... /* ERROR expected type */ TT) + +// Issue #26234: Make various field/method lookup errors easier to read by matching cmd/compile's output +func issue26234a(f *syn.File) { + // The error message below should refer to the actual package path base (syntax) + // not the local package name (syn). + f.foo /* ERROR f.foo undefined \(type \*syntax.File has no field or method foo\) */ +} + +type T struct { + x int + E1 + E2 +} + +type E1 struct{ f int } +type E2 struct{ f int } + +func issue26234b(x T) { + _ = x.f /* ERROR ambiguous selector f */ +} + +func issue26234c() { + T.x /* ERROR T.x undefined \(type T has no method x\) */ () +} diff --git a/src/go/types/testdata/shifts.src b/src/go/types/testdata/shifts.src index ca288290d69ce..52e340ec65cb3 100644 --- a/src/go/types/testdata/shifts.src +++ b/src/go/types/testdata/shifts.src @@ -354,3 +354,15 @@ func issue21727() { var _ = string(1 << s) var _ = string(1.0 /* ERROR "cannot convert" */ << s) } + +func issue22969() { + var s uint + var a []byte + _ = a[0xffffffffffffffff /* ERROR "overflows int" */ <= 0 { return n } @@ -438,14 +414,14 @@ func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicO ftype := field.Type if t, _ := ftype.(*ast.Ellipsis); t != nil { ftype = t.Elt - if variadicOk && i == len(list.List)-1 { + if variadicOk && i == len(list.List)-1 && len(field.Names) <= 1 { variadic = true } else { - check.invalidAST(field.Pos(), "... not permitted") + check.softErrorf(t.Pos(), "can only use ... with final parameter in list") // ignore ... and continue } } - typ := check.typ(ftype) + typ := check.indirectType(ftype) // The parser ensures that f.Tag is nil and we don't // care if a constructed AST contains a non-nil tag. 
if len(field.Names) > 0 { @@ -475,9 +451,12 @@ func (check *Checker) collectParams(scope *Scope, list *ast.FieldList, variadicO } // For a variadic function, change the last parameter's type from T to []T. - if variadic && len(params) > 0 { + // Since we type-checked T rather than ...T, we also need to retro-actively + // record the type for ...T. + if variadic { last := params[len(params)-1] last.typ = &Slice{elem: last.typ} + check.recordTypeAndValue(list.List[len(list.List)-1].Type, typexpr, last.typ, nil) } return @@ -492,7 +471,7 @@ func (check *Checker) declareInSet(oset *objset, pos token.Pos, obj Object) bool return true } -func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named, path []*TypeName) { +func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, def *Named) { // fast-track empty interface if iface.Methods.List == nil { ityp.allMethods = markComplete @@ -525,7 +504,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d for _, f := range iface.Methods.List { if len(f.Names) == 0 { - typ := check.typ(f.Type) + typ := check.indirectType(f.Type) // typ should be a named type denoting an interface // (the parser will make sure it's a named type but // constructed ASTs may be wrong). 
@@ -555,12 +534,14 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d // compute method set var tname *TypeName + var path []*TypeName if def != nil { tname = def.obj + path = []*TypeName{tname} } info := check.infoFromTypeLit(check.scope, iface, tname, path) if info == nil || info == &emptyIfaceInfo { - // error or empty interface - exit early + // we got an error or the empty interface - exit early ityp.allMethods = markComplete return } @@ -571,6 +552,15 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d recvTyp = def } + // Correct receiver type for all methods explicitly declared + // by this interface after we're done with type-checking at + // this level. See comment below for details. + check.later(func() { + for _, m := range ityp.methods { + m.typ.(*Signature).recv.typ = recvTyp + } + }) + // collect methods var sigfix []*methodInfo for i, minfo := range info.methods { @@ -580,9 +570,27 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d pos := name.Pos() // Don't type-check signature yet - use an // empty signature now and update it later. - // Since we know the receiver, set it up now - // (required to avoid crash in ptrRecv; see - // e.g. test case for issue 6638). + // But set up receiver since we know it and + // its position, and because interface method + // signatures don't get a receiver via regular + // type-checking (there isn't a receiver in the + // method's AST). Setting the receiver type is + // also important for ptrRecv() (see methodset.go). + // + // Note: For embedded methods, the receiver type + // should be the type of the interface that declared + // the methods in the first place. Since we get the + // methods here via methodInfo, which may be computed + // before we have all relevant interface types, we use + // the current interface's type (recvType). 
This may be + // the type of the interface embedding the interface that + // declared the methods. This doesn't matter for type- + // checking (we only care about the receiver type for + // the ptrRecv predicate, and it's never a pointer recv + // for interfaces), but it matters for go/types clients + // and for printing. We correct the receiver after type- + // checking. + // // TODO(gri) Consider marking methods signatures // as incomplete, for better error messages. See // also the T4 and T5 tests in testdata/cycles2.src. @@ -606,7 +614,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *ast.InterfaceType, d // (possibly embedded) methods must be type-checked within their scope and // type-checking them must not affect the current context (was issue #23914) check.context = context{scope: minfo.scope} - typ := check.typ(minfo.src.Type) + typ := check.indirectType(minfo.src.Type) sig, _ := typ.(*Signature) if sig == nil { if typ != Typ[Invalid] { @@ -665,7 +673,7 @@ func (check *Checker) tag(t *ast.BasicLit) string { return "" } -func (check *Checker) structType(styp *Struct, e *ast.StructType, path []*TypeName) { +func (check *Checker) structType(styp *Struct, e *ast.StructType) { list := e.Fields if list == nil { return @@ -709,7 +717,7 @@ func (check *Checker) structType(styp *Struct, e *ast.StructType, path []*TypeNa } for _, f := range list.List { - typ = check.typExpr(f.Type, nil, path) + typ = check.typ(f.Type) tag = check.tag(f.Tag) if len(f.Names) > 0 { // named fields diff --git a/src/hash/crc32/crc32_arm64.go b/src/hash/crc32/crc32_arm64.go index 1f8779d506fc6..0242d1d8a774a 100644 --- a/src/hash/crc32/crc32_arm64.go +++ b/src/hash/crc32/crc32_arm64.go @@ -13,20 +13,18 @@ import "internal/cpu" func castagnoliUpdate(crc uint32, p []byte) uint32 func ieeeUpdate(crc uint32, p []byte) uint32 -var hasCRC32 = cpu.ARM64.HasCRC32 - func archAvailableCastagnoli() bool { - return hasCRC32 + return cpu.ARM64.HasCRC32 } func archInitCastagnoli() { - if 
!hasCRC32 { + if !cpu.ARM64.HasCRC32 { panic("arch-specific crc32 instruction for Catagnoli not available") } } func archUpdateCastagnoli(crc uint32, p []byte) uint32 { - if !hasCRC32 { + if !cpu.ARM64.HasCRC32 { panic("arch-specific crc32 instruction for Castagnoli not available") } @@ -34,17 +32,17 @@ func archUpdateCastagnoli(crc uint32, p []byte) uint32 { } func archAvailableIEEE() bool { - return hasCRC32 + return cpu.ARM64.HasCRC32 } func archInitIEEE() { - if !hasCRC32 { + if !cpu.ARM64.HasCRC32 { panic("arch-specific crc32 instruction for IEEE not available") } } func archUpdateIEEE(crc uint32, p []byte) uint32 { - if !hasCRC32 { + if !cpu.ARM64.HasCRC32 { panic("arch-specific crc32 instruction for IEEE not available") } diff --git a/src/hash/crc64/crc64.go b/src/hash/crc64/crc64.go index a799a017c938c..063c63c6a3b09 100644 --- a/src/hash/crc64/crc64.go +++ b/src/hash/crc64/crc64.go @@ -10,6 +10,7 @@ package crc64 import ( "errors" "hash" + "sync" ) // The size of a CRC-64 checksum in bytes. @@ -28,13 +29,24 @@ const ( type Table [256]uint64 var ( - slicing8TableISO = makeSlicingBy8Table(makeTable(ISO)) - slicing8TableECMA = makeSlicingBy8Table(makeTable(ECMA)) + slicing8TablesBuildOnce sync.Once + slicing8TableISO *[8]Table + slicing8TableECMA *[8]Table ) +func buildSlicing8TablesOnce() { + slicing8TablesBuildOnce.Do(buildSlicing8Tables) +} + +func buildSlicing8Tables() { + slicing8TableISO = makeSlicingBy8Table(makeTable(ISO)) + slicing8TableECMA = makeSlicingBy8Table(makeTable(ECMA)) +} + // MakeTable returns a Table constructed from the specified polynomial. // The contents of this Table must not be modified. 
func MakeTable(poly uint64) *Table { + buildSlicing8TablesOnce() switch poly { case ISO: return &slicing8TableISO[0] @@ -141,6 +153,7 @@ func readUint64(b []byte) uint64 { } func update(crc uint64, tab *Table, p []byte) uint64 { + buildSlicing8TablesOnce() crc = ^crc // Table comparison is somewhat expensive, so avoid it for small sizes for len(p) >= 64 { diff --git a/src/hash/crc64/crc64_test.go b/src/hash/crc64/crc64_test.go index 9db05b02fe98c..9cf602c82f3ee 100644 --- a/src/hash/crc64/crc64_test.go +++ b/src/hash/crc64/crc64_test.go @@ -62,15 +62,13 @@ func TestGolden(t *testing.T) { io.WriteString(c, g.in) s := c.Sum64() if s != g.outISO { - t.Errorf("ISO crc64(%s) = 0x%x want 0x%x", g.in, s, g.outISO) - t.FailNow() + t.Fatalf("ISO crc64(%s) = 0x%x want 0x%x", g.in, s, g.outISO) } c = New(tabECMA) io.WriteString(c, g.in) s = c.Sum64() if s != g.outECMA { - t.Errorf("ECMA crc64(%s) = 0x%x want 0x%x", g.in, s, g.outECMA) - t.FailNow() + t.Fatalf("ECMA crc64(%s) = 0x%x want 0x%x", g.in, s, g.outECMA) } } } diff --git a/src/hash/fnv/fnv.go b/src/hash/fnv/fnv.go index 7662315d43cf8..0fce177cb3379 100644 --- a/src/hash/fnv/fnv.go +++ b/src/hash/fnv/fnv.go @@ -15,6 +15,7 @@ package fnv import ( "errors" "hash" + "math/bits" ) type ( @@ -137,18 +138,12 @@ func (s *sum64a) Write(data []byte) (int, error) { func (s *sum128) Write(data []byte) (int, error) { for _, c := range data { - // Compute the multiplication in 4 parts to simplify carrying - s1l := (s[1] & 0xffffffff) * prime128Lower - s1h := (s[1] >> 32) * prime128Lower - s0l := (s[0]&0xffffffff)*prime128Lower + (s[1]&0xffffffff)<>32)*prime128Lower + (s[1]>>32)<> 32 - s0l += s1h >> 32 - s0h += s0l >> 32 + // Compute the multiplication + s0, s1 := bits.Mul64(prime128Lower, s[1]) + s0 += s[1]<> 32) * prime128Lower - s0l := (s[0]&0xffffffff)*prime128Lower + (s[1]&0xffffffff)<>32)*prime128Lower + (s[1]>>32)<> 32 - s0l += s1h >> 32 - s0h += s0l >> 32 + // Compute the multiplication + s0, s1 := bits.Mul64(prime128Lower, 
s[1]) + s0 += s[1]<>").Parse(text)) + + err := t.Execute(os.Stdout, data) + if err != nil { + log.Fatal(err) + } + + // Output: + // Hello {{.Name}} +} + // The following example is duplicated in text/template; keep them in sync. func ExampleTemplate_block() { diff --git a/src/html/template/js.go b/src/html/template/js.go index 33a18b41864cc..872f6786b3555 100644 --- a/src/html/template/js.go +++ b/src/html/template/js.go @@ -172,7 +172,7 @@ func jsValEscaper(args ...interface{}) string { // turning into // x//* error marshaling y: // second line of error message */null - return fmt.Sprintf(" /* %s */null ", strings.Replace(err.Error(), "*/", "* /", -1)) + return fmt.Sprintf(" /* %s */null ", strings.ReplaceAll(err.Error(), "*/", "* /")) } // TODO: maybe post-process output to prevent it from containing @@ -371,7 +371,7 @@ func isJSIdentPart(r rune) bool { return false } -// isJSType returns true if the given MIME type should be considered JavaScript. +// isJSType reports whether the given MIME type should be considered JavaScript. // // It is used to determine whether a script tag with a type attribute is a javascript container. 
func isJSType(mimeType string) bool { @@ -391,6 +391,7 @@ func isJSType(mimeType string) bool { "application/ecmascript", "application/javascript", "application/json", + "application/ld+json", "application/x-ecmascript", "application/x-javascript", "text/ecmascript", diff --git a/src/html/template/js_test.go b/src/html/template/js_test.go index 12a850d6e3bfe..05fa105be02c7 100644 --- a/src/html/template/js_test.go +++ b/src/html/template/js_test.go @@ -343,6 +343,7 @@ func TestIsJsMimeType(t *testing.T) { {"application/javascript/version=1.8", false}, {"text/javascript", true}, {"application/json", true}, + {"application/ld+json", true}, } for _, test := range tests { diff --git a/src/html/template/url.go b/src/html/template/url.go index f0516300deb8c..6f8185a4e90e6 100644 --- a/src/html/template/url.go +++ b/src/html/template/url.go @@ -86,7 +86,7 @@ func urlProcessor(norm bool, args ...interface{}) string { } // processURLOnto appends a normalized URL corresponding to its input to b -// and returns true if the appended content differs from s. +// and reports whether the appended content differs from s. func processURLOnto(s string, norm bool, b *bytes.Buffer) bool { b.Grow(len(s) + 16) written := 0 @@ -156,7 +156,7 @@ func srcsetFilterAndEscaper(args ...interface{}) string { s = b.String() } // Additionally, commas separate one source from another. - return strings.Replace(s, ",", "%2c", -1) + return strings.ReplaceAll(s, ",", "%2c") } var b bytes.Buffer diff --git a/src/image/draw/draw.go b/src/image/draw/draw.go index 977d7c522153a..3ff1828dc0c0a 100644 --- a/src/image/draw/draw.go +++ b/src/image/draw/draw.go @@ -309,23 +309,20 @@ func drawCopyOver(dst *image.RGBA, r image.Rectangle, src *image.RGBA, sp image. 
dpix := dst.Pix[d0:] spix := src.Pix[s0:] for i := i0; i != i1; i += idelta { - sr := uint32(spix[i+0]) * 0x101 - sg := uint32(spix[i+1]) * 0x101 - sb := uint32(spix[i+2]) * 0x101 - sa := uint32(spix[i+3]) * 0x101 - - dr := &dpix[i+0] - dg := &dpix[i+1] - db := &dpix[i+2] - da := &dpix[i+3] + s := spix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + sr := uint32(s[0]) * 0x101 + sg := uint32(s[1]) * 0x101 + sb := uint32(s[2]) * 0x101 + sa := uint32(s[3]) * 0x101 // The 0x101 is here for the same reason as in drawRGBA. a := (m - sa) * 0x101 - *dr = uint8((uint32(*dr)*a/m + sr) >> 8) - *dg = uint8((uint32(*dg)*a/m + sg) >> 8) - *db = uint8((uint32(*db)*a/m + sb) >> 8) - *da = uint8((uint32(*da)*a/m + sa) >> 8) + d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + d[0] = uint8((uint32(d[0])*a/m + sr) >> 8) + d[1] = uint8((uint32(d[1])*a/m + sg) >> 8) + d[2] = uint8((uint32(d[2])*a/m + sb) >> 8) + d[3] = uint8((uint32(d[3])*a/m + sa) >> 8) } d0 += ddelta s0 += sdelta @@ -372,23 +369,25 @@ func drawNRGBAOver(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp imag for i, si := i0, si0; i < i1; i, si = i+4, si+4 { // Convert from non-premultiplied color to pre-multiplied color. 
- sa := uint32(spix[si+3]) * 0x101 - sr := uint32(spix[si+0]) * sa / 0xff - sg := uint32(spix[si+1]) * sa / 0xff - sb := uint32(spix[si+2]) * sa / 0xff - - dr := uint32(dpix[i+0]) - dg := uint32(dpix[i+1]) - db := uint32(dpix[i+2]) - da := uint32(dpix[i+3]) + s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 + sa := uint32(s[3]) * 0x101 + sr := uint32(s[0]) * sa / 0xff + sg := uint32(s[1]) * sa / 0xff + sb := uint32(s[2]) * sa / 0xff + + d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + dr := uint32(d[0]) + dg := uint32(d[1]) + db := uint32(d[2]) + da := uint32(d[3]) // The 0x101 is here for the same reason as in drawRGBA. a := (m - sa) * 0x101 - dpix[i+0] = uint8((dr*a/m + sr) >> 8) - dpix[i+1] = uint8((dg*a/m + sg) >> 8) - dpix[i+2] = uint8((db*a/m + sb) >> 8) - dpix[i+3] = uint8((da*a/m + sa) >> 8) + d[0] = uint8((dr*a/m + sr) >> 8) + d[1] = uint8((dg*a/m + sg) >> 8) + d[2] = uint8((db*a/m + sb) >> 8) + d[3] = uint8((da*a/m + sa) >> 8) } } } @@ -407,15 +406,17 @@ func drawNRGBASrc(dst *image.RGBA, r image.Rectangle, src *image.NRGBA, sp image for i, si := i0, si0; i < i1; i, si = i+4, si+4 { // Convert from non-premultiplied color to pre-multiplied color. 
- sa := uint32(spix[si+3]) * 0x101 - sr := uint32(spix[si+0]) * sa / 0xff - sg := uint32(spix[si+1]) * sa / 0xff - sb := uint32(spix[si+2]) * sa / 0xff - - dpix[i+0] = uint8(sr >> 8) - dpix[i+1] = uint8(sg >> 8) - dpix[i+2] = uint8(sb >> 8) - dpix[i+3] = uint8(sa >> 8) + s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 + sa := uint32(s[3]) * 0x101 + sr := uint32(s[0]) * sa / 0xff + sg := uint32(s[1]) * sa / 0xff + sb := uint32(s[2]) * sa / 0xff + + d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + d[0] = uint8(sr >> 8) + d[1] = uint8(sg >> 8) + d[2] = uint8(sb >> 8) + d[3] = uint8(sa >> 8) } } } @@ -434,10 +435,11 @@ func drawGray(dst *image.RGBA, r image.Rectangle, src *image.Gray, sp image.Poin for i, si := i0, si0; i < i1; i, si = i+4, si+1 { p := spix[si] - dpix[i+0] = p - dpix[i+1] = p - dpix[i+2] = p - dpix[i+3] = 255 + d := dpix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + d[0] = p + d[1] = p + d[2] = p + d[3] = 255 } } } @@ -455,9 +457,10 @@ func drawCMYK(dst *image.RGBA, r image.Rectangle, src *image.CMYK, sp image.Poin spix := src.Pix[sy*src.Stride:] for i, si := i0, si0; i < i1; i, si = i+4, si+4 { - dpix[i+0], dpix[i+1], dpix[i+2] = - color.CMYKToRGB(spix[si+0], spix[si+1], spix[si+2], spix[si+3]) - dpix[i+3] = 255 + s := spix[si : si+4 : si+4] // Small cap improves performance, see https://golang.org/issue/27857 + d := dpix[i : i+4 : i+4] + d[0], d[1], d[2] = color.CMYKToRGB(s[0], s[1], s[2], s[3]) + d[3] = 255 } } } @@ -475,18 +478,14 @@ func drawGlyphOver(dst *image.RGBA, r image.Rectangle, src *image.Uniform, mask } ma |= ma << 8 - dr := &dst.Pix[i+0] - dg := &dst.Pix[i+1] - db := &dst.Pix[i+2] - da := &dst.Pix[i+3] - // The 0x101 is here for the same reason as in drawRGBA. 
a := (m - (sa * ma / m)) * 0x101 - *dr = uint8((uint32(*dr)*a + sr*ma) / m >> 8) - *dg = uint8((uint32(*dg)*a + sg*ma) / m >> 8) - *db = uint8((uint32(*db)*a + sb*ma) / m >> 8) - *da = uint8((uint32(*da)*a + sa*ma) / m >> 8) + d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + d[0] = uint8((uint32(d[0])*a + sr*ma) / m >> 8) + d[1] = uint8((uint32(d[1])*a + sg*ma) / m >> 8) + d[2] = uint8((uint32(d[2])*a + sb*ma) / m >> 8) + d[3] = uint8((uint32(d[3])*a + sa*ma) / m >> 8) } i0 += dst.Stride i1 += dst.Stride @@ -518,11 +517,12 @@ func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Poin _, _, _, ma = mask.At(mx, my).RGBA() } sr, sg, sb, sa := src.At(sx, sy).RGBA() + d := dst.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 if op == Over { - dr := uint32(dst.Pix[i+0]) - dg := uint32(dst.Pix[i+1]) - db := uint32(dst.Pix[i+2]) - da := uint32(dst.Pix[i+3]) + dr := uint32(d[0]) + dg := uint32(d[1]) + db := uint32(d[2]) + da := uint32(d[3]) // dr, dg, db and da are all 8-bit color at the moment, ranging in [0,255]. // We work in 16-bit color, and so would normally do: @@ -532,16 +532,16 @@ func drawRGBA(dst *image.RGBA, r image.Rectangle, src image.Image, sp image.Poin // This yields the same result, but is fewer arithmetic operations. 
a := (m - (sa * ma / m)) * 0x101 - dst.Pix[i+0] = uint8((dr*a + sr*ma) / m >> 8) - dst.Pix[i+1] = uint8((dg*a + sg*ma) / m >> 8) - dst.Pix[i+2] = uint8((db*a + sb*ma) / m >> 8) - dst.Pix[i+3] = uint8((da*a + sa*ma) / m >> 8) + d[0] = uint8((dr*a + sr*ma) / m >> 8) + d[1] = uint8((dg*a + sg*ma) / m >> 8) + d[2] = uint8((db*a + sb*ma) / m >> 8) + d[3] = uint8((da*a + sa*ma) / m >> 8) } else { - dst.Pix[i+0] = uint8(sr * ma / m >> 8) - dst.Pix[i+1] = uint8(sg * ma / m >> 8) - dst.Pix[i+2] = uint8(sb * ma / m >> 8) - dst.Pix[i+3] = uint8(sa * ma / m >> 8) + d[0] = uint8(sr * ma / m >> 8) + d[1] = uint8(sg * ma / m >> 8) + d[2] = uint8(sb * ma / m >> 8) + d[3] = uint8(sa * ma / m >> 8) } } i0 += dy * dst.Stride diff --git a/src/image/format.go b/src/image/format.go index 3668de4e6858a..a53b8f9b55488 100644 --- a/src/image/format.go +++ b/src/image/format.go @@ -8,6 +8,8 @@ import ( "bufio" "errors" "io" + "sync" + "sync/atomic" ) // ErrFormat indicates that decoding encountered an unknown format. @@ -21,7 +23,10 @@ type format struct { } // Formats is the list of registered formats. -var formats []format +var ( + formatsMu sync.Mutex + atomicFormats atomic.Value +) // RegisterFormat registers an image format for use by Decode. // Name is the name of the format, like "jpeg" or "png". @@ -30,7 +35,10 @@ var formats []format // Decode is the function that decodes the encoded image. // DecodeConfig is the function that decodes just its configuration. func RegisterFormat(name, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error)) { - formats = append(formats, format{name, magic, decode, decodeConfig}) + formatsMu.Lock() + formats, _ := atomicFormats.Load().([]format) + atomicFormats.Store(append(formats, format{name, magic, decode, decodeConfig})) + formatsMu.Unlock() } // A reader is an io.Reader that can also peek ahead. @@ -62,6 +70,7 @@ func match(magic string, b []byte) bool { // Sniff determines the format of r's data. 
func sniff(r reader) format { + formats, _ := atomicFormats.Load().([]format) for _, f := range formats { b, err := r.Peek(len(f.magic)) if err == nil && match(f.magic, b) { diff --git a/src/image/image.go b/src/image/image.go index bebb9f70fa65e..ffd6de73837e8 100644 --- a/src/image/image.go +++ b/src/image/image.go @@ -80,7 +80,8 @@ func (p *RGBA) RGBAAt(x, y int) color.RGBA { return color.RGBA{} } i := p.PixOffset(x, y) - return color.RGBA{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], p.Pix[i+3]} + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + return color.RGBA{s[0], s[1], s[2], s[3]} } // PixOffset returns the index of the first element of Pix that corresponds to @@ -95,10 +96,11 @@ func (p *RGBA) Set(x, y int, c color.Color) { } i := p.PixOffset(x, y) c1 := color.RGBAModel.Convert(c).(color.RGBA) - p.Pix[i+0] = c1.R - p.Pix[i+1] = c1.G - p.Pix[i+2] = c1.B - p.Pix[i+3] = c1.A + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c1.R + s[1] = c1.G + s[2] = c1.B + s[3] = c1.A } func (p *RGBA) SetRGBA(x, y int, c color.RGBA) { @@ -106,10 +108,11 @@ func (p *RGBA) SetRGBA(x, y int, c color.RGBA) { return } i := p.PixOffset(x, y) - p.Pix[i+0] = c.R - p.Pix[i+1] = c.G - p.Pix[i+2] = c.B - p.Pix[i+3] = c.A + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c.R + s[1] = c.G + s[2] = c.B + s[3] = c.A } // SubImage returns an image representing the portion of the image p visible @@ -179,11 +182,12 @@ func (p *RGBA64) RGBA64At(x, y int) color.RGBA64 { return color.RGBA64{} } i := p.PixOffset(x, y) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 return color.RGBA64{ - uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1]), - uint16(p.Pix[i+2])<<8 | uint16(p.Pix[i+3]), - uint16(p.Pix[i+4])<<8 | uint16(p.Pix[i+5]), - uint16(p.Pix[i+6])<<8 | uint16(p.Pix[i+7]), + uint16(s[0])<<8 | 
uint16(s[1]), + uint16(s[2])<<8 | uint16(s[3]), + uint16(s[4])<<8 | uint16(s[5]), + uint16(s[6])<<8 | uint16(s[7]), } } @@ -199,14 +203,15 @@ func (p *RGBA64) Set(x, y int, c color.Color) { } i := p.PixOffset(x, y) c1 := color.RGBA64Model.Convert(c).(color.RGBA64) - p.Pix[i+0] = uint8(c1.R >> 8) - p.Pix[i+1] = uint8(c1.R) - p.Pix[i+2] = uint8(c1.G >> 8) - p.Pix[i+3] = uint8(c1.G) - p.Pix[i+4] = uint8(c1.B >> 8) - p.Pix[i+5] = uint8(c1.B) - p.Pix[i+6] = uint8(c1.A >> 8) - p.Pix[i+7] = uint8(c1.A) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = uint8(c1.R >> 8) + s[1] = uint8(c1.R) + s[2] = uint8(c1.G >> 8) + s[3] = uint8(c1.G) + s[4] = uint8(c1.B >> 8) + s[5] = uint8(c1.B) + s[6] = uint8(c1.A >> 8) + s[7] = uint8(c1.A) } func (p *RGBA64) SetRGBA64(x, y int, c color.RGBA64) { @@ -214,14 +219,15 @@ func (p *RGBA64) SetRGBA64(x, y int, c color.RGBA64) { return } i := p.PixOffset(x, y) - p.Pix[i+0] = uint8(c.R >> 8) - p.Pix[i+1] = uint8(c.R) - p.Pix[i+2] = uint8(c.G >> 8) - p.Pix[i+3] = uint8(c.G) - p.Pix[i+4] = uint8(c.B >> 8) - p.Pix[i+5] = uint8(c.B) - p.Pix[i+6] = uint8(c.A >> 8) - p.Pix[i+7] = uint8(c.A) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = uint8(c.R >> 8) + s[1] = uint8(c.R) + s[2] = uint8(c.G >> 8) + s[3] = uint8(c.G) + s[4] = uint8(c.B >> 8) + s[5] = uint8(c.B) + s[6] = uint8(c.A >> 8) + s[7] = uint8(c.A) } // SubImage returns an image representing the portion of the image p visible @@ -291,7 +297,8 @@ func (p *NRGBA) NRGBAAt(x, y int) color.NRGBA { return color.NRGBA{} } i := p.PixOffset(x, y) - return color.NRGBA{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], p.Pix[i+3]} + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + return color.NRGBA{s[0], s[1], s[2], s[3]} } // PixOffset returns the index of the first element of Pix that corresponds to @@ -306,10 +313,11 @@ func (p *NRGBA) Set(x, y int, c 
color.Color) { } i := p.PixOffset(x, y) c1 := color.NRGBAModel.Convert(c).(color.NRGBA) - p.Pix[i+0] = c1.R - p.Pix[i+1] = c1.G - p.Pix[i+2] = c1.B - p.Pix[i+3] = c1.A + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c1.R + s[1] = c1.G + s[2] = c1.B + s[3] = c1.A } func (p *NRGBA) SetNRGBA(x, y int, c color.NRGBA) { @@ -317,10 +325,11 @@ func (p *NRGBA) SetNRGBA(x, y int, c color.NRGBA) { return } i := p.PixOffset(x, y) - p.Pix[i+0] = c.R - p.Pix[i+1] = c.G - p.Pix[i+2] = c.B - p.Pix[i+3] = c.A + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c.R + s[1] = c.G + s[2] = c.B + s[3] = c.A } // SubImage returns an image representing the portion of the image p visible @@ -390,11 +399,12 @@ func (p *NRGBA64) NRGBA64At(x, y int) color.NRGBA64 { return color.NRGBA64{} } i := p.PixOffset(x, y) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 return color.NRGBA64{ - uint16(p.Pix[i+0])<<8 | uint16(p.Pix[i+1]), - uint16(p.Pix[i+2])<<8 | uint16(p.Pix[i+3]), - uint16(p.Pix[i+4])<<8 | uint16(p.Pix[i+5]), - uint16(p.Pix[i+6])<<8 | uint16(p.Pix[i+7]), + uint16(s[0])<<8 | uint16(s[1]), + uint16(s[2])<<8 | uint16(s[3]), + uint16(s[4])<<8 | uint16(s[5]), + uint16(s[6])<<8 | uint16(s[7]), } } @@ -410,14 +420,15 @@ func (p *NRGBA64) Set(x, y int, c color.Color) { } i := p.PixOffset(x, y) c1 := color.NRGBA64Model.Convert(c).(color.NRGBA64) - p.Pix[i+0] = uint8(c1.R >> 8) - p.Pix[i+1] = uint8(c1.R) - p.Pix[i+2] = uint8(c1.G >> 8) - p.Pix[i+3] = uint8(c1.G) - p.Pix[i+4] = uint8(c1.B >> 8) - p.Pix[i+5] = uint8(c1.B) - p.Pix[i+6] = uint8(c1.A >> 8) - p.Pix[i+7] = uint8(c1.A) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = uint8(c1.R >> 8) + s[1] = uint8(c1.R) + s[2] = uint8(c1.G >> 8) + s[3] = uint8(c1.G) + s[4] = uint8(c1.B >> 8) + s[5] = uint8(c1.B) + s[6] = uint8(c1.A 
>> 8) + s[7] = uint8(c1.A) } func (p *NRGBA64) SetNRGBA64(x, y int, c color.NRGBA64) { @@ -425,14 +436,15 @@ func (p *NRGBA64) SetNRGBA64(x, y int, c color.NRGBA64) { return } i := p.PixOffset(x, y) - p.Pix[i+0] = uint8(c.R >> 8) - p.Pix[i+1] = uint8(c.R) - p.Pix[i+2] = uint8(c.G >> 8) - p.Pix[i+3] = uint8(c.G) - p.Pix[i+4] = uint8(c.B >> 8) - p.Pix[i+5] = uint8(c.B) - p.Pix[i+6] = uint8(c.A >> 8) - p.Pix[i+7] = uint8(c.A) + s := p.Pix[i : i+8 : i+8] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = uint8(c.R >> 8) + s[1] = uint8(c.R) + s[2] = uint8(c.G >> 8) + s[3] = uint8(c.G) + s[4] = uint8(c.B >> 8) + s[5] = uint8(c.B) + s[6] = uint8(c.A >> 8) + s[7] = uint8(c.A) } // SubImage returns an image representing the portion of the image p visible @@ -850,7 +862,8 @@ func (p *CMYK) CMYKAt(x, y int) color.CMYK { return color.CMYK{} } i := p.PixOffset(x, y) - return color.CMYK{p.Pix[i+0], p.Pix[i+1], p.Pix[i+2], p.Pix[i+3]} + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + return color.CMYK{s[0], s[1], s[2], s[3]} } // PixOffset returns the index of the first element of Pix that corresponds to @@ -865,10 +878,11 @@ func (p *CMYK) Set(x, y int, c color.Color) { } i := p.PixOffset(x, y) c1 := color.CMYKModel.Convert(c).(color.CMYK) - p.Pix[i+0] = c1.C - p.Pix[i+1] = c1.M - p.Pix[i+2] = c1.Y - p.Pix[i+3] = c1.K + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c1.C + s[1] = c1.M + s[2] = c1.Y + s[3] = c1.K } func (p *CMYK) SetCMYK(x, y int, c color.CMYK) { @@ -876,10 +890,11 @@ func (p *CMYK) SetCMYK(x, y int, c color.CMYK) { return } i := p.PixOffset(x, y) - p.Pix[i+0] = c.C - p.Pix[i+1] = c.M - p.Pix[i+2] = c.Y - p.Pix[i+3] = c.K + s := p.Pix[i : i+4 : i+4] // Small cap improves performance, see https://golang.org/issue/27857 + s[0] = c.C + s[1] = c.M + s[2] = c.Y + s[3] = c.K } // SubImage returns an image representing the portion of 
the image p visible diff --git a/src/image/image_test.go b/src/image/image_test.go index 08ba61ea0c704..dfd8eb35a804d 100644 --- a/src/image/image_test.go +++ b/src/image/image_test.go @@ -22,22 +22,29 @@ func cmp(cm color.Model, c0, c1 color.Color) bool { return r0 == r1 && g0 == g1 && b0 == b1 && a0 == a1 } -func TestImage(t *testing.T) { - testImage := []image{ - NewRGBA(Rect(0, 0, 10, 10)), - NewRGBA64(Rect(0, 0, 10, 10)), - NewNRGBA(Rect(0, 0, 10, 10)), - NewNRGBA64(Rect(0, 0, 10, 10)), - NewAlpha(Rect(0, 0, 10, 10)), - NewAlpha16(Rect(0, 0, 10, 10)), - NewGray(Rect(0, 0, 10, 10)), - NewGray16(Rect(0, 0, 10, 10)), - NewPaletted(Rect(0, 0, 10, 10), color.Palette{ +var testImages = []struct { + name string + image func() image +}{ + {"rgba", func() image { return NewRGBA(Rect(0, 0, 10, 10)) }}, + {"rgba64", func() image { return NewRGBA64(Rect(0, 0, 10, 10)) }}, + {"nrgba", func() image { return NewNRGBA(Rect(0, 0, 10, 10)) }}, + {"nrgba64", func() image { return NewNRGBA64(Rect(0, 0, 10, 10)) }}, + {"alpha", func() image { return NewAlpha(Rect(0, 0, 10, 10)) }}, + {"alpha16", func() image { return NewAlpha16(Rect(0, 0, 10, 10)) }}, + {"gray", func() image { return NewGray(Rect(0, 0, 10, 10)) }}, + {"gray16", func() image { return NewGray16(Rect(0, 0, 10, 10)) }}, + {"paletted", func() image { + return NewPaletted(Rect(0, 0, 10, 10), color.Palette{ Transparent, Opaque, - }), - } - for _, m := range testImage { + }) + }}, +} + +func TestImage(t *testing.T) { + for _, tc := range testImages { + m := tc.image() if !Rect(0, 0, 10, 10).Eq(m.Bounds()) { t.Errorf("%T: want bounds %v, got %v", m, Rect(0, 0, 10, 10), m.Bounds()) continue @@ -111,3 +118,182 @@ func Test16BitsPerColorChannel(t *testing.T) { } } } + +func BenchmarkAt(b *testing.B) { + for _, tc := range testImages { + b.Run(tc.name, func(b *testing.B) { + m := tc.image() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.At(4, 5) + } + }) + } +} + +func BenchmarkSet(b *testing.B) { + c := 
color.Gray{0xff} + for _, tc := range testImages { + b.Run(tc.name, func(b *testing.B) { + m := tc.image() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(4, 5, c) + } + }) + } +} + +func BenchmarkRGBAAt(b *testing.B) { + m := NewRGBA(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.RGBAAt(4, 5) + } +} + +func BenchmarkRGBASetRGBA(b *testing.B) { + m := NewRGBA(Rect(0, 0, 10, 10)) + c := color.RGBA{0xff, 0xff, 0xff, 0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetRGBA(4, 5, c) + } +} + +func BenchmarkRGBA64At(b *testing.B) { + m := NewRGBA64(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.RGBA64At(4, 5) + } +} + +func BenchmarkRGBA64SetRGBA64(b *testing.B) { + m := NewRGBA64(Rect(0, 0, 10, 10)) + c := color.RGBA64{0xffff, 0xffff, 0xffff, 0x1357} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetRGBA64(4, 5, c) + } +} + +func BenchmarkNRGBAAt(b *testing.B) { + m := NewNRGBA(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.NRGBAAt(4, 5) + } +} + +func BenchmarkNRGBASetNRGBA(b *testing.B) { + m := NewNRGBA(Rect(0, 0, 10, 10)) + c := color.NRGBA{0xff, 0xff, 0xff, 0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetNRGBA(4, 5, c) + } +} + +func BenchmarkNRGBA64At(b *testing.B) { + m := NewNRGBA64(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.NRGBA64At(4, 5) + } +} + +func BenchmarkNRGBA64SetNRGBA64(b *testing.B) { + m := NewNRGBA64(Rect(0, 0, 10, 10)) + c := color.NRGBA64{0xffff, 0xffff, 0xffff, 0x1357} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetNRGBA64(4, 5, c) + } +} + +func BenchmarkAlphaAt(b *testing.B) { + m := NewAlpha(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.AlphaAt(4, 5) + } +} + +func BenchmarkAlphaSetAlpha(b *testing.B) { + m := NewAlpha(Rect(0, 0, 10, 10)) + c := color.Alpha{0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetAlpha(4, 5, c) + } +} + +func 
BenchmarkAlpha16At(b *testing.B) { + m := NewAlpha16(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.Alpha16At(4, 5) + } +} + +func BenchmarkAlphaSetAlpha16(b *testing.B) { + m := NewAlpha16(Rect(0, 0, 10, 10)) + c := color.Alpha16{0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetAlpha16(4, 5, c) + } +} + +func BenchmarkGrayAt(b *testing.B) { + m := NewGray(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.GrayAt(4, 5) + } +} + +func BenchmarkGraySetGray(b *testing.B) { + m := NewGray(Rect(0, 0, 10, 10)) + c := color.Gray{0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetGray(4, 5, c) + } +} + +func BenchmarkGray16At(b *testing.B) { + m := NewGray16(Rect(0, 0, 10, 10)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.Gray16At(4, 5) + } +} + +func BenchmarkGraySetGray16(b *testing.B) { + m := NewGray16(Rect(0, 0, 10, 10)) + c := color.Gray16{0x13} + b.ResetTimer() + + for i := 0; i < b.N; i++ { + m.SetGray16(4, 5, c) + } +} diff --git a/src/image/jpeg/fdct.go b/src/image/jpeg/fdct.go index 3f8be4e32608e..201a5abd0bab4 100644 --- a/src/image/jpeg/fdct.go +++ b/src/image/jpeg/fdct.go @@ -123,14 +123,14 @@ func fdct(b *block) { tmp13 = tmp1 + tmp3 z1 = (tmp12 + tmp13) * fix_1_175875602 z1 += 1 << (constBits - pass1Bits - 1) - tmp0 = tmp0 * fix_1_501321110 - tmp1 = tmp1 * fix_3_072711026 - tmp2 = tmp2 * fix_2_053119869 - tmp3 = tmp3 * fix_0_298631336 - tmp10 = tmp10 * -fix_0_899976223 - tmp11 = tmp11 * -fix_2_562915447 - tmp12 = tmp12 * -fix_0_390180644 - tmp13 = tmp13 * -fix_1_961570560 + tmp0 *= fix_1_501321110 + tmp1 *= fix_3_072711026 + tmp2 *= fix_2_053119869 + tmp3 *= fix_0_298631336 + tmp10 *= -fix_0_899976223 + tmp11 *= -fix_2_562915447 + tmp12 *= -fix_0_390180644 + tmp13 *= -fix_1_961570560 tmp12 += z1 tmp13 += z1 @@ -171,14 +171,14 @@ func fdct(b *block) { tmp13 = tmp1 + tmp3 z1 = (tmp12 + tmp13) * fix_1_175875602 z1 += 1 << (constBits + pass1Bits - 1) - tmp0 = tmp0 * fix_1_501321110 - 
tmp1 = tmp1 * fix_3_072711026 - tmp2 = tmp2 * fix_2_053119869 - tmp3 = tmp3 * fix_0_298631336 - tmp10 = tmp10 * -fix_0_899976223 - tmp11 = tmp11 * -fix_2_562915447 - tmp12 = tmp12 * -fix_0_390180644 - tmp13 = tmp13 * -fix_1_961570560 + tmp0 *= fix_1_501321110 + tmp1 *= fix_3_072711026 + tmp2 *= fix_2_053119869 + tmp3 *= fix_0_298631336 + tmp10 *= -fix_0_899976223 + tmp11 *= -fix_2_562915447 + tmp12 *= -fix_0_390180644 + tmp13 *= -fix_1_961570560 tmp12 += z1 tmp13 += z1 diff --git a/src/image/png/reader_test.go b/src/image/png/reader_test.go index 66bcfcb437eb2..33dcd3debcc96 100644 --- a/src/image/png/reader_test.go +++ b/src/image/png/reader_test.go @@ -364,10 +364,6 @@ func TestReader(t *testing.T) { } defer sf.Close() sb := bufio.NewScanner(sf) - if err != nil { - t.Error(fn, err) - continue - } // Compare the two, in SNG format, line by line. for { diff --git a/src/image/png/writer.go b/src/image/png/writer.go index 49f1ad2e7fa1f..c03335120eb27 100644 --- a/src/image/png/writer.go +++ b/src/image/png/writer.go @@ -7,6 +7,7 @@ package png import ( "bufio" "compress/zlib" + "encoding/binary" "hash/crc32" "image" "image/color" @@ -62,14 +63,6 @@ const ( // compression level, although that is not implemented yet. ) -// Big-endian. 
-func writeUint32(b []uint8, u uint32) { - b[0] = uint8(u >> 24) - b[1] = uint8(u >> 16) - b[2] = uint8(u >> 8) - b[3] = uint8(u >> 0) -} - type opaquer interface { Opaque() bool } @@ -108,7 +101,7 @@ func (e *encoder) writeChunk(b []byte, name string) { e.err = UnsupportedError(name + " chunk is too large: " + strconv.Itoa(len(b))) return } - writeUint32(e.header[:4], n) + binary.BigEndian.PutUint32(e.header[:4], n) e.header[4] = name[0] e.header[5] = name[1] e.header[6] = name[2] @@ -116,7 +109,7 @@ func (e *encoder) writeChunk(b []byte, name string) { crc := crc32.NewIEEE() crc.Write(e.header[4:8]) crc.Write(b) - writeUint32(e.footer[:4], crc.Sum32()) + binary.BigEndian.PutUint32(e.footer[:4], crc.Sum32()) _, e.err = e.w.Write(e.header[:8]) if e.err != nil { @@ -131,8 +124,8 @@ func (e *encoder) writeChunk(b []byte, name string) { func (e *encoder) writeIHDR() { b := e.m.Bounds() - writeUint32(e.tmp[0:4], uint32(b.Dx())) - writeUint32(e.tmp[4:8], uint32(b.Dy())) + binary.BigEndian.PutUint32(e.tmp[0:4], uint32(b.Dx())) + binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dy())) // Set bit depth and color type. switch e.cb { case cbG8: @@ -144,6 +137,15 @@ func (e *encoder) writeIHDR() { case cbP8: e.tmp[8] = 8 e.tmp[9] = ctPaletted + case cbP4: + e.tmp[8] = 4 + e.tmp[9] = ctPaletted + case cbP2: + e.tmp[8] = 2 + e.tmp[9] = ctPaletted + case cbP1: + e.tmp[8] = 1 + e.tmp[9] = ctPaletted case cbTCA8: e.tmp[8] = 8 e.tmp[9] = ctTrueColorAlpha @@ -312,31 +314,38 @@ func (e *encoder) writeImage(w io.Writer, m image.Image, cb int, level int) erro } defer e.zw.Close() - bpp := 0 // Bytes per pixel. 
+ bitsPerPixel := 0 switch cb { case cbG8: - bpp = 1 + bitsPerPixel = 8 case cbTC8: - bpp = 3 + bitsPerPixel = 24 case cbP8: - bpp = 1 + bitsPerPixel = 8 + case cbP4: + bitsPerPixel = 4 + case cbP2: + bitsPerPixel = 2 + case cbP1: + bitsPerPixel = 1 case cbTCA8: - bpp = 4 + bitsPerPixel = 32 case cbTC16: - bpp = 6 + bitsPerPixel = 48 case cbTCA16: - bpp = 8 + bitsPerPixel = 64 case cbG16: - bpp = 2 + bitsPerPixel = 16 } + // cr[*] and pr are the bytes for the current and previous row. // cr[0] is unfiltered (or equivalently, filtered with the ftNone filter). // cr[ft], for non-zero filter types ft, are buffers for transforming cr[0] under the // other PNG filter types. These buffers are allocated once and re-used for each row. // The +1 is for the per-row filter type, which is at cr[*][0]. b := m.Bounds() - sz := 1 + bpp*b.Dx() + sz := 1 + (bitsPerPixel*b.Dx()+7)/8 for i := range e.cr { if cap(e.cr[i]) < sz { e.cr[i] = make([]uint8, sz) @@ -412,6 +421,30 @@ func (e *encoder) writeImage(w io.Writer, m image.Image, cb int, level int) erro i += 1 } } + + case cbP4, cbP2, cbP1: + pi := m.(image.PalettedImage) + + var a uint8 + var c int + for x := b.Min.X; x < b.Max.X; x++ { + a = a<(SB) -TEXT bytes·Compare(SB),NOSPLIT,$0-28 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVL a_base+0(FP), SI - MOVL a_len+4(FP), BX - MOVL b_base+12(FP), DI - MOVL b_len+16(FP), DX - LEAL ret+24(FP), AX - JMP cmpbody<>(SB) - TEXT runtime·cmpstring(SB),NOSPLIT,$0-20 MOVL a_base+0(FP), SI MOVL a_len+4(FP), BX @@ -45,7 +36,7 @@ TEXT cmpbody<>(SB),NOSPLIT,$0-0 JEQ allsame CMPL BP, $4 JB small - CMPB runtime·support_sse2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 JNE mediumloop largeloop: CMPL BP, $16 diff --git a/src/internal/bytealg/compare_amd64.s b/src/internal/bytealg/compare_amd64.s index 277d77c545ada..900b92a21e1e7 100644 --- a/src/internal/bytealg/compare_amd64.s +++ b/src/internal/bytealg/compare_amd64.s @@ -13,15 +13,6 @@ TEXT ·Compare(SB),NOSPLIT,$0-56 LEAQ 
ret+48(FP), R9 JMP cmpbody<>(SB) -TEXT bytes·Compare(SB),NOSPLIT,$0-56 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVQ a_base+0(FP), SI - MOVQ a_len+8(FP), BX - MOVQ b_base+24(FP), DI - MOVQ b_len+32(FP), DX - LEAQ ret+48(FP), R9 - JMP cmpbody<>(SB) - TEXT runtime·cmpstring(SB),NOSPLIT,$0-40 MOVQ a_base+0(FP), SI MOVQ a_len+8(FP), BX @@ -47,7 +38,7 @@ TEXT cmpbody<>(SB),NOSPLIT,$0-0 CMPQ R8, $63 JBE loop - CMPB internal∕cpu·X86+const_x86_HasAVX2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JEQ big_loop_avx2 JMP big_loop loop: @@ -63,7 +54,7 @@ loop: ADDQ $16, DI SUBQ $16, R8 JMP loop - + diff64: ADDQ $48, SI ADDQ $48, DI diff --git a/src/internal/bytealg/compare_amd64p32.s b/src/internal/bytealg/compare_amd64p32.s index 85ba6fa9ac766..cb4107386ef8c 100644 --- a/src/internal/bytealg/compare_amd64p32.s +++ b/src/internal/bytealg/compare_amd64p32.s @@ -14,16 +14,6 @@ TEXT ·Compare(SB),NOSPLIT,$0-28 MOVL AX, ret+24(FP) RET -TEXT bytes·Compare(SB),NOSPLIT,$0-28 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVL a_base+0(FP), SI - MOVL a_len+4(FP), BX - MOVL b_base+12(FP), DI - MOVL b_len+16(FP), DX - CALL cmpbody<>(SB) - MOVL AX, ret+24(FP) - RET - TEXT runtime·cmpstring(SB),NOSPLIT,$0-20 MOVL a_base+0(FP), SI MOVL a_len+4(FP), BX @@ -62,7 +52,7 @@ loop: ADDQ $16, DI SUBQ $16, R8 JMP loop - + // AX = bit mask of differences diff16: BSFQ AX, BX // index of first byte that differs diff --git a/src/internal/bytealg/compare_arm.s b/src/internal/bytealg/compare_arm.s index d58345223f061..c5bfdda33fb02 100644 --- a/src/internal/bytealg/compare_arm.s +++ b/src/internal/bytealg/compare_arm.s @@ -13,15 +13,6 @@ TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-28 ADD $28, R13, R7 B cmpbody<>(SB) -TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-28 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVW a_base+0(FP), R2 - MOVW a_len+4(FP), R0 - MOVW b_base+12(FP), R3 - MOVW b_len+16(FP), R1 - ADD $28, R13, R7 - B cmpbody<>(SB) - TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-20 MOVW 
a_base+0(FP), R2 MOVW a_len+4(FP), R0 diff --git a/src/internal/bytealg/compare_arm64.s b/src/internal/bytealg/compare_arm64.s index db614b6afe3fd..32e2ba200dacc 100644 --- a/src/internal/bytealg/compare_arm64.s +++ b/src/internal/bytealg/compare_arm64.s @@ -13,15 +13,6 @@ TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56 MOVD $ret+48(FP), R7 B cmpbody<>(SB) -TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-56 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVD a_base+0(FP), R2 - MOVD a_len+8(FP), R0 - MOVD b_base+24(FP), R3 - MOVD b_len+32(FP), R1 - MOVD $ret+48(FP), R7 - B cmpbody<>(SB) - TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40 MOVD a_base+0(FP), R2 MOVD a_len+8(FP), R0 diff --git a/src/internal/bytealg/compare_generic.go b/src/internal/bytealg/compare_generic.go index 5c35a1ac4abdf..2ac60f3df939d 100644 --- a/src/internal/bytealg/compare_generic.go +++ b/src/internal/bytealg/compare_generic.go @@ -35,34 +35,6 @@ samebytes: return 0 } -//go:linkname bytes_Compare bytes.Compare -func bytes_Compare(a, b []byte) int { - l := len(a) - if len(b) < l { - l = len(b) - } - if l == 0 || &a[0] == &b[0] { - goto samebytes - } - for i := 0; i < l; i++ { - c1, c2 := a[i], b[i] - if c1 < c2 { - return -1 - } - if c1 > c2 { - return +1 - } - } -samebytes: - if len(a) < len(b) { - return -1 - } - if len(a) > len(b) { - return +1 - } - return 0 -} - //go:linkname runtime_cmpstring runtime.cmpstring func runtime_cmpstring(a, b string) int { l := len(a) diff --git a/src/internal/bytealg/compare_mipsx.s b/src/internal/bytealg/compare_mipsx.s index 85ba1a9455e0f..9ac5ba5687752 100644 --- a/src/internal/bytealg/compare_mipsx.s +++ b/src/internal/bytealg/compare_mipsx.s @@ -39,39 +39,6 @@ cmp_ret: MOVW R8, ret+24(FP) RET -TEXT bytes·Compare(SB),NOSPLIT,$0-28 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVW a_base+0(FP), R3 - MOVW b_base+12(FP), R4 - MOVW a_len+4(FP), R1 - MOVW b_len+16(FP), R2 - BEQ R3, R4, samebytes - SGTU R1, R2, R7 - MOVW R1, R8 - CMOVN R7, R2, R8 // R8 is min(R1, R2) - - ADDU 
R3, R8 // R3 is current byte in a, R8 is last byte in a to compare -loop: - BEQ R3, R8, samebytes - - MOVBU (R3), R6 - ADDU $1, R3 - MOVBU (R4), R7 - ADDU $1, R4 - BEQ R6, R7 , loop - - SGTU R6, R7, R8 - MOVW $-1, R6 - CMOVZ R8, R6, R8 - JMP cmp_ret -samebytes: - SGTU R1, R2, R6 - SGTU R2, R1, R7 - SUBU R7, R6, R8 -cmp_ret: - MOVW R8, ret+24(FP) - RET - TEXT runtime·cmpstring(SB),NOSPLIT,$0-20 MOVW a_base+0(FP), R3 MOVW a_len+4(FP), R1 diff --git a/src/internal/bytealg/compare_native.go b/src/internal/bytealg/compare_native.go index d4ff61938c18f..b14aa8c72c94c 100644 --- a/src/internal/bytealg/compare_native.go +++ b/src/internal/bytealg/compare_native.go @@ -6,5 +6,14 @@ package bytealg +import _ "unsafe" // For go:linkname + //go:noescape func Compare(a, b []byte) int + +// The declaration below generates ABI wrappers for functions +// implemented in assembly in this package but declared in another +// package. + +//go:linkname abigen_runtime_cmpstring runtime.cmpstring +func abigen_runtime_cmpstring(a, b string) int diff --git a/src/internal/bytealg/compare_ppc64x.s b/src/internal/bytealg/compare_ppc64x.s index 67bfcd11163ee..7819da31cd011 100644 --- a/src/internal/bytealg/compare_ppc64x.s +++ b/src/internal/bytealg/compare_ppc64x.s @@ -23,37 +23,6 @@ TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56 BR cmpbodyBE<>(SB) #endif -equal: - BEQ CR6,done - MOVD $1, R8 - BGT CR6,greater - NEG R8 - -greater: - MOVD R8, (R7) - RET - -done: - MOVD $0, (R7) - RET - -TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-56 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVD a_base+0(FP), R5 - MOVD b_base+24(FP), R6 - MOVD a_len+8(FP), R3 - CMP R5,R6,CR7 - MOVD b_len+32(FP), R4 - MOVD $ret+48(FP), R7 - CMP R3,R4,CR6 - BEQ CR7,equal - -#ifdef GOARCH_ppc64le - BR cmpbodyLE<>(SB) -#else - BR cmpbodyBE<>(SB) -#endif - equal: BEQ CR6,done MOVD $1, R8 diff --git a/src/internal/bytealg/compare_s390x.s b/src/internal/bytealg/compare_s390x.s index 4bc4624906690..539454870d338 100644 --- 
a/src/internal/bytealg/compare_s390x.s +++ b/src/internal/bytealg/compare_s390x.s @@ -13,15 +13,6 @@ TEXT ·Compare(SB),NOSPLIT|NOFRAME,$0-56 LA ret+48(FP), R7 BR cmpbody<>(SB) -TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-56 - FUNCDATA $0, ·Compare·args_stackmap(SB) - MOVD a_base+0(FP), R3 - MOVD a_len+8(FP), R4 - MOVD b_base+24(FP), R5 - MOVD b_len+32(FP), R6 - LA ret+48(FP), R7 - BR cmpbody<>(SB) - TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40 MOVD a_base+0(FP), R3 MOVD a_len+8(FP), R4 diff --git a/src/internal/bytealg/compare_wasm.s b/src/internal/bytealg/compare_wasm.s index 1eb63c70dae89..b2a20a08f6e52 100644 --- a/src/internal/bytealg/compare_wasm.s +++ b/src/internal/bytealg/compare_wasm.s @@ -15,17 +15,6 @@ TEXT ·Compare(SB), NOSPLIT, $0-56 I64Store ret+48(FP) RET -TEXT bytes·Compare(SB), NOSPLIT, $0-56 - FUNCDATA $0, ·Compare·args_stackmap(SB) - Get SP - I64Load a_base+0(FP) - I64Load a_len+8(FP) - I64Load b_base+24(FP) - I64Load b_len+32(FP) - Call cmpbody<>(SB) - I64Store ret+48(FP) - RET - TEXT runtime·cmpstring(SB), NOSPLIT, $0-40 Get SP I64Load a_base+0(FP) diff --git a/src/internal/bytealg/count_amd64.s b/src/internal/bytealg/count_amd64.s index cecba11cf9ff0..fa864c4c76631 100644 --- a/src/internal/bytealg/count_amd64.s +++ b/src/internal/bytealg/count_amd64.s @@ -6,7 +6,7 @@ #include "textflag.h" TEXT ·Count(SB),NOSPLIT,$0-40 - CMPB internal∕cpu·X86+const_x86_HasPOPCNT(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1 JEQ 2(PC) JMP ·countGeneric(SB) MOVQ b_base+0(FP), SI @@ -16,7 +16,7 @@ TEXT ·Count(SB),NOSPLIT,$0-40 JMP countbody<>(SB) TEXT ·CountString(SB),NOSPLIT,$0-32 - CMPB internal∕cpu·X86+const_x86_HasPOPCNT(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasPOPCNT(SB), $1 JEQ 2(PC) JMP ·countGenericString(SB) MOVQ s_base+0(FP), SI @@ -151,7 +151,7 @@ endofpage: RET avx2: - CMPB internal∕cpu·X86+const_x86_HasAVX2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse MOVD AX, X0 LEAQ -32(SI)(BX*1), R11 diff 
--git a/src/internal/bytealg/count_generic.go b/src/internal/bytealg/count_generic.go index a763b3bc616be..e24b2b7fa076f 100644 --- a/src/internal/bytealg/count_generic.go +++ b/src/internal/bytealg/count_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm64 +// +build !amd64,!arm64,!ppc64le,!ppc64 package bytealg diff --git a/src/internal/bytealg/count_native.go b/src/internal/bytealg/count_native.go index a62c4cb5c0963..e6a91b3c0e265 100644 --- a/src/internal/bytealg/count_native.go +++ b/src/internal/bytealg/count_native.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64 arm64 +// +build amd64 arm64 ppc64le ppc64 package bytealg diff --git a/src/internal/bytealg/count_ppc64x.s b/src/internal/bytealg/count_ppc64x.s new file mode 100644 index 0000000000000..7abdce1954f56 --- /dev/null +++ b/src/internal/bytealg/count_ppc64x.s @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ppc64le ppc64 + +#include "go_asm.h" +#include "textflag.h" + +TEXT ·Count(SB), NOSPLIT|NOFRAME, $0-40 + MOVD b_base+0(FP), R3 // R3 = byte array pointer + MOVD b_len+8(FP), R4 // R4 = length + MOVBZ c+24(FP), R5 // R5 = byte + MOVD $ret+32(FP), R14 // R14 = &ret + BR countbytebody<>(SB) + +TEXT ·CountString(SB), NOSPLIT|NOFRAME, $0-32 + MOVD s_base+0(FP), R3 // R3 = string + MOVD s_len+8(FP), R4 // R4 = length + MOVBZ c+16(FP), R5 // R5 = byte + MOVD $ret+24(FP), R14 // R14 = &ret + BR countbytebody<>(SB) + +// R3: addr of string +// R4: len of string +// R5: byte to count +// R14: addr for return value +// endianness shouldn't matter since we are just counting and order +// is irrelevant +TEXT countbytebody<>(SB), NOSPLIT|NOFRAME, $0-0 + DCBT (R3) // Prepare cache line. + MOVD R0, R18 // byte count + MOVD R3, R19 // Save base address for calculating the index later. + MOVD R4, R16 + + MOVD R5, R6 + RLDIMI $8, R6, $48, R6 + RLDIMI $16, R6, $32, R6 + RLDIMI $32, R6, $0, R6 // fill reg with the byte to count + + VSPLTISW $3, V4 // used for shift + MTVRD R6, V1 // move compare byte + VSPLTB $7, V1, V1 // replicate byte across V1 + + CMPU R4, $32 // Check if it's a small string (<32 bytes) + BLT tail // Jump to the small string case + XXLXOR VS37, VS37, VS37 // clear V5 (aka VS37) to use as accumulator + +cmploop: + LXVW4X (R3), VS32 // load bytes from string + + // when the bytes match, the corresonding byte contains all 1s + VCMPEQUB V1, V0, V2 // compare bytes + VPOPCNTD V2, V3 // each double word contains its count + VADDUDM V3, V5, V5 // accumulate bit count in each double word + ADD $16, R3, R3 // increment pointer + SUB $16, R16, R16 // remaining bytes + CMP R16, $16 // at least 16 remaining? + BGE cmploop + VSRD V5, V4, V5 // shift by 3 to convert bits to bytes + VSLDOI $8, V5, V5, V6 // get the double word values from vector + MFVSRD V5, R9 + MFVSRD V6, R10 + ADD R9, R10, R9 + ADD R9, R18, R18 + +tail: + CMP R16, $8 // 8 bytes left? 
+ BLT small + + MOVD (R3), R12 // load 8 bytes + CMPB R12, R6, R17 // compare bytes + POPCNTD R17, R15 // bit count + SRD $3, R15, R15 // byte count + ADD R15, R18, R18 // add to byte count + +next1: + ADD $8, R3, R3 + SUB $8, R16, R16 // remaining bytes + BR tail + +small: + CMP $0, R16 // any remaining + BEQ done + MOVBZ (R3), R12 // check each remaining byte + CMP R12, R5 + BNE next2 + ADD $1, R18 + +next2: + SUB $1, R16 + ADD $1, R3 // inc address + BR small + +done: + MOVD R18, (R14) // return count + RET diff --git a/src/internal/bytealg/equal_386.s b/src/internal/bytealg/equal_386.s index c048b6cebc87c..ad7da0ea8b3a6 100644 --- a/src/internal/bytealg/equal_386.s +++ b/src/internal/bytealg/equal_386.s @@ -23,25 +23,6 @@ eq: MOVB $1, ret+24(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-25 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVL a_len+4(FP), BX - MOVL b_len+16(FP), CX - CMPL BX, CX - JNE neq - MOVL a_base+0(FP), SI - MOVL b_base+12(FP), DI - CMPL SI, DI - JEQ eq - LEAL ret+24(FP), AX - JMP memeqbody<>(SB) -neq: - MOVB $0, ret+24(FP) - RET -eq: - MOVB $1, ret+24(FP) - RET - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT,$0-13 MOVL a+0(FP), SI @@ -80,7 +61,7 @@ TEXT memeqbody<>(SB),NOSPLIT,$0-0 hugeloop: CMPL BX, $64 JB bigloop - CMPB internal∕cpu·X86+const_x86_HasSSE2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 JNE bigloop MOVOU (SI), X0 MOVOU (DI), X1 diff --git a/src/internal/bytealg/equal_amd64.s b/src/internal/bytealg/equal_amd64.s index cbc62dc1d8f3d..fa82589644512 100644 --- a/src/internal/bytealg/equal_amd64.s +++ b/src/internal/bytealg/equal_amd64.s @@ -23,25 +23,6 @@ eq: MOVB $1, ret+48(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVQ a_len+8(FP), BX - MOVQ b_len+32(FP), CX - CMPQ BX, CX - JNE neq - MOVQ a_base+0(FP), SI - MOVQ b_base+24(FP), DI - CMPQ SI, DI - JEQ eq - LEAQ ret+48(FP), AX - JMP memeqbody<>(SB) -neq: - MOVB $0, ret+48(FP) - RET -eq: - 
MOVB $1, ret+48(FP) - RET - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT,$0-25 MOVQ a+0(FP), SI @@ -77,9 +58,9 @@ TEXT memeqbody<>(SB),NOSPLIT,$0-0 JB small CMPQ BX, $64 JB bigloop - CMPB internal∕cpu·X86+const_x86_HasAVX2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JE hugeloop_avx2 - + // 64 bytes at a time using xmm registers hugeloop: CMPQ BX, $64 diff --git a/src/internal/bytealg/equal_amd64p32.s b/src/internal/bytealg/equal_amd64p32.s index c841f98b2f2a5..00d5c0afcc6a1 100644 --- a/src/internal/bytealg/equal_amd64p32.s +++ b/src/internal/bytealg/equal_amd64p32.s @@ -24,26 +24,6 @@ eq: MOVB $1, ret+24(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-25 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVL a_len+4(FP), BX - MOVL b_len+16(FP), CX - CMPL BX, CX - JNE neq - MOVL a_base+0(FP), SI - MOVL b_base+12(FP), DI - CMPL SI, DI - JEQ eq - CALL memeqbody<>(SB) - MOVB AX, ret+24(FP) - RET -neq: - MOVB $0, ret+24(FP) - RET -eq: - MOVB $1, ret+24(FP) - RET - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT,$0-17 MOVL a+0(FP), SI @@ -80,7 +60,7 @@ TEXT memeqbody<>(SB),NOSPLIT,$0-0 CMPQ BX, $8 JB small - + // 64 bytes at a time using xmm registers hugeloop: CMPQ BX, $64 diff --git a/src/internal/bytealg/equal_arm.s b/src/internal/bytealg/equal_arm.s index 6b0d7deed9336..0d23260945c5c 100644 --- a/src/internal/bytealg/equal_arm.s +++ b/src/internal/bytealg/equal_arm.s @@ -9,7 +9,7 @@ TEXT ·Equal(SB),NOSPLIT,$0-25 MOVW a_len+4(FP), R1 MOVW b_len+16(FP), R3 - + CMP R1, R3 // unequal lengths are not equal B.NE notequal @@ -35,10 +35,6 @@ equal: MOVBU R0, ret+24(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-25 - FUNCDATA $0, ·Equal·args_stackmap(SB) - JMP ·Equal(SB) - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-13 MOVW a+0(FP), R1 diff --git a/src/internal/bytealg/equal_arm64.s b/src/internal/bytealg/equal_arm64.s index 
30abd980c5166..2c6af01e0ae2f 100644 --- a/src/internal/bytealg/equal_arm64.s +++ b/src/internal/bytealg/equal_arm64.s @@ -25,27 +25,6 @@ not_equal: MOVB ZR, ret+48(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVD a_len+8(FP), R1 - MOVD b_len+32(FP), R3 - CMP R1, R3 - // unequal lengths are not equal - BNE not_equal - // short path to handle 0-byte case - CBZ R1, equal - MOVD a_base+0(FP), R0 - MOVD b_base+24(FP), R2 - MOVD $ret+48(FP), R8 - B memeqbody<>(SB) -equal: - MOVD $1, R0 - MOVB R0, ret+48(FP) - RET -not_equal: - MOVB ZR, ret+48(FP) - RET - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 MOVD size+16(FP), R1 @@ -67,6 +46,7 @@ TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 CMP R3, R4 BEQ eq MOVD 8(R26), R5 // compiler stores size at offset 8 in the closure + CBZ R5, eq MOVD R3, 8(RSP) MOVD R4, 16(RSP) MOVD R5, 24(RSP) @@ -119,30 +99,41 @@ chunk16: CBZ R3, tail ADD R3, R0, R6 // end of chunks chunk16_loop: - VLD1.P (R0), [V0.D2] - VLD1.P (R2), [V1.D2] - VCMEQ V0.D2, V1.D2, V2.D2 + LDP.P 16(R0), (R4, R5) + LDP.P 16(R2), (R7, R9) + EOR R4, R7 + CBNZ R7, not_equal + EOR R5, R9 + CBNZ R9, not_equal CMP R0, R6 - VMOV V2.D[0], R4 - VMOV V2.D[1], R5 - CBZ R4, not_equal - CBZ R5, not_equal BNE chunk16_loop AND $0xf, R1, R1 CBZ R1, equal tail: // special compare of tail with length < 16 TBZ $3, R1, lt_8 - MOVD.P 8(R0), R4 - MOVD.P 8(R2), R5 - CMP R4, R5 - BNE not_equal + MOVD (R0), R4 + MOVD (R2), R5 + EOR R4, R5 + CBNZ R5, not_equal + SUB $8, R1, R6 // offset of the last 8 bytes + MOVD (R0)(R6), R4 + MOVD (R2)(R6), R5 + EOR R4, R5 + CBNZ R5, not_equal + B equal lt_8: TBZ $2, R1, lt_4 - MOVWU.P 4(R0), R4 - MOVWU.P 4(R2), R5 - CMP R4, R5 - BNE not_equal + MOVWU (R0), R4 + MOVWU (R2), R5 + EOR R4, R5 + CBNZ R5, not_equal + SUB $4, R1, R6 // offset of the last 4 bytes + MOVWU (R0)(R6), R4 + MOVWU (R2)(R6), R5 + EOR R4, R5 + CBNZ R5, not_equal + B equal lt_4: TBZ $1, R1, lt_2 
MOVHU.P 2(R0), R4 @@ -150,7 +141,7 @@ lt_4: CMP R4, R5 BNE not_equal lt_2: - TBZ $0, R1, equal + TBZ $0, R1, equal one: MOVBU (R0), R4 MOVBU (R2), R5 diff --git a/src/internal/bytealg/equal_mips64x.s b/src/internal/bytealg/equal_mips64x.s index a005864483512..a75b957e8b50e 100644 --- a/src/internal/bytealg/equal_mips64x.s +++ b/src/internal/bytealg/equal_mips64x.s @@ -35,10 +35,6 @@ equal: MOVB R1, ret+48(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - JMP ·Equal(SB) - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 MOVV a+0(FP), R1 diff --git a/src/internal/bytealg/equal_mipsx.s b/src/internal/bytealg/equal_mipsx.s index 22ab450e66fd6..70d579d5d434e 100644 --- a/src/internal/bytealg/equal_mipsx.s +++ b/src/internal/bytealg/equal_mipsx.s @@ -35,10 +35,6 @@ equal: MOVB R1, ret+24(FP) RET -TEXT bytes·Equal(SB),NOSPLIT,$0-25 - FUNCDATA $0, ·Equal·args_stackmap(SB) - JMP ·Equal(SB) - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT,$0-13 MOVW a+0(FP), R1 diff --git a/src/internal/bytealg/equal_native.go b/src/internal/bytealg/equal_native.go index b5c453086c9c9..995f0749d4419 100644 --- a/src/internal/bytealg/equal_native.go +++ b/src/internal/bytealg/equal_native.go @@ -4,11 +4,23 @@ package bytealg +import "unsafe" + // Note: there's no equal_generic.go because every platform must implement at least memequal_varlen in assembly. //go:noescape func Equal(a, b []byte) bool +// The declarations below generate ABI wrappers for functions +// implemented in assembly in this package but declared in another +// package. + // The compiler generates calls to runtime.memequal and runtime.memequal_varlen. // In addition, the runtime calls runtime.memequal explicitly. // Those functions are implemented in this package. 
+ +//go:linkname abigen_runtime_memequal runtime.memequal +func abigen_runtime_memequal(a, b unsafe.Pointer, size uintptr) bool + +//go:linkname abigen_runtime_memequal_varlen runtime.memequal_varlen +func abigen_runtime_memequal_varlen(a, b unsafe.Pointer) bool diff --git a/src/internal/bytealg/equal_ppc64x.s b/src/internal/bytealg/equal_ppc64x.s index 9c9cf77588ad1..74ea34834dc4e 100644 --- a/src/internal/bytealg/equal_ppc64x.s +++ b/src/internal/bytealg/equal_ppc64x.s @@ -7,39 +7,15 @@ #include "go_asm.h" #include "textflag.h" -TEXT ·Equal(SB),NOSPLIT,$0-49 +TEXT ·Equal(SB),NOSPLIT|NOFRAME,$0-49 MOVD a_len+8(FP), R4 MOVD b_len+32(FP), R5 CMP R5, R4 // unequal lengths are not equal BNE noteq MOVD a_base+0(FP), R3 MOVD b_base+24(FP), R4 - BL memeqbody<>(SB) - - MOVBZ R9,ret+48(FP) - RET - -noteq: - MOVBZ $0,ret+48(FP) - RET - -equal: - MOVD $1,R3 - MOVBZ R3,ret+48(FP) - RET - -TEXT bytes·Equal(SB),NOSPLIT,$0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVD a_len+8(FP), R4 - MOVD b_len+32(FP), R5 - CMP R5, R4 // unequal lengths are not equal - BNE noteq - MOVD a_base+0(FP), R3 - MOVD b_base+24(FP), R4 - BL memeqbody<>(SB) - - MOVBZ R9,ret+48(FP) - RET + MOVD $ret+48(FP), R10 + BR memeqbody<>(SB) noteq: MOVBZ $0,ret+48(FP) @@ -51,25 +27,23 @@ equal: RET // memequal(a, b unsafe.Pointer, size uintptr) bool -TEXT runtime·memequal(SB),NOSPLIT,$0-25 +TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 MOVD a+0(FP), R3 MOVD b+8(FP), R4 MOVD size+16(FP), R5 + MOVD $ret+24(FP), R10 - BL memeqbody<>(SB) - MOVB R9, ret+24(FP) - RET + BR memeqbody<>(SB) // memequal_varlen(a, b unsafe.Pointer) bool -TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 +TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-17 MOVD a+0(FP), R3 MOVD b+8(FP), R4 CMP R3, R4 BEQ eq MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure - BL memeqbody<>(SB) - MOVB R9, ret+16(FP) - RET + MOVD $ret+16(FP), R10 + BR memeqbody<>(SB) eq: MOVD $1, R3 MOVB R3, ret+16(FP) @@ -79,7 +53,7 @@ eq: // R3 = s1 // 
R4 = s2 // R5 = len -// R9 = return value +// R10 = addr of return value (byte) TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0 MOVD R5,CTR CMP R5,$8 // only optimize >=8 @@ -92,26 +66,19 @@ TEXT memeqbody<>(SB),NOSPLIT|NOFRAME,$0-0 setup32a: // 8 byte aligned, >= 32 bytes SRADCC $5,R5,R6 // number of 32 byte chunks to compare MOVD R6,CTR + MOVD $16,R14 // index for VSX loads and stores loop32a: - MOVD 0(R3),R6 // doublewords to compare - MOVD 0(R4),R7 - MOVD 8(R3),R8 // - MOVD 8(R4),R9 - CMP R6,R7 // bytes batch? - BNE noteq - MOVD 16(R3),R6 - MOVD 16(R4),R7 - CMP R8,R9 // bytes match? - MOVD 24(R3),R8 - MOVD 24(R4),R9 - BNE noteq - CMP R6,R7 // bytes match? - BNE noteq + LXVD2X (R3+R0), VS32 // VS32 = V0 + LXVD2X (R4+R0), VS33 // VS33 = V1 + VCMPEQUBCC V0, V1, V2 // compare, setting CR6 + BGE CR6, noteq + LXVD2X (R3+R14), VS32 + LXVD2X (R4+R14), VS33 + VCMPEQUBCC V0, V1, V2 + BGE CR6, noteq ADD $32,R3 // bump up to next 32 ADD $32,R4 - CMP R8,R9 // bytes match? - BC 8,2,loop32a // br ctr and cr - BNE noteq + BC 16, 0, loop32a // br ctr and cr ANDCC $24,R5,R6 // Any 8 byte chunks? 
BEQ leftover // and result is 0 setup8a: @@ -145,9 +112,10 @@ simple: BNE noteq BR equal noteq: - MOVD $0, R9 + MOVB $0, (R10) RET equal: - MOVD $1, R9 + MOVD $1, R3 + MOVB R3, (R10) RET diff --git a/src/internal/bytealg/equal_s390x.s b/src/internal/bytealg/equal_s390x.s index 84dbdbfe187d6..d7724747d4b76 100644 --- a/src/internal/bytealg/equal_s390x.s +++ b/src/internal/bytealg/equal_s390x.s @@ -17,19 +17,6 @@ notequal: MOVB $0, ret+48(FP) RET -TEXT bytes·Equal(SB),NOSPLIT|NOFRAME,$0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVD a_len+8(FP), R2 - MOVD b_len+32(FP), R6 - MOVD a_base+0(FP), R3 - MOVD b_base+24(FP), R5 - LA ret+48(FP), R7 - CMPBNE R2, R6, notequal - BR memeqbody<>(SB) -notequal: - MOVB $0, ret+48(FP) - RET - // memequal(a, b unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 MOVD a+0(FP), R3 diff --git a/src/internal/bytealg/equal_wasm.s b/src/internal/bytealg/equal_wasm.s index cb85a934c736e..cac3fb2d13702 100644 --- a/src/internal/bytealg/equal_wasm.s +++ b/src/internal/bytealg/equal_wasm.s @@ -25,27 +25,6 @@ TEXT ·Equal(SB), NOSPLIT, $0-49 End RET -TEXT bytes·Equal(SB), NOSPLIT, $0-49 - FUNCDATA $0, ·Equal·args_stackmap(SB) - MOVD a_len+8(FP), R0 - MOVD b_len+32(FP), R1 - Get R0 - Get R1 - I64Eq - If - Get SP - I64Load a+0(FP) - I64Load b+24(FP) - Get R0 - Call memeqbody<>(SB) - I64Store8 ret+48(FP) - Else - Get SP - I64Const $0 - I64Store8 ret+48(FP) - End - RET - // memequal(p, q unsafe.Pointer, size uintptr) bool TEXT runtime·memequal(SB), NOSPLIT, $0-25 Get SP diff --git a/src/internal/bytealg/index_amd64.s b/src/internal/bytealg/index_amd64.s index f7297c0cab4eb..4459820801082 100644 --- a/src/internal/bytealg/index_amd64.s +++ b/src/internal/bytealg/index_amd64.s @@ -233,7 +233,7 @@ success_avx2: VZEROUPPER JMP success sse42: - CMPB internal∕cpu·X86+const_x86_HasSSE42(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE42(SB), $1 JNE no_sse42 CMPQ AX, $12 // PCMPESTRI is slower than normal compare, diff 
--git a/src/internal/bytealg/index_arm64.s b/src/internal/bytealg/index_arm64.s index 20d68ba9b8bf8..3a551a72da139 100644 --- a/src/internal/bytealg/index_arm64.s +++ b/src/internal/bytealg/index_arm64.s @@ -32,7 +32,7 @@ TEXT indexbody<>(SB),NOSPLIT,$0-56 // to avoid repeatedly re-load it again and again // for sebsequent substring comparisons SUB R3, R1, R4 - // R4 contains the start of last substring for comparsion + // R4 contains the start of last substring for comparison ADD R0, R4, R4 ADD $1, R0, R8 diff --git a/src/internal/bytealg/indexbyte_386.s b/src/internal/bytealg/indexbyte_386.s index ce7645e771ad3..8a030542d4cbb 100644 --- a/src/internal/bytealg/indexbyte_386.s +++ b/src/internal/bytealg/indexbyte_386.s @@ -32,11 +32,3 @@ TEXT ·IndexByteString(SB),NOSPLIT,$0-16 SUBL $1, DI MOVL DI, ret+12(FP) RET - -TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - JMP ·IndexByte(SB) - -TEXT strings·IndexByte(SB),NOSPLIT,$0-16 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - JMP ·IndexByteString(SB) diff --git a/src/internal/bytealg/indexbyte_amd64.s b/src/internal/bytealg/indexbyte_amd64.s index 359f38904b1d3..f78093c539013 100644 --- a/src/internal/bytealg/indexbyte_amd64.s +++ b/src/internal/bytealg/indexbyte_amd64.s @@ -19,30 +19,6 @@ TEXT ·IndexByteString(SB), NOSPLIT, $0-32 LEAQ ret+24(FP), R8 JMP indexbytebody<>(SB) - // Provide direct access to these functions from other packages. - // This is the equivlant of doing: - // package bytes - // func IndexByte(b []byte, c byte) int { - // return bytealg.IndexByte(s, c) - // } - // but involves no call overhead. - // TODO: remove this hack when midstack inlining is enabled? 
-TEXT bytes·IndexByte(SB), NOSPLIT, $0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), BX - MOVB c+24(FP), AL - LEAQ ret+32(FP), R8 - JMP indexbytebody<>(SB) - -TEXT strings·IndexByte(SB), NOSPLIT, $0-32 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - MOVQ s_base+0(FP), SI - MOVQ s_len+8(FP), BX - MOVB c+16(FP), AL - LEAQ ret+24(FP), R8 - JMP indexbytebody<>(SB) - // input: // SI: data // BX: data len @@ -139,7 +115,7 @@ endofpage: RET avx2: - CMPB internal∕cpu·X86+const_x86_HasAVX2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1 JNE sse MOVD AX, X0 LEAQ -32(SI)(BX*1), R11 diff --git a/src/internal/bytealg/indexbyte_amd64p32.s b/src/internal/bytealg/indexbyte_amd64p32.s index a791c7396a8d9..c445a7ebc1364 100644 --- a/src/internal/bytealg/indexbyte_amd64p32.s +++ b/src/internal/bytealg/indexbyte_amd64p32.s @@ -21,24 +21,6 @@ TEXT ·IndexByteString(SB),NOSPLIT,$0-20 MOVL AX, ret+16(FP) RET -TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - MOVL b_base+0(FP), SI - MOVL b_len+4(FP), BX - MOVB c+12(FP), AL - CALL indexbytebody<>(SB) - MOVL AX, ret+16(FP) - RET - -TEXT strings·IndexByte(SB),NOSPLIT,$0-20 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - MOVL s_base+0(FP), SI - MOVL s_len+4(FP), BX - MOVB c+8(FP), AL - CALL indexbytebody<>(SB) - MOVL AX, ret+16(FP) - RET - // input: // SI: data // BX: data len diff --git a/src/internal/bytealg/indexbyte_arm.s b/src/internal/bytealg/indexbyte_arm.s index 6c746c6869092..7d9bbb183d803 100644 --- a/src/internal/bytealg/indexbyte_arm.s +++ b/src/internal/bytealg/indexbyte_arm.s @@ -52,11 +52,3 @@ _sib_notfound: MOVW $-1, R0 MOVW R0, ret+12(FP) RET - -TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - JMP ·IndexByte(SB) - -TEXT strings·IndexByte(SB),NOSPLIT,$0-16 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - JMP ·IndexByteString(SB) diff --git a/src/internal/bytealg/indexbyte_arm64.s 
b/src/internal/bytealg/indexbyte_arm64.s index 6991ccec15cd3..40843fbc5b443 100644 --- a/src/internal/bytealg/indexbyte_arm64.s +++ b/src/internal/bytealg/indexbyte_arm64.s @@ -18,22 +18,6 @@ TEXT ·IndexByteString(SB),NOSPLIT,$0-32 MOVD $ret+24(FP), R8 B indexbytebody<>(SB) -TEXT bytes·IndexByte(SB),NOSPLIT,$0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - MOVD b_base+0(FP), R0 - MOVD b_len+8(FP), R2 - MOVBU c+24(FP), R1 - MOVD $ret+32(FP), R8 - B indexbytebody<>(SB) - -TEXT strings·IndexByte(SB),NOSPLIT,$0-32 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - MOVD s_base+0(FP), R0 - MOVD s_len+8(FP), R2 - MOVBU c+16(FP), R1 - MOVD $ret+24(FP), R8 - B indexbytebody<>(SB) - // input: // R0: data // R1: byte to search diff --git a/src/internal/bytealg/indexbyte_generic.go b/src/internal/bytealg/indexbyte_generic.go index ef7801e5e1655..6bff31ceee492 100644 --- a/src/internal/bytealg/indexbyte_generic.go +++ b/src/internal/bytealg/indexbyte_generic.go @@ -6,8 +6,6 @@ package bytealg -import _ "unsafe" // for go:linkname - func IndexByte(b []byte, c byte) int { for i, x := range b { if x == c { @@ -25,23 +23,3 @@ func IndexByteString(s string, c byte) int { } return -1 } - -//go:linkname bytes_IndexByte bytes.IndexByte -func bytes_IndexByte(b []byte, c byte) int { - for i, x := range b { - if x == c { - return i - } - } - return -1 -} - -//go:linkname strings_IndexByte strings.IndexByte -func strings_IndexByte(s string, c byte) int { - for i := 0; i < len(s); i++ { - if s[i] == c { - return i - } - } - return -1 -} diff --git a/src/internal/bytealg/indexbyte_mips64x.s b/src/internal/bytealg/indexbyte_mips64x.s index 9c421174b9577..6ebf0dee24b79 100644 --- a/src/internal/bytealg/indexbyte_mips64x.s +++ b/src/internal/bytealg/indexbyte_mips64x.s @@ -52,11 +52,3 @@ notfound: MOVV $-1, R1 MOVV R1, ret+24(FP) RET - -TEXT bytes·IndexByte(SB),NOSPLIT,$0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - JMP ·IndexByte(SB) - -TEXT strings·IndexByte(SB),NOSPLIT,$0-32 - 
FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - JMP ·IndexByteString(SB) diff --git a/src/internal/bytealg/indexbyte_mipsx.s b/src/internal/bytealg/indexbyte_mipsx.s index bc7258f1d1fe8..e44440b5f9eeb 100644 --- a/src/internal/bytealg/indexbyte_mipsx.s +++ b/src/internal/bytealg/indexbyte_mipsx.s @@ -50,11 +50,3 @@ notfound: MOVW $-1, R1 MOVW R1, ret+12(FP) RET - -TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - JMP ·IndexByte(SB) - -TEXT strings·IndexByte(SB),NOSPLIT,$0-16 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - JMP ·IndexByteString(SB) diff --git a/src/internal/bytealg/indexbyte_ppc64x.s b/src/internal/bytealg/indexbyte_ppc64x.s index ccf897d99c451..6e14e80af1341 100644 --- a/src/internal/bytealg/indexbyte_ppc64x.s +++ b/src/internal/bytealg/indexbyte_ppc64x.s @@ -21,31 +21,15 @@ TEXT ·IndexByteString(SB),NOSPLIT|NOFRAME,$0-32 MOVD $ret+24(FP), R14 // R14 = &ret BR indexbytebody<>(SB) -TEXT bytes·IndexByte(SB),NOSPLIT|NOFRAME,$0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - MOVD b_base+0(FP), R3 // R3 = byte array pointer - MOVD b_len+8(FP), R4 // R4 = length - MOVBZ c+24(FP), R5 // R5 = byte - MOVD $ret+32(FP), R14 // R14 = &ret - BR indexbytebody<>(SB) - -TEXT strings·IndexByte(SB),NOSPLIT|NOFRAME,$0-32 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - MOVD s_base+0(FP), R3 // R3 = string - MOVD s_len+8(FP), R4 // R4 = length - MOVBZ c+16(FP), R5 // R5 = byte - MOVD $ret+24(FP), R14 // R14 = &ret - BR indexbytebody<>(SB) - TEXT indexbytebody<>(SB),NOSPLIT|NOFRAME,$0-0 - DCBT (R3) // Prepare cache line. MOVD R3,R17 // Save base address for calculating the index later. RLDICR $0,R3,$60,R8 // Align address to doubleword boundary in R8. RLDIMI $8,R5,$48,R5 // Replicating the byte across the register. ADD R4,R3,R7 // Last acceptable address in R7. + DCBT (R8) // Prepare cache line. RLDIMI $16,R5,$32,R5 - CMPU R4,$32 // Check if it's a small string (<32 bytes). Those will be processed differently. 
+ CMPU R4,$32 // Check if it's a small string (≤32 bytes). Those will be processed differently. MOVD $-1,R9 WORD $0x54661EB8 // Calculate padding in R6 (rlwinm r6,r3,3,26,28). RLDIMI $32,R5,$0,R5 @@ -56,7 +40,7 @@ TEXT indexbytebody<>(SB),NOSPLIT|NOFRAME,$0-0 #else SRD R6,R9,R9 // Same for Big Endian #endif - BLE small_string // Jump to the small string case if it's <32 bytes. + BLE small_string // Jump to the small string case if it's ≤32 bytes. // If we are 64-byte aligned, branch to qw_align just to get the auxiliary values // in V0, V1 and V10, then branch to the preloop. @@ -97,7 +81,7 @@ qw_align: LVSL (R0+R0),V11 VSLB V11,V10,V10 VSPLTB $7,V1,V1 // Replicate byte across V1 - CMPU R4, $64 // If len <= 64, don't use the vectorized loop + CMPU R4, $64 // If len ≤ 64, don't use the vectorized loop BLE tail // We will load 4 quardwords per iteration in the loop, so check for @@ -131,7 +115,7 @@ qw_align: // 64-byte aligned. Prepare for the main loop. preloop: CMPU R4,$64 - BLE tail // If len <= 64, don't use the vectorized loop + BLE tail // If len ≤ 64, don't use the vectorized loop // We are now aligned to a 64-byte boundary. We will load 4 quadwords // per loop iteration. 
The last doubleword is in R10, so our loop counter @@ -140,30 +124,34 @@ preloop: SRD $6,R6,R9 // Loop counter in R9 MOVD R9,CTR + ADD $-64,R8,R8 // Adjust index for loop entry MOVD $16,R11 // Load offsets for the vector loads MOVD $32,R9 MOVD $48,R7 // Main loop we will load 64 bytes per iteration loop: + ADD $64,R8,R8 // Fuse addi+lvx for performance LVX (R8+R0),V2 // Load 4 16-byte vectors - LVX (R11+R8),V3 - LVX (R9+R8),V4 - LVX (R7+R8),V5 + LVX (R8+R11),V3 VCMPEQUB V1,V2,V6 // Look for byte in each vector VCMPEQUB V1,V3,V7 + + LVX (R8+R9),V4 + LVX (R8+R7),V5 VCMPEQUB V1,V4,V8 VCMPEQUB V1,V5,V9 + VOR V6,V7,V11 // Compress the result in a single vector VOR V8,V9,V12 - VOR V11,V12,V11 - VCMPEQUBCC V0,V11,V11 // Check for byte + VOR V11,V12,V13 + VCMPEQUBCC V0,V13,V14 // Check for byte BGE CR6,found - ADD $64,R8,R8 BC 16,0,loop // bdnz loop - // Handle the tailing bytes or R4 <= 64 + // Handle the tailing bytes or R4 ≤ 64 RLDICL $0,R6,$58,R4 + ADD $64,R8,R8 tail: CMPU R4,$0 BEQ notfound diff --git a/src/internal/bytealg/indexbyte_s390x.s b/src/internal/bytealg/indexbyte_s390x.s index 15fd2935b4a9e..cf88d92a24bb8 100644 --- a/src/internal/bytealg/indexbyte_s390x.s +++ b/src/internal/bytealg/indexbyte_s390x.s @@ -19,22 +19,6 @@ TEXT ·IndexByteString(SB),NOSPLIT|NOFRAME,$0-32 MOVD $ret+24(FP), R2 // &ret => R9 BR indexbytebody<>(SB) -TEXT bytes·IndexByte(SB),NOSPLIT|NOFRAME,$0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - MOVD b_base+0(FP), R3// b_base => R3 - MOVD b_len+8(FP), R4 // b_len => R4 - MOVBZ c+24(FP), R5 // c => R5 - MOVD $ret+32(FP), R2 // &ret => R9 - BR indexbytebody<>(SB) - -TEXT strings·IndexByte(SB),NOSPLIT|NOFRAME,$0-32 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - MOVD s_base+0(FP), R3// s_base => R3 - MOVD s_len+8(FP), R4 // s_len => R4 - MOVBZ c+16(FP), R5 // c => R5 - MOVD $ret+24(FP), R2 // &ret => R9 - BR indexbytebody<>(SB) - // input: // R3: s // R4: s_len @@ -64,7 +48,7 @@ notfound: RET large: - MOVBZ 
internal∕cpu·S390X+const_s390x_HasVX(SB), R1 + MOVBZ internal∕cpu·S390X+const_offsetS390xHasVX(SB), R1 CMPBNE R1, $0, vectorimpl srstimpl: // no vector facility diff --git a/src/internal/bytealg/indexbyte_wasm.s b/src/internal/bytealg/indexbyte_wasm.s index 5e64aa031ac4c..aae11b30a6eaa 100644 --- a/src/internal/bytealg/indexbyte_wasm.s +++ b/src/internal/bytealg/indexbyte_wasm.s @@ -49,51 +49,6 @@ TEXT ·IndexByteString(SB), NOSPLIT, $0-32 RET -TEXT bytes·IndexByte(SB), NOSPLIT, $0-40 - FUNCDATA $0, ·IndexByte·args_stackmap(SB) - Get SP - I64Load b_base+0(FP) - I32WrapI64 - I32Load8U c+24(FP) - I64Load b_len+8(FP) - I32WrapI64 - Call memchr<>(SB) - I64ExtendSI32 - Set R0 - - I64Const $-1 - Get R0 - I64Load b_base+0(FP) - I64Sub - Get R0 - I64Eqz $0 - Select - I64Store ret+32(FP) - - RET - -TEXT strings·IndexByte(SB), NOSPLIT, $0-32 - FUNCDATA $0, ·IndexByteString·args_stackmap(SB) - Get SP - I64Load s_base+0(FP) - I32WrapI64 - I32Load8U c+16(FP) - I64Load s_len+8(FP) - I32WrapI64 - Call memchr<>(SB) - I64ExtendSI32 - Set R0 - - I64Const $-1 - Get R0 - I64Load s_base+0(FP) - I64Sub - Get R0 - I64Eqz $0 - Select - I64Store ret+24(FP) - RET - // compiled with emscripten // params: s, c, len // ret: index diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go index 2569024245d0f..eb74a9fa82a59 100644 --- a/src/internal/cpu/cpu.go +++ b/src/internal/cpu/cpu.go @@ -6,9 +6,18 @@ // used by the Go standard library. package cpu -// debugOptions is set to true by the runtime if go was compiled with GOEXPERIMENT=debugcpu -// and GOOS is Linux or Darwin. This variable is linknamed in runtime/proc.go. -var debugOptions bool +// DebugOptions is set to true by the runtime if the OS supports reading +// GODEBUG early in runtime startup. +// This should not be changed after it is initialized. +var DebugOptions bool + +// CacheLinePad is used to pad structs to avoid false sharing. 
+type CacheLinePad struct{ _ [CacheLinePadSize]byte } + +// CacheLineSize is the CPU's assumed cache line size. +// There is currently no runtime detection of the real cache line size +// so we use the constant per GOARCH CacheLinePadSize as an approximation. +var CacheLineSize uintptr = CacheLinePadSize var X86 x86 @@ -17,7 +26,7 @@ var X86 x86 // in addition to the cpuid feature bit being set. // The struct is padded to avoid false sharing. type x86 struct { - _ [CacheLineSize]byte + _ CacheLinePad HasAES bool HasADX bool HasAVX bool @@ -34,32 +43,34 @@ type x86 struct { HasSSSE3 bool HasSSE41 bool HasSSE42 bool - _ [CacheLineSize]byte + _ CacheLinePad } var PPC64 ppc64 -// For ppc64x, it is safe to check only for ISA level starting on ISA v3.00, +// For ppc64(le), it is safe to check only for ISA level starting on ISA v3.00, // since there are no optional categories. There are some exceptions that also // require kernel support to work (darn, scv), so there are feature bits for -// those as well. The minimum processor requirement is POWER8 (ISA 2.07), so we -// maintain some of the old feature checks for optional categories for -// safety. +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). // The struct is padded to avoid false sharing. 
type ppc64 struct { - _ [CacheLineSize]byte - HasVMX bool // Vector unit (Altivec) - HasDFP bool // Decimal Floating Point unit - HasVSX bool // Vector-scalar unit - HasHTM bool // Hardware Transactional Memory - HasISEL bool // Integer select - HasVCRYPTO bool // Vector cryptography - HasHTMNOSC bool // HTM: kernel-aborted transaction in syscalls - HasDARN bool // Hardware random number generator (requires kernel enablement) - HasSCV bool // Syscall vectored (requires kernel enablement) - IsPOWER8 bool // ISA v2.07 (POWER8) - IsPOWER9 bool // ISA v3.00 (POWER9) - _ [CacheLineSize]byte + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +var ARM arm + +// The booleans in arm contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +type arm struct { + _ CacheLinePad + HasVFPv4 bool + HasIDIVA bool + _ CacheLinePad } var ARM64 arm64 @@ -67,7 +78,7 @@ var ARM64 arm64 // The booleans in arm64 contain the correspondingly named cpu feature bit. // The struct is padded to avoid false sharing. type arm64 struct { - _ [CacheLineSize]byte + _ CacheLinePad HasFP bool HasASIMD bool HasEVTSTRM bool @@ -92,13 +103,13 @@ type arm64 struct { HasSHA512 bool HasSVE bool HasASIMDFHM bool - _ [CacheLineSize]byte + _ CacheLinePad } var S390X s390x type s390x struct { - _ [CacheLineSize]byte + _ CacheLinePad HasZArch bool // z architecture mode is active [mandatory] HasSTFLE bool // store facility list extended [mandatory] HasLDisp bool // long (20-bit) displacements [mandatory] @@ -115,19 +126,20 @@ type s390x struct { HasSHA256 bool // K{I,L}MD-SHA-256 functions HasSHA512 bool // K{I,L}MD-SHA-512 functions HasVX bool // vector facility. Note: the runtime sets this when it processes auxv records. 
- _ [CacheLineSize]byte + HasVE1 bool // vector-enhancement 1 + _ CacheLinePad } -// initialize examines the processor and sets the relevant variables above. +// Initialize examines the processor and sets the relevant variables above. // This is called by the runtime package early in program initialization, -// before normal init functions are run. env is set by runtime on Linux and Darwin -// if go was compiled with GOEXPERIMENT=debugcpu. -func initialize(env string) { +// before normal init functions are run. env is set by runtime if the OS supports +// cpu feature options in GODEBUG. +func Initialize(env string) { doinit() processOptions(env) } -// options contains the cpu debug options that can be used in GODEBUGCPU. +// options contains the cpu debug options that can be used in GODEBUG. // Options are arch dependent and are added by the arch specific doinit functions. // Features that are mandatory for the specific GOARCH should not be added to options // (e.g. SSE2 on amd64). @@ -135,16 +147,19 @@ var options []option // Option names should be lower case. e.g. avx instead of AVX. type option struct { - Name string - Feature *bool + Name string + Feature *bool + Specified bool // whether feature value was specified in GODEBUG + Enable bool // whether feature should be enabled + Required bool // whether feature is mandatory and can not be disabled } -// processOptions disables CPU feature values based on the parsed env string. -// The env string is expected to be of the form feature1=0,feature2=0... +// processOptions enables or disables CPU feature values based on the parsed env string. +// The env string is expected to be of the form cpu.feature1=value1,cpu.feature2=value2... // where feature names is one of the architecture specifc list stored in the -// cpu packages options variable. If env contains all=0 then all capabilities -// referenced through the options variable are disabled. Other feature -// names and values other than 0 are silently ignored. 
+// cpu packages options variable and values are either 'on' or 'off'. +// If env contains cpu.all=off then all cpu features referenced through the options +// variable are disabled. Other feature names and values result in warning messages. func processOptions(env string) { field: for env != "" { @@ -155,28 +170,62 @@ field: } else { field, env = env[:i], env[i+1:] } + if len(field) < 4 || field[:4] != "cpu." { + continue + } i = indexByte(field, '=') if i < 0 { + print("GODEBUG: no value specified for \"", field, "\"\n") continue } - key, value := field[:i], field[i+1:] - - // Only allow turning off CPU features by specifying '0'. - if value == "0" { - if key == "all" { - for _, v := range options { - *v.Feature = false - } - return - } else { - for _, v := range options { - if v.Name == key { - *v.Feature = false - continue field - } - } + key, value := field[4:i], field[i+1:] // e.g. "SSE2", "on" + + var enable bool + switch value { + case "on": + enable = true + case "off": + enable = false + default: + print("GODEBUG: value \"", value, "\" not supported for cpu option \"", key, "\"\n") + continue field + } + + if key == "all" { + for i := range options { + options[i].Specified = true + options[i].Enable = enable || options[i].Required + } + continue field + } + + for i := range options { + if options[i].Name == key { + options[i].Specified = true + options[i].Enable = enable + continue field } } + + print("GODEBUG: unknown cpu feature \"", key, "\"\n") + } + + for _, o := range options { + if !o.Specified { + continue + } + + if o.Enable && !*o.Feature { + print("GODEBUG: can not enable \"", o.Name, "\", missing CPU support\n") + continue + } + + if !o.Enable && o.Required { + print("GODEBUG: can not disable \"", o.Name, "\", required CPU feature\n") + continue + } + + *o.Feature = o.Enable } } diff --git a/src/internal/cpu/cpu_arm.go b/src/internal/cpu/cpu_arm.go index 078a6c3b80a28..772b67147c8d3 100644 --- a/src/internal/cpu/cpu_arm.go +++ 
b/src/internal/cpu/cpu_arm.go @@ -4,4 +4,32 @@ package cpu -const CacheLineSize = 32 +const CacheLinePadSize = 32 + +// arm doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2. +// These are linknamed in runtime/os_(linux|freebsd)_arm.go and are +// initialized by archauxv(). +// These should not be changed after they are initialized. +var HWCap uint +var HWCap2 uint + +// HWCAP/HWCAP2 bits. These are exposed by Linux and FreeBSD. +const ( + hwcap_VFPv4 = 1 << 16 + hwcap_IDIVA = 1 << 17 +) + +func doinit() { + options = []option{ + {Name: "vfpv4", Feature: &ARM.HasVFPv4}, + {Name: "idiva", Feature: &ARM.HasIDIVA}, + } + + // HWCAP feature bits + ARM.HasVFPv4 = isSet(HWCap, hwcap_VFPv4) + ARM.HasIDIVA = isSet(HWCap, hwcap_IDIVA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/src/internal/cpu/cpu_arm64.go b/src/internal/cpu/cpu_arm64.go index 487ccf8e424b2..0b3ee8e069f33 100644 --- a/src/internal/cpu/cpu_arm64.go +++ b/src/internal/cpu/cpu_arm64.go @@ -4,97 +4,97 @@ package cpu -const CacheLineSize = 64 +const CacheLinePadSize = 64 // arm64 doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2. -// These are linknamed in runtime/os_linux_arm64.go and are initialized by -// archauxv(). -var hwcap uint -var hwcap2 uint +// These are initialized by archauxv in runtime/os_linux_arm64.go. +// These should not be changed after they are initialized. +var HWCap uint +var HWCap2 uint // HWCAP/HWCAP2 bits. These are exposed by Linux. 
const ( - hwcap_FP = (1 << 0) - hwcap_ASIMD = (1 << 1) - hwcap_EVTSTRM = (1 << 2) - hwcap_AES = (1 << 3) - hwcap_PMULL = (1 << 4) - hwcap_SHA1 = (1 << 5) - hwcap_SHA2 = (1 << 6) - hwcap_CRC32 = (1 << 7) - hwcap_ATOMICS = (1 << 8) - hwcap_FPHP = (1 << 9) - hwcap_ASIMDHP = (1 << 10) - hwcap_CPUID = (1 << 11) - hwcap_ASIMDRDM = (1 << 12) - hwcap_JSCVT = (1 << 13) - hwcap_FCMA = (1 << 14) - hwcap_LRCPC = (1 << 15) - hwcap_DCPOP = (1 << 16) - hwcap_SHA3 = (1 << 17) - hwcap_SM3 = (1 << 18) - hwcap_SM4 = (1 << 19) - hwcap_ASIMDDP = (1 << 20) - hwcap_SHA512 = (1 << 21) - hwcap_SVE = (1 << 22) - hwcap_ASIMDFHM = (1 << 23) + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 ) func doinit() { options = []option{ - {"evtstrm", &ARM64.HasEVTSTRM}, - {"aes", &ARM64.HasAES}, - {"pmull", &ARM64.HasPMULL}, - {"sha1", &ARM64.HasSHA1}, - {"sha2", &ARM64.HasSHA2}, - {"crc32", &ARM64.HasCRC32}, - {"atomics", &ARM64.HasATOMICS}, - {"fphp", &ARM64.HasFPHP}, - {"asimdhp", &ARM64.HasASIMDHP}, - {"cpuid", &ARM64.HasCPUID}, - {"asimdrdm", &ARM64.HasASIMDRDM}, - {"jscvt", &ARM64.HasJSCVT}, - {"fcma", &ARM64.HasFCMA}, - {"lrcpc", &ARM64.HasLRCPC}, - {"dcpop", &ARM64.HasDCPOP}, - {"sha3", &ARM64.HasSHA3}, - {"sm3", &ARM64.HasSM3}, - {"sm4", &ARM64.HasSM4}, - {"asimddp", &ARM64.HasASIMDDP}, - {"sha512", &ARM64.HasSHA512}, - {"sve", &ARM64.HasSVE}, - {"asimdfhm", &ARM64.HasASIMDFHM}, + {Name: "evtstrm", Feature: &ARM64.HasEVTSTRM}, + {Name: "aes", Feature: &ARM64.HasAES}, + {Name: "pmull", 
Feature: &ARM64.HasPMULL}, + {Name: "sha1", Feature: &ARM64.HasSHA1}, + {Name: "sha2", Feature: &ARM64.HasSHA2}, + {Name: "crc32", Feature: &ARM64.HasCRC32}, + {Name: "atomics", Feature: &ARM64.HasATOMICS}, + {Name: "fphp", Feature: &ARM64.HasFPHP}, + {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, + {Name: "cpuid", Feature: &ARM64.HasCPUID}, + {Name: "asimdrdm", Feature: &ARM64.HasASIMDRDM}, + {Name: "jscvt", Feature: &ARM64.HasJSCVT}, + {Name: "fcma", Feature: &ARM64.HasFCMA}, + {Name: "lrcpc", Feature: &ARM64.HasLRCPC}, + {Name: "dcpop", Feature: &ARM64.HasDCPOP}, + {Name: "sha3", Feature: &ARM64.HasSHA3}, + {Name: "sm3", Feature: &ARM64.HasSM3}, + {Name: "sm4", Feature: &ARM64.HasSM4}, + {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, + {Name: "sha512", Feature: &ARM64.HasSHA512}, + {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, // These capabilities should always be enabled on arm64: - // {"fp", &ARM64.HasFP}, - // {"asimd", &ARM64.HasASIMD}, + {Name: "fp", Feature: &ARM64.HasFP, Required: true}, + {Name: "asimd", Feature: &ARM64.HasASIMD, Required: true}, } // HWCAP feature bits - ARM64.HasFP = isSet(hwcap, hwcap_FP) - ARM64.HasASIMD = isSet(hwcap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwcap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwcap, hwcap_AES) - ARM64.HasPMULL = isSet(hwcap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwcap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwcap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwcap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwcap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwcap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwcap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwcap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwcap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwcap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwcap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwcap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwcap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwcap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwcap, hwcap_SM3) - 
ARM64.HasSM4 = isSet(hwcap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwcap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwcap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwcap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwcap, hwcap_ASIMDFHM) + ARM64.HasFP = isSet(HWCap, hwcap_FP) + ARM64.HasASIMD = isSet(HWCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(HWCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(HWCap, hwcap_AES) + ARM64.HasPMULL = isSet(HWCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(HWCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(HWCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(HWCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(HWCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(HWCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(HWCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(HWCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(HWCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(HWCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(HWCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(HWCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(HWCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(HWCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(HWCap, hwcap_SM3) + ARM64.HasSM4 = isSet(HWCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(HWCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(HWCap, hwcap_SHA512) + ARM64.HasSVE = isSet(HWCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(HWCap, hwcap_ASIMDFHM) } func isSet(hwc uint, value uint) bool { diff --git a/src/internal/cpu/cpu_arm64_test.go b/src/internal/cpu/cpu_arm64_test.go deleted file mode 100644 index f4c419a23f315..0000000000000 --- a/src/internal/cpu/cpu_arm64_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu_test - -import ( - . 
"internal/cpu" - "runtime" - "testing" -) - -func TestARM64minimalFeatures(t *testing.T) { - switch runtime.GOOS { - case "linux", "android": - default: - t.Skipf("%s/arm64 is not supported", runtime.GOOS) - } - - if !ARM64.HasASIMD { - t.Fatalf("HasASIMD expected true, got false") - } - if !ARM64.HasFP { - t.Fatalf("HasFP expected true, got false") - } -} diff --git a/src/internal/cpu/cpu_mips.go b/src/internal/cpu/cpu_mips.go index 078a6c3b80a28..0f821e44e7798 100644 --- a/src/internal/cpu/cpu_mips.go +++ b/src/internal/cpu/cpu_mips.go @@ -4,4 +4,4 @@ package cpu -const CacheLineSize = 32 +const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64.go b/src/internal/cpu/cpu_mips64.go index 078a6c3b80a28..0f821e44e7798 100644 --- a/src/internal/cpu/cpu_mips64.go +++ b/src/internal/cpu/cpu_mips64.go @@ -4,4 +4,4 @@ package cpu -const CacheLineSize = 32 +const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mips64le.go b/src/internal/cpu/cpu_mips64le.go index 078a6c3b80a28..0f821e44e7798 100644 --- a/src/internal/cpu/cpu_mips64le.go +++ b/src/internal/cpu/cpu_mips64le.go @@ -4,4 +4,4 @@ package cpu -const CacheLineSize = 32 +const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_mipsle.go b/src/internal/cpu/cpu_mipsle.go index 078a6c3b80a28..0f821e44e7798 100644 --- a/src/internal/cpu/cpu_mipsle.go +++ b/src/internal/cpu/cpu_mipsle.go @@ -4,4 +4,4 @@ package cpu -const CacheLineSize = 32 +const CacheLinePadSize = 32 diff --git a/src/internal/cpu/cpu_no_init.go b/src/internal/cpu/cpu_no_init.go index 1be4f29dddd98..777ea9de8bdbf 100644 --- a/src/internal/cpu/cpu_no_init.go +++ b/src/internal/cpu/cpu_no_init.go @@ -5,6 +5,7 @@ // +build !386 // +build !amd64 // +build !amd64p32 +// +build !arm // +build !arm64 // +build !ppc64 // +build !ppc64le diff --git a/src/internal/cpu/cpu_ppc64x.go b/src/internal/cpu/cpu_ppc64x.go index 995cf02081c9a..880c4e1d01806 100644 --- a/src/internal/cpu/cpu_ppc64x.go +++ b/src/internal/cpu/cpu_ppc64x.go @@ -6,61 
+6,41 @@ package cpu -const CacheLineSize = 128 +const CacheLinePadSize = 128 // ppc64x doesn't have a 'cpuid' equivalent, so we rely on HWCAP/HWCAP2. -// These are linknamed in runtime/os_linux_ppc64x.go and are initialized by -// archauxv(). -var hwcap uint -var hwcap2 uint +// These are initialized by archauxv in runtime/os_linux_ppc64x.go. +// These should not be changed after they are initialized. +// On aix/ppc64, these values are initialized early in the runtime in runtime/os_aix.go. +var HWCap uint +var HWCap2 uint // HWCAP/HWCAP2 bits. These are exposed by the kernel. const ( // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + PPC_FEATURE2_ARCH_2_07 = 0x80000000 + PPC_FEATURE2_ARCH_3_00 = 0x00800000 // CPU features - _PPC_FEATURE_HAS_ALTIVEC = 0x10000000 - _PPC_FEATURE_HAS_DFP = 0x00000400 - _PPC_FEATURE_HAS_VSX = 0x00000080 - _PPC_FEATURE2_HAS_HTM = 0x40000000 - _PPC_FEATURE2_HAS_ISEL = 0x08000000 - _PPC_FEATURE2_HAS_VEC_CRYPTO = 0x02000000 - _PPC_FEATURE2_HTM_NOSC = 0x01000000 - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 + PPC_FEATURE2_DARN = 0x00200000 + PPC_FEATURE2_SCV = 0x00100000 ) func doinit() { options = []option{ - {"htm", &PPC64.HasHTM}, - {"htmnosc", &PPC64.HasHTMNOSC}, - {"darn", &PPC64.HasDARN}, - {"scv", &PPC64.HasSCV}, + {Name: "darn", Feature: &PPC64.HasDARN}, + {Name: "scv", Feature: &PPC64.HasSCV}, + {Name: "power9", Feature: &PPC64.IsPOWER9}, // These capabilities should always be enabled on ppc64 and ppc64le: - // {"vmx", &PPC64.HasVMX}, - // {"dfp", &PPC64.HasDFP}, - // {"vsx", &PPC64.HasVSX}, - // {"isel", &PPC64.HasISEL}, - // {"vcrypto", &PPC64.HasVCRYPTO}, + {Name: "power8", Feature: &PPC64.IsPOWER8, Required: true}, } - // HWCAP feature bits - PPC64.HasVMX = isSet(hwcap, _PPC_FEATURE_HAS_ALTIVEC) - PPC64.HasDFP = isSet(hwcap, _PPC_FEATURE_HAS_DFP) - PPC64.HasVSX = isSet(hwcap, _PPC_FEATURE_HAS_VSX) - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwcap2, 
_PPC_FEATURE2_ARCH_2_07) - PPC64.HasHTM = isSet(hwcap2, _PPC_FEATURE2_HAS_HTM) - PPC64.HasISEL = isSet(hwcap2, _PPC_FEATURE2_HAS_ISEL) - PPC64.HasVCRYPTO = isSet(hwcap2, _PPC_FEATURE2_HAS_VEC_CRYPTO) - PPC64.HasHTMNOSC = isSet(hwcap2, _PPC_FEATURE2_HTM_NOSC) - PPC64.IsPOWER9 = isSet(hwcap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwcap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwcap2, _PPC_FEATURE2_SCV) + PPC64.IsPOWER8 = isSet(HWCap2, PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(HWCap2, PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(HWCap2, PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(HWCap2, PPC_FEATURE2_SCV) } func isSet(hwc uint, value uint) bool { diff --git a/src/internal/cpu/cpu_ppc64x_test.go b/src/internal/cpu/cpu_ppc64x_test.go deleted file mode 100644 index 9c43d1e804973..0000000000000 --- a/src/internal/cpu/cpu_ppc64x_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ppc64 ppc64le - -package cpu_test - -import ( - . "internal/cpu" - "testing" -) - -func TestPPC64minimalFeatures(t *testing.T) { - if !PPC64.IsPOWER8 { - t.Fatalf("IsPOWER8 expected true, got false") - } - if !PPC64.HasVMX { - t.Fatalf("HasVMX expected true, got false") - } - if !PPC64.HasDFP { - t.Fatalf("HasDFP expected true, got false") - } - if !PPC64.HasVSX { - t.Fatalf("HasVSX expected true, got false") - } - if !PPC64.HasISEL { - t.Fatalf("HasISEL expected true, got false") - } - if !PPC64.HasVCRYPTO { - t.Fatalf("HasVCRYPTO expected true, got false") - } -} diff --git a/src/internal/cpu/cpu_s390x.go b/src/internal/cpu/cpu_s390x.go index 389a058c32aef..4d63ef60d101f 100644 --- a/src/internal/cpu/cpu_s390x.go +++ b/src/internal/cpu/cpu_s390x.go @@ -4,7 +4,7 @@ package cpu -const CacheLineSize = 256 +const CacheLinePadSize = 256 // bitIsSet reports whether the bit at index is set. 
The bit index // is in big endian order, so bit index 0 is the leftmost bit. @@ -18,16 +18,16 @@ type function uint8 const ( // KM{,A,C,CTR} function codes aes128 function = 18 // AES-128 - aes192 = 19 // AES-192 - aes256 = 20 // AES-256 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 // K{I,L}MD function codes - sha1 = 1 // SHA-1 - sha256 = 2 // SHA-256 - sha512 = 3 // SHA-512 + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 // KLMD function codes - ghash = 65 // GHASH + ghash function = 65 // GHASH ) // queryResult contains the result of a Query function @@ -56,20 +56,23 @@ type facility uint8 const ( // mandatory facilities zarch facility = 1 // z architecture mode is active - stflef = 7 // store-facility-list-extended - ldisp = 18 // long-displacement - eimm = 21 // extended-immediate + stflef facility = 7 // store-facility-list-extended + ldisp facility = 18 // long-displacement + eimm facility = 21 // extended-immediate // miscellaneous facilities - dfp = 42 // decimal-floating-point - etf3eh = 30 // extended-translation 3 enhancement + dfp facility = 42 // decimal-floating-point + etf3eh facility = 30 // extended-translation 3 enhancement // cryptography facilities - msa = 17 // message-security-assist - msa3 = 76 // message-security-assist extension 3 - msa4 = 77 // message-security-assist extension 4 - msa5 = 57 // message-security-assist extension 5 - msa8 = 146 // message-security-assist extension 8 + msa facility = 17 // message-security-assist + msa3 facility = 76 // message-security-assist extension 3 + msa4 facility = 77 // message-security-assist extension 4 + msa5 facility = 57 // message-security-assist extension 5 + msa8 facility = 146 // message-security-assist extension 8 + + // vector facilities + ve1 facility = 135 // vector-enhancements 1 // Note: vx and highgprs are excluded because they require // kernel support and so must be fetched from HWCAP. 
@@ -107,14 +110,15 @@ func klmdQuery() queryResult func doinit() { options = []option{ - {"zarch", &S390X.HasZArch}, - {"stfle", &S390X.HasSTFLE}, - {"ldisp", &S390X.HasLDisp}, - {"msa", &S390X.HasMSA}, - {"eimm", &S390X.HasEImm}, - {"dfp", &S390X.HasDFP}, - {"etf3eh", &S390X.HasETF3Enhanced}, - {"vx", &S390X.HasVX}, + {Name: "zarch", Feature: &S390X.HasZArch}, + {Name: "stfle", Feature: &S390X.HasSTFLE}, + {Name: "ldisp", Feature: &S390X.HasLDisp}, + {Name: "msa", Feature: &S390X.HasMSA}, + {Name: "eimm", Feature: &S390X.HasEImm}, + {Name: "dfp", Feature: &S390X.HasDFP}, + {Name: "etf3eh", Feature: &S390X.HasETF3Enhanced}, + {Name: "vx", Feature: &S390X.HasVX}, + {Name: "ve1", Feature: &S390X.HasVE1}, } aes := []function{aes128, aes192, aes256} @@ -150,4 +154,7 @@ func doinit() { S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist } + if S390X.HasVX { + S390X.HasVE1 = facilities.Has(ve1) + } } diff --git a/src/internal/cpu/cpu_test.go b/src/internal/cpu/cpu_test.go index d4115a1b87512..e09bd2d8b99f7 100644 --- a/src/internal/cpu/cpu_test.go +++ b/src/internal/cpu/cpu_test.go @@ -9,28 +9,48 @@ import ( "internal/testenv" "os" "os/exec" + "runtime" "strings" "testing" ) -func MustHaveDebugOptionsEnabled(t *testing.T) { +func TestMinimalFeatures(t *testing.T) { + if runtime.GOARCH == "arm64" { + switch runtime.GOOS { + case "linux", "android": + default: + t.Skipf("%s/%s is not supported", runtime.GOOS, runtime.GOARCH) + } + } + + for _, o := range Options { + if o.Required && !*o.Feature { + t.Errorf("%v expected true, got false", o.Name) + } + } +} + +func MustHaveDebugOptionsSupport(t *testing.T) { if !DebugOptions { - t.Skipf("skipping test: cpu feature options not enabled") + t.Skipf("skipping test: cpu feature options not supported by OS") } } func runDebugOptionsTest(t *testing.T, test string, options string) { - MustHaveDebugOptionsEnabled(t) + MustHaveDebugOptionsSupport(t) 
testenv.MustHaveExec(t) - env := "GODEBUGCPU=" + options + env := "GODEBUG=" + options cmd := exec.Command(os.Args[0], "-test.run="+test) cmd.Env = append(cmd.Env, env) output, err := cmd.CombinedOutput() - got := strings.TrimSpace(string(output)) + lines := strings.Fields(string(output)) + lastline := lines[len(lines)-1] + + got := strings.TrimSpace(lastline) want := "PASS" if err != nil || got != want { t.Fatalf("%s with %s: want %s, got %v", test, env, want, got) @@ -38,19 +58,20 @@ func runDebugOptionsTest(t *testing.T, test string, options string) { } func TestDisableAllCapabilities(t *testing.T) { - runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "all=0") + runDebugOptionsTest(t, "TestAllCapabilitiesDisabled", "cpu.all=off") } func TestAllCapabilitiesDisabled(t *testing.T) { - MustHaveDebugOptionsEnabled(t) + MustHaveDebugOptionsSupport(t) - if os.Getenv("GODEBUGCPU") != "all=0" { - t.Skipf("skipping test: GODEBUGCPU=all=0 not set") + if os.Getenv("GODEBUG") != "cpu.all=off" { + t.Skipf("skipping test: GODEBUG=cpu.all=off not set") } for _, o := range Options { - if got := *o.Feature; got != false { - t.Errorf("%v: expected false, got %v", o.Name, got) + want := o.Required + if got := *o.Feature; got != want { + t.Errorf("%v: expected %v, got %v", o.Name, want, got) } } } diff --git a/src/internal/cpu/cpu_wasm.go b/src/internal/cpu/cpu_wasm.go index 1107a7ad6f7ad..b459738770510 100644 --- a/src/internal/cpu/cpu_wasm.go +++ b/src/internal/cpu/cpu_wasm.go @@ -4,4 +4,4 @@ package cpu -const CacheLineSize = 64 +const CacheLinePadSize = 64 diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go index 7d9d3aaf76028..5d357be62b2cc 100644 --- a/src/internal/cpu/cpu_x86.go +++ b/src/internal/cpu/cpu_x86.go @@ -6,7 +6,7 @@ package cpu -const CacheLineSize = 64 +const CacheLinePadSize = 64 // cpuid is implemented in cpu_x86.s. 
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) @@ -40,28 +40,23 @@ const ( func doinit() { options = []option{ - {"adx", &X86.HasADX}, - {"aes", &X86.HasAES}, - {"avx", &X86.HasAVX}, - {"avx2", &X86.HasAVX2}, - {"bmi1", &X86.HasBMI1}, - {"bmi2", &X86.HasBMI2}, - {"erms", &X86.HasERMS}, - {"fma", &X86.HasFMA}, - {"pclmulqdq", &X86.HasPCLMULQDQ}, - {"popcnt", &X86.HasPOPCNT}, - {"sse3", &X86.HasSSE3}, - {"sse41", &X86.HasSSE41}, - {"sse42", &X86.HasSSE42}, - {"ssse3", &X86.HasSSSE3}, - - // sse2 set as last element so it can easily be removed again. See code below. - {"sse2", &X86.HasSSE2}, - } - - // Remove sse2 from options on amd64(p32) because SSE2 is a mandatory feature for these GOARCHs. - if GOARCH == "amd64" || GOARCH == "amd64p32" { - options = options[:len(options)-1] + {Name: "adx", Feature: &X86.HasADX}, + {Name: "aes", Feature: &X86.HasAES}, + {Name: "avx", Feature: &X86.HasAVX}, + {Name: "avx2", Feature: &X86.HasAVX2}, + {Name: "bmi1", Feature: &X86.HasBMI1}, + {Name: "bmi2", Feature: &X86.HasBMI2}, + {Name: "erms", Feature: &X86.HasERMS}, + {Name: "fma", Feature: &X86.HasFMA}, + {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, + {Name: "popcnt", Feature: &X86.HasPOPCNT}, + {Name: "sse3", Feature: &X86.HasSSE3}, + {Name: "sse41", Feature: &X86.HasSSE41}, + {Name: "sse42", Feature: &X86.HasSSE42}, + {Name: "ssse3", Feature: &X86.HasSSSE3}, + + // These capabilities should always be enabled on amd64(p32): + {Name: "sse2", Feature: &X86.HasSSE2, Required: GOARCH == "amd64" || GOARCH == "amd64p32"}, } maxID, _, _, _ := cpuid(0, 0) diff --git a/src/internal/cpu/cpu_x86_test.go b/src/internal/cpu/cpu_x86_test.go index d03306c907a69..9e93d1af5d72c 100644 --- a/src/internal/cpu/cpu_x86_test.go +++ b/src/internal/cpu/cpu_x86_test.go @@ -13,16 +13,6 @@ import ( "testing" ) -func TestAMD64minimalFeatures(t *testing.T) { - if runtime.GOARCH != "amd64" { - return - } - - if !X86.HasSSE2 { - t.Fatalf("HasSSE2 expected true, got false") - } -} - func 
TestX86ifAVX2hasAVX(t *testing.T) { if X86.HasAVX2 && !X86.HasAVX { t.Fatalf("HasAVX expected true when HasAVX2 is true, got false") @@ -30,14 +20,14 @@ func TestX86ifAVX2hasAVX(t *testing.T) { } func TestDisableSSE2(t *testing.T) { - runDebugOptionsTest(t, "TestSSE2DebugOption", "sse2=0") + runDebugOptionsTest(t, "TestSSE2DebugOption", "cpu.sse2=off") } func TestSSE2DebugOption(t *testing.T) { - MustHaveDebugOptionsEnabled(t) + MustHaveDebugOptionsSupport(t) - if os.Getenv("GODEBUGCPU") != "sse2=0" { - t.Skipf("skipping test: GODEBUGCPU=sse2=0 not set") + if os.Getenv("GODEBUG") != "cpu.sse2=off" { + t.Skipf("skipping test: GODEBUG=cpu.sse2=off not set") } want := runtime.GOARCH != "386" // SSE2 can only be disabled on 386. @@ -45,3 +35,20 @@ func TestSSE2DebugOption(t *testing.T) { t.Errorf("X86.HasSSE2 on %s expected %v, got %v", runtime.GOARCH, want, got) } } + +func TestDisableSSE3(t *testing.T) { + runDebugOptionsTest(t, "TestSSE3DebugOption", "cpu.sse3=off") +} + +func TestSSE3DebugOption(t *testing.T) { + MustHaveDebugOptionsSupport(t) + + if os.Getenv("GODEBUG") != "cpu.sse3=off" { + t.Skipf("skipping test: GODEBUG=cpu.sse3=off not set") + } + + want := false + if got := X86.HasSSE3; got != want { + t.Errorf("X86.HasSSE3 expected %v, got %v", want, got) + } +} diff --git a/src/internal/cpu/export_test.go b/src/internal/cpu/export_test.go index 4e53c5a0841f3..91bfc1bbc3b35 100644 --- a/src/internal/cpu/export_test.go +++ b/src/internal/cpu/export_test.go @@ -5,6 +5,5 @@ package cpu var ( - Options = options - DebugOptions = debugOptions + Options = options ) diff --git a/src/internal/fmtsort/export_test.go b/src/internal/fmtsort/export_test.go new file mode 100644 index 0000000000000..25cbb5d4fca9b --- /dev/null +++ b/src/internal/fmtsort/export_test.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fmtsort + +import "reflect" + +func Compare(a, b reflect.Value) int { + return compare(a, b) +} diff --git a/src/internal/fmtsort/sort.go b/src/internal/fmtsort/sort.go new file mode 100644 index 0000000000000..c959cbee1f8af --- /dev/null +++ b/src/internal/fmtsort/sort.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fmtsort provides a general stable ordering mechanism +// for maps, on behalf of the fmt and text/template packages. +// It is not guaranteed to be efficient and works only for types +// that are valid map keys. +package fmtsort + +import ( + "reflect" + "sort" +) + +// Note: Throughout this package we avoid calling reflect.Value.Interface as +// it is not always legal to do so and it's easier to avoid the issue than to face it. + +// SortedMap represents a map's keys and values. The keys and values are +// aligned in index order: Value[i] is the value in the map corresponding to Key[i]. +type SortedMap struct { + Key []reflect.Value + Value []reflect.Value +} + +func (o *SortedMap) Len() int { return len(o.Key) } +func (o *SortedMap) Less(i, j int) bool { return compare(o.Key[i], o.Key[j]) < 0 } +func (o *SortedMap) Swap(i, j int) { + o.Key[i], o.Key[j] = o.Key[j], o.Key[i] + o.Value[i], o.Value[j] = o.Value[j], o.Value[i] +} + +// Sort accepts a map and returns a SortedMap that has the same keys and +// values but in a stable sorted order according to the keys, modulo issues +// raised by unorderable key values such as NaNs. 
+// +// The ordering rules are more general than with Go's < operator: +// +// - when applicable, nil compares low +// - ints, floats, and strings order by < +// - NaN compares less than non-NaN floats +// - bool compares false before true +// - complex compares real, then imag +// - pointers compare by machine address +// - channel values compare by machine address +// - structs compare each field in turn +// - arrays compare each element in turn. +// Otherwise identical arrays compare by length. +// - interface values compare first by reflect.Type describing the concrete type +// and then by concrete value as described in the previous rules. +// +func Sort(mapValue reflect.Value) *SortedMap { + if mapValue.Type().Kind() != reflect.Map { + return nil + } + key := make([]reflect.Value, mapValue.Len()) + value := make([]reflect.Value, len(key)) + iter := mapValue.MapRange() + for i := 0; iter.Next(); i++ { + key[i] = iter.Key() + value[i] = iter.Value() + } + sorted := &SortedMap{ + Key: key, + Value: value, + } + sort.Stable(sorted) + return sorted +} + +// compare compares two values of the same type. It returns -1, 0, 1 +// according to whether a > b (1), a == b (0), or a < b (-1). +// If the types differ, it returns -1. +// See the comment on Sort for the comparison rules. +func compare(aVal, bVal reflect.Value) int { + aType, bType := aVal.Type(), bVal.Type() + if aType != bType { + return -1 // No good answer possible, but don't return 0: they're not equal. 
+ } + switch aVal.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := aVal.Int(), bVal.Int() + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := aVal.Uint(), bVal.Uint() + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } + case reflect.String: + a, b := aVal.String(), bVal.String() + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } + case reflect.Float32, reflect.Float64: + return floatCompare(aVal.Float(), bVal.Float()) + case reflect.Complex64, reflect.Complex128: + a, b := aVal.Complex(), bVal.Complex() + if c := floatCompare(real(a), real(b)); c != 0 { + return c + } + return floatCompare(imag(a), imag(b)) + case reflect.Bool: + a, b := aVal.Bool(), bVal.Bool() + switch { + case a == b: + return 0 + case a: + return 1 + default: + return -1 + } + case reflect.Ptr: + a, b := aVal.Pointer(), bVal.Pointer() + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } + case reflect.Chan: + if c, ok := nilCompare(aVal, bVal); ok { + return c + } + ap, bp := aVal.Pointer(), bVal.Pointer() + switch { + case ap < bp: + return -1 + case ap > bp: + return 1 + default: + return 0 + } + case reflect.Struct: + for i := 0; i < aVal.NumField(); i++ { + if c := compare(aVal.Field(i), bVal.Field(i)); c != 0 { + return c + } + } + return 0 + case reflect.Array: + for i := 0; i < aVal.Len(); i++ { + if c := compare(aVal.Index(i), bVal.Index(i)); c != 0 { + return c + } + } + return 0 + case reflect.Interface: + if c, ok := nilCompare(aVal, bVal); ok { + return c + } + c := compare(reflect.ValueOf(aType), reflect.ValueOf(bType)) + if c != 0 { + return c + } + return compare(aVal.Elem(), bVal.Elem()) + default: + // Certain types cannot appear as keys (maps, funcs, slices), but be explicit. 
+ panic("bad type in compare: " + aType.String()) + } +} + +// nilCompare checks whether either value is nil. If not, the boolean is false. +// If either value is nil, the boolean is true and the integer is the comparison +// value. The comparison is defined to be 0 if both are nil, otherwise the one +// nil value compares low. Both arguments must represent a chan, func, +// interface, map, pointer, or slice. +func nilCompare(aVal, bVal reflect.Value) (int, bool) { + if aVal.IsNil() { + if bVal.IsNil() { + return 0, true + } + return -1, true + } + if bVal.IsNil() { + return 1, true + } + return 0, false +} + +// floatCompare compares two floating-point values. NaNs compare low. +func floatCompare(a, b float64) int { + switch { + case isNaN(a): + return -1 // No good answer if b is a NaN so don't bother checking. + case isNaN(b): + return 1 + case a < b: + return -1 + case a > b: + return 1 + } + return 0 +} + +func isNaN(a float64) bool { + return a != a +} diff --git a/src/internal/fmtsort/sort_test.go b/src/internal/fmtsort/sort_test.go new file mode 100644 index 0000000000000..6b10c775b0e93 --- /dev/null +++ b/src/internal/fmtsort/sort_test.go @@ -0,0 +1,212 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fmtsort_test + +import ( + "fmt" + "internal/fmtsort" + "math" + "reflect" + "strings" + "testing" +) + +var compareTests = [][]reflect.Value{ + ct(reflect.TypeOf(int(0)), -1, 0, 1), + ct(reflect.TypeOf(int8(0)), -1, 0, 1), + ct(reflect.TypeOf(int16(0)), -1, 0, 1), + ct(reflect.TypeOf(int32(0)), -1, 0, 1), + ct(reflect.TypeOf(int64(0)), -1, 0, 1), + ct(reflect.TypeOf(uint(0)), 0, 1, 5), + ct(reflect.TypeOf(uint8(0)), 0, 1, 5), + ct(reflect.TypeOf(uint16(0)), 0, 1, 5), + ct(reflect.TypeOf(uint32(0)), 0, 1, 5), + ct(reflect.TypeOf(uint64(0)), 0, 1, 5), + ct(reflect.TypeOf(uintptr(0)), 0, 1, 5), + ct(reflect.TypeOf(string("")), "", "a", "ab"), + ct(reflect.TypeOf(float32(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)), + ct(reflect.TypeOf(float64(0)), math.NaN(), math.Inf(-1), -1e10, 0, 1e10, math.Inf(1)), + ct(reflect.TypeOf(complex64(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i), + ct(reflect.TypeOf(complex128(0+1i)), -1-1i, -1+0i, -1+1i, 0-1i, 0+0i, 0+1i, 1-1i, 1+0i, 1+1i), + ct(reflect.TypeOf(false), false, true), + ct(reflect.TypeOf(&ints[0]), &ints[0], &ints[1], &ints[2]), + ct(reflect.TypeOf(chans[0]), chans[0], chans[1], chans[2]), + ct(reflect.TypeOf(toy{}), toy{0, 1}, toy{0, 2}, toy{1, -1}, toy{1, 1}), + ct(reflect.TypeOf([2]int{}), [2]int{1, 1}, [2]int{1, 2}, [2]int{2, 0}), + ct(reflect.TypeOf(interface{}(interface{}(0))), iFace, 1, 2, 3), +} + +var iFace interface{} + +func ct(typ reflect.Type, args ...interface{}) []reflect.Value { + value := make([]reflect.Value, len(args)) + for i, v := range args { + x := reflect.ValueOf(v) + if !x.IsValid() { // Make it a typed nil. + x = reflect.Zero(typ) + } else { + x = x.Convert(typ) + } + value[i] = x + } + return value +} + +func TestCompare(t *testing.T) { + for _, test := range compareTests { + for i, v0 := range test { + for j, v1 := range test { + c := fmtsort.Compare(v0, v1) + var expect int + switch { + case i == j: + expect = 0 + // NaNs are tricky. 
+ if typ := v0.Type(); (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && math.IsNaN(v0.Float()) { + expect = -1 + } + case i < j: + expect = -1 + case i > j: + expect = 1 + } + if c != expect { + t.Errorf("%s: compare(%v,%v)=%d; expect %d", v0.Type(), v0, v1, c, expect) + } + } + } + } +} + +type sortTest struct { + data interface{} // Always a map. + print string // Printed result using our custom printer. +} + +var sortTests = []sortTest{ + { + map[int]string{7: "bar", -3: "foo"}, + "-3:foo 7:bar", + }, + { + map[uint8]string{7: "bar", 3: "foo"}, + "3:foo 7:bar", + }, + { + map[string]string{"7": "bar", "3": "foo"}, + "3:foo 7:bar", + }, + { + map[float64]string{7: "bar", -3: "foo", math.NaN(): "nan", math.Inf(0): "inf"}, + "NaN:nan -3:foo 7:bar +Inf:inf", + }, + { + map[complex128]string{7 + 2i: "bar2", 7 + 1i: "bar", -3: "foo", complex(math.NaN(), 0i): "nan", complex(math.Inf(0), 0i): "inf"}, + "(NaN+0i):nan (-3+0i):foo (7+1i):bar (7+2i):bar2 (+Inf+0i):inf", + }, + { + map[bool]string{true: "true", false: "false"}, + "false:false true:true", + }, + { + chanMap(), + "CHAN0:0 CHAN1:1 CHAN2:2", + }, + { + pointerMap(), + "PTR0:0 PTR1:1 PTR2:2", + }, + { + map[toy]string{toy{7, 2}: "72", toy{7, 1}: "71", toy{3, 4}: "34"}, + "{3 4}:34 {7 1}:71 {7 2}:72", + }, + { + map[[2]int]string{{7, 2}: "72", {7, 1}: "71", {3, 4}: "34"}, + "[3 4]:34 [7 1]:71 [7 2]:72", + }, + { + map[interface{}]string{7: "7", 4: "4", 3: "3", nil: "nil"}, + ":nil 3:3 4:4 7:7", + }, +} + +func sprint(data interface{}) string { + om := fmtsort.Sort(reflect.ValueOf(data)) + if om == nil { + return "nil" + } + b := new(strings.Builder) + for i, key := range om.Key { + if i > 0 { + b.WriteRune(' ') + } + b.WriteString(sprintKey(key)) + b.WriteRune(':') + b.WriteString(fmt.Sprint(om.Value[i])) + } + return b.String() +} + +// sprintKey formats a reflect.Value but gives reproducible values for some +// problematic types such as pointers. 
Note that it only does special handling +// for the troublesome types used in the test cases; it is not a general +// printer. +func sprintKey(key reflect.Value) string { + switch str := key.Type().String(); str { + case "*int": + ptr := key.Interface().(*int) + for i := range ints { + if ptr == &ints[i] { + return fmt.Sprintf("PTR%d", i) + } + } + return "PTR???" + case "chan int": + c := key.Interface().(chan int) + for i := range chans { + if c == chans[i] { + return fmt.Sprintf("CHAN%d", i) + } + } + return "CHAN???" + default: + return fmt.Sprint(key) + } +} + +var ( + ints [3]int + chans = [3]chan int{make(chan int), make(chan int), make(chan int)} +) + +func pointerMap() map[*int]string { + m := make(map[*int]string) + for i := 2; i >= 0; i-- { + m[&ints[i]] = fmt.Sprint(i) + } + return m +} + +func chanMap() map[chan int]string { + m := make(map[chan int]string) + for i := 2; i >= 0; i-- { + m[chans[i]] = fmt.Sprint(i) + } + return m +} + +type toy struct { + A int // Exported. + b int // Unexported. +} + +func TestOrder(t *testing.T) { + for _, test := range sortTests { + got := sprint(test.data) + if got != test.print { + t.Errorf("%s: got %q, want %q", reflect.TypeOf(test.data), got, test.print) + } + } +} diff --git a/src/internal/goroot/gc.go b/src/internal/goroot/gc.go new file mode 100644 index 0000000000000..9d846d8c49ac1 --- /dev/null +++ b/src/internal/goroot/gc.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +package goroot + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +// IsStandardPackage reports whether path is a standard package, +// given goroot and compiler. 
+func IsStandardPackage(goroot, compiler, path string) bool { + switch compiler { + case "gc": + dir := filepath.Join(goroot, "src", path) + _, err := os.Stat(dir) + return err == nil + case "gccgo": + return gccgoSearch.isStandard(path) + default: + panic("unknown compiler " + compiler) + } +} + +// gccgoSearch holds the gccgo search directories. +type gccgoDirs struct { + once sync.Once + dirs []string +} + +// gccgoSearch is used to check whether a gccgo package exists in the +// standard library. +var gccgoSearch gccgoDirs + +// init finds the gccgo search directories. If this fails it leaves dirs == nil. +func (gd *gccgoDirs) init() { + gccgo := os.Getenv("GCCGO") + if gccgo == "" { + gccgo = "gccgo" + } + bin, err := exec.LookPath(gccgo) + if err != nil { + return + } + + allDirs, err := exec.Command(bin, "-print-search-dirs").Output() + if err != nil { + return + } + versionB, err := exec.Command(bin, "-dumpversion").Output() + if err != nil { + return + } + version := strings.TrimSpace(string(versionB)) + machineB, err := exec.Command(bin, "-dumpmachine").Output() + if err != nil { + return + } + machine := strings.TrimSpace(string(machineB)) + + dirsEntries := strings.Split(string(allDirs), "\n") + const prefix = "libraries: =" + var dirs []string + for _, dirEntry := range dirsEntries { + if strings.HasPrefix(dirEntry, prefix) { + dirs = filepath.SplitList(strings.TrimPrefix(dirEntry, prefix)) + break + } + } + if len(dirs) == 0 { + return + } + + var lastDirs []string + for _, dir := range dirs { + goDir := filepath.Join(dir, "go", version) + if fi, err := os.Stat(goDir); err == nil && fi.IsDir() { + gd.dirs = append(gd.dirs, goDir) + goDir = filepath.Join(goDir, machine) + if fi, err = os.Stat(goDir); err == nil && fi.IsDir() { + gd.dirs = append(gd.dirs, goDir) + } + } + if fi, err := os.Stat(dir); err == nil && fi.IsDir() { + lastDirs = append(lastDirs, dir) + } + } + gd.dirs = append(gd.dirs, lastDirs...) 
+} + +// isStandard reports whether path is a standard library for gccgo. +func (gd *gccgoDirs) isStandard(path string) bool { + // Quick check: if the first path component has a '.', it's not + // in the standard library. This skips most GOPATH directories. + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + if strings.Contains(path[:i], ".") { + return false + } + + if path == "unsafe" { + // Special case. + return true + } + + gd.once.Do(gd.init) + if gd.dirs == nil { + // We couldn't find the gccgo search directories. + // Best guess, since the first component did not contain + // '.', is that this is a standard library package. + return true + } + + for _, dir := range gd.dirs { + full := filepath.Join(dir, path) + pkgdir, pkg := filepath.Split(full) + for _, p := range [...]string{ + full, + full + ".gox", + pkgdir + "lib" + pkg + ".so", + pkgdir + "lib" + pkg + ".a", + full + ".o", + } { + if fi, err := os.Stat(p); err == nil && !fi.IsDir() { + return true + } + } + } + + return false +} diff --git a/src/internal/goroot/gccgo.go b/src/internal/goroot/gccgo.go new file mode 100644 index 0000000000000..3530e59a15f88 --- /dev/null +++ b/src/internal/goroot/gccgo.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package goroot + +import ( + "os" + "path/filepath" +) + +// IsStandardPackage reports whether path is a standard package, +// given goroot and compiler. 
+func IsStandardPackage(goroot, compiler, path string) bool { + switch compiler { + case "gc": + dir := filepath.Join(goroot, "src", path) + _, err := os.Stat(dir) + return err == nil + case "gccgo": + return stdpkg[path] + default: + panic("unknown compiler " + compiler) + } +} diff --git a/src/internal/poll/export_posix_test.go b/src/internal/poll/export_posix_test.go index 73b2c11e1e099..6b9bb8b7d0dfd 100644 --- a/src/internal/poll/export_posix_test.go +++ b/src/internal/poll/export_posix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows // Export guts for testing on posix. // Since testing imports os and os imports internal/poll, diff --git a/src/internal/poll/fd_fsync_darwin.go b/src/internal/poll/fd_fsync_darwin.go new file mode 100644 index 0000000000000..c68ec9782a897 --- /dev/null +++ b/src/internal/poll/fd_fsync_darwin.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poll + +import ( + "syscall" + _ "unsafe" // for go:linkname +) + +// Fsync invokes SYS_FCNTL with SYS_FULLFSYNC because +// on OS X, SYS_FSYNC doesn't fully flush contents to disk. +// See Issue #26650 as well as the man page for fsync on OS X. +func (fd *FD) Fsync() error { + if err := fd.incref(); err != nil { + return err + } + defer fd.decref() + + _, e1 := fcntl(fd.Sysfd, syscall.F_FULLFSYNC, 0) + return e1 +} + +// Implemented in syscall/syscall_darwin.go. 
+//go:linkname fcntl syscall.fcntl +func fcntl(fd int, cmd int, arg int) (int, error) diff --git a/src/internal/poll/fd_fsync_posix.go b/src/internal/poll/fd_fsync_posix.go new file mode 100644 index 0000000000000..6705a3e524cd4 --- /dev/null +++ b/src/internal/poll/fd_fsync_posix.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris + +package poll + +import "syscall" + +// Fsync wraps syscall.Fsync. +func (fd *FD) Fsync() error { + if err := fd.incref(); err != nil { + return err + } + defer fd.decref() + return syscall.Fsync(fd.Sysfd) +} + +func fcntl(fd int, cmd int, arg int) (int, error) { + r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + if e != 0 { + return int(r), syscall.Errno(e) + } + return int(r), nil +} diff --git a/src/internal/poll/fd_fsync_windows.go b/src/internal/poll/fd_fsync_windows.go new file mode 100644 index 0000000000000..fb1211985db2a --- /dev/null +++ b/src/internal/poll/fd_fsync_windows.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poll + +import "syscall" + +// Fsync wraps syscall.Fsync. +func (fd *FD) Fsync() error { + if err := fd.incref(); err != nil { + return err + } + defer fd.decref() + return syscall.Fsync(fd.Sysfd) +} diff --git a/src/internal/poll/fd_opendir_ios.go b/src/internal/poll/fd_opendir_ios.go new file mode 100644 index 0000000000000..e646bd9a9656a --- /dev/null +++ b/src/internal/poll/fd_opendir_ios.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin +// +build arm arm64 + +package poll + +import ( + "syscall" + _ "unsafe" // for go:linkname +) + +// OpenDir returns a pointer to a DIR structure suitable for +// ReadDir. In case of an error, the name of the failed +// syscall is returned along with a syscall.Errno. +func (fd *FD) OpenDir() (uintptr, string, error) { + // fdopendir(3) takes control of the file descriptor, + // so use a dup. + fd2, call, err := fd.Dup() + if err != nil { + return 0, call, err + } + dir, err := fdopendir(fd2) + if err != nil { + syscall.Close(fd2) + return 0, "fdopendir", err + } + return dir, "", nil +} + +// Implemented in syscall/syscall_darwin.go. +//go:linkname fdopendir syscall.fdopendir +func fdopendir(fd int) (dir uintptr, err error) diff --git a/src/internal/poll/fd_plan9.go b/src/internal/poll/fd_plan9.go index 107f454523339..0fce32915e7dd 100644 --- a/src/internal/poll/fd_plan9.go +++ b/src/internal/poll/fd_plan9.go @@ -193,10 +193,10 @@ func isInterrupted(err error) bool { return err != nil && stringsHasSuffix(err.Error(), "interrupted") } -// PollDescriptor returns the descriptor being used by the poller, -// or ^uintptr(0) if there isn't one. This is only used for testing. -func PollDescriptor() uintptr { - return ^uintptr(0) +// IsPollDescriptor reports whether fd is the descriptor being used by the poller. +// This is only used for testing. +func IsPollDescriptor(fd uintptr) bool { + return false } // RawControl invokes the user-defined function f for a non-IO diff --git a/src/internal/poll/fd_poll_nacljs.go b/src/internal/poll/fd_poll_nacljs.go index 832dddb4aa447..0871f342d423b 100644 --- a/src/internal/poll/fd_poll_nacljs.go +++ b/src/internal/poll/fd_poll_nacljs.go @@ -92,8 +92,8 @@ func setDeadlineImpl(fd *FD, t time.Time, mode int) error { return nil } -// PollDescriptor returns the descriptor being used by the poller, -// or ^uintptr(0) if there isn't one. This is only used for testing. 
-func PollDescriptor() uintptr { - return ^uintptr(0) +// IsPollDescriptor reports whether fd is the descriptor being used by the poller. +// This is only used for testing. +func IsPollDescriptor(fd uintptr) bool { + return false } diff --git a/src/internal/poll/fd_poll_runtime.go b/src/internal/poll/fd_poll_runtime.go index 87a01a8b69937..687f702556ff0 100644 --- a/src/internal/poll/fd_poll_runtime.go +++ b/src/internal/poll/fd_poll_runtime.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd windows solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd windows solaris package poll @@ -11,13 +11,14 @@ import ( "sync" "syscall" "time" + _ "unsafe" // for go:linkname ) // runtimeNano returns the current value of the runtime clock in nanoseconds. +//go:linkname runtimeNano runtime.nanotime func runtimeNano() int64 func runtime_pollServerInit() -func runtime_pollServerDescriptor() uintptr func runtime_pollOpen(fd uintptr) (uintptr, int) func runtime_pollClose(ctx uintptr) func runtime_pollWait(ctx uintptr, mode int) int @@ -25,6 +26,7 @@ func runtime_pollWaitCanceled(ctx uintptr, mode int) int func runtime_pollReset(ctx uintptr, mode int) int func runtime_pollSetDeadline(ctx uintptr, d int64, mode int) func runtime_pollUnblock(ctx uintptr) +func runtime_isPollServerDescriptor(fd uintptr) bool type pollDesc struct { runtimeCtx uintptr @@ -134,15 +136,12 @@ func (fd *FD) SetWriteDeadline(t time.Time) error { } func setDeadlineImpl(fd *FD, t time.Time, mode int) error { - diff := int64(time.Until(t)) - d := runtimeNano() + diff - if d <= 0 && diff > 0 { - // If the user has a deadline in the future, but the delay calculation - // overflows, then set the deadline to the maximum possible value. 
- d = 1<<63 - 1 - } - if t.IsZero() { - d = 0 + var d int64 + if !t.IsZero() { + d = int64(time.Until(t)) + if d == 0 { + d = -1 // don't confuse deadline right now with no deadline + } } if err := fd.incref(); err != nil { return err @@ -155,8 +154,8 @@ func setDeadlineImpl(fd *FD, t time.Time, mode int) error { return nil } -// PollDescriptor returns the descriptor being used by the poller, -// or ^uintptr(0) if there isn't one. This is only used for testing. -func PollDescriptor() uintptr { - return runtime_pollServerDescriptor() +// IsPollDescriptor reports whether fd is the descriptor being used by the poller. +// This is only used for testing. +func IsPollDescriptor(fd uintptr) bool { + return runtime_isPollServerDescriptor(fd) } diff --git a/src/internal/poll/fd_posix.go b/src/internal/poll/fd_posix.go index f178a6fa0ae1b..b43ad51799640 100644 --- a/src/internal/poll/fd_posix.go +++ b/src/internal/poll/fd_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package poll @@ -46,12 +46,3 @@ func (fd *FD) Ftruncate(size int64) error { defer fd.decref() return syscall.Ftruncate(fd.Sysfd, size) } - -// Fsync wraps syscall.Fsync. -func (fd *FD) Fsync() error { - if err := fd.incref(); err != nil { - return err - } - defer fd.decref() - return syscall.Fsync(fd.Sysfd) -} diff --git a/src/internal/poll/fd_posix_test.go b/src/internal/poll/fd_posix_test.go index cbe015edbd120..246d4989e1cf8 100644 --- a/src/internal/poll/fd_posix_test.go +++ b/src/internal/poll/fd_posix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows package poll_test diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go index b311049ad701c..8185269c53ca6 100644 --- a/src/internal/poll/fd_unix.go +++ b/src/internal/poll/fd_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package poll @@ -452,10 +452,11 @@ var tryDupCloexec = int32(1) // DupCloseOnExec dups fd and marks it close-on-exec. func DupCloseOnExec(fd int) (int, string, error) { if atomic.LoadInt32(&tryDupCloexec) == 1 { - r0, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_DUPFD_CLOEXEC, 0) - switch e1 { - case 0: - return int(r0), "", nil + r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0) + if e1 == nil { + return r0, "", nil + } + switch e1.(syscall.Errno) { case syscall.EINVAL, syscall.ENOSYS: // Old kernel, or js/wasm (which returns // ENOSYS). 
Fall back to the portable way from diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index d04d332696db4..19d9a12dad83b 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -116,11 +116,17 @@ func (o *operation) InitBufs(buf *[][]byte) { o.bufs = o.bufs[:0] } for _, b := range *buf { - var p *byte + if len(b) == 0 { + o.bufs = append(o.bufs, syscall.WSABuf{}) + continue + } + for len(b) > maxRW { + o.bufs = append(o.bufs, syscall.WSABuf{Len: maxRW, Buf: &b[0]}) + b = b[maxRW:] + } if len(b) > 0 { - p = &b[0] + o.bufs = append(o.bufs, syscall.WSABuf{Len: uint32(len(b)), Buf: &b[0]}) } - o.bufs = append(o.bufs, syscall.WSABuf{Len: uint32(len(b)), Buf: p}) } } @@ -461,6 +467,11 @@ func (fd *FD) Shutdown(how int) error { return syscall.Shutdown(fd.Sysfd, how) } +// Windows ReadFile and WSARecv use DWORD (uint32) parameter to pass buffer length. +// This prevents us reading blocks larger than 4GB. +// See golang.org/issue/26923. +const maxRW = 1 << 30 // 1GB is large enough and keeps subsequent reads aligned + // Read implements io.Reader. 
func (fd *FD) Read(buf []byte) (int, error) { if err := fd.readLock(); err != nil { @@ -468,6 +479,10 @@ func (fd *FD) Read(buf []byte) (int, error) { } defer fd.readUnlock() + if len(buf) > maxRW { + buf = buf[:maxRW] + } + var n int var err error if fd.isFile || fd.isDir || fd.isConsole { @@ -581,6 +596,10 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) { } defer fd.decref() + if len(b) > maxRW { + b = b[:maxRW] + } + fd.l.Lock() defer fd.l.Unlock() curoffset, e := syscall.Seek(fd.Sysfd, 0, io.SeekCurrent) @@ -611,6 +630,9 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) { if len(buf) == 0 { return 0, nil, nil } + if len(buf) > maxRW { + buf = buf[:maxRW] + } if err := fd.readLock(); err != nil { return 0, nil, err } @@ -639,30 +661,42 @@ func (fd *FD) Write(buf []byte) (int, error) { } defer fd.writeUnlock() - var n int - var err error - if fd.isFile || fd.isDir || fd.isConsole { - fd.l.Lock() - defer fd.l.Unlock() - if fd.isConsole { - n, err = fd.writeConsole(buf) + ntotal := 0 + for len(buf) > 0 { + b := buf + if len(b) > maxRW { + b = b[:maxRW] + } + var n int + var err error + if fd.isFile || fd.isDir || fd.isConsole { + fd.l.Lock() + defer fd.l.Unlock() + if fd.isConsole { + n, err = fd.writeConsole(b) + } else { + n, err = syscall.Write(fd.Sysfd, b) + } + if err != nil { + n = 0 + } } else { - n, err = syscall.Write(fd.Sysfd, buf) + if race.Enabled { + race.ReleaseMerge(unsafe.Pointer(&ioSync)) + } + o := &fd.wop + o.InitBuf(b) + n, err = wsrv.ExecIO(o, func(o *operation) error { + return syscall.WSASend(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil) + }) } + ntotal += n if err != nil { - n = 0 + return ntotal, err } - } else { - if race.Enabled { - race.ReleaseMerge(unsafe.Pointer(&ioSync)) - } - o := &fd.wop - o.InitBuf(buf) - n, err = wsrv.ExecIO(o, func(o *operation) error { - return syscall.WSASend(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil) - }) + buf = buf[n:] } - return n, err + return ntotal, nil } // writeConsole 
writes len(b) bytes to the console File. @@ -709,7 +743,7 @@ func (fd *FD) writeConsole(b []byte) (int, error) { } // Pwrite emulates the Unix pwrite system call. -func (fd *FD) Pwrite(b []byte, off int64) (int, error) { +func (fd *FD) Pwrite(buf []byte, off int64) (int, error) { // Call incref, not writeLock, because since pwrite specifies the // offset it is independent from other writes. if err := fd.incref(); err != nil { @@ -724,16 +758,27 @@ func (fd *FD) Pwrite(b []byte, off int64) (int, error) { return 0, e } defer syscall.Seek(fd.Sysfd, curoffset, io.SeekStart) - o := syscall.Overlapped{ - OffsetHigh: uint32(off >> 32), - Offset: uint32(off), - } - var done uint32 - e = syscall.WriteFile(fd.Sysfd, b, &done, &o) - if e != nil { - return 0, e + + ntotal := 0 + for len(buf) > 0 { + b := buf + if len(b) > maxRW { + b = b[:maxRW] + } + var n uint32 + o := syscall.Overlapped{ + OffsetHigh: uint32(off >> 32), + Offset: uint32(off), + } + e = syscall.WriteFile(fd.Sysfd, b, &n, &o) + ntotal += int(n) + if e != nil { + return ntotal, e + } + buf = buf[n:] + off += int64(n) } - return int(done), nil + return ntotal, nil } // Writev emulates the Unix writev system call. @@ -761,20 +806,41 @@ func (fd *FD) Writev(buf *[][]byte) (int64, error) { // WriteTo wraps the sendto network call. 
func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) { - if len(buf) == 0 { - return 0, nil - } if err := fd.writeLock(); err != nil { return 0, err } defer fd.writeUnlock() - o := &fd.wop - o.InitBuf(buf) - o.sa = sa - n, err := wsrv.ExecIO(o, func(o *operation) error { - return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil) - }) - return n, err + + if len(buf) == 0 { + // handle zero-byte payload + o := &fd.wop + o.InitBuf(buf) + o.sa = sa + n, err := wsrv.ExecIO(o, func(o *operation) error { + return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil) + }) + return n, err + } + + ntotal := 0 + for len(buf) > 0 { + b := buf + if len(b) > maxRW { + b = b[:maxRW] + } + o := &fd.wop + o.InitBuf(b) + o.sa = sa + n, err := wsrv.ExecIO(o, func(o *operation) error { + return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil) + }) + ntotal += int(n) + if err != nil { + return ntotal, err + } + buf = buf[n:] + } + return ntotal, nil } // Call ConnectEx. This doesn't need any locking, since it is only @@ -989,6 +1055,10 @@ func (fd *FD) ReadMsg(p []byte, oob []byte) (int, int, int, syscall.Sockaddr, er } defer fd.readUnlock() + if len(p) > maxRW { + p = p[:maxRW] + } + o := &fd.rop o.InitMsg(p, oob) o.rsa = new(syscall.RawSockaddrAny) @@ -1007,6 +1077,10 @@ func (fd *FD) ReadMsg(p []byte, oob []byte) (int, int, int, syscall.Sockaddr, er // WriteMsg wraps the WSASendMsg network call. func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) { + if len(p) > maxRW { + return 0, 0, errors.New("packet is too large (only 1GB is allowed)") + } + if err := fd.writeLock(); err != nil { return 0, 0, err } diff --git a/src/internal/poll/fd_writev_darwin.go b/src/internal/poll/fd_writev_darwin.go new file mode 100644 index 0000000000000..e2024471d4fb0 --- /dev/null +++ b/src/internal/poll/fd_writev_darwin.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package poll + +import ( + "syscall" + _ "unsafe" // for go:linkname +) + +// Implemented in syscall/syscall_darwin.go. +//go:linkname writev syscall.writev +func writev(fd int, iovecs []syscall.Iovec) (uintptr, error) diff --git a/src/internal/poll/fd_writev_unix.go b/src/internal/poll/fd_writev_unix.go new file mode 100644 index 0000000000000..86af795b5a58a --- /dev/null +++ b/src/internal/poll/fd_writev_unix.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux netbsd openbsd + +package poll + +import ( + "syscall" + "unsafe" +) + +func writev(fd int, iovecs []syscall.Iovec) (uintptr, error) { + r, _, e := syscall.Syscall(syscall.SYS_WRITEV, uintptr(fd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(len(iovecs))) + if e != 0 { + return r, syscall.Errno(e) + } + return r, nil +} diff --git a/src/internal/poll/hook_unix.go b/src/internal/poll/hook_unix.go index c2ad17eb1ad8d..a7512b1255f6f 100644 --- a/src/internal/poll/hook_unix.go +++ b/src/internal/poll/hook_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package poll diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 980a75afa7fc3..40ae3468b0745 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -32,8 +32,7 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) { pos += int64(n) written += int64(n) remain -= int64(n) - } - if n == 0 && err1 == nil { + } else if n == 0 && err1 == nil { break } if err1 == syscall.EAGAIN { diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index 52955a19d0b4f..8e938065f17ad 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -29,8 +29,7 @@ func SendFile(dstFD *FD, src int, remain int64) (int64, error) { if n > 0 { written += int64(n) remain -= int64(n) - } - if n == 0 && err1 == nil { + } else if n == 0 && err1 == nil { break } if err1 == syscall.EAGAIN { diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index 9093d464834f4..762992e9eb398 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -39,8 +39,7 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error) { pos += int64(n) written += int64(n) remain -= int64(n) - } - if n == 0 && err1 == nil { + } else if n == 0 && err1 == nil { break } if err1 == syscall.EAGAIN { diff --git a/src/internal/poll/sendfile_windows.go b/src/internal/poll/sendfile_windows.go index 1a4d0ca191253..17a3681064b66 100644 --- a/src/internal/poll/sendfile_windows.go +++ b/src/internal/poll/sendfile_windows.go @@ -32,11 +32,17 @@ func SendFile(fd *FD, src syscall.Handle, n int64) (int64, error) { return 0, err } - o.o.OffsetHigh = uint32(curpos) - o.o.Offset = uint32(curpos >> 32) + o.o.Offset = uint32(curpos) + o.o.OffsetHigh = 
uint32(curpos >> 32) done, err := wsrv.ExecIO(o, func(o *operation) error { return syscall.TransmitFile(o.fd.Sysfd, o.handle, o.qty, 0, &o.o, nil, syscall.TF_WRITE_BEHIND) }) + if err == nil { + // Some versions of Windows (Windows 10 1803) do not set + // file position after TransmitFile completes. + // So just use Seek to set file position. + _, err = syscall.Seek(o.handle, curpos+int64(done), 0) + } return int64(done), err } diff --git a/src/internal/poll/sockopt.go b/src/internal/poll/sockopt.go index f86ce707a123a..bb5ea02c0a658 100644 --- a/src/internal/poll/sockopt.go +++ b/src/internal/poll/sockopt.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package poll diff --git a/src/internal/poll/sockopt_unix.go b/src/internal/poll/sockopt_unix.go index b33644db762c3..bd942c293456f 100644 --- a/src/internal/poll/sockopt_unix.go +++ b/src/internal/poll/sockopt_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package poll diff --git a/src/internal/poll/sockoptip.go b/src/internal/poll/sockoptip.go index 1ee490c257876..c55a1e3c5b19c 100644 --- a/src/internal/poll/sockoptip.go +++ b/src/internal/poll/sockoptip.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package poll diff --git a/src/internal/poll/splice_linux.go b/src/internal/poll/splice_linux.go index aa237e587adab..4f97298417c3d 100644 --- a/src/internal/poll/splice_linux.go +++ b/src/internal/poll/splice_linux.go @@ -32,8 +32,6 @@ func Splice(dst, src *FD, remain int64) (written int64, handled bool, sc string, return 0, false, sc, err } defer destroyTempPipe(prfd, pwfd) - // From here on, the operation should be considered handled, - // even if Splice doesn't transfer any data. var inPipe, n int for err == nil && remain > 0 { max := maxSpliceSize @@ -41,9 +39,18 @@ func Splice(dst, src *FD, remain int64) (written int64, handled bool, sc string, max = int(remain) } inPipe, err = spliceDrain(pwfd, src, max) + // The operation is considered handled if splice returns no + // error, or an error other than EINVAL. An EINVAL means the + // kernel does not support splice for the socket type of src. + // The failed syscall does not consume any data so it is safe + // to fall back to a generic copy. + // // spliceDrain should never return EAGAIN, so if err != nil, - // Splice cannot continue. If inPipe == 0 && err == nil, - // src is at EOF, and the transfer is complete. + // Splice cannot continue. + // + // If inPipe == 0 && err == nil, src is at EOF, and the + // transfer is complete. 
+ handled = handled || (err != syscall.EINVAL) if err != nil || (inPipe == 0 && err == nil) { break } @@ -54,7 +61,7 @@ func Splice(dst, src *FD, remain int64) (written int64, handled bool, sc string, } } if err != nil { - return written, true, "splice", err + return written, handled, "splice", err } return written, true, "", nil } diff --git a/src/internal/poll/sys_cloexec.go b/src/internal/poll/sys_cloexec.go index 7bafa0d81a1c4..64e46127d4502 100644 --- a/src/internal/poll/sys_cloexec.go +++ b/src/internal/poll/sys_cloexec.go @@ -5,7 +5,7 @@ // This file implements sysSocket and accept for platforms that do not // provide a fast path for setting SetNonblock and CloseOnExec. -// +build darwin js,wasm nacl solaris +// +build aix darwin js,wasm nacl solaris package poll diff --git a/src/internal/poll/writev.go b/src/internal/poll/writev.go index 4bf8804e217c1..04e3522d8a19a 100644 --- a/src/internal/poll/writev.go +++ b/src/internal/poll/writev.go @@ -9,7 +9,6 @@ package poll import ( "io" "syscall" - "unsafe" ) // Writev wraps the writev system call. 
@@ -54,24 +53,20 @@ func (fd *FD) Writev(v *[][]byte) (int64, error) { } fd.iovecs = &iovecs // cache - wrote, _, e0 := syscall.Syscall(syscall.SYS_WRITEV, - uintptr(fd.Sysfd), - uintptr(unsafe.Pointer(&iovecs[0])), - uintptr(len(iovecs))) + var wrote uintptr + wrote, err = writev(fd.Sysfd, iovecs) if wrote == ^uintptr(0) { wrote = 0 } TestHookDidWritev(int(wrote)) n += int64(wrote) consume(v, int64(wrote)) - if e0 == syscall.EAGAIN { - if err = fd.pd.waitWrite(fd.isFile); err == nil { - continue - } - } else if e0 != 0 { - err = syscall.Errno(e0) - } if err != nil { + if err.(syscall.Errno) == syscall.EAGAIN { + if err = fd.pd.waitWrite(fd.isFile); err == nil { + continue + } + } break } if n == 0 { diff --git a/src/internal/syscall/unix/asm_aix_ppc64.s b/src/internal/syscall/unix/asm_aix_ppc64.s new file mode 100644 index 0000000000000..9e82e3eb88bb8 --- /dev/null +++ b/src/internal/syscall/unix/asm_aix_ppc64.s @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// +// System calls for aix/ppc64 are implemented in syscall/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0 + JMP syscall·syscall6(SB) diff --git a/src/internal/syscall/unix/asm_solaris.s b/src/internal/syscall/unix/asm_solaris.s new file mode 100644 index 0000000000000..2057338315861 --- /dev/null +++ b/src/internal/syscall/unix/asm_solaris.s @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// System calls for Solaris are implemented in runtime/syscall_solaris.go + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) diff --git a/src/internal/syscall/unix/at.go b/src/internal/syscall/unix/at.go new file mode 100644 index 0000000000000..f857d6828004e --- /dev/null +++ b/src/internal/syscall/unix/at.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux openbsd netbsd dragonfly + +package unix + +import ( + "syscall" + "unsafe" +) + +func Unlinkat(dirfd int, path string, flags int) error { + var p *byte + p, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + _, _, errno := syscall.Syscall(unlinkatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags)) + if errno != 0 { + return errno + } + + return nil +} + +func Openat(dirfd int, path string, flags int, perm uint32) (int, error) { + var p *byte + p, err := syscall.BytePtrFromString(path) + if err != nil { + return 0, err + } + + fd, _, errno := syscall.Syscall6(openatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0) + if errno != 0 { + return 0, errno + } + + return int(fd), nil +} + +func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error { + var p *byte + p, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + _, _, errno := syscall.Syscall6(fstatatTrap, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if errno != 0 { + return errno + } + + return nil + +} diff --git a/src/internal/syscall/unix/at_aix.go b/src/internal/syscall/unix/at_aix.go new file mode 100644 index 0000000000000..425df982118ae --- /dev/null +++ b/src/internal/syscall/unix/at_aix.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_openat openat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.a/shr_64.o" + +const ( + AT_REMOVEDIR = 0x1 + AT_SYMLINK_NOFOLLOW = 0x1 +) diff --git a/src/internal/syscall/unix/at_darwin.go b/src/internal/syscall/unix/at_darwin.go new file mode 100644 index 0000000000000..a88a27e0c6c52 --- /dev/null +++ b/src/internal/syscall/unix/at_darwin.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "syscall" + _ "unsafe" // for linkname +) + +func Unlinkat(dirfd int, path string, flags int) error { + return unlinkat(dirfd, path, flags) +} + +func Openat(dirfd int, path string, flags int, perm uint32) (int, error) { + return openat(dirfd, path, flags, perm) +} + +func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error { + return fstatat(dirfd, path, stat, flags) +} + +//go:linkname unlinkat syscall.unlinkat +func unlinkat(dirfd int, path string, flags int) error + +//go:linkname openat syscall.openat +func openat(dirfd int, path string, flags int, perm uint32) (int, error) + +//go:linkname fstatat syscall.fstatat +func fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error diff --git a/src/internal/syscall/unix/at_freebsd.go b/src/internal/syscall/unix/at_freebsd.go new file mode 100644 index 0000000000000..e171f4dbb5a29 --- /dev/null +++ b/src/internal/syscall/unix/at_freebsd.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unix + +import ( + "syscall" + "unsafe" +) + +const ( + AT_REMOVEDIR = 0x800 + AT_SYMLINK_NOFOLLOW = 0x200 +) + +func Unlinkat(dirfd int, path string, flags int) error { + p, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + _, _, errno := syscall.Syscall(syscall.SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags)) + if errno != 0 { + return errno + } + + return nil +} + +func Openat(dirfd int, path string, flags int, perm uint32) (int, error) { + p, err := syscall.BytePtrFromString(path) + if err != nil { + return 0, err + } + + fd, _, errno := syscall.Syscall6(syscall.SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0) + if errno != 0 { + return 0, errno + } + + return int(fd), nil +} + +func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error { + return syscall.Fstatat(dirfd, path, stat, flags) +} diff --git a/src/internal/syscall/unix/at_libc.go b/src/internal/syscall/unix/at_libc.go new file mode 100644 index 0000000000000..6c3a8c916039f --- /dev/null +++ b/src/internal/syscall/unix/at_libc.go @@ -0,0 +1,64 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix solaris + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname procFstatat libc_fstatat +//go:linkname procOpenat libc_openat +//go:linkname procUnlinkat libc_unlinkat + +var ( + procFstatat, + procOpenat, + procUnlinkat uintptr +) + +func Unlinkat(dirfd int, path string, flags int) error { + p, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + _, _, errno := syscall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0) + if errno != 0 { + return errno + } + + return nil +} + +func Openat(dirfd int, path string, flags int, perm uint32) (int, error) { + p, err := syscall.BytePtrFromString(path) + if err != nil { + return 0, err + } + + fd, _, errno := syscall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(flags), uintptr(perm), 0, 0) + if errno != 0 { + return 0, errno + } + + return int(fd), nil +} + +func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error { + p, err := syscall.BytePtrFromString(path) + if err != nil { + return err + } + + _, _, errno := syscall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(p)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if errno != 0 { + return errno + } + + return nil +} diff --git a/src/internal/syscall/unix/at_solaris.go b/src/internal/syscall/unix/at_solaris.go new file mode 100644 index 0000000000000..e917c4fc9be11 --- /dev/null +++ b/src/internal/syscall/unix/at_solaris.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import "syscall" + +// Implemented as sysvicall6 in runtime/syscall_solaris.go. 
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" +//go:cgo_import_dynamic libc_openat openat "libc.so" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +const ( + AT_REMOVEDIR = 0x1 + AT_SYMLINK_NOFOLLOW = 0x1000 +) diff --git a/src/internal/syscall/unix/at_sysnum_darwin.go b/src/internal/syscall/unix/at_sysnum_darwin.go new file mode 100644 index 0000000000000..12b7d7988230b --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +const unlinkatTrap uintptr = 472 +const openatTrap uintptr = 463 +const fstatatTrap uintptr = 470 + +const AT_REMOVEDIR = 0x80 +const AT_SYMLINK_NOFOLLOW = 0x0020 diff --git a/src/internal/syscall/unix/at_sysnum_dragonfly.go b/src/internal/syscall/unix/at_sysnum_dragonfly.go new file mode 100644 index 0000000000000..cec9abce6a28e --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_dragonfly.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import "syscall" + +const unlinkatTrap uintptr = syscall.SYS_UNLINKAT +const openatTrap uintptr = syscall.SYS_OPENAT +const fstatatTrap uintptr = syscall.SYS_FSTATAT + +const AT_REMOVEDIR = 0x2 +const AT_SYMLINK_NOFOLLOW = 0x1 diff --git a/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go b/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go new file mode 100644 index 0000000000000..c6ea206c121b0 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_fstatat64_linux.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 + +package unix + +import "syscall" + +const fstatatTrap uintptr = syscall.SYS_FSTATAT64 diff --git a/src/internal/syscall/unix/at_sysnum_fstatat_linux.go b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go new file mode 100644 index 0000000000000..580e7997f8d19 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_fstatat_linux.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 + +package unix + +import "syscall" + +const fstatatTrap uintptr = syscall.SYS_FSTATAT diff --git a/src/internal/syscall/unix/at_sysnum_linux.go b/src/internal/syscall/unix/at_sysnum_linux.go new file mode 100644 index 0000000000000..fa7cd75d42653 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_linux.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import "syscall" + +const unlinkatTrap uintptr = syscall.SYS_UNLINKAT +const openatTrap uintptr = syscall.SYS_OPENAT + +const AT_REMOVEDIR = 0x200 +const AT_SYMLINK_NOFOLLOW = 0x100 diff --git a/src/internal/syscall/unix/at_sysnum_netbsd.go b/src/internal/syscall/unix/at_sysnum_netbsd.go new file mode 100644 index 0000000000000..fe45e296d7ec2 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_netbsd.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unix + +import "syscall" + +const unlinkatTrap uintptr = syscall.SYS_UNLINKAT +const openatTrap uintptr = syscall.SYS_OPENAT +const fstatatTrap uintptr = syscall.SYS_FSTATAT + +const AT_REMOVEDIR = 0x800 +const AT_SYMLINK_NOFOLLOW = 0x200 diff --git a/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go b/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go new file mode 100644 index 0000000000000..e76c1cbdceba5 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_newfstatat_linux.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 mips64 mips64le ppc64 ppc64le s390x + +package unix + +import "syscall" + +const fstatatTrap uintptr = syscall.SYS_NEWFSTATAT diff --git a/src/internal/syscall/unix/at_sysnum_openbsd.go b/src/internal/syscall/unix/at_sysnum_openbsd.go new file mode 100644 index 0000000000000..c2d48b9914d36 --- /dev/null +++ b/src/internal/syscall/unix/at_sysnum_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import "syscall" + +const unlinkatTrap uintptr = syscall.SYS_UNLINKAT +const openatTrap uintptr = syscall.SYS_OPENAT +const fstatatTrap uintptr = syscall.SYS_FSTATAT + +const AT_REMOVEDIR = 0x08 +const AT_SYMLINK_NOFOLLOW = 0x02 diff --git a/src/internal/syscall/unix/getrandom_freebsd.go b/src/internal/syscall/unix/getrandom_freebsd.go new file mode 100644 index 0000000000000..f1ba5730c9695 --- /dev/null +++ b/src/internal/syscall/unix/getrandom_freebsd.go @@ -0,0 +1,48 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unix + +import ( + "sync/atomic" + "syscall" + "unsafe" +) + +var randomUnsupported int32 // atomic + +// FreeBSD getrandom system call number. +const randomTrap uintptr = 563 + +// GetRandomFlag is a flag supported by the getrandom system call. +type GetRandomFlag uintptr + +const ( + // GRND_NONBLOCK means return EAGAIN rather than blocking. + GRND_NONBLOCK GetRandomFlag = 0x0001 + + // GRND_RANDOM is only set for portability purpose, no-op on FreeBSD. + GRND_RANDOM GetRandomFlag = 0x0002 +) + +// GetRandom calls the FreeBSD getrandom system call. +func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + if atomic.LoadInt32(&randomUnsupported) != 0 { + return 0, syscall.ENOSYS + } + r1, _, errno := syscall.Syscall(randomTrap, + uintptr(unsafe.Pointer(&p[0])), + uintptr(len(p)), + uintptr(flags)) + if errno != 0 { + if errno == syscall.ENOSYS { + atomic.StoreInt32(&randomUnsupported, 1) + } + return 0, errno + } + return int(r1), nil +} diff --git a/src/internal/syscall/unix/getrandom_linux.go b/src/internal/syscall/unix/getrandom_linux.go index 0d0d4f115c035..00d8110f6fc6b 100644 --- a/src/internal/syscall/unix/getrandom_linux.go +++ b/src/internal/syscall/unix/getrandom_linux.go @@ -26,9 +26,6 @@ const ( // GetRandom calls the Linux getrandom system call. 
// See https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=c6e9d6f38894798696f23c8084ca7edbf16ee895 func GetRandom(p []byte, flags GetRandomFlag) (n int, err error) { - if randomTrap == 0 { - return 0, syscall.ENOSYS - } if len(p) == 0 { return 0, nil } diff --git a/src/internal/syscall/unix/getrandom_linux_generic.go b/src/internal/syscall/unix/getrandom_linux_generic.go index 8425800b6da84..f8490ce97857c 100644 --- a/src/internal/syscall/unix/getrandom_linux_generic.go +++ b/src/internal/syscall/unix/getrandom_linux_generic.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm64 +// +build linux,arm64 package unix diff --git a/src/internal/syscall/unix/ioctl_aix.go b/src/internal/syscall/unix/ioctl_aix.go new file mode 100644 index 0000000000000..19d56c36a1315 --- /dev/null +++ b/src/internal/syscall/unix/ioctl_aix.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" +//go:linkname libc_ioctl libc_ioctl +var libc_ioctl uintptr + +// Implemented in syscall/syscall_aix.go. 
+func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func Ioctl(fd int, cmd int, args uintptr) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(cmd), uintptr(args), 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/src/internal/syscall/unix/nonblocking.go b/src/internal/syscall/unix/nonblocking.go index 818e9c91a5699..bcc350b56e1e1 100644 --- a/src/internal/syscall/unix/nonblocking.go +++ b/src/internal/syscall/unix/nonblocking.go @@ -2,22 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix dragonfly freebsd linux netbsd openbsd solaris package unix -import ( - "syscall" - _ "unsafe" // for go:linkname -) - -//go:linkname syscall_fcntl syscall.fcntl -func syscall_fcntl(fd int, cmd int, arg int) (val int, err error) +import "syscall" func IsNonblock(fd int) (nonblocking bool, err error) { - flag, err := syscall_fcntl(fd, syscall.F_GETFL, 0) - if err != nil { - return false, err + flag, _, e1 := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(syscall.F_GETFL), 0) + if e1 != 0 { + return false, e1 } return flag&syscall.O_NONBLOCK != 0, nil } diff --git a/src/internal/syscall/unix/nonblocking_darwin.go b/src/internal/syscall/unix/nonblocking_darwin.go new file mode 100644 index 0000000000000..e3dd3a06b013e --- /dev/null +++ b/src/internal/syscall/unix/nonblocking_darwin.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package unix + +import ( + "syscall" + _ "unsafe" // for go:linkname +) + +func IsNonblock(fd int) (nonblocking bool, err error) { + flag, e1 := fcntl(fd, syscall.F_GETFL, 0) + if e1 != nil { + return false, e1 + } + return flag&syscall.O_NONBLOCK != 0, nil +} + +// Implemented in syscall/syscall_darwin.go. +//go:linkname fcntl syscall.fcntl +func fcntl(fd int, cmd int, arg int) (int, error) diff --git a/src/internal/syscall/windows/mksyscall.go b/src/internal/syscall/windows/mksyscall.go index 23efb6a01ab77..a8edafb3c3b5a 100644 --- a/src/internal/syscall/windows/mksyscall.go +++ b/src/internal/syscall/windows/mksyscall.go @@ -4,4 +4,4 @@ package windows -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go security_windows.go psapi_windows.go symlink_windows.go diff --git a/src/internal/syscall/windows/symlink_windows.go b/src/internal/syscall/windows/symlink_windows.go index cc2163e93347d..b64d058d13eba 100644 --- a/src/internal/syscall/windows/symlink_windows.go +++ b/src/internal/syscall/windows/symlink_windows.go @@ -11,4 +11,29 @@ const ( // symlink support for CreateSymbolicLink() starting with Windows 10 (1703, v10.0.14972) SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGED_CREATE = 0x2 + + // FileInformationClass values + FileBasicInfo = 0 // FILE_BASIC_INFO + FileStandardInfo = 1 // FILE_STANDARD_INFO + FileNameInfo = 2 // FILE_NAME_INFO + FileStreamInfo = 7 // FILE_STREAM_INFO + FileCompressionInfo = 8 // FILE_COMPRESSION_INFO + FileAttributeTagInfo = 9 // FILE_ATTRIBUTE_TAG_INFO + FileIdBothDirectoryInfo = 0xa // FILE_ID_BOTH_DIR_INFO + FileIdBothDirectoryRestartInfo = 0xb // FILE_ID_BOTH_DIR_INFO + FileRemoteProtocolInfo = 0xd // FILE_REMOTE_PROTOCOL_INFO + FileFullDirectoryInfo = 0xe // FILE_FULL_DIR_INFO + FileFullDirectoryRestartInfo = 0xf 
// FILE_FULL_DIR_INFO + FileStorageInfo = 0x10 // FILE_STORAGE_INFO + FileAlignmentInfo = 0x11 // FILE_ALIGNMENT_INFO + FileIdInfo = 0x12 // FILE_ID_INFO + FileIdExtdDirectoryInfo = 0x13 // FILE_ID_EXTD_DIR_INFO + FileIdExtdDirectoryRestartInfo = 0x14 // FILE_ID_EXTD_DIR_INFO ) + +type FILE_ATTRIBUTE_TAG_INFO struct { + FileAttributes uint32 + ReparseTag uint32 +} + +//sys GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go index 66fe9324c0ae1..121132f6f7326 100644 --- a/src/internal/syscall/windows/syscall_windows.go +++ b/src/internal/syscall/windows/syscall_windows.go @@ -12,7 +12,11 @@ import ( const ( ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_LOCK_FAILED syscall.Errno = 167 ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 ) @@ -255,6 +259,14 @@ func Rename(oldpath, newpath string) error { return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) } +//sys LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.LockFileEx +//sys UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) = kernel32.UnlockFileEx + +const ( + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 +) + const MB_ERR_INVALID_CHARS = 8 //sys GetACP() (acp uint32) = kernel32.GetACP diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index 550a8a5bd446c..9527a370a4e01 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -44,28 +44,31 @@ 
var ( moduserenv = syscall.NewLazyDLL(sysdll.Add("userenv.dll")) modpsapi = syscall.NewLazyDLL(sysdll.Add("psapi.dll")) - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procWSASocketW = modws2_32.NewProc("WSASocketW") - procGetACP = modkernel32.NewProc("GetACP") - procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procNetShareAdd = modnetapi32.NewProc("NetShareAdd") - procNetShareDel = modnetapi32.NewProc("NetShareDel") - procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") - procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procWSASocketW = modws2_32.NewProc("WSASocketW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procGetACP = 
modkernel32.NewProc("GetACP") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procNetShareAdd = modnetapi32.NewProc("NetShareAdd") + procNetShareDel = modnetapi32.NewProc("NetShareDel") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") + procNetUserGetLocalGroups = modnetapi32.NewProc("NetUserGetLocalGroups") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") ) func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { @@ -126,6 +129,30 @@ func WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtoco return } +func LockFileEx(file syscall.Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnlockFileEx(file syscall.Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped 
*syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetACP() (acp uint32) { r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) acp = uint32(r0) @@ -321,3 +348,15 @@ func GetProcessMemoryInfo(handle syscall.Handle, memCounters *PROCESS_MEMORY_COU } return } + +func GetFileInformationByHandleEx(handle syscall.Handle, class uint32, info *byte, bufsize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(info)), uintptr(bufsize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go new file mode 100644 index 0000000000000..cc19fdf8912d2 --- /dev/null +++ b/src/internal/trace/gc.go @@ -0,0 +1,825 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "container/heap" + "math" + "sort" + "strings" + "time" +) + +// MutatorUtil is a change in mutator utilization at a particular +// time. Mutator utilization functions are represented as a +// time-ordered []MutatorUtil. +type MutatorUtil struct { + Time int64 + // Util is the mean mutator utilization starting at Time. This + // is in the range [0, 1]. + Util float64 +} + +// UtilFlags controls the behavior of MutatorUtilization. +type UtilFlags int + +const ( + // UtilSTW means utilization should account for STW events. + UtilSTW UtilFlags = 1 << iota + // UtilBackground means utilization should account for + // background mark workers. 
+ UtilBackground + // UtilAssist means utilization should account for mark + // assists. + UtilAssist + // UtilSweep means utilization should account for sweeping. + UtilSweep + + // UtilPerProc means each P should be given a separate + // utilization function. Otherwise, there is a single function + // and each P is given a fraction of the utilization. + UtilPerProc +) + +// MutatorUtilization returns a set of mutator utilization functions +// for the given trace. Each function will always end with 0 +// utilization. The bounds of each function are implicit in the first +// and last event; outside of these bounds each function is undefined. +// +// If the UtilPerProc flag is not given, this always returns a single +// utilization function. Otherwise, it returns one function per P. +func MutatorUtilization(events []*Event, flags UtilFlags) [][]MutatorUtil { + if len(events) == 0 { + return nil + } + + type perP struct { + // gc > 0 indicates that GC is active on this P. + gc int + // series the logical series number for this P. This + // is necessary because Ps may be removed and then + // re-added, and then the new P needs a new series. + series int + } + ps := []perP{} + stw := 0 + + out := [][]MutatorUtil{} + assists := map[uint64]bool{} + block := map[uint64]*Event{} + bgMark := map[uint64]bool{} + + for _, ev := range events { + switch ev.Type { + case EvGomaxprocs: + gomaxprocs := int(ev.Args[0]) + if len(ps) > gomaxprocs { + if flags&UtilPerProc != 0 { + // End each P's series. + for _, p := range ps[gomaxprocs:] { + out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, 0}) + } + } + ps = ps[:gomaxprocs] + } + for len(ps) < gomaxprocs { + // Start new P's series. 
+ series := 0 + if flags&UtilPerProc != 0 || len(out) == 0 { + series = len(out) + out = append(out, []MutatorUtil{{ev.Ts, 1}}) + } + ps = append(ps, perP{series: series}) + } + case EvGCSTWStart: + if flags&UtilSTW != 0 { + stw++ + } + case EvGCSTWDone: + if flags&UtilSTW != 0 { + stw-- + } + case EvGCMarkAssistStart: + if flags&UtilAssist != 0 { + ps[ev.P].gc++ + assists[ev.G] = true + } + case EvGCMarkAssistDone: + if flags&UtilAssist != 0 { + ps[ev.P].gc-- + delete(assists, ev.G) + } + case EvGCSweepStart: + if flags&UtilSweep != 0 { + ps[ev.P].gc++ + } + case EvGCSweepDone: + if flags&UtilSweep != 0 { + ps[ev.P].gc-- + } + case EvGoStartLabel: + if flags&UtilBackground != 0 && strings.HasPrefix(ev.SArgs[0], "GC ") && ev.SArgs[0] != "GC (idle)" { + // Background mark worker. + // + // If we're in per-proc mode, we don't + // count dedicated workers because + // they kick all of the goroutines off + // that P, so don't directly + // contribute to goroutine latency. + if !(flags&UtilPerProc != 0 && ev.SArgs[0] == "GC (dedicated)") { + bgMark[ev.G] = true + ps[ev.P].gc++ + } + } + fallthrough + case EvGoStart: + if assists[ev.G] { + // Unblocked during assist. + ps[ev.P].gc++ + } + block[ev.G] = ev.Link + default: + if ev != block[ev.G] { + continue + } + + if assists[ev.G] { + // Blocked during assist. + ps[ev.P].gc-- + } + if bgMark[ev.G] { + // Background mark worker done. + ps[ev.P].gc-- + delete(bgMark, ev.G) + } + delete(block, ev.G) + } + + if flags&UtilPerProc == 0 { + // Compute the current average utilization. + if len(ps) == 0 { + continue + } + gcPs := 0 + if stw > 0 { + gcPs = len(ps) + } else { + for i := range ps { + if ps[i].gc > 0 { + gcPs++ + } + } + } + mu := MutatorUtil{ev.Ts, 1 - float64(gcPs)/float64(len(ps))} + + // Record the utilization change. (Since + // len(ps) == len(out), we know len(out) > 0.) + out[0] = addUtil(out[0], mu) + } else { + // Check for per-P utilization changes. 
+ for i := range ps { + p := &ps[i] + util := 1.0 + if stw > 0 || p.gc > 0 { + util = 0.0 + } + out[p.series] = addUtil(out[p.series], MutatorUtil{ev.Ts, util}) + } + } + } + + // Add final 0 utilization event to any remaining series. This + // is important to mark the end of the trace. The exact value + // shouldn't matter since no window should extend beyond this, + // but using 0 is symmetric with the start of the trace. + mu := MutatorUtil{events[len(events)-1].Ts, 0} + for i := range ps { + out[ps[i].series] = addUtil(out[ps[i].series], mu) + } + return out +} + +func addUtil(util []MutatorUtil, mu MutatorUtil) []MutatorUtil { + if len(util) > 0 { + if mu.Util == util[len(util)-1].Util { + // No change. + return util + } + if mu.Time == util[len(util)-1].Time { + // Take the lowest utilization at a time stamp. + if mu.Util < util[len(util)-1].Util { + util[len(util)-1] = mu + } + return util + } + } + return append(util, mu) +} + +// totalUtil is total utilization, measured in nanoseconds. This is a +// separate type primarily to distinguish it from mean utilization, +// which is also a float64. +type totalUtil float64 + +func totalUtilOf(meanUtil float64, dur int64) totalUtil { + return totalUtil(meanUtil * float64(dur)) +} + +// mean returns the mean utilization over dur. +func (u totalUtil) mean(dur time.Duration) float64 { + return float64(u) / float64(dur) +} + +// An MMUCurve is the minimum mutator utilization curve across +// multiple window sizes. +type MMUCurve struct { + series []mmuSeries +} + +type mmuSeries struct { + util []MutatorUtil + // sums[j] is the cumulative sum of util[:j]. + sums []totalUtil + // bands summarizes util in non-overlapping bands of duration + // bandDur. + bands []mmuBand + // bandDur is the duration of each band. + bandDur int64 +} + +type mmuBand struct { + // minUtil is the minimum instantaneous mutator utilization in + // this band. 
+ minUtil float64 + // cumUtil is the cumulative total mutator utilization between + // time 0 and the left edge of this band. + cumUtil totalUtil + + // integrator is the integrator for the left edge of this + // band. + integrator integrator +} + +// NewMMUCurve returns an MMU curve for the given mutator utilization +// function. +func NewMMUCurve(utils [][]MutatorUtil) *MMUCurve { + series := make([]mmuSeries, len(utils)) + for i, util := range utils { + series[i] = newMMUSeries(util) + } + return &MMUCurve{series} +} + +// bandsPerSeries is the number of bands to divide each series into. +// This is only changed by tests. +var bandsPerSeries = 1000 + +func newMMUSeries(util []MutatorUtil) mmuSeries { + // Compute cumulative sum. + sums := make([]totalUtil, len(util)) + var prev MutatorUtil + var sum totalUtil + for j, u := range util { + sum += totalUtilOf(prev.Util, u.Time-prev.Time) + sums[j] = sum + prev = u + } + + // Divide the utilization curve up into equal size + // non-overlapping "bands" and compute a summary for each of + // these bands. + // + // Compute the duration of each band. + numBands := bandsPerSeries + if numBands > len(util) { + // There's no point in having lots of bands if there + // aren't many events. + numBands = len(util) + } + dur := util[len(util)-1].Time - util[0].Time + bandDur := (dur + int64(numBands) - 1) / int64(numBands) + if bandDur < 1 { + bandDur = 1 + } + // Compute the bands. There are numBands+1 bands in order to + // record the final cumulative sum. 
+ bands := make([]mmuBand, numBands+1) + s := mmuSeries{util, sums, bands, bandDur} + leftSum := integrator{&s, 0} + for i := range bands { + startTime, endTime := s.bandTime(i) + cumUtil := leftSum.advance(startTime) + predIdx := leftSum.pos + minUtil := 1.0 + for i := predIdx; i < len(util) && util[i].Time < endTime; i++ { + minUtil = math.Min(minUtil, util[i].Util) + } + bands[i] = mmuBand{minUtil, cumUtil, leftSum} + } + + return s +} + +func (s *mmuSeries) bandTime(i int) (start, end int64) { + start = int64(i)*s.bandDur + s.util[0].Time + end = start + s.bandDur + return +} + +type bandUtil struct { + // Utilization series index + series int + // Band index + i int + // Lower bound of mutator utilization for all windows + // with a left edge in this band. + utilBound float64 +} + +type bandUtilHeap []bandUtil + +func (h bandUtilHeap) Len() int { + return len(h) +} + +func (h bandUtilHeap) Less(i, j int) bool { + return h[i].utilBound < h[j].utilBound +} + +func (h bandUtilHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *bandUtilHeap) Push(x interface{}) { + *h = append(*h, x.(bandUtil)) +} + +func (h *bandUtilHeap) Pop() interface{} { + x := (*h)[len(*h)-1] + *h = (*h)[:len(*h)-1] + return x +} + +// UtilWindow is a specific window at Time. +type UtilWindow struct { + Time int64 + // MutatorUtil is the mean mutator utilization in this window. 
+ MutatorUtil float64 +} + +type utilHeap []UtilWindow + +func (h utilHeap) Len() int { + return len(h) +} + +func (h utilHeap) Less(i, j int) bool { + if h[i].MutatorUtil != h[j].MutatorUtil { + return h[i].MutatorUtil > h[j].MutatorUtil + } + return h[i].Time > h[j].Time +} + +func (h utilHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *utilHeap) Push(x interface{}) { + *h = append(*h, x.(UtilWindow)) +} + +func (h *utilHeap) Pop() interface{} { + x := (*h)[len(*h)-1] + *h = (*h)[:len(*h)-1] + return x +} + +// An accumulator takes a windowed mutator utilization function and +// tracks various statistics for that function. +type accumulator struct { + mmu float64 + + // bound is the mutator utilization bound where adding any + // mutator utilization above this bound cannot affect the + // accumulated statistics. + bound float64 + + // Worst N window tracking + nWorst int + wHeap utilHeap + + // Mutator utilization distribution tracking + mud *mud + // preciseMass is the distribution mass that must be precise + // before accumulation is stopped. + preciseMass float64 + // lastTime and lastMU are the previous point added to the + // windowed mutator utilization function. + lastTime int64 + lastMU float64 +} + +// resetTime declares a discontinuity in the windowed mutator +// utilization function by resetting the current time. +func (acc *accumulator) resetTime() { + // This only matters for distribution collection, since that's + // the only thing that depends on the progression of the + // windowed mutator utilization function. + acc.lastTime = math.MaxInt64 +} + +// addMU adds a point to the windowed mutator utilization function at +// (time, mu). This must be called for monotonically increasing values +// of time. +// +// It returns true if further calls to addMU would be pointless. 
+func (acc *accumulator) addMU(time int64, mu float64, window time.Duration) bool { + if mu < acc.mmu { + acc.mmu = mu + } + acc.bound = acc.mmu + + if acc.nWorst == 0 { + // If the minimum has reached zero, it can't go any + // lower, so we can stop early. + return mu == 0 + } + + // Consider adding this window to the n worst. + if len(acc.wHeap) < acc.nWorst || mu < acc.wHeap[0].MutatorUtil { + // This window is lower than the K'th worst window. + // + // Check if there's any overlapping window + // already in the heap and keep whichever is + // worse. + for i, ui := range acc.wHeap { + if time+int64(window) > ui.Time && ui.Time+int64(window) > time { + if ui.MutatorUtil <= mu { + // Keep the first window. + goto keep + } else { + // Replace it with this window. + heap.Remove(&acc.wHeap, i) + break + } + } + } + + heap.Push(&acc.wHeap, UtilWindow{time, mu}) + if len(acc.wHeap) > acc.nWorst { + heap.Pop(&acc.wHeap) + } + keep: + } + + if len(acc.wHeap) < acc.nWorst { + // We don't have N windows yet, so keep accumulating. + acc.bound = 1.0 + } else { + // Anything above the least worst window has no effect. + acc.bound = math.Max(acc.bound, acc.wHeap[0].MutatorUtil) + } + + if acc.mud != nil { + if acc.lastTime != math.MaxInt64 { + // Update distribution. + acc.mud.add(acc.lastMU, mu, float64(time-acc.lastTime)) + } + acc.lastTime, acc.lastMU = time, mu + if _, mudBound, ok := acc.mud.approxInvCumulativeSum(); ok { + acc.bound = math.Max(acc.bound, mudBound) + } else { + // We haven't accumulated enough total precise + // mass yet to even reach our goal, so keep + // accumulating. + acc.bound = 1 + } + // It's not worth checking percentiles every time, so + // just keep accumulating this band. + return false + } + + // If we've found enough 0 utilizations, we can stop immediately. + return len(acc.wHeap) == acc.nWorst && acc.wHeap[0].MutatorUtil == 0 +} + +// MMU returns the minimum mutator utilization for the given time +// window. 
This is the minimum utilization for all windows of this +// duration across the execution. The returned value is in the range +// [0, 1]. +func (c *MMUCurve) MMU(window time.Duration) (mmu float64) { + acc := accumulator{mmu: 1.0, bound: 1.0} + c.mmu(window, &acc) + return acc.mmu +} + +// Examples returns n specific examples of the lowest mutator +// utilization for the given window size. The returned windows will be +// disjoint (otherwise there would be a huge number of +// mostly-overlapping windows at the single lowest point). There are +// no guarantees on which set of disjoint windows this returns. +func (c *MMUCurve) Examples(window time.Duration, n int) (worst []UtilWindow) { + acc := accumulator{mmu: 1.0, bound: 1.0, nWorst: n} + c.mmu(window, &acc) + sort.Sort(sort.Reverse(acc.wHeap)) + return ([]UtilWindow)(acc.wHeap) +} + +// MUD returns mutator utilization distribution quantiles for the +// given window size. +// +// The mutator utilization distribution is the distribution of mean +// mutator utilization across all windows of the given window size in +// the trace. +// +// The minimum mutator utilization is the minimum (0th percentile) of +// this distribution. (However, if only the minimum is desired, it's +// more efficient to use the MMU method.) +func (c *MMUCurve) MUD(window time.Duration, quantiles []float64) []float64 { + if len(quantiles) == 0 { + return []float64{} + } + + // Each unrefined band contributes a known total mass to the + // distribution (bandDur except at the end), but in an unknown + // way. However, we know that all the mass it contributes must + // be at or above its worst-case mean mutator utilization. + // + // Hence, we refine bands until the highest desired + // distribution quantile is less than the next worst-case mean + // mutator utilization. At this point, all further + // contributions to the distribution must be beyond the + // desired quantile and hence cannot affect it. 
+ // + // First, find the highest desired distribution quantile. + maxQ := quantiles[0] + for _, q := range quantiles { + if q > maxQ { + maxQ = q + } + } + // The distribution's mass is in units of time (it's not + // normalized because this would make it more annoying to + // account for future contributions of unrefined bands). The + // total final mass will be the duration of the trace itself + // minus the window size. Using this, we can compute the mass + // corresponding to quantile maxQ. + var duration int64 + for _, s := range c.series { + duration1 := s.util[len(s.util)-1].Time - s.util[0].Time + if duration1 >= int64(window) { + duration += duration1 - int64(window) + } + } + qMass := float64(duration) * maxQ + + // Accumulate the MUD until we have precise information for + // everything to the left of qMass. + acc := accumulator{mmu: 1.0, bound: 1.0, preciseMass: qMass, mud: new(mud)} + acc.mud.setTrackMass(qMass) + c.mmu(window, &acc) + + // Evaluate the quantiles on the accumulated MUD. + out := make([]float64, len(quantiles)) + for i := range out { + mu, _ := acc.mud.invCumulativeSum(float64(duration) * quantiles[i]) + if math.IsNaN(mu) { + // There are a few legitimate ways this can + // happen: + // + // 1. If the window is the full trace + // duration, then the windowed MU function is + // only defined at a single point, so the MU + // distribution is not well-defined. + // + // 2. If there are no events, then the MU + // distribution has no mass. + // + // Either way, all of the quantiles will have + // converged toward the MMU at this point. 
+ mu = acc.mmu + } + out[i] = mu + } + return out +} + +func (c *MMUCurve) mmu(window time.Duration, acc *accumulator) { + if window <= 0 { + acc.mmu = 0 + return + } + + var bandU bandUtilHeap + windows := make([]time.Duration, len(c.series)) + for i, s := range c.series { + windows[i] = window + if max := time.Duration(s.util[len(s.util)-1].Time - s.util[0].Time); window > max { + windows[i] = max + } + + bandU1 := bandUtilHeap(s.mkBandUtil(i, windows[i])) + if bandU == nil { + bandU = bandU1 + } else { + bandU = append(bandU, bandU1...) + } + } + + // Process bands from lowest utilization bound to highest. + heap.Init(&bandU) + + // Refine each band into a precise window and MMU until + // refining the next lowest band can no longer affect the MMU + // or windows. + for len(bandU) > 0 && bandU[0].utilBound < acc.bound { + i := bandU[0].series + c.series[i].bandMMU(bandU[0].i, windows[i], acc) + heap.Pop(&bandU) + } +} + +func (c *mmuSeries) mkBandUtil(series int, window time.Duration) []bandUtil { + // For each band, compute the worst-possible total mutator + // utilization for all windows that start in that band. + + // minBands is the minimum number of bands a window can span + // and maxBands is the maximum number of bands a window can + // span in any alignment. + minBands := int((int64(window) + c.bandDur - 1) / c.bandDur) + maxBands := int((int64(window) + 2*(c.bandDur-1)) / c.bandDur) + if window > 1 && maxBands < 2 { + panic("maxBands < 2") + } + tailDur := int64(window) % c.bandDur + nUtil := len(c.bands) - maxBands + 1 + if nUtil < 0 { + nUtil = 0 + } + bandU := make([]bandUtil, nUtil) + for i := range bandU { + // To compute the worst-case MU, we assume the minimum + // for any bands that are only partially overlapped by + // some window and the mean for any bands that are + // completely covered by all windows. + var util totalUtil + + // Find the lowest and second lowest of the partial + // bands. 
+ l := c.bands[i].minUtil + r1 := c.bands[i+minBands-1].minUtil + r2 := c.bands[i+maxBands-1].minUtil + minBand := math.Min(l, math.Min(r1, r2)) + // Assume the worst window maximally overlaps the + // worst minimum and then the rest overlaps the second + // worst minimum. + if minBands == 1 { + util += totalUtilOf(minBand, int64(window)) + } else { + util += totalUtilOf(minBand, c.bandDur) + midBand := 0.0 + switch { + case minBand == l: + midBand = math.Min(r1, r2) + case minBand == r1: + midBand = math.Min(l, r2) + case minBand == r2: + midBand = math.Min(l, r1) + } + util += totalUtilOf(midBand, tailDur) + } + + // Add the total mean MU of bands that are completely + // overlapped by all windows. + if minBands > 2 { + util += c.bands[i+minBands-1].cumUtil - c.bands[i+1].cumUtil + } + + bandU[i] = bandUtil{series, i, util.mean(window)} + } + + return bandU +} + +// bandMMU computes the precise minimum mutator utilization for +// windows with a left edge in band bandIdx. +func (c *mmuSeries) bandMMU(bandIdx int, window time.Duration, acc *accumulator) { + util := c.util + + // We think of the mutator utilization over time as the + // box-filtered utilization function, which we call the + // "windowed mutator utilization function". The resulting + // function is continuous and piecewise linear (unless + // window==0, which we handle elsewhere), where the boundaries + // between segments occur when either edge of the window + // encounters a change in the instantaneous mutator + // utilization function. Hence, the minimum of this function + // will always occur when one of the edges of the window + // aligns with a utilization change, so these are the only + // points we need to consider. + // + // We compute the mutator utilization function incrementally + // by tracking the integral from t=0 to the left edge of the + // window and to the right edge of the window. 
+ left := c.bands[bandIdx].integrator + right := left + time, endTime := c.bandTime(bandIdx) + if utilEnd := util[len(util)-1].Time - int64(window); utilEnd < endTime { + endTime = utilEnd + } + acc.resetTime() + for { + // Advance edges to time and time+window. + mu := (right.advance(time+int64(window)) - left.advance(time)).mean(window) + if acc.addMU(time, mu, window) { + break + } + if time == endTime { + break + } + + // The maximum slope of the windowed mutator + // utilization function is 1/window, so we can always + // advance the time by at least (mu - mmu) * window + // without dropping below mmu. + minTime := time + int64((mu-acc.bound)*float64(window)) + + // Advance the window to the next time where either + // the left or right edge of the window encounters a + // change in the utilization curve. + if t1, t2 := left.next(time), right.next(time+int64(window))-int64(window); t1 < t2 { + time = t1 + } else { + time = t2 + } + if time < minTime { + time = minTime + } + if time >= endTime { + // For MMUs we could stop here, but for MUDs + // it's important that we span the entire + // band. + time = endTime + } + } +} + +// An integrator tracks a position in a utilization function and +// integrates it. +type integrator struct { + u *mmuSeries + // pos is the index in u.util of the current time's non-strict + // predecessor. + pos int +} + +// advance returns the integral of the utilization function from 0 to +// time. advance must be called on monotonically increasing values of +// times. +func (in *integrator) advance(time int64) totalUtil { + util, pos := in.u.util, in.pos + // Advance pos until pos+1 is time's strict successor (making + // pos time's non-strict predecessor). + // + // Very often, this will be nearby, so we optimize that case, + // but it may be arbitrarily far away, so we handled that + // efficiently, too. + const maxSeq = 8 + if pos+maxSeq < len(util) && util[pos+maxSeq].Time > time { + // Nearby. Use a linear scan. 
+ for pos+1 < len(util) && util[pos+1].Time <= time { + pos++ + } + } else { + // Far. Binary search for time's strict successor. + l, r := pos, len(util) + for l < r { + h := int(uint(l+r) >> 1) + if util[h].Time <= time { + l = h + 1 + } else { + r = h + } + } + pos = l - 1 // Non-strict predecessor. + } + in.pos = pos + var partial totalUtil + if time != util[pos].Time { + partial = totalUtilOf(util[pos].Util, time-util[pos].Time) + } + return in.u.sums[pos] + partial +} + +// next returns the smallest time t' > time of a change in the +// utilization function. +func (in *integrator) next(time int64) int64 { + for _, u := range in.u.util[in.pos:] { + if u.Time > time { + return u.Time + } + } + return 1<<63 - 1 +} diff --git a/src/internal/trace/gc_test.go b/src/internal/trace/gc_test.go new file mode 100644 index 0000000000000..da1cb90f5cdf3 --- /dev/null +++ b/src/internal/trace/gc_test.go @@ -0,0 +1,198 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "io/ioutil" + "math" + "testing" + "time" +) + +// aeq returns true if x and y are equal up to 8 digits (1 part in 100 +// million). 
+func aeq(x, y float64) bool { + if x < 0 && y < 0 { + x, y = -x, -y + } + const digits = 8 + factor := 1 - math.Pow(10, -digits+1) + return x*factor <= y && y*factor <= x +} + +func TestMMU(t *testing.T) { + t.Parallel() + + // MU + // 1.0 ***** ***** ***** + // 0.5 * * * * + // 0.0 ***** ***** + // 0 1 2 3 4 5 + util := [][]MutatorUtil{{ + {0e9, 1}, + {1e9, 0}, + {2e9, 1}, + {3e9, 0}, + {4e9, 1}, + {5e9, 0}, + }} + mmuCurve := NewMMUCurve(util) + + for _, test := range []struct { + window time.Duration + want float64 + worst []float64 + }{ + {0, 0, []float64{}}, + {time.Millisecond, 0, []float64{0, 0}}, + {time.Second, 0, []float64{0, 0}}, + {2 * time.Second, 0.5, []float64{0.5, 0.5}}, + {3 * time.Second, 1 / 3.0, []float64{1 / 3.0}}, + {4 * time.Second, 0.5, []float64{0.5}}, + {5 * time.Second, 3 / 5.0, []float64{3 / 5.0}}, + {6 * time.Second, 3 / 5.0, []float64{3 / 5.0}}, + } { + if got := mmuCurve.MMU(test.window); !aeq(test.want, got) { + t.Errorf("for %s window, want mu = %f, got %f", test.window, test.want, got) + } + worst := mmuCurve.Examples(test.window, 2) + // Which exact windows are returned is unspecified + // (and depends on the exact banding), so we just + // check that we got the right number with the right + // utilizations. + if len(worst) != len(test.worst) { + t.Errorf("for %s window, want worst %v, got %v", test.window, test.worst, worst) + } else { + for i := range worst { + if worst[i].MutatorUtil != test.worst[i] { + t.Errorf("for %s window, want worst %v, got %v", test.window, test.worst, worst) + break + } + } + } + } +} + +func TestMMUTrace(t *testing.T) { + // Can't be t.Parallel() because it modifies the + // testingOneBand package variable. 
+ + data, err := ioutil.ReadFile("testdata/stress_1_10_good") + if err != nil { + t.Fatalf("failed to read input file: %v", err) + } + _, events, err := parse(bytes.NewReader(data), "") + if err != nil { + t.Fatalf("failed to parse trace: %s", err) + } + mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist) + mmuCurve := NewMMUCurve(mu) + + // Test the optimized implementation against the "obviously + // correct" implementation. + for window := time.Nanosecond; window < 10*time.Second; window *= 10 { + want := mmuSlow(mu[0], window) + got := mmuCurve.MMU(window) + if !aeq(want, got) { + t.Errorf("want %f, got %f mutator utilization in window %s", want, got, window) + } + } + + // Test MUD with band optimization against MUD without band + // optimization. We don't have a simple testing implementation + // of MUDs (the simplest implementation is still quite + // complex), but this is still a pretty good test. + defer func(old int) { bandsPerSeries = old }(bandsPerSeries) + bandsPerSeries = 1 + mmuCurve2 := NewMMUCurve(mu) + quantiles := []float64{0, 1 - .999, 1 - .99} + for window := time.Microsecond; window < time.Second; window *= 10 { + mud1 := mmuCurve.MUD(window, quantiles) + mud2 := mmuCurve2.MUD(window, quantiles) + for i := range mud1 { + if !aeq(mud1[i], mud2[i]) { + t.Errorf("for quantiles %v at window %v, want %v, got %v", quantiles, window, mud2, mud1) + break + } + } + } +} + +func BenchmarkMMU(b *testing.B) { + data, err := ioutil.ReadFile("testdata/stress_1_10_good") + if err != nil { + b.Fatalf("failed to read input file: %v", err) + } + _, events, err := parse(bytes.NewReader(data), "") + if err != nil { + b.Fatalf("failed to parse trace: %s", err) + } + mu := MutatorUtilization(events.Events, UtilSTW|UtilBackground|UtilAssist|UtilSweep) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + mmuCurve := NewMMUCurve(mu) + xMin, xMax := time.Microsecond, time.Second + logMin, logMax := math.Log(float64(xMin)), math.Log(float64(xMax)) + 
const samples = 100 + for i := 0; i < samples; i++ { + window := time.Duration(math.Exp(float64(i)/(samples-1)*(logMax-logMin) + logMin)) + mmuCurve.MMU(window) + } + } +} + +func mmuSlow(util []MutatorUtil, window time.Duration) (mmu float64) { + if max := time.Duration(util[len(util)-1].Time - util[0].Time); window > max { + window = max + } + + mmu = 1.0 + + // muInWindow returns the mean mutator utilization between + // util[0].Time and end. + muInWindow := func(util []MutatorUtil, end int64) float64 { + total := 0.0 + var prevU MutatorUtil + for _, u := range util { + if u.Time > end { + total += prevU.Util * float64(end-prevU.Time) + break + } + total += prevU.Util * float64(u.Time-prevU.Time) + prevU = u + } + return total / float64(end-util[0].Time) + } + update := func() { + for i, u := range util { + if u.Time+int64(window) > util[len(util)-1].Time { + break + } + mmu = math.Min(mmu, muInWindow(util[i:], u.Time+int64(window))) + } + } + + // Consider all left-aligned windows. + update() + // Reverse the trace. Slightly subtle because each MutatorUtil + // is a *change*. + rutil := make([]MutatorUtil, len(util)) + if util[len(util)-1].Util != 0 { + panic("irreversible trace") + } + for i, u := range util { + util1 := 0.0 + if i != 0 { + util1 = util[i-1].Util + } + rutil[len(rutil)-i-1] = MutatorUtil{Time: -u.Time, Util: util1} + } + util = rutil + // Consider all right-aligned windows. + update() + return +} diff --git a/src/internal/trace/goroutines.go b/src/internal/trace/goroutines.go index 2d7d3aa3ae250..a5fda489bea79 100644 --- a/src/internal/trace/goroutines.go +++ b/src/internal/trace/goroutines.go @@ -37,7 +37,7 @@ type UserRegionDesc struct { // Region end event. Normally EvUserRegion end event or nil, // but can be EvGoStop or EvGoEnd event if the goroutine - // terminated without explicitely ending the region. + // terminated without explicitly ending the region. 
End *Event GExecutionStat diff --git a/src/internal/trace/mud.go b/src/internal/trace/mud.go new file mode 100644 index 0000000000000..88263060a0b1d --- /dev/null +++ b/src/internal/trace/mud.go @@ -0,0 +1,223 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "sort" +) + +// mud is an updatable mutator utilization distribution. +// +// This is a continuous distribution of duration over mutator +// utilization. For example, the integral from mutator utilization a +// to b is the total duration during which the mutator utilization was +// in the range [a, b]. +// +// This distribution is *not* normalized (it is not a probability +// distribution). This makes it easier to work with as it's being +// updated. +// +// It is represented as the sum of scaled uniform distribution +// functions and Dirac delta functions (which are treated as +// degenerate uniform distributions). +type mud struct { + sorted, unsorted []edge + + // trackMass is the inverse cumulative sum to track as the + // distribution is updated. + trackMass float64 + // trackBucket is the bucket in which trackMass falls. If the + // total mass of the distribution is < trackMass, this is + // len(hist). + trackBucket int + // trackSum is the cumulative sum of hist[:trackBucket]. Once + // trackSum >= trackMass, trackBucket must be recomputed. + trackSum float64 + + // hist is a hierarchical histogram of distribution mass. + hist [mudDegree]float64 +} + +const ( + // mudDegree is the number of buckets in the MUD summary + // histogram. + mudDegree = 1024 +) + +type edge struct { + // At x, the function increases by y. + x, delta float64 + // Additionally at x is a Dirac delta function with area dirac. + dirac float64 +} + +// add adds a uniform function over [l, r] scaled so the total weight +// of the uniform is area. 
If l==r, this adds a Dirac delta function. +func (d *mud) add(l, r, area float64) { + if area == 0 { + return + } + + if r < l { + l, r = r, l + } + + // Add the edges. + if l == r { + d.unsorted = append(d.unsorted, edge{l, 0, area}) + } else { + delta := area / (r - l) + d.unsorted = append(d.unsorted, edge{l, delta, 0}, edge{r, -delta, 0}) + } + + // Update the histogram. + h := &d.hist + lbFloat, lf := math.Modf(l * mudDegree) + lb := int(lbFloat) + if lb >= mudDegree { + lb, lf = mudDegree-1, 1 + } + if l == r { + h[lb] += area + } else { + rbFloat, rf := math.Modf(r * mudDegree) + rb := int(rbFloat) + if rb >= mudDegree { + rb, rf = mudDegree-1, 1 + } + if lb == rb { + h[lb] += area + } else { + perBucket := area / (r - l) / mudDegree + h[lb] += perBucket * (1 - lf) + h[rb] += perBucket * rf + for i := lb + 1; i < rb; i++ { + h[i] += perBucket + } + } + } + + // Update mass tracking. + if thresh := float64(d.trackBucket) / mudDegree; l < thresh { + if r < thresh { + d.trackSum += area + } else { + d.trackSum += area * (thresh - l) / (r - l) + } + if d.trackSum >= d.trackMass { + // The tracked mass now falls in a different + // bucket. Recompute the inverse cumulative sum. + d.setTrackMass(d.trackMass) + } + } +} + +// setTrackMass sets the mass to track the inverse cumulative sum for. +// +// Specifically, mass is a cumulative duration, and the mutator +// utilization bounds for this duration can be queried using +// approxInvCumulativeSum. +func (d *mud) setTrackMass(mass float64) { + d.trackMass = mass + + // Find the bucket currently containing trackMass by computing + // the cumulative sum. + sum := 0.0 + for i, val := range d.hist[:] { + newSum := sum + val + if newSum > mass { + // mass falls in bucket i. 
+ d.trackBucket = i + d.trackSum = sum + return + } + sum = newSum + } + d.trackBucket = len(d.hist) + d.trackSum = sum +} + +// approxInvCumulativeSum is like invCumulativeSum, but specifically +// operates on the tracked mass and returns an upper and lower bound +// approximation of the inverse cumulative sum. +// +// The true inverse cumulative sum will be in the range [lower, upper). +func (d *mud) approxInvCumulativeSum() (float64, float64, bool) { + if d.trackBucket == len(d.hist) { + return math.NaN(), math.NaN(), false + } + return float64(d.trackBucket) / mudDegree, float64(d.trackBucket+1) / mudDegree, true +} + +// invCumulativeSum returns x such that the integral of d from -∞ to x +// is y. If the total weight of d is less than y, it returns the +// maximum of the distribution and false. +// +// Specifically, y is a cumulative duration, and invCumulativeSum +// returns the mutator utilization x such that at least y time has +// been spent with mutator utilization <= x. +func (d *mud) invCumulativeSum(y float64) (float64, bool) { + if len(d.sorted) == 0 && len(d.unsorted) == 0 { + return math.NaN(), false + } + + // Sort edges. + edges := d.unsorted + sort.Slice(edges, func(i, j int) bool { + return edges[i].x < edges[j].x + }) + // Merge with sorted edges. + d.unsorted = nil + if d.sorted == nil { + d.sorted = edges + } else { + oldSorted := d.sorted + newSorted := make([]edge, len(oldSorted)+len(edges)) + i, j := 0, 0 + for o := range newSorted { + if i >= len(oldSorted) { + copy(newSorted[o:], edges[j:]) + break + } else if j >= len(edges) { + copy(newSorted[o:], oldSorted[i:]) + break + } else if oldSorted[i].x < edges[j].x { + newSorted[o] = oldSorted[i] + i++ + } else { + newSorted[o] = edges[j] + j++ + } + } + d.sorted = newSorted + } + + // Traverse edges in order computing a cumulative sum. 
+ csum, rate, prevX := 0.0, 0.0, 0.0 + for _, e := range d.sorted { + newCsum := csum + (e.x-prevX)*rate + if newCsum >= y { + // y was exceeded between the previous edge + // and this one. + if rate == 0 { + // Anywhere between prevX and + // e.x will do. We return e.x + // because that takes care of + // the y==0 case naturally. + return e.x, true + } + return (y-csum)/rate + prevX, true + } + newCsum += e.dirac + if newCsum >= y { + // y was exceeded by the Dirac delta at e.x. + return e.x, true + } + csum, prevX = newCsum, e.x + rate += e.delta + } + return prevX, false +} diff --git a/src/internal/trace/mud_test.go b/src/internal/trace/mud_test.go new file mode 100644 index 0000000000000..b3d74dcf342e5 --- /dev/null +++ b/src/internal/trace/mud_test.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math/rand" + "testing" +) + +func TestMUD(t *testing.T) { + // Insert random uniforms and check histogram mass and + // cumulative sum approximations. + rnd := rand.New(rand.NewSource(42)) + mass := 0.0 + var mud mud + for i := 0; i < 100; i++ { + area, l, r := rnd.Float64(), rnd.Float64(), rnd.Float64() + if rnd.Intn(10) == 0 { + r = l + } + t.Log(l, r, area) + mud.add(l, r, area) + mass += area + + // Check total histogram weight. + hmass := 0.0 + for _, val := range mud.hist { + hmass += val + } + if !aeq(mass, hmass) { + t.Fatalf("want mass %g, got %g", mass, hmass) + } + + // Check inverse cumulative sum approximations. 
+ for j := 0.0; j < mass; j += mass * 0.099 { + mud.setTrackMass(j) + l, u, ok := mud.approxInvCumulativeSum() + inv, ok2 := mud.invCumulativeSum(j) + if !ok || !ok2 { + t.Fatalf("inverse cumulative sum failed: approx %v, exact %v", ok, ok2) + } + if !(l <= inv && inv < u) { + t.Fatalf("inverse(%g) = %g, not ∈ [%g, %g)", j, inv, l, u) + } + } + } +} + +func TestMUDTracking(t *testing.T) { + // Test that the tracked mass is tracked correctly across + // updates. + rnd := rand.New(rand.NewSource(42)) + const uniforms = 100 + for trackMass := 0.0; trackMass < uniforms; trackMass += uniforms / 50 { + var mud mud + mass := 0.0 + mud.setTrackMass(trackMass) + for i := 0; i < uniforms; i++ { + area, l, r := rnd.Float64(), rnd.Float64(), rnd.Float64() + mud.add(l, r, area) + mass += area + l, u, ok := mud.approxInvCumulativeSum() + inv, ok2 := mud.invCumulativeSum(trackMass) + + if mass < trackMass { + if ok { + t.Errorf("approx(%g) = [%g, %g), but mass = %g", trackMass, l, u, mass) + } + if ok2 { + t.Errorf("exact(%g) = %g, but mass = %g", trackMass, inv, mass) + } + } else { + if !ok { + t.Errorf("approx(%g) failed, but mass = %g", trackMass, mass) + } + if !ok2 { + t.Errorf("exact(%g) failed, but mass = %g", trackMass, mass) + } + if ok && ok2 && !(l <= inv && inv < u) { + t.Errorf("inverse(%g) = %g, not ∈ [%g, %g)", trackMass, inv, l, u) + } + } + } + } +} diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305.go similarity index 97% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305.go rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305.go index e28f49d12fcbe..80789a1212293 100644 --- a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD as specified in RFC 7539. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" +package chacha20poly1305 import ( "crypto/cipher" diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go similarity index 100% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_generic.go similarity index 96% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305_generic.go index 56e4f0e782a74..a77ab35f676d2 100644 --- a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +++ b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -7,8 +7,8 @@ package chacha20poly1305 import ( "encoding/binary" - "golang_org/x/crypto/internal/chacha20" - "golang_org/x/crypto/poly1305" + "internal/x/crypto/internal/chacha20" + "internal/x/crypto/poly1305" ) func roundTo16(n int) int { diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go similarity index 100% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go rename to 
src/internal/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_test.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_test.go rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305_test.go diff --git a/src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go b/src/internal/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go rename to src/internal/x/crypto/chacha20poly1305/chacha20poly1305_vectors_test.go diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/asn1.go b/src/internal/x/crypto/cryptobyte/asn1.go similarity index 99% rename from src/vendor/golang_org/x/crypto/cryptobyte/asn1.go rename to src/internal/x/crypto/cryptobyte/asn1.go index 08314b47d1c56..2d40680ddd3b7 100644 --- a/src/vendor/golang_org/x/crypto/cryptobyte/asn1.go +++ b/src/internal/x/crypto/cryptobyte/asn1.go @@ -11,7 +11,7 @@ import ( "reflect" "time" - "golang_org/x/crypto/cryptobyte/asn1" + "internal/x/crypto/cryptobyte/asn1" ) // This file contains ASN.1-related methods for String and Builder. diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/asn1/asn1.go b/src/internal/x/crypto/cryptobyte/asn1/asn1.go similarity index 96% rename from src/vendor/golang_org/x/crypto/cryptobyte/asn1/asn1.go rename to src/internal/x/crypto/cryptobyte/asn1/asn1.go index cda8e3edfd5ea..90ef6a241de2f 100644 --- a/src/vendor/golang_org/x/crypto/cryptobyte/asn1/asn1.go +++ b/src/internal/x/crypto/cryptobyte/asn1/asn1.go @@ -4,7 +4,7 @@ // Package asn1 contains supporting types for parsing and building ASN.1 // messages with the cryptobyte package. 
-package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" +package asn1 // Tag represents an ASN.1 identifier octet, consisting of a tag number // (indicating a type) and class (such as context-specific or constructed). diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/asn1_test.go b/src/internal/x/crypto/cryptobyte/asn1_test.go similarity index 99% rename from src/vendor/golang_org/x/crypto/cryptobyte/asn1_test.go rename to src/internal/x/crypto/cryptobyte/asn1_test.go index f7762880ca84b..ca28e3bcfb256 100644 --- a/src/vendor/golang_org/x/crypto/cryptobyte/asn1_test.go +++ b/src/internal/x/crypto/cryptobyte/asn1_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "golang_org/x/crypto/cryptobyte/asn1" + "internal/x/crypto/cryptobyte/asn1" ) type readASN1Test struct { diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/builder.go b/src/internal/x/crypto/cryptobyte/builder.go similarity index 100% rename from src/vendor/golang_org/x/crypto/cryptobyte/builder.go rename to src/internal/x/crypto/cryptobyte/builder.go diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/cryptobyte_test.go b/src/internal/x/crypto/cryptobyte/cryptobyte_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/cryptobyte/cryptobyte_test.go rename to src/internal/x/crypto/cryptobyte/cryptobyte_test.go diff --git a/src/vendor/golang_org/x/crypto/cryptobyte/example_test.go b/src/internal/x/crypto/cryptobyte/example_test.go similarity index 98% rename from src/vendor/golang_org/x/crypto/cryptobyte/example_test.go rename to src/internal/x/crypto/cryptobyte/example_test.go index 056c23014a14f..5b50025318f5b 100644 --- a/src/vendor/golang_org/x/crypto/cryptobyte/example_test.go +++ b/src/internal/x/crypto/cryptobyte/example_test.go @@ -8,8 +8,8 @@ import ( "errors" "fmt" - "golang_org/x/crypto/cryptobyte" - "golang_org/x/crypto/cryptobyte/asn1" + "internal/x/crypto/cryptobyte" + "internal/x/crypto/cryptobyte/asn1" ) func ExampleString_lengthPrefixed() { diff --git 
a/src/vendor/golang_org/x/crypto/cryptobyte/string.go b/src/internal/x/crypto/cryptobyte/string.go similarity index 98% rename from src/vendor/golang_org/x/crypto/cryptobyte/string.go rename to src/internal/x/crypto/cryptobyte/string.go index 39bf98aeead81..bd2ed2e207c6e 100644 --- a/src/vendor/golang_org/x/crypto/cryptobyte/string.go +++ b/src/internal/x/crypto/cryptobyte/string.go @@ -15,7 +15,7 @@ // // See the documentation and examples for the Builder and String types to get // started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" +package cryptobyte // String represents a string of bytes. It provides methods for parsing // fixed-length and length-prefixed values from it. diff --git a/src/vendor/golang_org/x/crypto/curve25519/const_amd64.h b/src/internal/x/crypto/curve25519/const_amd64.h similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/const_amd64.h rename to src/internal/x/crypto/curve25519/const_amd64.h diff --git a/src/vendor/golang_org/x/crypto/curve25519/const_amd64.s b/src/internal/x/crypto/curve25519/const_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/const_amd64.s rename to src/internal/x/crypto/curve25519/const_amd64.s diff --git a/src/vendor/golang_org/x/crypto/curve25519/cswap_amd64.s b/src/internal/x/crypto/curve25519/cswap_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/cswap_amd64.s rename to src/internal/x/crypto/curve25519/cswap_amd64.s diff --git a/src/vendor/golang_org/x/crypto/curve25519/curve25519.go b/src/internal/x/crypto/curve25519/curve25519.go similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/curve25519.go rename to src/internal/x/crypto/curve25519/curve25519.go diff --git a/src/vendor/golang_org/x/crypto/curve25519/curve25519_test.go b/src/internal/x/crypto/curve25519/curve25519_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/curve25519_test.go rename to 
src/internal/x/crypto/curve25519/curve25519_test.go diff --git a/src/vendor/golang_org/x/crypto/curve25519/doc.go b/src/internal/x/crypto/curve25519/doc.go similarity index 94% rename from src/vendor/golang_org/x/crypto/curve25519/doc.go rename to src/internal/x/crypto/curve25519/doc.go index da9b10d9c1ffd..076a8d4f10aa3 100644 --- a/src/vendor/golang_org/x/crypto/curve25519/doc.go +++ b/src/internal/x/crypto/curve25519/doc.go @@ -4,7 +4,7 @@ // Package curve25519 provides an implementation of scalar multiplication on // the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html -package curve25519 // import "golang.org/x/crypto/curve25519" +package curve25519 // basePoint is the x coordinate of the generator of the curve. var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} diff --git a/src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s b/src/internal/x/crypto/curve25519/freeze_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/freeze_amd64.s rename to src/internal/x/crypto/curve25519/freeze_amd64.s diff --git a/src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s b/src/internal/x/crypto/curve25519/ladderstep_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/ladderstep_amd64.s rename to src/internal/x/crypto/curve25519/ladderstep_amd64.s diff --git a/src/vendor/golang_org/x/crypto/curve25519/mont25519_amd64.go b/src/internal/x/crypto/curve25519/mont25519_amd64.go similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/mont25519_amd64.go rename to src/internal/x/crypto/curve25519/mont25519_amd64.go diff --git a/src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s b/src/internal/x/crypto/curve25519/mul_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/mul_amd64.s rename to src/internal/x/crypto/curve25519/mul_amd64.s diff --git 
a/src/vendor/golang_org/x/crypto/curve25519/square_amd64.s b/src/internal/x/crypto/curve25519/square_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/curve25519/square_amd64.s rename to src/internal/x/crypto/curve25519/square_amd64.s diff --git a/src/internal/x/crypto/hkdf/example_test.go b/src/internal/x/crypto/hkdf/example_test.go new file mode 100644 index 0000000000000..3b68a408103b9 --- /dev/null +++ b/src/internal/x/crypto/hkdf/example_test.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hkdf_test + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "fmt" + "io" + + "internal/x/crypto/hkdf" +) + +// Usage example that expands one master secret into three other +// cryptographically secure keys. +func Example_usage() { + // Underlying hash function for HMAC. + hash := sha256.New + + // Cryptographically secure master secret. + secret := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this. + + // Non-secret salt, optional (can be nil). + // Recommended: hash-length random value. + salt := make([]byte, hash().Size()) + if _, err := rand.Read(salt); err != nil { + panic(err) + } + + // Non-secret context info, optional (can be nil). + info := []byte("hkdf example") + + // Generate three 128-bit derived keys. 
+ hkdf := hkdf.New(hash, secret, salt, info) + + var keys [][]byte + for i := 0; i < 3; i++ { + key := make([]byte, 16) + if _, err := io.ReadFull(hkdf, key); err != nil { + panic(err) + } + keys = append(keys, key) + } + + for i := range keys { + fmt.Printf("Key #%d: %v\n", i+1, !bytes.Equal(keys[i], make([]byte, 16))) + } + + // Output: + // Key #1: true + // Key #2: true + // Key #3: true +} diff --git a/src/internal/x/crypto/hkdf/hkdf.go b/src/internal/x/crypto/hkdf/hkdf.go new file mode 100644 index 0000000000000..c9077658e64b7 --- /dev/null +++ b/src/internal/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. 
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. 
+func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/src/internal/x/crypto/hkdf/hkdf_test.go b/src/internal/x/crypto/hkdf/hkdf_test.go new file mode 100644 index 0000000000000..ea575772ef2f9 --- /dev/null +++ b/src/internal/x/crypto/hkdf/hkdf_test.go @@ -0,0 +1,449 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package hkdf + +import ( + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" + "io" + "testing" +) + +type hkdfTest struct { + hash func() hash.Hash + master []byte + salt []byte + prk []byte + info []byte + out []byte +} + +var hkdfTests = []hkdfTest{ + // Tests from RFC 5869 + { + sha256.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, + }, + []byte{ + 0x07, 0x77, 0x09, 0x36, 0x2c, 0x2e, 0x32, 0xdf, + 0x0d, 0xdc, 0x3f, 0x0d, 0xc4, 0x7b, 0xba, 0x63, + 0x90, 0xb6, 0xc7, 0x3b, 0xb5, 0x0f, 0x9c, 0x31, + 0x22, 0xec, 0x84, 0x4a, 0xd7, 0xc2, 0xb3, 0xe5, + }, + []byte{ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, + }, + []byte{ + 0x3c, 0xb2, 0x5f, 0x25, 0xfa, 0xac, 0xd5, 0x7a, + 0x90, 0x43, 0x4f, 0x64, 0xd0, 0x36, 0x2f, 0x2a, + 0x2d, 0x2d, 0x0a, 0x90, 0xcf, 0x1a, 0x5a, 0x4c, + 0x5d, 0xb0, 0x2d, 0x56, 0xec, 0xc4, 0xc5, 0xbf, + 0x34, 0x00, 0x72, 0x08, 0xd5, 0xb8, 0x87, 0x18, + 0x58, 0x65, + }, + }, + { + sha256.New, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 
0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + }, + []byte{ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + }, + []byte{ + 0x06, 0xa6, 0xb8, 0x8c, 0x58, 0x53, 0x36, 0x1a, + 0x06, 0x10, 0x4c, 0x9c, 0xeb, 0x35, 0xb4, 0x5c, + 0xef, 0x76, 0x00, 0x14, 0x90, 0x46, 0x71, 0x01, + 0x4a, 0x19, 0x3f, 0x40, 0xc1, 0x5f, 0xc2, 0x44, + }, + []byte{ + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + }, + []byte{ + 0xb1, 0x1e, 0x39, 0x8d, 0xc8, 0x03, 0x27, 0xa1, + 0xc8, 0xe7, 0xf7, 0x8c, 0x59, 0x6a, 0x49, 0x34, + 0x4f, 0x01, 0x2e, 0xda, 0x2d, 0x4e, 0xfa, 0xd8, + 0xa0, 0x50, 0xcc, 0x4c, 0x19, 0xaf, 0xa9, 0x7c, + 0x59, 0x04, 0x5a, 0x99, 0xca, 0xc7, 0x82, 0x72, + 0x71, 0xcb, 0x41, 0xc6, 0x5e, 0x59, 0x0e, 0x09, + 0xda, 0x32, 0x75, 0x60, 0x0c, 0x2f, 0x09, 0xb8, + 0x36, 0x77, 0x93, 0xa9, 0xac, 0xa3, 0xdb, 0x71, + 0xcc, 0x30, 0xc5, 0x81, 0x79, 0xec, 0x3e, 0x87, + 0xc1, 0x4c, 0x01, 0xd5, 0xc1, 0xf3, 0x43, 0x4f, + 0x1d, 0x87, + 
}, + }, + { + sha256.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{}, + []byte{ + 0x19, 0xef, 0x24, 0xa3, 0x2c, 0x71, 0x7b, 0x16, + 0x7f, 0x33, 0xa9, 0x1d, 0x6f, 0x64, 0x8b, 0xdf, + 0x96, 0x59, 0x67, 0x76, 0xaf, 0xdb, 0x63, 0x77, + 0xac, 0x43, 0x4c, 0x1c, 0x29, 0x3c, 0xcb, 0x04, + }, + []byte{}, + []byte{ + 0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f, + 0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31, + 0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e, + 0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d, + 0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a, + 0x96, 0xc8, + }, + }, + { + sha256.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + nil, + []byte{ + 0x19, 0xef, 0x24, 0xa3, 0x2c, 0x71, 0x7b, 0x16, + 0x7f, 0x33, 0xa9, 0x1d, 0x6f, 0x64, 0x8b, 0xdf, + 0x96, 0x59, 0x67, 0x76, 0xaf, 0xdb, 0x63, 0x77, + 0xac, 0x43, 0x4c, 0x1c, 0x29, 0x3c, 0xcb, 0x04, + }, + nil, + []byte{ + 0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f, + 0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31, + 0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e, + 0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d, + 0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a, + 0x96, 0xc8, + }, + }, + { + sha1.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, + }, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, + }, + []byte{ + 0x9b, 0x6c, 0x18, 0xc4, 0x32, 0xa7, 0xbf, 0x8f, + 0x0e, 0x71, 0xc8, 0xeb, 0x88, 0xf4, 0xb3, 0x0b, + 0xaa, 0x2b, 0xa2, 0x43, + }, + []byte{ + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, + }, + []byte{ + 0x08, 0x5a, 0x01, 0xea, 0x1b, 0x10, 0xf3, 0x69, + 0x33, 0x06, 0x8b, 0x56, 0xef, 0xa5, 0xad, 0x81, + 0xa4, 0xf1, 0x4b, 0x82, 0x2f, 0x5b, 0x09, 0x15, + 0x68, 0xa9, 0xcd, 0xd4, 0xf1, 
0x55, 0xfd, 0xa2, + 0xc2, 0x2e, 0x42, 0x24, 0x78, 0xd3, 0x05, 0xf3, + 0xf8, 0x96, + }, + }, + { + sha1.New, + []byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + }, + []byte{ + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + }, + []byte{ + 0x8a, 0xda, 0xe0, 0x9a, 0x2a, 0x30, 0x70, 0x59, + 0x47, 0x8d, 0x30, 0x9b, 0x26, 0xc4, 0x11, 0x5a, + 0x22, 0x4c, 0xfa, 0xf6, + }, + []byte{ + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + }, + []byte{ + 0x0b, 0xd7, 0x70, 0xa7, 0x4d, 0x11, 0x60, 0xf7, + 0xc9, 0xf1, 0x2c, 0xd5, 0x91, 0x2a, 0x06, 0xeb, + 0xff, 0x6a, 0xdc, 0xae, 0x89, 0x9d, 0x92, 0x19, + 0x1f, 0xe4, 0x30, 0x56, 0x73, 0xba, 0x2f, 
0xfe, + 0x8f, 0xa3, 0xf1, 0xa4, 0xe5, 0xad, 0x79, 0xf3, + 0xf3, 0x34, 0xb3, 0xb2, 0x02, 0xb2, 0x17, 0x3c, + 0x48, 0x6e, 0xa3, 0x7c, 0xe3, 0xd3, 0x97, 0xed, + 0x03, 0x4c, 0x7f, 0x9d, 0xfe, 0xb1, 0x5c, 0x5e, + 0x92, 0x73, 0x36, 0xd0, 0x44, 0x1f, 0x4c, 0x43, + 0x00, 0xe2, 0xcf, 0xf0, 0xd0, 0x90, 0x0b, 0x52, + 0xd3, 0xb4, + }, + }, + { + sha1.New, + []byte{ + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + }, + []byte{}, + []byte{ + 0xda, 0x8c, 0x8a, 0x73, 0xc7, 0xfa, 0x77, 0x28, + 0x8e, 0xc6, 0xf5, 0xe7, 0xc2, 0x97, 0x78, 0x6a, + 0xa0, 0xd3, 0x2d, 0x01, + }, + []byte{}, + []byte{ + 0x0a, 0xc1, 0xaf, 0x70, 0x02, 0xb3, 0xd7, 0x61, + 0xd1, 0xe5, 0x52, 0x98, 0xda, 0x9d, 0x05, 0x06, + 0xb9, 0xae, 0x52, 0x05, 0x72, 0x20, 0xa3, 0x06, + 0xe0, 0x7b, 0x6b, 0x87, 0xe8, 0xdf, 0x21, 0xd0, + 0xea, 0x00, 0x03, 0x3d, 0xe0, 0x39, 0x84, 0xd3, + 0x49, 0x18, + }, + }, + { + sha1.New, + []byte{ + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + }, + nil, + []byte{ + 0x2a, 0xdc, 0xca, 0xda, 0x18, 0x77, 0x9e, 0x7c, + 0x20, 0x77, 0xad, 0x2e, 0xb1, 0x9d, 0x3f, 0x3e, + 0x73, 0x13, 0x85, 0xdd, + }, + nil, + []byte{ + 0x2c, 0x91, 0x11, 0x72, 0x04, 0xd7, 0x45, 0xf3, + 0x50, 0x0d, 0x63, 0x6a, 0x62, 0xf6, 0x4f, 0x0a, + 0xb3, 0xba, 0xe5, 0x48, 0xaa, 0x53, 0xd4, 0x23, + 0xb0, 0xd1, 0xf2, 0x7e, 0xbb, 0xa6, 0xf5, 0xe5, + 0x67, 0x3a, 0x08, 0x1d, 0x70, 0xcc, 0xe7, 0xac, + 0xfc, 0x48, + }, + }, +} + +func TestHKDF(t *testing.T) { + for i, tt := range hkdfTests { + prk := Extract(tt.hash, tt.master, tt.salt) + if !bytes.Equal(prk, tt.prk) { + t.Errorf("test %d: incorrect PRK: have %v, need %v.", i, prk, tt.prk) + } + + hkdf := New(tt.hash, tt.master, tt.salt, tt.info) + out := make([]byte, len(tt.out)) + + n, err := io.ReadFull(hkdf, out) + if n != len(tt.out) || err != nil { + t.Errorf("test %d: not enough output bytes: %d.", i, 
n) + } + + if !bytes.Equal(out, tt.out) { + t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out) + } + + hkdf = Expand(tt.hash, prk, tt.info) + + n, err = io.ReadFull(hkdf, out) + if n != len(tt.out) || err != nil { + t.Errorf("test %d: not enough output bytes from Expand: %d.", i, n) + } + + if !bytes.Equal(out, tt.out) { + t.Errorf("test %d: incorrect output from Expand: have %v, need %v.", i, out, tt.out) + } + } +} + +func TestHKDFMultiRead(t *testing.T) { + for i, tt := range hkdfTests { + hkdf := New(tt.hash, tt.master, tt.salt, tt.info) + out := make([]byte, len(tt.out)) + + for b := 0; b < len(tt.out); b++ { + n, err := io.ReadFull(hkdf, out[b:b+1]) + if n != 1 || err != nil { + t.Errorf("test %d.%d: not enough output bytes: have %d, need %d .", i, b, n, len(tt.out)) + } + } + + if !bytes.Equal(out, tt.out) { + t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out) + } + } +} + +func TestHKDFLimit(t *testing.T) { + hash := sha1.New + master := []byte{0x00, 0x01, 0x02, 0x03} + info := []byte{} + + hkdf := New(hash, master, nil, info) + limit := hash().Size() * 255 + out := make([]byte, limit) + + // The maximum output bytes should be extractable + n, err := io.ReadFull(hkdf, out) + if n != limit || err != nil { + t.Errorf("not enough output bytes: %d, %v.", n, err) + } + + // Reading one more should fail + n, err = io.ReadFull(hkdf, make([]byte, 1)) + if n > 0 || err == nil { + t.Errorf("key expansion overflowed: n = %d, err = %v", n, err) + } +} + +func Benchmark16ByteMD5Single(b *testing.B) { + benchmarkHKDFSingle(md5.New, 16, b) +} + +func Benchmark20ByteSHA1Single(b *testing.B) { + benchmarkHKDFSingle(sha1.New, 20, b) +} + +func Benchmark32ByteSHA256Single(b *testing.B) { + benchmarkHKDFSingle(sha256.New, 32, b) +} + +func Benchmark64ByteSHA512Single(b *testing.B) { + benchmarkHKDFSingle(sha512.New, 64, b) +} + +func Benchmark8ByteMD5Stream(b *testing.B) { + benchmarkHKDFStream(md5.New, 8, b) +} + +func 
Benchmark16ByteMD5Stream(b *testing.B) { + benchmarkHKDFStream(md5.New, 16, b) +} + +func Benchmark8ByteSHA1Stream(b *testing.B) { + benchmarkHKDFStream(sha1.New, 8, b) +} + +func Benchmark20ByteSHA1Stream(b *testing.B) { + benchmarkHKDFStream(sha1.New, 20, b) +} + +func Benchmark8ByteSHA256Stream(b *testing.B) { + benchmarkHKDFStream(sha256.New, 8, b) +} + +func Benchmark32ByteSHA256Stream(b *testing.B) { + benchmarkHKDFStream(sha256.New, 32, b) +} + +func Benchmark8ByteSHA512Stream(b *testing.B) { + benchmarkHKDFStream(sha512.New, 8, b) +} + +func Benchmark64ByteSHA512Stream(b *testing.B) { + benchmarkHKDFStream(sha512.New, 64, b) +} + +func benchmarkHKDFSingle(hasher func() hash.Hash, block int, b *testing.B) { + master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07} + salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17} + info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27} + out := make([]byte, block) + + b.SetBytes(int64(block)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + hkdf := New(hasher, master, salt, info) + io.ReadFull(hkdf, out) + } +} + +func benchmarkHKDFStream(hasher func() hash.Hash, block int, b *testing.B) { + master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07} + salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17} + info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27} + out := make([]byte, block) + + b.SetBytes(int64(block)) + b.ResetTimer() + + hkdf := New(hasher, master, salt, info) + for i := 0; i < b.N; i++ { + _, err := io.ReadFull(hkdf, out) + if err != nil { + hkdf = New(hasher, master, salt, info) + i-- + } + } +} diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/asm_s390x.s b/src/internal/x/crypto/internal/chacha20/asm_s390x.s similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/asm_s390x.s rename to src/internal/x/crypto/internal/chacha20/asm_s390x.s diff --git 
a/src/vendor/golang_org/x/crypto/internal/chacha20/chacha_generic.go b/src/internal/x/crypto/internal/chacha20/chacha_generic.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/chacha_generic.go rename to src/internal/x/crypto/internal/chacha20/chacha_generic.go diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/chacha_noasm.go b/src/internal/x/crypto/internal/chacha20/chacha_noasm.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/chacha_noasm.go rename to src/internal/x/crypto/internal/chacha20/chacha_noasm.go diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/chacha_s390x.go b/src/internal/x/crypto/internal/chacha20/chacha_s390x.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/chacha_s390x.go rename to src/internal/x/crypto/internal/chacha20/chacha_s390x.go diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/chacha_test.go b/src/internal/x/crypto/internal/chacha20/chacha_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/chacha_test.go rename to src/internal/x/crypto/internal/chacha20/chacha_test.go diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/vectors_test.go b/src/internal/x/crypto/internal/chacha20/vectors_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/vectors_test.go rename to src/internal/x/crypto/internal/chacha20/vectors_test.go diff --git a/src/vendor/golang_org/x/crypto/internal/chacha20/xor.go b/src/internal/x/crypto/internal/chacha20/xor.go similarity index 100% rename from src/vendor/golang_org/x/crypto/internal/chacha20/xor.go rename to src/internal/x/crypto/internal/chacha20/xor.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/poly1305.go b/src/internal/x/crypto/poly1305/poly1305.go similarity index 95% rename from src/vendor/golang_org/x/crypto/poly1305/poly1305.go rename to 
src/internal/x/crypto/poly1305/poly1305.go index f562fa5712ba0..6d6be9a6406a2 100644 --- a/src/vendor/golang_org/x/crypto/poly1305/poly1305.go +++ b/src/internal/x/crypto/poly1305/poly1305.go @@ -17,7 +17,7 @@ used with a fixed key in order to generate one-time keys from an nonce. However, in this package AES isn't used and the one-time key is specified directly. */ -package poly1305 // import "golang.org/x/crypto/poly1305" +package poly1305 import "crypto/subtle" diff --git a/src/vendor/golang_org/x/crypto/poly1305/poly1305_test.go b/src/internal/x/crypto/poly1305/poly1305_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/poly1305_test.go rename to src/internal/x/crypto/poly1305/poly1305_test.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_amd64.go b/src/internal/x/crypto/poly1305/sum_amd64.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_amd64.go rename to src/internal/x/crypto/poly1305/sum_amd64.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_amd64.s b/src/internal/x/crypto/poly1305/sum_amd64.s similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_amd64.s rename to src/internal/x/crypto/poly1305/sum_amd64.s diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_arm.go b/src/internal/x/crypto/poly1305/sum_arm.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_arm.go rename to src/internal/x/crypto/poly1305/sum_arm.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_arm.s b/src/internal/x/crypto/poly1305/sum_arm.s similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_arm.s rename to src/internal/x/crypto/poly1305/sum_arm.s diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_noasm.go b/src/internal/x/crypto/poly1305/sum_noasm.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_noasm.go rename to src/internal/x/crypto/poly1305/sum_noasm.go diff --git 
a/src/vendor/golang_org/x/crypto/poly1305/sum_ref.go b/src/internal/x/crypto/poly1305/sum_ref.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_ref.go rename to src/internal/x/crypto/poly1305/sum_ref.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_s390x.go b/src/internal/x/crypto/poly1305/sum_s390x.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_s390x.go rename to src/internal/x/crypto/poly1305/sum_s390x.go diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_s390x.s b/src/internal/x/crypto/poly1305/sum_s390x.s similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_s390x.s rename to src/internal/x/crypto/poly1305/sum_s390x.s diff --git a/src/vendor/golang_org/x/crypto/poly1305/sum_vmsl_s390x.s b/src/internal/x/crypto/poly1305/sum_vmsl_s390x.s similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/sum_vmsl_s390x.s rename to src/internal/x/crypto/poly1305/sum_vmsl_s390x.s diff --git a/src/vendor/golang_org/x/crypto/poly1305/vectors_test.go b/src/internal/x/crypto/poly1305/vectors_test.go similarity index 100% rename from src/vendor/golang_org/x/crypto/poly1305/vectors_test.go rename to src/internal/x/crypto/poly1305/vectors_test.go diff --git a/src/internal/x/fiximports.bash b/src/internal/x/fiximports.bash new file mode 100755 index 0000000000000..ec72643b63a3a --- /dev/null +++ b/src/internal/x/fiximports.bash @@ -0,0 +1,6 @@ +#!/bin/bash + +# To fix import paths when importing new snapshots from the golang.org/x +# repositories, run this script in the current directory. 
+ +sed -i 's,"golang\.org/x,"internal/x,g' $(grep -lr 'golang.org') diff --git a/src/vendor/golang_org/x/net/dns/dnsmessage/example_test.go b/src/internal/x/net/dns/dnsmessage/example_test.go similarity index 98% rename from src/vendor/golang_org/x/net/dns/dnsmessage/example_test.go rename to src/internal/x/net/dns/dnsmessage/example_test.go index a1bb5b7b1b88e..8453c230483cb 100644 --- a/src/vendor/golang_org/x/net/dns/dnsmessage/example_test.go +++ b/src/internal/x/net/dns/dnsmessage/example_test.go @@ -9,7 +9,7 @@ import ( "net" "strings" - "golang_org/x/net/dns/dnsmessage" + "internal/x/net/dns/dnsmessage" ) func mustNewName(name string) dnsmessage.Name { diff --git a/src/vendor/golang_org/x/net/dns/dnsmessage/message.go b/src/internal/x/net/dns/dnsmessage/message.go similarity index 100% rename from src/vendor/golang_org/x/net/dns/dnsmessage/message.go rename to src/internal/x/net/dns/dnsmessage/message.go diff --git a/src/vendor/golang_org/x/net/dns/dnsmessage/message_test.go b/src/internal/x/net/dns/dnsmessage/message_test.go similarity index 100% rename from src/vendor/golang_org/x/net/dns/dnsmessage/message_test.go rename to src/internal/x/net/dns/dnsmessage/message_test.go diff --git a/src/vendor/golang_org/x/net/http/httpguts/guts.go b/src/internal/x/net/http/httpguts/guts.go similarity index 100% rename from src/vendor/golang_org/x/net/http/httpguts/guts.go rename to src/internal/x/net/http/httpguts/guts.go diff --git a/src/vendor/golang_org/x/net/http/httpguts/httplex.go b/src/internal/x/net/http/httpguts/httplex.go similarity index 99% rename from src/vendor/golang_org/x/net/http/httpguts/httplex.go rename to src/internal/x/net/http/httpguts/httplex.go index 9337435174e67..7f3cdd8bd255c 100644 --- a/src/vendor/golang_org/x/net/http/httpguts/httplex.go +++ b/src/internal/x/net/http/httpguts/httplex.go @@ -9,7 +9,7 @@ import ( "strings" "unicode/utf8" - "golang_org/x/net/idna" + "internal/x/net/idna" ) var isTokenTable = [127]bool{ diff --git 
a/src/vendor/golang_org/x/net/http/httpguts/httplex_test.go b/src/internal/x/net/http/httpguts/httplex_test.go similarity index 100% rename from src/vendor/golang_org/x/net/http/httpguts/httplex_test.go rename to src/internal/x/net/http/httpguts/httplex_test.go diff --git a/src/vendor/golang_org/x/net/http/httpproxy/export_test.go b/src/internal/x/net/http/httpproxy/export_test.go similarity index 100% rename from src/vendor/golang_org/x/net/http/httpproxy/export_test.go rename to src/internal/x/net/http/httpproxy/export_test.go diff --git a/src/vendor/golang_org/x/net/http/httpproxy/proxy.go b/src/internal/x/net/http/httpproxy/proxy.go similarity index 99% rename from src/vendor/golang_org/x/net/http/httpproxy/proxy.go rename to src/internal/x/net/http/httpproxy/proxy.go index 0409f4340c373..d3947841392e6 100644 --- a/src/vendor/golang_org/x/net/http/httpproxy/proxy.go +++ b/src/internal/x/net/http/httpproxy/proxy.go @@ -19,7 +19,7 @@ import ( "strings" "unicode/utf8" - "golang_org/x/net/idna" + "internal/x/net/idna" ) // Config holds configuration for HTTP proxy settings. See diff --git a/src/vendor/golang_org/x/net/http/httpproxy/proxy_test.go b/src/internal/x/net/http/httpproxy/proxy_test.go similarity index 99% rename from src/vendor/golang_org/x/net/http/httpproxy/proxy_test.go rename to src/internal/x/net/http/httpproxy/proxy_test.go index 8791f64bcdc2c..cf0589dba97ec 100644 --- a/src/vendor/golang_org/x/net/http/httpproxy/proxy_test.go +++ b/src/internal/x/net/http/httpproxy/proxy_test.go @@ -13,7 +13,7 @@ import ( "strings" "testing" - "golang_org/x/net/http/httpproxy" + "internal/x/net/http/httpproxy" ) // setHelper calls t.Helper() for Go 1.9+ (see go19_test.go) and does nothing otherwise. 
diff --git a/src/vendor/golang_org/x/net/http2/hpack/encode.go b/src/internal/x/net/http2/hpack/encode.go similarity index 100% rename from src/vendor/golang_org/x/net/http2/hpack/encode.go rename to src/internal/x/net/http2/hpack/encode.go diff --git a/src/vendor/golang_org/x/net/http2/hpack/encode_test.go b/src/internal/x/net/http2/hpack/encode_test.go similarity index 100% rename from src/vendor/golang_org/x/net/http2/hpack/encode_test.go rename to src/internal/x/net/http2/hpack/encode_test.go diff --git a/src/vendor/golang_org/x/net/http2/hpack/hpack.go b/src/internal/x/net/http2/hpack/hpack.go similarity index 97% rename from src/vendor/golang_org/x/net/http2/hpack/hpack.go rename to src/internal/x/net/http2/hpack/hpack.go index 166788ceec5e8..85f18a2b0a861 100644 --- a/src/vendor/golang_org/x/net/http2/hpack/hpack.go +++ b/src/internal/x/net/http2/hpack/hpack.go @@ -92,6 +92,8 @@ type Decoder struct { // saveBuf is previous data passed to Write which we weren't able // to fully parse before. Unlike buf, we own this data. saveBuf bytes.Buffer + + firstField bool // processing the first field of the header block } // NewDecoder returns a new decoder with the provided maximum dynamic @@ -101,6 +103,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod d := &Decoder{ emit: emitFunc, emitEnabled: true, + firstField: true, } d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize @@ -226,11 +229,15 @@ func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { return hf, nil } +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. 
func (d *Decoder) Close() error { if d.saveBuf.Len() > 0 { d.saveBuf.Reset() return DecodingError{errors.New("truncated headers")} } + d.firstField = true return nil } @@ -266,6 +273,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { d.saveBuf.Write(d.buf) return len(p), nil } + d.firstField = false if err != nil { break } @@ -391,7 +399,7 @@ func (d *Decoder) callEmit(hf HeaderField) error { func (d *Decoder) parseDynamicTableSizeUpdate() error { // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the // beginning of the first header block following the change to the dynamic table size. - if d.dynTab.size > 0 { + if !d.firstField && d.dynTab.size > 0 { return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} } diff --git a/src/vendor/golang_org/x/net/http2/hpack/hpack_test.go b/src/internal/x/net/http2/hpack/hpack_test.go similarity index 98% rename from src/vendor/golang_org/x/net/http2/hpack/hpack_test.go rename to src/internal/x/net/http2/hpack/hpack_test.go index 3f2227442a985..a361a2a7c2ece 100644 --- a/src/vendor/golang_org/x/net/http2/hpack/hpack_test.go +++ b/src/internal/x/net/http2/hpack/hpack_test.go @@ -748,14 +748,22 @@ func TestDynamicSizeUpdate(t *testing.T) { enc.SetMaxDynamicTableSize(255) enc.WriteField(HeaderField{Name: "foo", Value: "bar"}) - d := NewDecoder(4096, nil) - _, err := d.DecodeFull(buf.Bytes()) + d := NewDecoder(4096, func(_ HeaderField) {}) + _, err := d.Write(buf.Bytes()) + if err != nil { + t.Fatalf("unexpected error: got = %v", err) + } + + d.Close() + + // Start a new header + _, err = d.Write(buf.Bytes()) if err != nil { t.Fatalf("unexpected error: got = %v", err) } // must fail since the dynamic table update must be at the beginning - _, err = d.DecodeFull(buf.Bytes()) + _, err = d.Write(buf.Bytes()) if err == nil { t.Fatalf("dynamic table size update not at the beginning of a header block") } diff --git 
a/src/vendor/golang_org/x/net/http2/hpack/huffman.go b/src/internal/x/net/http2/hpack/huffman.go similarity index 100% rename from src/vendor/golang_org/x/net/http2/hpack/huffman.go rename to src/internal/x/net/http2/hpack/huffman.go diff --git a/src/vendor/golang_org/x/net/http2/hpack/tables.go b/src/internal/x/net/http2/hpack/tables.go similarity index 100% rename from src/vendor/golang_org/x/net/http2/hpack/tables.go rename to src/internal/x/net/http2/hpack/tables.go diff --git a/src/vendor/golang_org/x/net/http2/hpack/tables_test.go b/src/internal/x/net/http2/hpack/tables_test.go similarity index 100% rename from src/vendor/golang_org/x/net/http2/hpack/tables_test.go rename to src/internal/x/net/http2/hpack/tables_test.go diff --git a/src/vendor/golang_org/x/net/idna/idna.go b/src/internal/x/net/idna/idna.go similarity index 99% rename from src/vendor/golang_org/x/net/idna/idna.go rename to src/internal/x/net/idna/idna.go index 9fd0334cd9df0..7f2471e70e7aa 100644 --- a/src/vendor/golang_org/x/net/idna/idna.go +++ b/src/internal/x/net/idna/idna.go @@ -13,16 +13,16 @@ // UTS #46 is defined in http://www.unicode.org/reports/tr46. // See http://unicode.org/cldr/utility/idna.jsp for a visualization of the // differences between these two standards. 
-package idna // import "golang_org/x/text/internal/export/idna" +package idna import ( "fmt" "strings" "unicode/utf8" - "golang_org/x/text/secure/bidirule" - "golang_org/x/text/unicode/bidi" - "golang_org/x/text/unicode/norm" + "internal/x/text/secure/bidirule" + "internal/x/text/unicode/bidi" + "internal/x/text/unicode/norm" ) // NOTE: Unlike common practice in Go APIs, the functions will return a diff --git a/src/vendor/golang_org/x/net/idna/punycode.go b/src/internal/x/net/idna/punycode.go similarity index 100% rename from src/vendor/golang_org/x/net/idna/punycode.go rename to src/internal/x/net/idna/punycode.go diff --git a/src/vendor/golang_org/x/net/idna/punycode_test.go b/src/internal/x/net/idna/punycode_test.go similarity index 100% rename from src/vendor/golang_org/x/net/idna/punycode_test.go rename to src/internal/x/net/idna/punycode_test.go diff --git a/src/vendor/golang_org/x/net/idna/tables.go b/src/internal/x/net/idna/tables.go similarity index 99% rename from src/vendor/golang_org/x/net/idna/tables.go rename to src/internal/x/net/idna/tables.go index a470c5a3e2588..41cf9c13d2a5b 100644 --- a/src/vendor/golang_org/x/net/idna/tables.go +++ b/src/internal/x/net/idna/tables.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// Code generated by running "go generate" in golang_org/x/text. DO NOT EDIT. +// Code generated by running "go generate" in internal/x/text. DO NOT EDIT. 
package idna diff --git a/src/vendor/golang_org/x/net/idna/trie.go b/src/internal/x/net/idna/trie.go similarity index 100% rename from src/vendor/golang_org/x/net/idna/trie.go rename to src/internal/x/net/idna/trie.go diff --git a/src/vendor/golang_org/x/net/idna/trieval.go b/src/internal/x/net/idna/trieval.go similarity index 97% rename from src/vendor/golang_org/x/net/idna/trieval.go rename to src/internal/x/net/idna/trieval.go index 5f4e5f2e7457e..bf57260034946 100644 --- a/src/vendor/golang_org/x/net/idna/trieval.go +++ b/src/internal/x/net/idna/trieval.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// Code generated by running "go generate" in golang_org/x/text. DO NOT EDIT. +// Code generated by running "go generate" in internal/x/text. DO NOT EDIT. package idna diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_bsd.go b/src/internal/x/net/internal/nettest/helper_bsd.go similarity index 100% rename from src/vendor/golang_org/x/net/internal/nettest/helper_bsd.go rename to src/internal/x/net/internal/nettest/helper_bsd.go diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_nobsd.go b/src/internal/x/net/internal/nettest/helper_nobsd.go similarity index 91% rename from src/vendor/golang_org/x/net/internal/nettest/helper_nobsd.go rename to src/internal/x/net/internal/nettest/helper_nobsd.go index bc7da5e0d574a..1611a907f00bb 100644 --- a/src/vendor/golang_org/x/net/internal/nettest/helper_nobsd.go +++ b/src/internal/x/net/internal/nettest/helper_nobsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build linux solaris +// +build aix linux solaris package nettest diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_posix.go b/src/internal/x/net/internal/nettest/helper_posix.go similarity index 88% rename from src/vendor/golang_org/x/net/internal/nettest/helper_posix.go rename to src/internal/x/net/internal/nettest/helper_posix.go index 963ed99655bab..efc67a8eba975 100644 --- a/src/vendor/golang_org/x/net/internal/nettest/helper_posix.go +++ b/src/internal/x/net/internal/nettest/helper_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package nettest diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_stub.go b/src/internal/x/net/internal/nettest/helper_stub.go similarity index 95% rename from src/vendor/golang_org/x/net/internal/nettest/helper_stub.go rename to src/internal/x/net/internal/nettest/helper_stub.go index d729156de67fd..d89cf29962903 100644 --- a/src/vendor/golang_org/x/net/internal/nettest/helper_stub.go +++ b/src/internal/x/net/internal/nettest/helper_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build js,wasm nacl plan9 +// +build js nacl plan9 package nettest diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_unix.go b/src/internal/x/net/internal/nettest/helper_unix.go similarity index 89% rename from src/vendor/golang_org/x/net/internal/nettest/helper_unix.go rename to src/internal/x/net/internal/nettest/helper_unix.go index ed13e448b7b4c..b6839dcd8fd72 100644 --- a/src/vendor/golang_org/x/net/internal/nettest/helper_unix.go +++ b/src/internal/x/net/internal/nettest/helper_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package nettest diff --git a/src/vendor/golang_org/x/net/internal/nettest/helper_windows.go b/src/internal/x/net/internal/nettest/helper_windows.go similarity index 100% rename from src/vendor/golang_org/x/net/internal/nettest/helper_windows.go rename to src/internal/x/net/internal/nettest/helper_windows.go diff --git a/src/vendor/golang_org/x/net/internal/nettest/interface.go b/src/internal/x/net/internal/nettest/interface.go similarity index 100% rename from src/vendor/golang_org/x/net/internal/nettest/interface.go rename to src/internal/x/net/internal/nettest/interface.go diff --git a/src/vendor/golang_org/x/net/internal/nettest/rlimit.go b/src/internal/x/net/internal/nettest/rlimit.go similarity index 100% rename from src/vendor/golang_org/x/net/internal/nettest/rlimit.go rename to src/internal/x/net/internal/nettest/rlimit.go diff --git a/src/vendor/golang_org/x/net/internal/nettest/stack.go b/src/internal/x/net/internal/nettest/stack.go similarity index 96% rename from src/vendor/golang_org/x/net/internal/nettest/stack.go rename to src/internal/x/net/internal/nettest/stack.go index 46d2fccab588c..1a545e21ab019 100644 --- a/src/vendor/golang_org/x/net/internal/nettest/stack.go +++ 
b/src/internal/x/net/internal/nettest/stack.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package nettest provides utilities for network testing. -package nettest // import "golang.org/x/net/internal/nettest" +package nettest import ( "fmt" @@ -72,7 +72,7 @@ func TestableNetwork(network string) bool { } case "unixpacket": switch runtime.GOOS { - case "android", "darwin", "freebsd", "js", "nacl", "plan9", "windows": + case "aix", "android", "darwin", "freebsd", "js", "nacl", "plan9", "windows": return false case "netbsd": // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. diff --git a/src/vendor/golang_org/x/net/lif/address.go b/src/internal/x/net/lif/address.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/address.go rename to src/internal/x/net/lif/address.go diff --git a/src/vendor/golang_org/x/net/lif/address_test.go b/src/internal/x/net/lif/address_test.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/address_test.go rename to src/internal/x/net/lif/address_test.go diff --git a/src/vendor/golang_org/x/net/lif/binary.go b/src/internal/x/net/lif/binary.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/binary.go rename to src/internal/x/net/lif/binary.go diff --git a/src/vendor/golang_org/x/net/lif/defs_solaris.go b/src/internal/x/net/lif/defs_solaris.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/defs_solaris.go rename to src/internal/x/net/lif/defs_solaris.go diff --git a/src/vendor/golang_org/x/net/lif/lif.go b/src/internal/x/net/lif/lif.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/lif.go rename to src/internal/x/net/lif/lif.go diff --git a/src/vendor/golang_org/x/net/lif/link.go b/src/internal/x/net/lif/link.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/link.go rename to src/internal/x/net/lif/link.go diff --git a/src/vendor/golang_org/x/net/lif/link_test.go 
b/src/internal/x/net/lif/link_test.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/link_test.go rename to src/internal/x/net/lif/link_test.go diff --git a/src/vendor/golang_org/x/net/lif/sys.go b/src/internal/x/net/lif/sys.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/sys.go rename to src/internal/x/net/lif/sys.go diff --git a/src/vendor/golang_org/x/net/lif/sys_solaris_amd64.s b/src/internal/x/net/lif/sys_solaris_amd64.s similarity index 100% rename from src/vendor/golang_org/x/net/lif/sys_solaris_amd64.s rename to src/internal/x/net/lif/sys_solaris_amd64.s diff --git a/src/vendor/golang_org/x/net/lif/syscall.go b/src/internal/x/net/lif/syscall.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/syscall.go rename to src/internal/x/net/lif/syscall.go diff --git a/src/vendor/golang_org/x/net/lif/zsys_solaris_amd64.go b/src/internal/x/net/lif/zsys_solaris_amd64.go similarity index 100% rename from src/vendor/golang_org/x/net/lif/zsys_solaris_amd64.go rename to src/internal/x/net/lif/zsys_solaris_amd64.go diff --git a/src/vendor/golang_org/x/net/nettest/conntest.go b/src/internal/x/net/nettest/conntest.go similarity index 100% rename from src/vendor/golang_org/x/net/nettest/conntest.go rename to src/internal/x/net/nettest/conntest.go diff --git a/src/vendor/golang_org/x/net/nettest/conntest_go16.go b/src/internal/x/net/nettest/conntest_go16.go similarity index 100% rename from src/vendor/golang_org/x/net/nettest/conntest_go16.go rename to src/internal/x/net/nettest/conntest_go16.go diff --git a/src/vendor/golang_org/x/net/nettest/conntest_go17.go b/src/internal/x/net/nettest/conntest_go17.go similarity index 100% rename from src/vendor/golang_org/x/net/nettest/conntest_go17.go rename to src/internal/x/net/nettest/conntest_go17.go diff --git a/src/vendor/golang_org/x/net/nettest/conntest_test.go b/src/internal/x/net/nettest/conntest_test.go similarity index 97% rename from 
src/vendor/golang_org/x/net/nettest/conntest_test.go rename to src/internal/x/net/nettest/conntest_test.go index ae8426a05c29a..e14df0e6fbb77 100644 --- a/src/vendor/golang_org/x/net/nettest/conntest_test.go +++ b/src/internal/x/net/nettest/conntest_test.go @@ -12,7 +12,7 @@ import ( "runtime" "testing" - "golang_org/x/net/internal/nettest" + "internal/x/net/internal/nettest" ) func TestTestConn(t *testing.T) { diff --git a/src/vendor/golang_org/x/net/route/address.go b/src/internal/x/net/route/address.go similarity index 100% rename from src/vendor/golang_org/x/net/route/address.go rename to src/internal/x/net/route/address.go diff --git a/src/vendor/golang_org/x/net/route/address_darwin_test.go b/src/internal/x/net/route/address_darwin_test.go similarity index 100% rename from src/vendor/golang_org/x/net/route/address_darwin_test.go rename to src/internal/x/net/route/address_darwin_test.go diff --git a/src/vendor/golang_org/x/net/route/address_test.go b/src/internal/x/net/route/address_test.go similarity index 100% rename from src/vendor/golang_org/x/net/route/address_test.go rename to src/internal/x/net/route/address_test.go diff --git a/src/vendor/golang_org/x/net/route/binary.go b/src/internal/x/net/route/binary.go similarity index 100% rename from src/vendor/golang_org/x/net/route/binary.go rename to src/internal/x/net/route/binary.go diff --git a/src/vendor/golang_org/x/net/route/defs_darwin.go b/src/internal/x/net/route/defs_darwin.go similarity index 100% rename from src/vendor/golang_org/x/net/route/defs_darwin.go rename to src/internal/x/net/route/defs_darwin.go diff --git a/src/vendor/golang_org/x/net/route/defs_dragonfly.go b/src/internal/x/net/route/defs_dragonfly.go similarity index 100% rename from src/vendor/golang_org/x/net/route/defs_dragonfly.go rename to src/internal/x/net/route/defs_dragonfly.go diff --git a/src/vendor/golang_org/x/net/route/defs_freebsd.go b/src/internal/x/net/route/defs_freebsd.go similarity index 100% rename from 
src/vendor/golang_org/x/net/route/defs_freebsd.go rename to src/internal/x/net/route/defs_freebsd.go diff --git a/src/vendor/golang_org/x/net/route/defs_netbsd.go b/src/internal/x/net/route/defs_netbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/defs_netbsd.go rename to src/internal/x/net/route/defs_netbsd.go diff --git a/src/vendor/golang_org/x/net/route/defs_openbsd.go b/src/internal/x/net/route/defs_openbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/defs_openbsd.go rename to src/internal/x/net/route/defs_openbsd.go diff --git a/src/internal/syscall/unix/empty.s b/src/internal/x/net/route/empty.s similarity index 51% rename from src/internal/syscall/unix/empty.s rename to src/internal/x/net/route/empty.s index 7151ab838bd6e..bff0231c7d57e 100644 --- a/src/internal/syscall/unix/empty.s +++ b/src/internal/x/net/route/empty.s @@ -2,6 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is here just to make the go tool happy. It allows -// empty function declarations (no function body). -// It is used with "go:linkname". +// +build darwin,go1.12 + +// This exists solely so we can linkname in symbols from syscall. diff --git a/src/vendor/golang_org/x/net/route/interface.go b/src/internal/x/net/route/interface.go similarity index 98% rename from src/vendor/golang_org/x/net/route/interface.go rename to src/internal/x/net/route/interface.go index 854906d9c4291..05ef2a9ce3c79 100644 --- a/src/vendor/golang_org/x/net/route/interface.go +++ b/src/internal/x/net/route/interface.go @@ -37,7 +37,7 @@ func (m *InterfaceAddrMessage) Sys() []Sys { return nil } // address message. 
type InterfaceMulticastAddrMessage struct { Version int // message version - Type int // messsage type + Type int // message type Flags int // interface flags Index int // interface index Addrs []Addr // addresses diff --git a/src/vendor/golang_org/x/net/route/interface_announce.go b/src/internal/x/net/route/interface_announce.go similarity index 100% rename from src/vendor/golang_org/x/net/route/interface_announce.go rename to src/internal/x/net/route/interface_announce.go diff --git a/src/vendor/golang_org/x/net/route/interface_classic.go b/src/internal/x/net/route/interface_classic.go similarity index 100% rename from src/vendor/golang_org/x/net/route/interface_classic.go rename to src/internal/x/net/route/interface_classic.go diff --git a/src/vendor/golang_org/x/net/route/interface_freebsd.go b/src/internal/x/net/route/interface_freebsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/interface_freebsd.go rename to src/internal/x/net/route/interface_freebsd.go diff --git a/src/vendor/golang_org/x/net/route/interface_multicast.go b/src/internal/x/net/route/interface_multicast.go similarity index 100% rename from src/vendor/golang_org/x/net/route/interface_multicast.go rename to src/internal/x/net/route/interface_multicast.go diff --git a/src/vendor/golang_org/x/net/route/interface_openbsd.go b/src/internal/x/net/route/interface_openbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/interface_openbsd.go rename to src/internal/x/net/route/interface_openbsd.go diff --git a/src/vendor/golang_org/x/net/route/message.go b/src/internal/x/net/route/message.go similarity index 100% rename from src/vendor/golang_org/x/net/route/message.go rename to src/internal/x/net/route/message.go diff --git a/src/vendor/golang_org/x/net/route/message_darwin_test.go b/src/internal/x/net/route/message_darwin_test.go similarity index 100% rename from src/vendor/golang_org/x/net/route/message_darwin_test.go rename to 
src/internal/x/net/route/message_darwin_test.go diff --git a/src/vendor/golang_org/x/net/route/message_freebsd_test.go b/src/internal/x/net/route/message_freebsd_test.go similarity index 95% rename from src/vendor/golang_org/x/net/route/message_freebsd_test.go rename to src/internal/x/net/route/message_freebsd_test.go index db4b56752cfac..c6d8a5f54ca28 100644 --- a/src/vendor/golang_org/x/net/route/message_freebsd_test.go +++ b/src/internal/x/net/route/message_freebsd_test.go @@ -4,10 +4,7 @@ package route -import ( - "testing" - "unsafe" -) +import "testing" func TestFetchAndParseRIBOnFreeBSD(t *testing.T) { for _, typ := range []RIBType{sysNET_RT_IFMALIST} { @@ -40,8 +37,7 @@ func TestFetchAndParseRIBOnFreeBSD10AndAbove(t *testing.T) { if _, err := FetchRIB(sysAF_UNSPEC, sysNET_RT_IFLISTL, 0); err != nil { t.Skip("NET_RT_IFLISTL not supported") } - var p uintptr - if kernelAlign != int(unsafe.Sizeof(p)) { + if compatFreeBSD32 { t.Skip("NET_RT_IFLIST vs. NET_RT_IFLISTL doesn't work for 386 emulation on amd64") } diff --git a/src/vendor/golang_org/x/net/route/message_test.go b/src/internal/x/net/route/message_test.go similarity index 100% rename from src/vendor/golang_org/x/net/route/message_test.go rename to src/internal/x/net/route/message_test.go diff --git a/src/vendor/golang_org/x/net/route/route.go b/src/internal/x/net/route/route.go similarity index 100% rename from src/vendor/golang_org/x/net/route/route.go rename to src/internal/x/net/route/route.go diff --git a/src/vendor/golang_org/x/net/route/route_classic.go b/src/internal/x/net/route/route_classic.go similarity index 100% rename from src/vendor/golang_org/x/net/route/route_classic.go rename to src/internal/x/net/route/route_classic.go diff --git a/src/vendor/golang_org/x/net/route/route_openbsd.go b/src/internal/x/net/route/route_openbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/route_openbsd.go rename to src/internal/x/net/route/route_openbsd.go diff --git 
a/src/vendor/golang_org/x/net/route/route_test.go b/src/internal/x/net/route/route_test.go similarity index 100% rename from src/vendor/golang_org/x/net/route/route_test.go rename to src/internal/x/net/route/route_test.go diff --git a/src/vendor/golang_org/x/net/route/sys.go b/src/internal/x/net/route/sys.go similarity index 100% rename from src/vendor/golang_org/x/net/route/sys.go rename to src/internal/x/net/route/sys.go diff --git a/src/vendor/golang_org/x/net/route/sys_darwin.go b/src/internal/x/net/route/sys_darwin.go similarity index 100% rename from src/vendor/golang_org/x/net/route/sys_darwin.go rename to src/internal/x/net/route/sys_darwin.go diff --git a/src/vendor/golang_org/x/net/route/sys_dragonfly.go b/src/internal/x/net/route/sys_dragonfly.go similarity index 100% rename from src/vendor/golang_org/x/net/route/sys_dragonfly.go rename to src/internal/x/net/route/sys_dragonfly.go diff --git a/src/vendor/golang_org/x/net/route/sys_freebsd.go b/src/internal/x/net/route/sys_freebsd.go similarity index 90% rename from src/vendor/golang_org/x/net/route/sys_freebsd.go rename to src/internal/x/net/route/sys_freebsd.go index 89ba1c4e26299..fe91be1249c5c 100644 --- a/src/vendor/golang_org/x/net/route/sys_freebsd.go +++ b/src/internal/x/net/route/sys_freebsd.go @@ -54,10 +54,12 @@ func (m *InterfaceMessage) Sys() []Sys { } } +var compatFreeBSD32 bool // 386 emulation on amd64 + func probeRoutingStack() (int, map[int]*wireFormat) { var p uintptr wordSize := int(unsafe.Sizeof(p)) - align := int(unsafe.Sizeof(p)) + align := wordSize // In the case of kern.supported_archs="amd64 i386", we need // to know the underlying kernel's architecture because the // alignment for routing facilities are set at the build time @@ -83,8 +85,11 @@ func probeRoutingStack() (int, map[int]*wireFormat) { break } } + if align != wordSize { + compatFreeBSD32 = true // 386 emulation on amd64 + } var rtm, ifm, ifam, ifmam, ifanm *wireFormat - if align != wordSize { // 386 emulation on amd64 
+ if compatFreeBSD32 { rtm = &wireFormat{extOff: sizeofRtMsghdrFreeBSD10Emu - sizeofRtMetricsFreeBSD10Emu, bodyOff: sizeofRtMsghdrFreeBSD10Emu} ifm = &wireFormat{extOff: 16} ifam = &wireFormat{extOff: sizeofIfaMsghdrFreeBSD10Emu, bodyOff: sizeofIfaMsghdrFreeBSD10Emu} @@ -100,35 +105,38 @@ func probeRoutingStack() (int, map[int]*wireFormat) { rel, _ := syscall.SysctlUint32("kern.osreldate") switch { case rel < 800000: - if align != wordSize { // 386 emulation on amd64 + if compatFreeBSD32 { ifm.bodyOff = sizeofIfMsghdrFreeBSD7Emu } else { ifm.bodyOff = sizeofIfMsghdrFreeBSD7 } case 800000 <= rel && rel < 900000: - if align != wordSize { // 386 emulation on amd64 + if compatFreeBSD32 { ifm.bodyOff = sizeofIfMsghdrFreeBSD8Emu } else { ifm.bodyOff = sizeofIfMsghdrFreeBSD8 } case 900000 <= rel && rel < 1000000: - if align != wordSize { // 386 emulation on amd64 + if compatFreeBSD32 { ifm.bodyOff = sizeofIfMsghdrFreeBSD9Emu } else { ifm.bodyOff = sizeofIfMsghdrFreeBSD9 } case 1000000 <= rel && rel < 1100000: - if align != wordSize { // 386 emulation on amd64 + if compatFreeBSD32 { ifm.bodyOff = sizeofIfMsghdrFreeBSD10Emu } else { ifm.bodyOff = sizeofIfMsghdrFreeBSD10 } default: - if align != wordSize { // 386 emulation on amd64 + if compatFreeBSD32 { ifm.bodyOff = sizeofIfMsghdrFreeBSD11Emu } else { ifm.bodyOff = sizeofIfMsghdrFreeBSD11 } + if rel >= 1102000 { // see https://github.com/freebsd/freebsd/commit/027c7f4d66ff8d8c4a46c3665a5ee7d6d8462034#diff-ad4e5b7f1449ea3fc87bc97280de145b + align = wordSize + } } rtm.parse = rtm.parseRouteMessage ifm.parse = ifm.parseInterfaceMessage diff --git a/src/vendor/golang_org/x/net/route/sys_netbsd.go b/src/internal/x/net/route/sys_netbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/sys_netbsd.go rename to src/internal/x/net/route/sys_netbsd.go diff --git a/src/vendor/golang_org/x/net/route/sys_openbsd.go b/src/internal/x/net/route/sys_openbsd.go similarity index 100% rename from 
src/vendor/golang_org/x/net/route/sys_openbsd.go rename to src/internal/x/net/route/sys_openbsd.go diff --git a/src/vendor/golang_org/x/net/route/syscall.go b/src/internal/x/net/route/syscall.go similarity index 83% rename from src/vendor/golang_org/x/net/route/syscall.go rename to src/internal/x/net/route/syscall.go index c211188b10b8a..72431b0341061 100644 --- a/src/vendor/golang_org/x/net/route/syscall.go +++ b/src/internal/x/net/route/syscall.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build dragonfly freebsd netbsd openbsd package route @@ -20,7 +20,7 @@ func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { p = unsafe.Pointer(&zero) } - _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) if errno != 0 { return error(errno) } diff --git a/src/internal/x/net/route/syscall_go1_11_darwin.go b/src/internal/x/net/route/syscall_go1_11_darwin.go new file mode 100644 index 0000000000000..7228e443cd23f --- /dev/null +++ b/src/internal/x/net/route/syscall_go1_11_darwin.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.12 + +package route + +import ( + "syscall" + "unsafe" +) + +var zero uintptr + +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var p unsafe.Pointer + if len(mib) > 0 { + p = unsafe.Pointer(&mib[0]) + } else { + p = unsafe.Pointer(&zero) + } + _, _, errno := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), newlen) + if errno != 0 { + return error(errno) + } + return nil +} diff --git a/src/internal/x/net/route/syscall_go1_12_darwin.go b/src/internal/x/net/route/syscall_go1_12_darwin.go new file mode 100644 index 0000000000000..7922a6836fc3d --- /dev/null +++ b/src/internal/x/net/route/syscall_go1_12_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package route + +import _ "unsafe" // for linkname + +//go:linkname sysctl syscall.sysctl +func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error diff --git a/src/vendor/golang_org/x/net/route/zsys_darwin.go b/src/internal/x/net/route/zsys_darwin.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_darwin.go rename to src/internal/x/net/route/zsys_darwin.go diff --git a/src/vendor/golang_org/x/net/route/zsys_dragonfly.go b/src/internal/x/net/route/zsys_dragonfly.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_dragonfly.go rename to src/internal/x/net/route/zsys_dragonfly.go diff --git a/src/vendor/golang_org/x/net/route/zsys_freebsd_386.go b/src/internal/x/net/route/zsys_freebsd_386.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_freebsd_386.go rename to src/internal/x/net/route/zsys_freebsd_386.go diff --git a/src/vendor/golang_org/x/net/route/zsys_freebsd_amd64.go 
b/src/internal/x/net/route/zsys_freebsd_amd64.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_freebsd_amd64.go rename to src/internal/x/net/route/zsys_freebsd_amd64.go diff --git a/src/vendor/golang_org/x/net/route/zsys_freebsd_arm.go b/src/internal/x/net/route/zsys_freebsd_arm.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_freebsd_arm.go rename to src/internal/x/net/route/zsys_freebsd_arm.go diff --git a/src/vendor/golang_org/x/net/route/zsys_netbsd.go b/src/internal/x/net/route/zsys_netbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_netbsd.go rename to src/internal/x/net/route/zsys_netbsd.go diff --git a/src/vendor/golang_org/x/net/route/zsys_openbsd.go b/src/internal/x/net/route/zsys_openbsd.go similarity index 100% rename from src/vendor/golang_org/x/net/route/zsys_openbsd.go rename to src/internal/x/net/route/zsys_openbsd.go diff --git a/src/vendor/golang_org/x/text/secure/bidirule/bidirule.go b/src/internal/x/text/secure/bidirule/bidirule.go similarity index 99% rename from src/vendor/golang_org/x/text/secure/bidirule/bidirule.go rename to src/internal/x/text/secure/bidirule/bidirule.go index c3ca2bc6fede0..87e656a37d012 100644 --- a/src/vendor/golang_org/x/text/secure/bidirule/bidirule.go +++ b/src/internal/x/text/secure/bidirule/bidirule.go @@ -14,8 +14,8 @@ import ( "errors" "unicode/utf8" - "golang_org/x/text/transform" - "golang_org/x/text/unicode/bidi" + "internal/x/text/transform" + "internal/x/text/unicode/bidi" ) // This file contains an implementation of RFC 5893: Right-to-Left Scripts for diff --git a/src/vendor/golang_org/x/text/secure/doc.go b/src/internal/x/text/secure/doc.go similarity index 85% rename from src/vendor/golang_org/x/text/secure/doc.go rename to src/internal/x/text/secure/doc.go index 5eb60b94bf275..6151b79d6e3e3 100644 --- a/src/vendor/golang_org/x/text/secure/doc.go +++ b/src/internal/x/text/secure/doc.go @@ -5,4 +5,4 @@ // license that can 
be found in the LICENSE file. // secure is a repository of text security related packages. -package secure // import "golang_org/x/text/secure" +package secure diff --git a/src/vendor/golang_org/x/text/transform/examples_test.go b/src/internal/x/text/transform/examples_test.go similarity index 92% rename from src/vendor/golang_org/x/text/transform/examples_test.go rename to src/internal/x/text/transform/examples_test.go index 1323d9bec033d..8d2fbb21711e7 100644 --- a/src/vendor/golang_org/x/text/transform/examples_test.go +++ b/src/internal/x/text/transform/examples_test.go @@ -10,8 +10,8 @@ import ( "fmt" "unicode" - "golang_org/x/text/transform" - "golang_org/x/text/unicode/norm" + "internal/x/text/transform" + "internal/x/text/unicode/norm" ) func ExampleRemoveFunc() { diff --git a/src/vendor/golang_org/x/text/transform/transform.go b/src/internal/x/text/transform/transform.go similarity index 99% rename from src/vendor/golang_org/x/text/transform/transform.go rename to src/internal/x/text/transform/transform.go index 9ddfa80cf3e69..7b6b55e0194f2 100644 --- a/src/vendor/golang_org/x/text/transform/transform.go +++ b/src/internal/x/text/transform/transform.go @@ -8,7 +8,7 @@ // bytes passing through as well as various transformations. Example // transformations provided by other packages include normalization and // conversion between character sets. -package transform // import "golang_org/x/text/transform" +package transform import ( "bytes" diff --git a/src/vendor/golang_org/x/text/unicode/bidi/bidi.go b/src/internal/x/text/unicode/bidi/bidi.go similarity index 99% rename from src/vendor/golang_org/x/text/unicode/bidi/bidi.go rename to src/internal/x/text/unicode/bidi/bidi.go index e691ae86942b1..4542171736ee1 100644 --- a/src/vendor/golang_org/x/text/unicode/bidi/bidi.go +++ b/src/internal/x/text/unicode/bidi/bidi.go @@ -10,7 +10,7 @@ // // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways // and without notice. 
-package bidi // import "golang_org/x/text/unicode/bidi" +package bidi // TODO: // The following functionality would not be hard to implement, but hinges on diff --git a/src/vendor/golang_org/x/text/unicode/bidi/bracket.go b/src/internal/x/text/unicode/bidi/bracket.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/bidi/bracket.go rename to src/internal/x/text/unicode/bidi/bracket.go diff --git a/src/vendor/golang_org/x/text/unicode/bidi/core.go b/src/internal/x/text/unicode/bidi/core.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/bidi/core.go rename to src/internal/x/text/unicode/bidi/core.go diff --git a/src/vendor/golang_org/x/text/unicode/bidi/example_test.go b/src/internal/x/text/unicode/bidi/example_test.go similarity index 99% rename from src/vendor/golang_org/x/text/unicode/bidi/example_test.go rename to src/internal/x/text/unicode/bidi/example_test.go index e1739598d43e1..56c5c4a1219ab 100644 --- a/src/vendor/golang_org/x/text/unicode/bidi/example_test.go +++ b/src/internal/x/text/unicode/bidi/example_test.go @@ -8,7 +8,7 @@ import ( "fmt" "log" - "golang_org/x/text/bidi" + "internal/x/text/bidi" ) func foo() { diff --git a/src/vendor/golang_org/x/text/unicode/bidi/prop.go b/src/internal/x/text/unicode/bidi/prop.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/bidi/prop.go rename to src/internal/x/text/unicode/bidi/prop.go diff --git a/src/vendor/golang_org/x/text/unicode/bidi/tables.go b/src/internal/x/text/unicode/bidi/tables.go similarity index 99% rename from src/vendor/golang_org/x/text/unicode/bidi/tables.go rename to src/internal/x/text/unicode/bidi/tables.go index fb2229efa875c..c9c45c625f559 100644 --- a/src/vendor/golang_org/x/text/unicode/bidi/tables.go +++ b/src/internal/x/text/unicode/bidi/tables.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// Code generated by running "go generate" in golang_org/x/text. DO NOT EDIT. 
+// Code generated by running "go generate" in internal/x/text. DO NOT EDIT. package bidi diff --git a/src/vendor/golang_org/x/text/unicode/bidi/trieval.go b/src/internal/x/text/unicode/bidi/trieval.go similarity index 95% rename from src/vendor/golang_org/x/text/unicode/bidi/trieval.go rename to src/internal/x/text/unicode/bidi/trieval.go index c3f0e21f3e879..e59d249c7507f 100644 --- a/src/vendor/golang_org/x/text/unicode/bidi/trieval.go +++ b/src/internal/x/text/unicode/bidi/trieval.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// Code generated by running "go generate" in golang_org/x/text. DO NOT EDIT. +// Code generated by running "go generate" in internal/x/text. DO NOT EDIT. package bidi diff --git a/src/vendor/golang_org/x/text/unicode/doc.go b/src/internal/x/text/unicode/doc.go similarity index 84% rename from src/vendor/golang_org/x/text/unicode/doc.go rename to src/internal/x/text/unicode/doc.go index 55a6775d59a38..4f7e9f5a4336e 100644 --- a/src/vendor/golang_org/x/text/unicode/doc.go +++ b/src/internal/x/text/unicode/doc.go @@ -5,6 +5,6 @@ // license that can be found in the LICENSE file. // unicode holds packages with implementations of Unicode standards that are -// mostly used as building blocks for other packages in golang_org/x/text, +// mostly used as building blocks for other packages in internal/x/text, // layout engines, or are otherwise more low-level in nature. 
package unicode diff --git a/src/vendor/golang_org/x/text/unicode/norm/composition.go b/src/internal/x/text/unicode/norm/composition.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/composition.go rename to src/internal/x/text/unicode/norm/composition.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/example_iter_test.go b/src/internal/x/text/unicode/norm/example_iter_test.go similarity index 98% rename from src/vendor/golang_org/x/text/unicode/norm/example_iter_test.go rename to src/internal/x/text/unicode/norm/example_iter_test.go index aed6c16fbb23d..fb0e52410b8a3 100644 --- a/src/vendor/golang_org/x/text/unicode/norm/example_iter_test.go +++ b/src/internal/x/text/unicode/norm/example_iter_test.go @@ -11,7 +11,7 @@ import ( "fmt" "unicode/utf8" - "golang_org/x/text/unicode/norm" + "internal/x/text/unicode/norm" ) // EqualSimple uses a norm.Iter to compare two non-normalized diff --git a/src/vendor/golang_org/x/text/unicode/norm/example_test.go b/src/internal/x/text/unicode/norm/example_test.go similarity index 94% rename from src/vendor/golang_org/x/text/unicode/norm/example_test.go rename to src/internal/x/text/unicode/norm/example_test.go index 72e72c9d34a57..a9904400df43d 100644 --- a/src/vendor/golang_org/x/text/unicode/norm/example_test.go +++ b/src/internal/x/text/unicode/norm/example_test.go @@ -9,7 +9,7 @@ package norm_test import ( "fmt" - "golang_org/x/text/unicode/norm" + "internal/x/text/unicode/norm" ) func ExampleForm_NextBoundary() { diff --git a/src/vendor/golang_org/x/text/unicode/norm/forminfo.go b/src/internal/x/text/unicode/norm/forminfo.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/forminfo.go rename to src/internal/x/text/unicode/norm/forminfo.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/input.go b/src/internal/x/text/unicode/norm/input.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/input.go rename to 
src/internal/x/text/unicode/norm/input.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/iter.go b/src/internal/x/text/unicode/norm/iter.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/iter.go rename to src/internal/x/text/unicode/norm/iter.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/normalize.go b/src/internal/x/text/unicode/norm/normalize.go similarity index 99% rename from src/vendor/golang_org/x/text/unicode/norm/normalize.go rename to src/internal/x/text/unicode/norm/normalize.go index 4de4ed6ed0f9d..791c39b1c48e8 100644 --- a/src/vendor/golang_org/x/text/unicode/norm/normalize.go +++ b/src/internal/x/text/unicode/norm/normalize.go @@ -7,12 +7,12 @@ // Note: the file data_test.go that is generated should not be checked in. // Package norm contains types and functions for normalizing Unicode strings. -package norm // import "golang_org/x/text/unicode/norm" +package norm import ( "unicode/utf8" - "golang_org/x/text/transform" + "internal/x/text/transform" ) // A Form denotes a canonical representation of Unicode code points. diff --git a/src/vendor/golang_org/x/text/unicode/norm/readwriter.go b/src/internal/x/text/unicode/norm/readwriter.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/readwriter.go rename to src/internal/x/text/unicode/norm/readwriter.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/tables.go b/src/internal/x/text/unicode/norm/tables.go similarity index 99% rename from src/vendor/golang_org/x/text/unicode/norm/tables.go rename to src/internal/x/text/unicode/norm/tables.go index d6466836cefe6..2dd61adf63c03 100644 --- a/src/vendor/golang_org/x/text/unicode/norm/tables.go +++ b/src/internal/x/text/unicode/norm/tables.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// Code generated by running "go generate" in golang_org/x/text. DO NOT EDIT. +// Code generated by running "go generate" in internal/x/text. 
DO NOT EDIT. package norm diff --git a/src/vendor/golang_org/x/text/unicode/norm/transform.go b/src/internal/x/text/unicode/norm/transform.go similarity index 98% rename from src/vendor/golang_org/x/text/unicode/norm/transform.go rename to src/internal/x/text/unicode/norm/transform.go index 73869a5a1cddb..7837cb96a414a 100644 --- a/src/vendor/golang_org/x/text/unicode/norm/transform.go +++ b/src/internal/x/text/unicode/norm/transform.go @@ -9,7 +9,7 @@ package norm import ( "unicode/utf8" - "golang_org/x/text/transform" + "internal/x/text/transform" ) // Reset implements the Reset method of the transform.Transformer interface. diff --git a/src/vendor/golang_org/x/text/unicode/norm/trie.go b/src/internal/x/text/unicode/norm/trie.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/trie.go rename to src/internal/x/text/unicode/norm/trie.go diff --git a/src/vendor/golang_org/x/text/unicode/norm/triegen.go b/src/internal/x/text/unicode/norm/triegen.go similarity index 100% rename from src/vendor/golang_org/x/text/unicode/norm/triegen.go rename to src/internal/x/text/unicode/norm/triegen.go diff --git a/src/internal/xcoff/ar.go b/src/internal/xcoff/ar.go new file mode 100644 index 0000000000000..0fb410f7dd71d --- /dev/null +++ b/src/internal/xcoff/ar.go @@ -0,0 +1,228 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xcoff + +import ( + "encoding/binary" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + SAIAMAG = 0x8 + AIAFMAG = "`\n" + AIAMAG = "\n" + AIAMAGBIG = "\n" + + // Sizeof + FL_HSZ_BIG = 0x80 + AR_HSZ_BIG = 0x70 +) + +type bigarFileHeader struct { + Flmagic [SAIAMAG]byte // Archive magic string + Flmemoff [20]byte // Member table offset + Flgstoff [20]byte // 32-bits global symtab offset + Flgst64off [20]byte // 64-bits global symtab offset + Flfstmoff [20]byte // First member offset + Fllstmoff [20]byte // Last member offset + Flfreeoff [20]byte // First member on free list offset +} + +type bigarMemberHeader struct { + Arsize [20]byte // File member size + Arnxtmem [20]byte // Next member pointer + Arprvmem [20]byte // Previous member pointer + Ardate [12]byte // File member date + Aruid [12]byte // File member uid + Argid [12]byte // File member gid + Armode [12]byte // File member mode (octal) + Arnamlen [4]byte // File member name length + // _ar_nam is removed because it's easier to get name without it. +} + +// Archive represents an open AIX big archive. +type Archive struct { + ArchiveHeader + Members []*Member + + closer io.Closer +} + +// MemberHeader holds information about a big archive file header +type ArchiveHeader struct { + magic string +} + +// Member represents a member of an AIX big archive. +type Member struct { + MemberHeader + sr *io.SectionReader +} + +// MemberHeader holds information about a big archive member +type MemberHeader struct { + Name string + Size uint64 +} + +// OpenArchive opens the named archive using os.Open and prepares it for use +// as an AIX big archive. +func OpenArchive(name string) (*Archive, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + arch, err := NewArchive(f) + if err != nil { + f.Close() + return nil, err + } + arch.closer = f + return arch, nil +} + +// Close closes the Archive. 
+// If the Archive was created using NewArchive directly instead of OpenArchive, +// Close has no effect. +func (a *Archive) Close() error { + var err error + if a.closer != nil { + err = a.closer.Close() + a.closer = nil + } + return err +} + +// NewArchive creates a new Archive for accessing an AIX big archive in an underlying reader. +func NewArchive(r io.ReaderAt) (*Archive, error) { + parseDecimalBytes := func(b []byte) (int64, error) { + return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64) + } + sr := io.NewSectionReader(r, 0, 1<<63-1) + + // Read File Header + var magic [SAIAMAG]byte + if _, err := sr.ReadAt(magic[:], 0); err != nil { + return nil, err + } + + arch := new(Archive) + switch string(magic[:]) { + case AIAMAGBIG: + arch.magic = string(magic[:]) + case AIAMAG: + return nil, fmt.Errorf("small AIX archive not supported") + default: + return nil, fmt.Errorf("unrecognised archive magic: 0x%x", magic) + } + + var fhdr bigarFileHeader + if _, err := sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + if err := binary.Read(sr, binary.BigEndian, &fhdr); err != nil { + return nil, err + } + + off, err := parseDecimalBytes(fhdr.Flfstmoff[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + if off == 0 { + // Occurs if the archive is empty. + return arch, nil + } + + lastoff, err := parseDecimalBytes(fhdr.Fllstmoff[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + // Read members + for { + // Read Member Header + // The member header is normally 2 bytes larger. But it's easier + // to read the name if the header is read without _ar_nam. + // However, AIAFMAG must be read afterward. 
+ if _, err := sr.Seek(off, os.SEEK_SET); err != nil { + return nil, err + } + + var mhdr bigarMemberHeader + if err := binary.Read(sr, binary.BigEndian, &mhdr); err != nil { + return nil, err + } + + member := new(Member) + arch.Members = append(arch.Members, member) + + size, err := parseDecimalBytes(mhdr.Arsize[:]) + if err != nil { + return nil, fmt.Errorf("error parsing size in member header(%q); %v", mhdr, err) + } + member.Size = uint64(size) + + // Read name + namlen, err := parseDecimalBytes(mhdr.Arnamlen[:]) + if err != nil { + return nil, fmt.Errorf("error parsing name length in member header(%q); %v", mhdr, err) + } + name := make([]byte, namlen) + if err := binary.Read(sr, binary.BigEndian, name); err != nil { + return nil, err + } + member.Name = string(name) + + fileoff := off + AR_HSZ_BIG + namlen + if fileoff&1 != 0 { + fileoff++ + if _, err := sr.Seek(1, os.SEEK_CUR); err != nil { + return nil, err + } + } + + // Read AIAFMAG string + var fmag [2]byte + if err := binary.Read(sr, binary.BigEndian, &fmag); err != nil { + return nil, err + } + if string(fmag[:]) != AIAFMAG { + return nil, fmt.Errorf("AIAFMAG not found after member header") + } + + fileoff += 2 // Add the two bytes of AIAFMAG + member.sr = io.NewSectionReader(sr, fileoff, size) + + if off == lastoff { + break + } + off, err = parseDecimalBytes(mhdr.Arnxtmem[:]) + if err != nil { + return nil, fmt.Errorf("error parsing offset of first member in archive header(%q); %v", fhdr, err) + } + + } + + return arch, nil + +} + +// GetFile returns the XCOFF file defined by member name. +// FIXME: This doesn't work if an archive has two members with the same +// name which can occur if a archive has both 32-bits and 64-bits files. 
+func (arch *Archive) GetFile(name string) (*File, error) { + for _, mem := range arch.Members { + if mem.Name == name { + return NewFile(mem.sr) + } + } + return nil, fmt.Errorf("unknown member %s in archive", name) + +} diff --git a/src/internal/xcoff/ar_test.go b/src/internal/xcoff/ar_test.go new file mode 100644 index 0000000000000..03c2fd1c5a21a --- /dev/null +++ b/src/internal/xcoff/ar_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xcoff + +import ( + "reflect" + "testing" +) + +type archiveTest struct { + file string + hdr ArchiveHeader + members []*MemberHeader + membersFileHeader []FileHeader +} + +var archTest = []archiveTest{ + { + "testdata/bigar-ppc64", + ArchiveHeader{AIAMAGBIG}, + []*MemberHeader{ + {"printbye.o", 836}, + {"printhello.o", 860}, + }, + []FileHeader{ + FileHeader{U64_TOCMAGIC}, + FileHeader{U64_TOCMAGIC}, + }, + }, + { + "testdata/bigar-empty", + ArchiveHeader{AIAMAGBIG}, + []*MemberHeader{}, + []FileHeader{}, + }, +} + +func TestOpenArchive(t *testing.T) { + for i := range archTest { + tt := &archTest[i] + arch, err := OpenArchive(tt.file) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(arch.ArchiveHeader, tt.hdr) { + t.Errorf("open archive %s:\n\thave %#v\n\twant %#v\n", tt.file, arch.ArchiveHeader, tt.hdr) + continue + } + + for i, mem := range arch.Members { + if i >= len(tt.members) { + break + } + have := &mem.MemberHeader + want := tt.members[i] + if !reflect.DeepEqual(have, want) { + t.Errorf("open %s, member %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want) + } + + f, err := arch.GetFile(mem.Name) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(f.FileHeader, tt.membersFileHeader[i]) { + t.Errorf("open %s, member file header %d:\n\thave %#v\n\twant %#v\n", tt.file, i, f.FileHeader, tt.membersFileHeader[i]) + } + } + tn := 
len(tt.members) + an := len(arch.Members) + if tn != an { + t.Errorf("open %s: len(Members) = %d, want %d", tt.file, an, tn) + } + + } +} diff --git a/src/internal/xcoff/file.go b/src/internal/xcoff/file.go new file mode 100644 index 0000000000000..0923b9fcf3a59 --- /dev/null +++ b/src/internal/xcoff/file.go @@ -0,0 +1,687 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xcoff implements access to XCOFF (Extended Common Object File Format) files. +package xcoff + +import ( + "debug/dwarf" + "encoding/binary" + "fmt" + "io" + "os" + "strings" +) + +// SectionHeader holds information about an XCOFF section header. +type SectionHeader struct { + Name string + VirtualAddress uint64 + Size uint64 + Type uint32 + Relptr uint64 + Nreloc uint32 +} + +type Section struct { + SectionHeader + Relocs []Reloc + io.ReaderAt + sr *io.SectionReader +} + +// AuxiliaryCSect holds information about an XCOFF symbol in an AUX_CSECT entry. +type AuxiliaryCSect struct { + Length int64 + StorageMappingClass int + SymbolType int +} + +// AuxiliaryFcn holds information about an XCOFF symbol in an AUX_FCN entry. +type AuxiliaryFcn struct { + Size int64 +} + +type Symbol struct { + Name string + Value uint64 + SectionNumber int + StorageClass int + AuxFcn AuxiliaryFcn + AuxCSect AuxiliaryCSect +} + +type Reloc struct { + VirtualAddress uint64 + Symbol *Symbol + Signed bool + InstructionFixed bool + Length uint8 + Type uint8 +} + +// ImportedSymbol holds information about an imported XCOFF symbol. +type ImportedSymbol struct { + Name string + Library string +} + +// FileHeader holds information about an XCOFF file header. +type FileHeader struct { + TargetMachine uint16 +} + +// A File represents an open XCOFF file. 
+type File struct { + FileHeader + Sections []*Section + Symbols []*Symbol + StringTable []byte + LibraryPaths []string + + closer io.Closer +} + +// Open opens the named file using os.Open and prepares it for use as an XCOFF binary. +func Open(name string) (*File, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + ff, err := NewFile(f) + if err != nil { + f.Close() + return nil, err + } + ff.closer = f + return ff, nil +} + +// Close closes the File. +// If the File was created using NewFile directly instead of Open, +// Close has no effect. +func (f *File) Close() error { + var err error + if f.closer != nil { + err = f.closer.Close() + f.closer = nil + } + return err +} + +// Section returns the first section with the given name, or nil if no such +// section exists. +// Xcoff have section's name limited to 8 bytes. Some sections like .gosymtab +// can be trunked but this method will still find them. +func (f *File) Section(name string) *Section { + for _, s := range f.Sections { + if s.Name == name || (len(name) > 8 && s.Name == name[:8]) { + return s + } + } + return nil +} + +// SectionByType returns the first section in f with the +// given type, or nil if there is no such section. +func (f *File) SectionByType(typ uint32) *Section { + for _, s := range f.Sections { + if s.Type == typ { + return s + } + } + return nil +} + +// cstring converts ASCII byte sequence b to string. +// It stops once it finds 0 or reaches end of b. +func cstring(b []byte) string { + var i int + for i = 0; i < len(b) && b[i] != 0; i++ { + } + return string(b[:i]) +} + +// getString extracts a string from an XCOFF string table. +func getString(st []byte, offset uint32) (string, bool) { + if offset < 4 || int(offset) >= len(st) { + return "", false + } + return cstring(st[offset:]), true +} + +// NewFile creates a new File for accessing an XCOFF binary in an underlying reader. 
+func NewFile(r io.ReaderAt) (*File, error) { + sr := io.NewSectionReader(r, 0, 1<<63-1) + // Read XCOFF target machine + var magic uint16 + if err := binary.Read(sr, binary.BigEndian, &magic); err != nil { + return nil, err + } + if magic != U802TOCMAGIC && magic != U64_TOCMAGIC { + return nil, fmt.Errorf("unrecognised XCOFF magic: 0x%x", magic) + } + + f := new(File) + f.TargetMachine = magic + + // Read XCOFF file header + if _, err := sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var nscns uint16 + var symptr uint64 + var nsyms int32 + var opthdr uint16 + var hdrsz int + switch f.TargetMachine { + case U802TOCMAGIC: + fhdr := new(FileHeader32) + if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil { + return nil, err + } + nscns = fhdr.Fnscns + symptr = uint64(fhdr.Fsymptr) + nsyms = fhdr.Fnsyms + opthdr = fhdr.Fopthdr + hdrsz = FILHSZ_32 + case U64_TOCMAGIC: + fhdr := new(FileHeader64) + if err := binary.Read(sr, binary.BigEndian, fhdr); err != nil { + return nil, err + } + nscns = fhdr.Fnscns + symptr = fhdr.Fsymptr + nsyms = fhdr.Fnsyms + opthdr = fhdr.Fopthdr + hdrsz = FILHSZ_64 + } + + if symptr == 0 || nsyms <= 0 { + return nil, fmt.Errorf("no symbol table") + } + + // Read string table (located right after symbol table). + offset := symptr + uint64(nsyms)*SYMESZ + if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil { + return nil, err + } + // The first 4 bytes contain the length (in bytes). 
+ var l uint32 + if err := binary.Read(sr, binary.BigEndian, &l); err != nil { + return nil, err + } + if l > 4 { + if _, err := sr.Seek(int64(offset), os.SEEK_SET); err != nil { + return nil, err + } + f.StringTable = make([]byte, l) + if _, err := io.ReadFull(sr, f.StringTable); err != nil { + return nil, err + } + } + + // Read section headers + if _, err := sr.Seek(int64(hdrsz)+int64(opthdr), os.SEEK_SET); err != nil { + return nil, err + } + f.Sections = make([]*Section, nscns) + for i := 0; i < int(nscns); i++ { + var scnptr uint64 + s := new(Section) + switch f.TargetMachine { + case U802TOCMAGIC: + shdr := new(SectionHeader32) + if err := binary.Read(sr, binary.BigEndian, shdr); err != nil { + return nil, err + } + s.Name = cstring(shdr.Sname[:]) + s.VirtualAddress = uint64(shdr.Svaddr) + s.Size = uint64(shdr.Ssize) + scnptr = uint64(shdr.Sscnptr) + s.Type = shdr.Sflags + s.Relptr = uint64(shdr.Srelptr) + s.Nreloc = uint32(shdr.Snreloc) + case U64_TOCMAGIC: + shdr := new(SectionHeader64) + if err := binary.Read(sr, binary.BigEndian, shdr); err != nil { + return nil, err + } + s.Name = cstring(shdr.Sname[:]) + s.VirtualAddress = shdr.Svaddr + s.Size = shdr.Ssize + scnptr = shdr.Sscnptr + s.Type = shdr.Sflags + s.Relptr = shdr.Srelptr + s.Nreloc = shdr.Snreloc + } + r2 := r + if scnptr == 0 { // .bss must have all 0s + r2 = zeroReaderAt{} + } + s.sr = io.NewSectionReader(r2, int64(scnptr), int64(s.Size)) + s.ReaderAt = s.sr + f.Sections[i] = s + } + + // Symbol map needed by relocation + var idxToSym = make(map[int]*Symbol) + + // Read symbol table + if _, err := sr.Seek(int64(symptr), os.SEEK_SET); err != nil { + return nil, err + } + f.Symbols = make([]*Symbol, 0) + for i := 0; i < int(nsyms); i++ { + var numaux int + var ok, needAuxFcn bool + sym := new(Symbol) + switch f.TargetMachine { + case U802TOCMAGIC: + se := new(SymEnt32) + if err := binary.Read(sr, binary.BigEndian, se); err != nil { + return nil, err + } + numaux = int(se.Nnumaux) + 
sym.SectionNumber = int(se.Nscnum) + sym.StorageClass = int(se.Nsclass) + sym.Value = uint64(se.Nvalue) + needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1 + zeroes := binary.BigEndian.Uint32(se.Nname[:4]) + if zeroes != 0 { + sym.Name = cstring(se.Nname[:]) + } else { + offset := binary.BigEndian.Uint32(se.Nname[4:]) + sym.Name, ok = getString(f.StringTable, offset) + if !ok { + goto skip + } + } + case U64_TOCMAGIC: + se := new(SymEnt64) + if err := binary.Read(sr, binary.BigEndian, se); err != nil { + return nil, err + } + numaux = int(se.Nnumaux) + sym.SectionNumber = int(se.Nscnum) + sym.StorageClass = int(se.Nsclass) + sym.Value = se.Nvalue + needAuxFcn = se.Ntype&SYM_TYPE_FUNC != 0 && numaux > 1 + sym.Name, ok = getString(f.StringTable, se.Noffset) + if !ok { + goto skip + } + } + if sym.StorageClass != C_EXT && sym.StorageClass != C_WEAKEXT && sym.StorageClass != C_HIDEXT { + goto skip + } + // Must have at least one csect auxiliary entry. + if numaux < 1 || i+numaux >= int(nsyms) { + goto skip + } + + if sym.SectionNumber > int(nscns) { + goto skip + } + if sym.SectionNumber == 0 { + sym.Value = 0 + } else { + sym.Value -= f.Sections[sym.SectionNumber-1].VirtualAddress + } + + idxToSym[i] = sym + + // If this symbol is a function, it must retrieve its size from + // its AUX_FCN entry. + // It can happend that a function symbol doesn't have any AUX_FCN. + // In this case, needAuxFcn is false and their size will be set to 0 + if needAuxFcn { + switch f.TargetMachine { + case U802TOCMAGIC: + aux := new(AuxFcn32) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxFcn.Size = int64(aux.Xfsize) + case U64_TOCMAGIC: + aux := new(AuxFcn64) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxFcn.Size = int64(aux.Xfsize) + } + } + + // Read csect auxiliary entry (by convention, it is the last). 
+ if !needAuxFcn { + if _, err := sr.Seek(int64(numaux-1)*SYMESZ, os.SEEK_CUR); err != nil { + return nil, err + } + } + i += numaux + numaux = 0 + switch f.TargetMachine { + case U802TOCMAGIC: + aux := new(AuxCSect32) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7) + sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas) + sym.AuxCSect.Length = int64(aux.Xscnlen) + case U64_TOCMAGIC: + aux := new(AuxCSect64) + if err := binary.Read(sr, binary.BigEndian, aux); err != nil { + return nil, err + } + sym.AuxCSect.SymbolType = int(aux.Xsmtyp & 0x7) + sym.AuxCSect.StorageMappingClass = int(aux.Xsmclas) + sym.AuxCSect.Length = int64(aux.Xscnlenhi)<<32 | int64(aux.Xscnlenlo) + } + f.Symbols = append(f.Symbols, sym) + skip: + i += numaux // Skip auxiliary entries + if _, err := sr.Seek(int64(numaux)*SYMESZ, os.SEEK_CUR); err != nil { + return nil, err + } + } + + // Read relocations + // Only for .data or .text section + for _, sect := range f.Sections { + if sect.Type != STYP_TEXT && sect.Type != STYP_DATA { + continue + } + sect.Relocs = make([]Reloc, sect.Nreloc) + if sect.Relptr == 0 { + continue + } + if _, err := sr.Seek(int64(sect.Relptr), os.SEEK_SET); err != nil { + return nil, err + } + for i := uint32(0); i < sect.Nreloc; i++ { + switch f.TargetMachine { + case U802TOCMAGIC: + rel := new(Reloc32) + if err := binary.Read(sr, binary.BigEndian, rel); err != nil { + return nil, err + } + sect.Relocs[i].VirtualAddress = uint64(rel.Rvaddr) + sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)] + sect.Relocs[i].Type = rel.Rtype + sect.Relocs[i].Length = rel.Rsize&0x3F + 1 + + if rel.Rsize&0x80 == 1 { + sect.Relocs[i].Signed = true + } + if rel.Rsize&0x40 == 1 { + sect.Relocs[i].InstructionFixed = true + } + + case U64_TOCMAGIC: + rel := new(Reloc64) + if err := binary.Read(sr, binary.BigEndian, rel); err != nil { + return nil, err + } + sect.Relocs[i].VirtualAddress = rel.Rvaddr + 
sect.Relocs[i].Symbol = idxToSym[int(rel.Rsymndx)] + sect.Relocs[i].Type = rel.Rtype + sect.Relocs[i].Length = rel.Rsize&0x3F + 1 + if rel.Rsize&0x80 == 1 { + sect.Relocs[i].Signed = true + } + if rel.Rsize&0x40 == 1 { + sect.Relocs[i].InstructionFixed = true + } + } + } + } + + return f, nil +} + +// zeroReaderAt is ReaderAt that reads 0s. +type zeroReaderAt struct{} + +// ReadAt writes len(p) 0s into p. +func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + for i := range p { + p[i] = 0 + } + return len(p), nil +} + +// Data reads and returns the contents of the XCOFF section s. +func (s *Section) Data() ([]byte, error) { + dat := make([]byte, s.sr.Size()) + n, err := s.sr.ReadAt(dat, 0) + if n == len(dat) { + err = nil + } + return dat[:n], err +} + +// CSect reads and returns the contents of a csect. +func (f *File) CSect(name string) []byte { + for _, sym := range f.Symbols { + if sym.Name == name && sym.AuxCSect.SymbolType == XTY_SD { + if i := sym.SectionNumber - 1; 0 <= i && i < len(f.Sections) { + s := f.Sections[i] + if sym.Value+uint64(sym.AuxCSect.Length) <= s.Size { + dat := make([]byte, sym.AuxCSect.Length) + _, err := s.sr.ReadAt(dat, int64(sym.Value)) + if err != nil { + return nil + } + return dat + } + } + break + } + } + return nil +} + +func (f *File) DWARF() (*dwarf.Data, error) { + // There are many other DWARF sections, but these + // are the ones the debug/dwarf package uses. + // Don't bother loading others. 
+ var subtypes = [...]uint32{SSUBTYP_DWABREV, SSUBTYP_DWINFO, SSUBTYP_DWLINE, SSUBTYP_DWRNGES, SSUBTYP_DWSTR} + var dat [len(subtypes)][]byte + for i, subtype := range subtypes { + s := f.SectionByType(STYP_DWARF | subtype) + if s != nil { + b, err := s.Data() + if err != nil && uint64(len(b)) < s.Size { + return nil, err + } + dat[i] = b + } + } + + abbrev, info, line, ranges, str := dat[0], dat[1], dat[2], dat[3], dat[4] + return dwarf.New(abbrev, nil, nil, info, line, nil, ranges, str) +} + +// readImportID returns the import file IDs stored inside the .loader section. +// Library name pattern is either path/base/member or base/member +func (f *File) readImportIDs(s *Section) ([]string, error) { + // Read loader header + if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var istlen uint32 + var nimpid int32 + var impoff uint64 + switch f.TargetMachine { + case U802TOCMAGIC: + lhdr := new(LoaderHeader32) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + istlen = lhdr.Listlen + nimpid = lhdr.Lnimpid + impoff = uint64(lhdr.Limpoff) + case U64_TOCMAGIC: + lhdr := new(LoaderHeader64) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + istlen = lhdr.Listlen + nimpid = lhdr.Lnimpid + impoff = lhdr.Limpoff + } + + // Read loader import file ID table + if _, err := s.sr.Seek(int64(impoff), os.SEEK_SET); err != nil { + return nil, err + } + table := make([]byte, istlen) + if _, err := io.ReadFull(s.sr, table); err != nil { + return nil, err + } + + offset := 0 + // First import file ID is the default LIBPATH value + libpath := cstring(table[offset:]) + f.LibraryPaths = strings.Split(libpath, ":") + offset += len(libpath) + 3 // 3 null bytes + all := make([]string, 0) + for i := 1; i < int(nimpid); i++ { + impidpath := cstring(table[offset:]) + offset += len(impidpath) + 1 + impidbase := cstring(table[offset:]) + offset += len(impidbase) + 1 + impidmem := 
cstring(table[offset:]) + offset += len(impidmem) + 1 + var path string + if len(impidpath) > 0 { + path = impidpath + "/" + impidbase + "/" + impidmem + } else { + path = impidbase + "/" + impidmem + } + all = append(all, path) + } + + return all, nil +} + +// ImportedSymbols returns the names of all symbols +// referred to by the binary f that are expected to be +// satisfied by other libraries at dynamic load time. +// It does not return weak symbols. +func (f *File) ImportedSymbols() ([]ImportedSymbol, error) { + s := f.SectionByType(STYP_LOADER) + if s == nil { + return nil, nil + } + // Read loader header + if _, err := s.sr.Seek(0, os.SEEK_SET); err != nil { + return nil, err + } + var stlen uint32 + var stoff uint64 + var nsyms int32 + var symoff uint64 + switch f.TargetMachine { + case U802TOCMAGIC: + lhdr := new(LoaderHeader32) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + stlen = lhdr.Lstlen + stoff = uint64(lhdr.Lstoff) + nsyms = lhdr.Lnsyms + symoff = LDHDRSZ_32 + case U64_TOCMAGIC: + lhdr := new(LoaderHeader64) + if err := binary.Read(s.sr, binary.BigEndian, lhdr); err != nil { + return nil, err + } + stlen = lhdr.Lstlen + stoff = lhdr.Lstoff + nsyms = lhdr.Lnsyms + symoff = lhdr.Lsymoff + } + + // Read loader section string table + if _, err := s.sr.Seek(int64(stoff), os.SEEK_SET); err != nil { + return nil, err + } + st := make([]byte, stlen) + if _, err := io.ReadFull(s.sr, st); err != nil { + return nil, err + } + + // Read imported libraries + libs, err := f.readImportIDs(s) + if err != nil { + return nil, err + } + + // Read loader symbol table + if _, err := s.sr.Seek(int64(symoff), os.SEEK_SET); err != nil { + return nil, err + } + all := make([]ImportedSymbol, 0) + for i := 0; i < int(nsyms); i++ { + var name string + var ifile int32 + var ok bool + switch f.TargetMachine { + case U802TOCMAGIC: + ldsym := new(LoaderSymbol32) + if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil { + 
return nil, err + } + if ldsym.Lsmtype&0x40 == 0 { + continue // Imported symbols only + } + zeroes := binary.BigEndian.Uint32(ldsym.Lname[:4]) + if zeroes != 0 { + name = cstring(ldsym.Lname[:]) + } else { + offset := binary.BigEndian.Uint32(ldsym.Lname[4:]) + name, ok = getString(st, offset) + if !ok { + continue + } + } + ifile = ldsym.Lifile + case U64_TOCMAGIC: + ldsym := new(LoaderSymbol64) + if err := binary.Read(s.sr, binary.BigEndian, ldsym); err != nil { + return nil, err + } + if ldsym.Lsmtype&0x40 == 0 { + continue // Imported symbols only + } + name, ok = getString(st, ldsym.Loffset) + if !ok { + continue + } + ifile = ldsym.Lifile + } + var sym ImportedSymbol + sym.Name = name + if ifile >= 1 && int(ifile) <= len(libs) { + sym.Library = libs[ifile-1] + } + all = append(all, sym) + } + + return all, nil +} + +// ImportedLibraries returns the names of all libraries +// referred to by the binary f that are expected to be +// linked with the binary at dynamic link time. +func (f *File) ImportedLibraries() ([]string, error) { + s := f.SectionByType(STYP_LOADER) + if s == nil { + return nil, nil + } + all, err := f.readImportIDs(s) + return all, err +} diff --git a/src/internal/xcoff/file_test.go b/src/internal/xcoff/file_test.go new file mode 100644 index 0000000000000..a6722e9453f2f --- /dev/null +++ b/src/internal/xcoff/file_test.go @@ -0,0 +1,102 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xcoff + +import ( + "reflect" + "testing" +) + +type fileTest struct { + file string + hdr FileHeader + sections []*SectionHeader + needed []string +} + +var fileTests = []fileTest{ + { + "testdata/gcc-ppc32-aix-dwarf2-exec", + FileHeader{U802TOCMAGIC}, + []*SectionHeader{ + {".text", 0x10000290, 0x00000bbd, STYP_TEXT, 0x7ae6, 0x36}, + {".data", 0x20000e4d, 0x00000437, STYP_DATA, 0x7d02, 0x2b}, + {".bss", 0x20001284, 0x0000021c, STYP_BSS, 0, 0}, + {".loader", 0x00000000, 0x000004b3, STYP_LOADER, 0, 0}, + {".dwline", 0x00000000, 0x000000df, STYP_DWARF | SSUBTYP_DWLINE, 0x7eb0, 0x7}, + {".dwinfo", 0x00000000, 0x00000314, STYP_DWARF | SSUBTYP_DWINFO, 0x7ef6, 0xa}, + {".dwabrev", 0x00000000, 0x000000d6, STYP_DWARF | SSUBTYP_DWABREV, 0, 0}, + {".dwarnge", 0x00000000, 0x00000020, STYP_DWARF | SSUBTYP_DWARNGE, 0x7f5a, 0x2}, + {".dwloc", 0x00000000, 0x00000074, STYP_DWARF | SSUBTYP_DWLOC, 0, 0}, + {".debug", 0x00000000, 0x00005e4f, STYP_DEBUG, 0, 0}, + }, + []string{"libc.a/shr.o"}, + }, + { + "testdata/gcc-ppc64-aix-dwarf2-exec", + FileHeader{U64_TOCMAGIC}, + []*SectionHeader{ + {".text", 0x10000480, 0x00000afd, STYP_TEXT, 0x8322, 0x34}, + {".data", 0x20000f7d, 0x000002f3, STYP_DATA, 0x85fa, 0x25}, + {".bss", 0x20001270, 0x00000428, STYP_BSS, 0, 0}, + {".loader", 0x00000000, 0x00000535, STYP_LOADER, 0, 0}, + {".dwline", 0x00000000, 0x000000b4, STYP_DWARF | SSUBTYP_DWLINE, 0x8800, 0x4}, + {".dwinfo", 0x00000000, 0x0000036a, STYP_DWARF | SSUBTYP_DWINFO, 0x8838, 0x7}, + {".dwabrev", 0x00000000, 0x000000b5, STYP_DWARF | SSUBTYP_DWABREV, 0, 0}, + {".dwarnge", 0x00000000, 0x00000040, STYP_DWARF | SSUBTYP_DWARNGE, 0x889a, 0x2}, + {".dwloc", 0x00000000, 0x00000062, STYP_DWARF | SSUBTYP_DWLOC, 0, 0}, + {".debug", 0x00000000, 0x00006605, STYP_DEBUG, 0, 0}, + }, + []string{"libc.a/shr_64.o"}, + }, +} + +func TestOpen(t *testing.T) { + for i := range fileTests { + tt := &fileTests[i] + + f, err := Open(tt.file) + if err != nil { + t.Error(err) + continue + } + if 
!reflect.DeepEqual(f.FileHeader, tt.hdr) { + t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr) + continue + } + + for i, sh := range f.Sections { + if i >= len(tt.sections) { + break + } + have := &sh.SectionHeader + want := tt.sections[i] + if !reflect.DeepEqual(have, want) { + t.Errorf("open %s, section %d:\n\thave %#v\n\twant %#v\n", tt.file, i, have, want) + } + } + tn := len(tt.sections) + fn := len(f.Sections) + if tn != fn { + t.Errorf("open %s: len(Sections) = %d, want %d", tt.file, fn, tn) + } + tl := tt.needed + fl, err := f.ImportedLibraries() + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(tl, fl) { + t.Errorf("open %s: loader import = %v, want %v", tt.file, tl, fl) + } + } +} + +func TestOpenFailure(t *testing.T) { + filename := "file.go" // not an XCOFF object file + _, err := Open(filename) // don't crash + if err == nil { + t.Errorf("open %s: succeeded unexpectedly", filename) + } +} diff --git a/src/internal/xcoff/testdata/bigar-empty b/src/internal/xcoff/testdata/bigar-empty new file mode 100644 index 0000000000000..851ccc5123694 --- /dev/null +++ b/src/internal/xcoff/testdata/bigar-empty @@ -0,0 +1,2 @@ + +0 0 0 0 0 0 \ No newline at end of file diff --git a/src/internal/xcoff/testdata/bigar-ppc64 b/src/internal/xcoff/testdata/bigar-ppc64 new file mode 100644 index 0000000000000..a8d4979d121f7 Binary files /dev/null and b/src/internal/xcoff/testdata/bigar-ppc64 differ diff --git a/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec b/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec new file mode 100644 index 0000000000000..810e21a0dfc78 Binary files /dev/null and b/src/internal/xcoff/testdata/gcc-ppc32-aix-dwarf2-exec differ diff --git a/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec b/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec new file mode 100644 index 0000000000000..707d01ebd4348 Binary files /dev/null and b/src/internal/xcoff/testdata/gcc-ppc64-aix-dwarf2-exec differ diff --git 
a/src/internal/xcoff/testdata/hello.c b/src/internal/xcoff/testdata/hello.c new file mode 100644 index 0000000000000..34d9ee79234ef --- /dev/null +++ b/src/internal/xcoff/testdata/hello.c @@ -0,0 +1,7 @@ +#include + +void +main(int argc, char *argv[]) +{ + printf("hello, world\n"); +} diff --git a/src/internal/xcoff/testdata/printbye.c b/src/internal/xcoff/testdata/printbye.c new file mode 100644 index 0000000000000..904507998ab1b --- /dev/null +++ b/src/internal/xcoff/testdata/printbye.c @@ -0,0 +1,5 @@ +#include + +void printbye(){ + printf("Goodbye\n"); +} diff --git a/src/internal/xcoff/testdata/printhello.c b/src/internal/xcoff/testdata/printhello.c new file mode 100644 index 0000000000000..182aa09728abc --- /dev/null +++ b/src/internal/xcoff/testdata/printhello.c @@ -0,0 +1,5 @@ +#include + +void printhello(){ + printf("Helloworld\n"); +} diff --git a/src/internal/xcoff/xcoff.go b/src/internal/xcoff/xcoff.go new file mode 100644 index 0000000000000..f8465d728970b --- /dev/null +++ b/src/internal/xcoff/xcoff.go @@ -0,0 +1,367 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xcoff + +// File Header. 
+type FileHeader32 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint32 // Byte offset to symbol table start + Fnsyms int32 // Number of entries in symbol table + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags +} + +type FileHeader64 struct { + Fmagic uint16 // Target machine + Fnscns uint16 // Number of sections + Ftimedat int32 // Time and date of file creation + Fsymptr uint64 // Byte offset to symbol table start + Fopthdr uint16 // Number of bytes in optional header + Fflags uint16 // Flags + Fnsyms int32 // Number of entries in symbol table +} + +const ( + FILHSZ_32 = 20 + FILHSZ_64 = 24 +) +const ( + U802TOCMAGIC = 0737 // AIX 32-bit XCOFF + U64_TOCMAGIC = 0767 // AIX 64-bit XCOFF +) + +// Flags that describe the type of the object file. +const ( + F_RELFLG = 0x0001 + F_EXEC = 0x0002 + F_LNNO = 0x0004 + F_FDPR_PROF = 0x0010 + F_FDPR_OPTI = 0x0020 + F_DSA = 0x0040 + F_VARPG = 0x0100 + F_DYNLOAD = 0x1000 + F_SHROBJ = 0x2000 + F_LOADONLY = 0x4000 +) + +// Section Header. 
+type SectionHeader32 struct { + Sname [8]byte // Section name + Spaddr uint32 // Physical address + Svaddr uint32 // Virtual address + Ssize uint32 // Section size + Sscnptr uint32 // Offset in file to raw data for section + Srelptr uint32 // Offset in file to relocation entries for section + Slnnoptr uint32 // Offset in file to line number entries for section + Snreloc uint16 // Number of relocation entries + Snlnno uint16 // Number of line number entries + Sflags uint32 // Flags to define the section type +} + +type SectionHeader64 struct { + Sname [8]byte // Section name + Spaddr uint64 // Physical address + Svaddr uint64 // Virtual address + Ssize uint64 // Section size + Sscnptr uint64 // Offset in file to raw data for section + Srelptr uint64 // Offset in file to relocation entries for section + Slnnoptr uint64 // Offset in file to line number entries for section + Snreloc uint32 // Number of relocation entries + Snlnno uint32 // Number of line number entries + Sflags uint32 // Flags to define the section type + Spad uint32 // Needs to be 72 bytes long +} + +// Flags defining the section type. 
+const ( + STYP_DWARF = 0x0010 + STYP_TEXT = 0x0020 + STYP_DATA = 0x0040 + STYP_BSS = 0x0080 + STYP_EXCEPT = 0x0100 + STYP_INFO = 0x0200 + STYP_TDATA = 0x0400 + STYP_TBSS = 0x0800 + STYP_LOADER = 0x1000 + STYP_DEBUG = 0x2000 + STYP_TYPCHK = 0x4000 + STYP_OVRFLO = 0x8000 +) +const ( + SSUBTYP_DWINFO = 0x10000 // DWARF info section + SSUBTYP_DWLINE = 0x20000 // DWARF line-number section + SSUBTYP_DWPBNMS = 0x30000 // DWARF public names section + SSUBTYP_DWPBTYP = 0x40000 // DWARF public types section + SSUBTYP_DWARNGE = 0x50000 // DWARF aranges section + SSUBTYP_DWABREV = 0x60000 // DWARF abbreviation section + SSUBTYP_DWSTR = 0x70000 // DWARF strings section + SSUBTYP_DWRNGES = 0x80000 // DWARF ranges section + SSUBTYP_DWLOC = 0x90000 // DWARF location lists section + SSUBTYP_DWFRAME = 0xA0000 // DWARF frames section + SSUBTYP_DWMAC = 0xB0000 // DWARF macros section +) + +// Symbol Table Entry. +type SymEnt32 struct { + Nname [8]byte // Symbol name + Nvalue uint32 // Symbol value + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass int8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +type SymEnt64 struct { + Nvalue uint64 // Symbol value + Noffset uint32 // Offset of the name in string table or .debug section + Nscnum int16 // Section number of symbol + Ntype uint16 // Basic and derived type specification + Nsclass int8 // Storage class of symbol + Nnumaux int8 // Number of auxiliary entries +} + +const SYMESZ = 18 + +const ( + // Nscnum + N_DEBUG = -2 + N_ABS = -1 + N_UNDEF = 0 + + //Ntype + SYM_V_INTERNAL = 0x1000 + SYM_V_HIDDEN = 0x2000 + SYM_V_PROTECTED = 0x3000 + SYM_V_EXPORTED = 0x4000 + SYM_TYPE_FUNC = 0x0020 // is function +) + +// Storage Class. 
+const ( + C_NULL = 0 // Symbol table entry marked for deletion + C_EXT = 2 // External symbol + C_STAT = 3 // Static symbol + C_BLOCK = 100 // Beginning or end of inner block + C_FCN = 101 // Beginning or end of function + C_FILE = 103 // Source file name and compiler information + C_HIDEXT = 107 // Unnamed external symbol + C_BINCL = 108 // Beginning of include file + C_EINCL = 109 // End of include file + C_WEAKEXT = 111 // Weak external symbol + C_DWARF = 112 // DWARF symbol + C_GSYM = 128 // Global variable + C_LSYM = 129 // Automatic variable allocated on stack + C_PSYM = 130 // Argument to subroutine allocated on stack + C_RSYM = 131 // Register variable + C_RPSYM = 132 // Argument to function or procedure stored in register + C_STSYM = 133 // Statically allocated symbol + C_BCOMM = 135 // Beginning of common block + C_ECOML = 136 // Local member of common block + C_ECOMM = 137 // End of common block + C_DECL = 140 // Declaration of object + C_ENTRY = 141 // Alternate entry + C_FUN = 142 // Function or procedure + C_BSTAT = 143 // Beginning of static block + C_ESTAT = 144 // End of static block + C_GTLS = 145 // Global thread-local variable + C_STTLS = 146 // Static thread-local variable +) + +// File Auxiliary Entry +type AuxFile64 struct { + Xfname [8]byte // Name or offset inside string table + Xftype uint8 // Source file string type + Xauxtype uint8 // Type of auxiliary entry +} + +// Function Auxiliary Entry +type AuxFcn32 struct { + Xexptr uint32 // File offset to exception table entry + Xfsize uint32 // Size of function in bytes + Xlnnoptr uint32 // File pointer to line number + Xendndx uint32 // Symbol table index of next entry + Xpad uint16 // Unused +} +type AuxFcn64 struct { + Xlnnoptr uint64 // File pointer to line number + Xfsize uint32 // Size of function in bytes + Xendndx uint32 // Symbol table index of next entry + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +type AuxSect64 struct { + Xscnlen uint64 // section 
length + Xnreloc uint64 // Num RLDs + pad uint8 + Xauxtype uint8 // Type of auxiliary entry +} + +// csect Auxiliary Entry. +type AuxCSect32 struct { + Xscnlen int32 // Length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xstab uint32 // Reserved + Xsnstab uint16 // Reserved +} + +type AuxCSect64 struct { + Xscnlenlo uint32 // Lower 4 bytes of length or symbol table index + Xparmhash uint32 // Offset of parameter type-check string + Xsnhash uint16 // .typchk section number + Xsmtyp uint8 // Symbol alignment and type + Xsmclas uint8 // Storage-mapping class + Xscnlenhi int32 // Upper 4 bytes of length or symbol table index + Xpad uint8 // Unused + Xauxtype uint8 // Type of auxiliary entry +} + +// Auxiliary type +const ( + _AUX_EXCEPT = 255 + _AUX_FCN = 254 + _AUX_SYM = 253 + _AUX_FILE = 252 + _AUX_CSECT = 251 + _AUX_SECT = 250 +) + +// Symbol type field. +const ( + XTY_ER = 0 // External reference + XTY_SD = 1 // Section definition + XTY_LD = 2 // Label definition + XTY_CM = 3 // Common csect definition +) + +// Defines for File auxiliary definitions: x_ftype field of x_file +const ( + XFT_FN = 0 // Source File Name + XFT_CT = 1 // Compile Time Stamp + XFT_CV = 2 // Compiler Version Number + XFT_CD = 128 // Compiler Defined Information +) + +// Storage-mapping class. 
+const ( + XMC_PR = 0 // Program code + XMC_RO = 1 // Read-only constant + XMC_DB = 2 // Debug dictionary table + XMC_TC = 3 // TOC entry + XMC_UA = 4 // Unclassified + XMC_RW = 5 // Read/Write data + XMC_GL = 6 // Global linkage + XMC_XO = 7 // Extended operation + XMC_SV = 8 // 32-bit supervisor call descriptor + XMC_BS = 9 // BSS class + XMC_DS = 10 // Function descriptor + XMC_UC = 11 // Unnamed FORTRAN common + XMC_TC0 = 15 // TOC anchor + XMC_TD = 16 // Scalar data entry in the TOC + XMC_SV64 = 17 // 64-bit supervisor call descriptor + XMC_SV3264 = 18 // Supervisor call descriptor for both 32-bit and 64-bit + XMC_TL = 20 // Read/Write thread-local data + XMC_UL = 21 // Read/Write thread-local data (.tbss) + XMC_TE = 22 // TOC entry +) + +// Loader Header. +type LoaderHeader32 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Limpoff uint32 // Offset to start of import file IDs + Lstlen uint32 // Length of string table + Lstoff uint32 // Offset to start of string table +} + +type LoaderHeader64 struct { + Lversion int32 // Loader section version number + Lnsyms int32 // Number of symbol table entries + Lnreloc int32 // Number of relocation table entries + Listlen uint32 // Length of import file ID string table + Lnimpid int32 // Number of import file IDs + Lstlen uint32 // Length of string table + Limpoff uint64 // Offset to start of import file IDs + Lstoff uint64 // Offset to start of string table + Lsymoff uint64 // Offset to start of symbol table + Lrldoff uint64 // Offset to start of relocation entries +} + +const ( + LDHDRSZ_32 = 32 + LDHDRSZ_64 = 56 +) + +// Loader Symbol. 
+type LoaderSymbol32 struct { + Lname [8]byte // Symbol name or byte offset into string table + Lvalue uint32 // Address field + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type LoaderSymbol64 struct { + Lvalue uint64 // Address field + Loffset uint32 // Byte offset into string table of symbol name + Lscnum int16 // Section number containing symbol + Lsmtype int8 // Symbol type, export, import flags + Lsmclas int8 // Symbol storage class + Lifile int32 // Import file ID; ordinal of import file IDs + Lparm uint32 // Parameter type-check field +} + +type Reloc32 struct { + Rvaddr uint32 // (virtual) address of reference + Rsymndx uint32 // Index into symbol table + Rsize uint8 // Sign and reloc bit len + Rtype uint8 // Toc relocation type +} + +type Reloc64 struct { + Rvaddr uint64 // (virtual) address of reference + Rsymndx uint32 // Index into symbol table + Rsize uint8 // Sign and reloc bit len + Rtype uint8 // Toc relocation type +} + +const ( + R_POS = 0x00 // A(sym) Positive Relocation + R_NEG = 0x01 // -A(sym) Negative Relocation + R_REL = 0x02 // A(sym-*) Relative to self + R_TOC = 0x03 // A(sym-TOC) Relative to TOC + R_TRL = 0x12 // A(sym-TOC) TOC Relative indirect load. + + R_TRLA = 0x13 // A(sym-TOC) TOC Rel load address. modifiable inst + R_GL = 0x05 // A(external TOC of sym) Global Linkage + R_TCL = 0x06 // A(local TOC of sym) Local object TOC address + R_RL = 0x0C // A(sym) Pos indirect load. modifiable instruction + R_RLA = 0x0D // A(sym) Pos Load Address. modifiable instruction + R_REF = 0x0F // AL0(sym) Non relocating ref. No garbage collect + R_BA = 0x08 // A(sym) Branch absolute. Cannot modify instruction + R_RBA = 0x18 // A(sym) Branch absolute. modifiable instruction + R_BR = 0x0A // A(sym-*) Branch rel to self. 
non modifiable + R_RBR = 0x1A // A(sym-*) Branch rel to self. modifiable instr + + R_TLS = 0x20 // General-dynamic reference to TLS symbol + R_TLS_IE = 0x21 // Initial-exec reference to TLS symbol + R_TLS_LD = 0x22 // Local-dynamic reference to TLS symbol + R_TLS_LE = 0x23 // Local-exec reference to TLS symbol + R_TLSM = 0x24 // Module reference to TLS symbol + R_TLSML = 0x25 // Module reference to local (own) module + + R_TOCU = 0x30 // Relative to TOC - high order bits + R_TOCL = 0x31 // Relative to TOC - low order bits +) diff --git a/src/io/io.go b/src/io/io.go index 72b75813a5e34..2010770e6a428 100644 --- a/src/io/io.go +++ b/src/io/io.go @@ -278,16 +278,16 @@ type RuneScanner interface { UnreadRune() error } -// stringWriter is the interface that wraps the WriteString method. -type stringWriter interface { +// StringWriter is the interface that wraps the WriteString method. +type StringWriter interface { WriteString(s string) (n int, err error) } // WriteString writes the contents of the string s to w, which accepts a slice of bytes. -// If w implements a WriteString method, it is invoked directly. +// If w implements StringWriter, its WriteString method is invoked directly. // Otherwise, w.Write is called exactly once. func WriteString(w Writer, s string) (n int, err error) { - if sw, ok := w.(stringWriter); ok { + if sw, ok := w.(StringWriter); ok { return sw.WriteString(s) } return w.Write([]byte(s)) diff --git a/src/io/ioutil/example_test.go b/src/io/ioutil/example_test.go index 0b24f672eecdb..a7d340b77fa59 100644 --- a/src/io/ioutil/example_test.go +++ b/src/io/ioutil/example_test.go @@ -99,3 +99,11 @@ func ExampleReadFile() { // Output: // File contents: Hello, Gophers! 
} + +func ExampleWriteFile() { + message := []byte("Hello, Gophers!") + err := ioutil.WriteFile("testdata/hello", message, 0644) + if err != nil { + log.Fatal(err) + } +} diff --git a/src/io/multi.go b/src/io/multi.go index 65f99099ca7ea..24ee71e4ca65b 100644 --- a/src/io/multi.go +++ b/src/io/multi.go @@ -69,12 +69,12 @@ func (t *multiWriter) Write(p []byte) (n int, err error) { return len(p), nil } -var _ stringWriter = (*multiWriter)(nil) +var _ StringWriter = (*multiWriter)(nil) func (t *multiWriter) WriteString(s string) (n int, err error) { var p []byte // lazily initialized if/when needed for _, w := range t.writers { - if sw, ok := w.(stringWriter); ok { + if sw, ok := w.(StringWriter); ok { n, err = sw.WriteString(s) } else { if p == nil { diff --git a/src/log/log.go b/src/log/log.go index 2b7c57fdfe5ff..527f853438f08 100644 --- a/src/log/log.go +++ b/src/log/log.go @@ -254,6 +254,13 @@ func (l *Logger) SetPrefix(prefix string) { l.prefix = prefix } +// Writer returns the output destination for the logger. +func (l *Logger) Writer() io.Writer { + l.mu.Lock() + defer l.mu.Unlock() + return l.out +} + // SetOutput sets the output destination for the standard logger. func SetOutput(w io.Writer) { std.mu.Lock() diff --git a/src/make.bash b/src/make.bash index 78882d98341f6..13497eb039d89 100755 --- a/src/make.bash +++ b/src/make.bash @@ -64,6 +64,7 @@ set -e unset GOBIN # Issue 14340 unset GOFLAGS +unset GO111MODULE if [ ! -f run.bash ]; then echo 'make.bash must be run from $GOROOT/src' 1>&2 diff --git a/src/make.bat b/src/make.bat index 2e718334a2e47..69275e22568a2 100644 --- a/src/make.bat +++ b/src/make.bat @@ -48,6 +48,7 @@ setlocal set GOBUILDFAIL=0 set GOFLAGS= +set GO111MODULE= if exist make.bat goto ok echo Must run make.bat from Go src directory. 
@@ -77,10 +78,10 @@ set GOROOT=%GOROOT_BOOTSTRAP% set GOOS= set GOARCH= set GOBIN= -"%GOROOT_BOOTSTRAP%\bin\go" build -o cmd\dist\dist.exe .\cmd\dist +"%GOROOT_BOOTSTRAP%\bin\go.exe" build -o cmd\dist\dist.exe .\cmd\dist endlocal if errorlevel 1 goto fail -.\cmd\dist\dist env -w -p >env.bat +.\cmd\dist\dist.exe env -w -p >env.bat if errorlevel 1 goto fail call env.bat del env.bat @@ -104,7 +105,7 @@ if x%4==x--no-banner set buildall=%buildall% --no-banner :: Run dist bootstrap to complete make.bash. :: Bootstrap installs a proper cmd/dist, built with the new toolchain. :: Throw ours, built with Go 1.4, away after bootstrap. -.\cmd\dist\dist bootstrap %vflag% %buildall% +.\cmd\dist\dist.exe bootstrap %vflag% %buildall% if errorlevel 1 goto fail del .\cmd\dist\dist.exe goto end diff --git a/src/make.rc b/src/make.rc index a97dfc8a0105f..5f888c19fd7bd 100755 --- a/src/make.rc +++ b/src/make.rc @@ -48,6 +48,7 @@ if(~ $1 -v) { } GOFLAGS=() +GO111MODULE=() GOROOT = `{cd .. && pwd} if(! ~ $#GOROOT_BOOTSTRAP 1) GOROOT_BOOTSTRAP = $home/go1.4 diff --git a/src/math/acos_s390x.s b/src/math/acos_s390x.s index 306f45a406672..d2288b8cd8e6b 100644 --- a/src/math/acos_s390x.s +++ b/src/math/acos_s390x.s @@ -42,7 +42,7 @@ GLOBL ·acosrodataL13<> + 0(SB), RODATA, $200 TEXT ·acosAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·acosrodataL13<>+0(SB), R9 - WORD $0xB3CD00C0 //lgdr %r12, %f0 + LGDR F0, R12 FMOVD F0, F10 SRAD $32, R12 WORD $0xC0293FE6 //iilf %r2,1072079005 diff --git a/src/math/acosh_s390x.s b/src/math/acosh_s390x.s index 3575ed6394b7d..87a5d00154dfe 100644 --- a/src/math/acosh_s390x.s +++ b/src/math/acosh_s390x.s @@ -53,7 +53,7 @@ GLOBL ·acoshtab2068<> + 0(SB), RODATA, $128 TEXT ·acoshAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·acoshrodataL11<>+0(SB), R9 - WORD $0xB3CD0010 //lgdr %r1, %f0 + LGDR F0, R1 WORD $0xC0295FEF //iilf %r2,1609564159 BYTE $0xFF BYTE $0xFF @@ -85,7 +85,7 @@ L2: WORD $0xC0398006 //iilf %r3,2147909631 BYTE $0x7F BYTE $0xFF - WORD $0xB3CD0050 
//lgdr %r5, %f0 + LGDR F0, R5 SRAD $32, R5 MOVH $0x0, R1 SUBW R5, R3 @@ -105,7 +105,7 @@ L2: SRAW $8, R2, R2 ORW $0x45000000, R2 L5: - WORD $0xB3C10001 //ldgr %f0,%r1 + LDGR R1, F0 FMOVD 104(R9), F2 FMADD F8, F0, F2 FMOVD 96(R9), F4 @@ -153,7 +153,7 @@ L4: WORD $0xC0398006 //iilf %r3,2147909631 BYTE $0x7F BYTE $0xFF - WORD $0xB3CD0050 //lgdr %r5, %f0 + LGDR F0, R5 SRAD $32, R5 MOVH $0x0, R1 SUBW R5, R3 diff --git a/src/math/all_test.go b/src/math/all_test.go index bcc20a3917680..ed42941780973 100644 --- a/src/math/all_test.go +++ b/src/math/all_test.go @@ -128,7 +128,7 @@ var cbrt = []float64{ var ceil = []float64{ 5.0000000000000000e+00, 8.0000000000000000e+00, - 0.0000000000000000e+00, + Copysign(0, -1), -5.0000000000000000e+00, 1.0000000000000000e+01, 3.0000000000000000e+00, @@ -175,6 +175,7 @@ var cosLarge = []float64{ -2.51772931436786954751e-01, -7.3924135157173099849e-01, } + var cosh = []float64{ 7.2668796942212842775517446e+01, 1.1479413465659254502011135e+03, @@ -644,7 +645,7 @@ var tanh = []float64{ var trunc = []float64{ 4.0000000000000000e+00, 7.0000000000000000e+00, - -0.0000000000000000e+00, + Copysign(0, -1), -5.0000000000000000e+00, 9.0000000000000000e+00, 2.0000000000000000e+00, @@ -1527,6 +1528,7 @@ var vflog1pSC = []float64{ 0, Inf(1), NaN(), + 4503599627370496.5, // Issue #29488 } var log1pSC = []float64{ NaN(), @@ -1536,6 +1538,7 @@ var log1pSC = []float64{ 0, Inf(1), NaN(), + 36.04365338911715, // Issue #29488 } var vfmodfSC = []float64{ @@ -2158,7 +2161,7 @@ func TestCbrt(t *testing.T) { func TestCeil(t *testing.T) { for i := 0; i < len(vf); i++ { - if f := Ceil(vf[i]); ceil[i] != f { + if f := Ceil(vf[i]); !alike(ceil[i], f) { t.Errorf("Ceil(%g) = %g, want %g", vf[i], f, ceil[i]) } } @@ -2385,7 +2388,7 @@ func TestDim(t *testing.T) { func TestFloor(t *testing.T) { for i := 0; i < len(vf); i++ { - if f := Floor(vf[i]); floor[i] != f { + if f := Floor(vf[i]); !alike(floor[i], f) { t.Errorf("Floor(%g) = %g, want %g", vf[i], f, floor[i]) } } @@ 
-2916,7 +2919,7 @@ func TestTanh(t *testing.T) { func TestTrunc(t *testing.T) { for i := 0; i < len(vf); i++ { - if f := Trunc(vf[i]); trunc[i] != f { + if f := Trunc(vf[i]); !alike(trunc[i], f) { t.Errorf("Trunc(%g) = %g, want %g", vf[i], f, trunc[i]) } } @@ -3026,6 +3029,41 @@ func TestLargeTan(t *testing.T) { } } +// Check that trigReduce matches the standard reduction results for input values +// below reduceThreshold. +func TestTrigReduce(t *testing.T) { + inputs := make([]float64, len(vf)) + // all of the standard inputs + copy(inputs, vf) + // all of the large inputs + large := float64(100000 * Pi) + for _, v := range vf { + inputs = append(inputs, v+large) + } + // Also test some special inputs, Pi and right below the reduceThreshold + inputs = append(inputs, Pi, Nextafter(ReduceThreshold, 0)) + for _, x := range inputs { + // reduce the value to compare + j, z := TrigReduce(x) + xred := float64(j)*(Pi/4) + z + + if f, fred := Sin(x), Sin(xred); !close(f, fred) { + t.Errorf("Sin(trigReduce(%g)) != Sin(%g), got %g, want %g", x, x, fred, f) + } + if f, fred := Cos(x), Cos(xred); !close(f, fred) { + t.Errorf("Cos(trigReduce(%g)) != Cos(%g), got %g, want %g", x, x, fred, f) + } + if f, fred := Tan(x), Tan(xred); !close(f, fred) { + t.Errorf(" Tan(trigReduce(%g)) != Tan(%g), got %g, want %g", x, x, fred, f) + } + f, g := Sincos(x) + fred, gred := Sincos(xred) + if !close(f, fred) || !close(g, gred) { + t.Errorf(" Sincos(trigReduce(%g)) != Sincos(%g), got %g, %g, want %g, %g", x, x, fred, gred, f, g) + } + } +} + // Check that math constants are accepted by compiler // and have right value (assumes strconv.ParseFloat works). 
// https://golang.org/issue/201 @@ -3635,3 +3673,41 @@ func BenchmarkYn(b *testing.B) { } GlobalF = x } + +func BenchmarkFloat64bits(b *testing.B) { + y := uint64(0) + for i := 0; i < b.N; i++ { + y = Float64bits(roundNeg) + } + GlobalI = int(y) +} + +var roundUint64 = uint64(5) + +func BenchmarkFloat64frombits(b *testing.B) { + x := 0.0 + for i := 0; i < b.N; i++ { + x = Float64frombits(roundUint64) + } + GlobalF = x +} + +var roundFloat32 = float32(-2.5) + +func BenchmarkFloat32bits(b *testing.B) { + y := uint32(0) + for i := 0; i < b.N; i++ { + y = Float32bits(roundFloat32) + } + GlobalI = int(y) +} + +var roundUint32 = uint32(5) + +func BenchmarkFloat32frombits(b *testing.B) { + x := float32(0.0) + for i := 0; i < b.N; i++ { + x = Float32frombits(roundUint32) + } + GlobalF = float64(x) +} diff --git a/src/math/asin_s390x.s b/src/math/asin_s390x.s index fd5ab040a5aeb..dc54d053f1cab 100644 --- a/src/math/asin_s390x.s +++ b/src/math/asin_s390x.s @@ -46,7 +46,7 @@ GLOBL ·asinrodataL15<> + 0(SB), RODATA, $224 TEXT ·asinAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·asinrodataL15<>+0(SB), R9 - WORD $0xB3CD0070 //lgdr %r7, %f0 + LGDR F0, R7 FMOVD F0, F8 SRAD $32, R7 WORD $0xC0193FE6 //iilf %r1,1072079005 diff --git a/src/math/asinh_s390x.s b/src/math/asinh_s390x.s index a9cee342d30f0..a3680c661fbd8 100644 --- a/src/math/asinh_s390x.s +++ b/src/math/asinh_s390x.s @@ -64,7 +64,7 @@ GLOBL ·asinhtab2080<> + 0(SB), RODATA, $128 TEXT ·asinhAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·asinhrodataL18<>+0(SB), R9 - WORD $0xB3CD00C0 //lgdr %r12, %f0 + LGDR F0, R12 WORD $0xC0293FDF //iilf %r2,1071644671 BYTE $0xFF BYTE $0xFF @@ -93,7 +93,7 @@ L9: WORD $0xC0398006 //iilf %r3,2147909631 BYTE $0x7F BYTE $0xFF - WORD $0xB3CD0050 //lgdr %r5, %f0 + LGDR F0, R5 SRAD $32, R5 MOVH $0x0, R2 SUBW R5, R3 @@ -133,7 +133,7 @@ L5: WORD $0xC0398006 //iilf %r3,2147909631 BYTE $0x7F BYTE $0xFF - WORD $0xB3CD0050 //lgdr %r5, %f0 + LGDR F0, R5 SRAD $32, R5 MOVH $0x0, R2 SUBW R5, R3 @@ -146,7 
+146,7 @@ L5: BYTE $0x59 ORW $0x45000000, R1 L6: - WORD $0xB3C10022 //ldgr %f2,%r2 + LDGR R2, F2 FMOVD 184(R9), F0 WFMADB V8, V2, V0, V8 FMOVD 176(R9), F4 diff --git a/src/math/atan2_s390x.s b/src/math/atan2_s390x.s index f37555b07f1bb..c7a8a09d05635 100644 --- a/src/math/atan2_s390x.s +++ b/src/math/atan2_s390x.s @@ -142,8 +142,8 @@ Normal: FMOVD x+0(FP), F0 FMOVD y+8(FP), F2 MOVD $·atan2rodataL25<>+0(SB), R9 - WORD $0xB3CD0020 //lgdr %r2,%f0 - WORD $0xB3CD0012 //lgdr %r1,%f2 + LGDR F0, R2 + LGDR F2, R1 WORD $0xEC2220BF //risbgn %r2,%r2,64-32,128+63,64+0+32 BYTE $0x60 BYTE $0x59 @@ -229,7 +229,7 @@ L18: BYTE $0x55 MOVD $·atan2xpi2h<>+0(SB), R1 MOVD ·atan2xpim<>+0(SB), R3 - WORD $0xB3C10003 //ldgr %f0,%r3 + LDGR R3, F0 WORD $0xED021000 //madb %f4,%f0,0(%r2,%r1) BYTE $0x40 BYTE $0x1E diff --git a/src/math/atan_s390x.s b/src/math/atan_s390x.s index 9f4eaa28d5ae2..713727ddbf809 100644 --- a/src/math/atan_s390x.s +++ b/src/math/atan_s390x.s @@ -54,7 +54,7 @@ TEXT ·atanAsm(SB), NOSPLIT, $0-16 MOVD $·atanrodataL8<>+0(SB), R5 MOVH $0x3FE0, R3 - WORD $0xB3CD0010 //lgdr %r1,%f0 + LGDR F0, R1 WORD $0xEC1120BF //risbgn %r1,%r1,64-32,128+63,64+0+32 BYTE $0x60 BYTE $0x59 diff --git a/src/math/atanh_s390x.s b/src/math/atanh_s390x.s index 57b61a34ff1c9..e7c63597041b9 100644 --- a/src/math/atanh_s390x.s +++ b/src/math/atanh_s390x.s @@ -64,7 +64,7 @@ GLOBL ·atanhtabh2075<> + 0(SB), RODATA, $16 TEXT ·atanhAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·atanhrodataL10<>+0(SB), R5 - WORD $0xB3CD0010 //lgdr %r1, %f0 + LGDR F0, R1 WORD $0xC0393FEF //iilf %r3,1072693247 BYTE $0xFF BYTE $0xFF @@ -128,7 +128,7 @@ L9: WORD $0xED405088 //adb %f4,.L12-.L10(%r5) BYTE $0x00 BYTE $0x1A - WORD $0xB3CD0044 //lgdr %r4, %f4 + LGDR F4, R4 SRAD $32, R4 FMOVD F4, F3 WORD $0xED305088 //sdb %f3,.L12-.L10(%r5) @@ -140,7 +140,7 @@ L9: BYTE $0x00 BYTE $0x55 SLD $32, R1, R1 - WORD $0xB3C10021 //ldgr %f2,%r1 + LDGR R1, F2 WFMADB V4, V2, V16, V4 SRAW $8, R2, R1 WFMADB V4, V5, V6, V5 diff --git 
a/src/math/big/arith.go b/src/math/big/arith.go index ad352403a7c5e..f9db9118eb254 100644 --- a/src/math/big/arith.go +++ b/src/math/big/arith.go @@ -82,7 +82,7 @@ func nlz(x Word) uint { return uint(bits.LeadingZeros(uint(x))) } -// q = (u1<<_W + u0 - r)/y +// q = (u1<<_W + u0 - r)/v // Adapted from Warren, Hacker's Delight, p. 152. func divWW_g(u1, u0, v Word) (q, r Word) { if u1 >= v { diff --git a/src/math/big/arith_386.s b/src/math/big/arith_386.s index 6c080f074a3c0..864fbc554e1c8 100644 --- a/src/math/big/arith_386.s +++ b/src/math/big/arith_386.s @@ -183,7 +183,7 @@ L9: MOVL AX, DX // w = w1 SHRL CX, DX:AX // w>>s | w1<<ŝ MOVL DX, (DI)(BX*4) // z[i] = w>>s | w1<<ŝ ADDL $1, BX // i++ - + E9: CMPL BX, BP JL L9 // i < n-1 diff --git a/src/math/big/arith_amd64.s b/src/math/big/arith_amd64.s index 1b950a4a25750..e9c8887523f2d 100644 --- a/src/math/big/arith_amd64.s +++ b/src/math/big/arith_amd64.s @@ -324,10 +324,10 @@ TEXT ·mulAddVWW(SB),NOSPLIT,$0 MOVQ r+56(FP), CX // c = r MOVQ z_len+8(FP), R11 MOVQ $0, BX // i = 0 - + CMPQ R11, $4 JL E5 - + U5: // i+4 <= n // regular loop body unrolled 4x MOVQ (0*8)(R8)(BX*8), AX @@ -355,7 +355,7 @@ U5: // i+4 <= n MOVQ AX, (3*8)(R10)(BX*8) MOVQ DX, CX ADDQ $4, BX // i += 4 - + LEAQ 4(BX), DX CMPQ DX, R11 JLE U5 diff --git a/src/math/big/arith_arm.s b/src/math/big/arith_arm.s index ba65fd2b1fa5e..33aa36f7090fb 100644 --- a/src/math/big/arith_arm.s +++ b/src/math/big/arith_arm.s @@ -123,7 +123,7 @@ TEXT ·shlVU(SB),NOSPLIT,$0 MOVW z_len+4(FP), R5 TEQ $0, R5 BEQ X7 - + MOVW z+0(FP), R1 MOVW x+12(FP), R2 ADD R5<<2, R2, R2 @@ -135,7 +135,7 @@ TEXT ·shlVU(SB),NOSPLIT,$0 MOVW $32, R4 SUB R3, R4 MOVW $0, R7 - + MOVW.W -4(R2), R6 MOVW R6<>R4, R6 diff --git a/src/math/big/arith_s390x.s b/src/math/big/arith_s390x.s index 4520d161d779d..9156d9debe400 100644 --- a/src/math/big/arith_s390x.s +++ b/src/math/big/arith_s390x.s @@ -54,7 +54,7 @@ TEXT ·divWW(SB),NOSPLIT,$0 TEXT ·addVV(SB),NOSPLIT,$0 MOVD addvectorfacility+0x00(SB),R1 BR (R1) - 
+ TEXT ·addVV_check(SB),NOSPLIT, $0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported @@ -89,7 +89,7 @@ TEXT ·addVV_vec(SB),NOSPLIT,$0 BLT v1 SUB $12, R3 // n -= 16 BLT A1 // if n < 0 goto A1 - + MOVD R8, R5 MOVD R9, R6 MOVD R2, R7 @@ -291,7 +291,7 @@ E1n: NEG R4, R4 TEXT ·subVV(SB),NOSPLIT,$0 MOVD subvectorfacility+0x00(SB),R1 BR (R1) - + TEXT ·subVV_check(SB),NOSPLIT,$0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported @@ -321,7 +321,7 @@ TEXT ·subVV_vec(SB),NOSPLIT,$0 MOVD $0, R4 // c = 0 MOVD $0, R0 // make sure it's zero MOVD $0, R10 // i = 0 - + // s/JL/JMP/ below to disable the unrolled loop SUB $4, R3 // n -= 4 BLT v1 // if n < 0 goto v1 @@ -413,7 +413,7 @@ UU1: VLM 0(R5), V1, V4 // 64-bytes into V1..V8 A1: ADD $12, R3 // n += 16 BLT v1 // if n < 0 goto v1 - + U1: // n >= 0 // regular loop body unrolled 4x MOVD 0(R8)(R10*1), R5 @@ -532,7 +532,7 @@ E1: NEG R4, R4 TEXT ·addVW(SB),NOSPLIT,$0 MOVD addwvectorfacility+0x00(SB),R1 BR (R1) - + TEXT ·addVW_check(SB),NOSPLIT,$0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported @@ -742,7 +742,7 @@ E4: MOVD R4, c+56(FP) // return c TEXT ·subVW(SB),NOSPLIT,$0 MOVD subwvectorfacility+0x00(SB),R1 BR (R1) - + TEXT ·subVW_check(SB),NOSPLIT,$0 MOVB ·hasVX(SB), R1 CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported diff --git a/src/math/big/float.go b/src/math/big/float.go index 55b93c8915072..b3c329520141f 100644 --- a/src/math/big/float.go +++ b/src/math/big/float.go @@ -43,7 +43,7 @@ const debugFloat = false // enable for debugging // precision of the argument with the largest precision value before any // rounding takes place, and the rounding mode remains unchanged. 
Thus, // uninitialized Floats provided as result arguments will have their -// precision set to a reasonable value determined by the operands and +// precision set to a reasonable value determined by the operands, and // their mode is the zero value for RoundingMode (ToNearestEven). // // By setting the desired precision to 24 or 53 and using matching rounding @@ -56,6 +56,12 @@ const debugFloat = false // enable for debugging // The zero (uninitialized) value for a Float is ready to use and represents // the number +0.0 exactly, with precision 0 and rounding mode ToNearestEven. // +// Operations always take pointer arguments (*Float) rather +// than Float values, and each unique Float value requires +// its own unique *Float pointer. To "copy" a Float value, +// an existing (or newly allocated) Float must be set to +// a new value using the Float.Set method; shallow copies +// of Floats are not supported and may lead to errors. type Float struct { prec uint32 mode RoundingMode @@ -293,7 +299,7 @@ func (z *Float) setExpAndRound(exp int64, sbit uint) { z.round(sbit) } -// SetMantExp sets z to mant × 2**exp and and returns z. +// SetMantExp sets z to mant × 2**exp and returns z. // The result z has the same precision and rounding mode // as mant. SetMantExp is an inverse of MantExp but does // not require 0.5 <= |mant| < 1.0. Specifically: @@ -321,7 +327,7 @@ func (z *Float) SetMantExp(mant *Float, exp int) *Float { return z } -// Signbit returns true if x is negative or negative zero. +// Signbit reports whether x is negative or negative zero. func (x *Float) Signbit() bool { return x.neg } diff --git a/src/math/big/int.go b/src/math/big/int.go index 47a288ab44d87..dab9a5cc0f7e6 100644 --- a/src/math/big/int.go +++ b/src/math/big/int.go @@ -15,6 +15,13 @@ import ( // An Int represents a signed multi-precision integer. // The zero value for an Int represents the value 0. 
+// +// Operations always take pointer arguments (*Int) rather +// than Int values, and each unique Int value requires +// its own unique *Int pointer. To "copy" an Int value, +// an existing (or newly allocated) Int must be set to +// a new value using the Int.Set method; shallow copies +// of Ints are not supported and may lead to errors. type Int struct { neg bool // sign abs nat // absolute value of the integer diff --git a/src/math/big/int_test.go b/src/math/big/int_test.go index 9930ed016af13..7ef2b3907f75a 100644 --- a/src/math/big/int_test.go +++ b/src/math/big/int_test.go @@ -1727,3 +1727,29 @@ func BenchmarkIntSqr(b *testing.B) { }) } } + +func benchmarkDiv(b *testing.B, aSize, bSize int) { + var r = rand.New(rand.NewSource(1234)) + aa := randInt(r, uint(aSize)) + bb := randInt(r, uint(bSize)) + if aa.Cmp(bb) < 0 { + aa, bb = bb, aa + } + x := new(Int) + y := new(Int) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + x.DivMod(aa, bb, y) + } +} + +func BenchmarkDiv(b *testing.B) { + min, max, step := 10, 100000, 10 + for i := min; i <= max; i *= step { + j := 2 * i + b.Run(fmt.Sprintf("%d/%d", j, i), func(b *testing.B) { + benchmarkDiv(b, j, i) + }) + } +} diff --git a/src/math/big/nat.go b/src/math/big/nat.go index a6f79edccc439..1e4a3b09cf771 100644 --- a/src/math/big/nat.go +++ b/src/math/big/nat.go @@ -58,6 +58,10 @@ func (z nat) make(n int) nat { if n <= cap(z) { return z[:n] // reuse z } + if n == 1 { + // Most nats start small and stay that way; don't over-allocate. + return make(nat, 1) + } // Choosing a good value for e has significant performance impact // because it increases the chance that a value can be reused. const e = 4 // extra capacity @@ -680,43 +684,36 @@ func putNat(x *nat) { var natPool sync.Pool -// q = (uIn-r)/v, with 0 <= r < y +// q = (uIn-r)/vIn, with 0 <= r < y // Uses z as storage for q, and u as storage for r if possible. // See Knuth, Volume 2, section 4.3.1, Algorithm D. 
// Preconditions: -// len(v) >= 2 -// len(uIn) >= len(v) -func (z nat) divLarge(u, uIn, v nat) (q, r nat) { - n := len(v) +// len(vIn) >= 2 +// len(uIn) >= len(vIn) +// u must not alias z +func (z nat) divLarge(u, uIn, vIn nat) (q, r nat) { + n := len(vIn) m := len(uIn) - n - // determine if z can be reused - // TODO(gri) should find a better solution - this if statement - // is very costly (see e.g. time pidigits -s -n 10000) - if alias(z, u) || alias(z, uIn) || alias(z, v) { - z = nil // z is an alias for u or uIn or v - cannot reuse + // D1. + shift := nlz(vIn[n-1]) + // do not modify vIn, it may be used by another goroutine simultaneously + vp := getNat(n) + v := *vp + shlVU(v, vIn, shift) + + // u may safely alias uIn or vIn, the value of uIn is used to set u and vIn was already used + u = u.make(len(uIn) + 1) + u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift) + + // z may safely alias uIn or vIn, both values were used already + if alias(z, u) { + z = nil // z is an alias for u - cannot reuse } q = z.make(m + 1) qhatvp := getNat(n + 1) qhatv := *qhatvp - if alias(u, uIn) || alias(u, v) { - u = nil // u is an alias for uIn or v - cannot reuse - } - u = u.make(len(uIn) + 1) - u.clear() // TODO(gri) no need to clear if we allocated a new u - - // D1. - var v1p *nat - shift := nlz(v[n-1]) - if shift > 0 { - // do not modify v, it may be used by another goroutine simultaneously - v1p = getNat(n) - v1 := *v1p - shlVU(v1, v, shift) - v = v1 - } - u[len(uIn)] = shlVU(u[0:len(uIn)], uIn, shift) // D2. 
vn1 := v[n-1] @@ -756,9 +753,8 @@ func (z nat) divLarge(u, uIn, v nat) (q, r nat) { q[j] = qhat } - if v1p != nil { - putNat(v1p) - } + + putNat(vp) putNat(qhatvp) q = q.norm() diff --git a/src/math/big/prime.go b/src/math/big/prime.go index 4c2c152f65e56..d9a5f1ec96838 100644 --- a/src/math/big/prime.go +++ b/src/math/big/prime.go @@ -51,7 +51,7 @@ func (x *Int) ProbablyPrime(n int) bool { } if w&1 == 0 { - return false // n is even + return false // x is even } const primesA = 3 * 5 * 7 * 11 * 13 * 17 * 19 * 23 * 37 diff --git a/src/math/big/rat.go b/src/math/big/rat.go index 46d58fcf365d2..5d0800ca936b2 100644 --- a/src/math/big/rat.go +++ b/src/math/big/rat.go @@ -13,6 +13,13 @@ import ( // A Rat represents a quotient a/b of arbitrary precision. // The zero value for a Rat represents the value 0. +// +// Operations always take pointer arguments (*Rat) rather +// than Rat values, and each unique Rat value requires +// its own unique *Rat pointer. To "copy" a Rat value, +// an existing (or newly allocated) Rat must be set to +// a new value using the Rat.Set method; shallow copies +// of Rats are not supported and may lead to errors. type Rat struct { // To make zero values for Rat work w/o initialization, // a zero value of b (len(b) == 0) acts like b == 1. diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go index 157d8d006d4f5..5656280e84dab 100644 --- a/src/math/big/ratconv.go +++ b/src/math/big/ratconv.go @@ -38,8 +38,8 @@ func (z *Rat) Scan(s fmt.ScanState, ch rune) error { } // SetString sets z to the value of s and returns z and a boolean indicating -// success. s can be given as a fraction "a/b" or as a floating-point number -// optionally followed by an exponent. The entire string (not just a prefix) +// success. s can be given as a fraction "a/b" or as a decimal floating-point +// number optionally followed by an exponent. The entire string (not just a prefix) // must be valid for success. 
If the operation failed, the value of z is // undefined but the returned value is nil. func (z *Rat) SetString(s string) (*Rat, bool) { @@ -78,6 +78,7 @@ func (z *Rat) SetString(s string) (*Rat, bool) { } // mantissa + // TODO(gri) allow other bases besides 10 for mantissa and exponent? (issue #29799) var ecorr int z.a.abs, _, ecorr, err = z.a.abs.scan(r, 10, true) if err != nil { diff --git a/src/math/big/sqrt.go b/src/math/big/sqrt.go index b989649dcdee4..53403aa41d727 100644 --- a/src/math/big/sqrt.go +++ b/src/math/big/sqrt.go @@ -7,8 +7,6 @@ package big import "math" var ( - half = NewFloat(0.5) - two = NewFloat(2.0) three = NewFloat(3.0) ) @@ -57,9 +55,9 @@ func (z *Float) Sqrt(x *Float) *Float { case 0: // nothing to do case 1: - z.Mul(two, z) + z.exp++ case -1: - z.Mul(half, z) + z.exp-- } // 0.25 <= z < 2.0 @@ -96,7 +94,7 @@ func (z *Float) sqrtDirect(x *Float) { u.prec = t.prec u.Mul(t, t) // u = t² u.Add(u, x) // = t² + x - u.Mul(half, u) // = ½(t² + x) + u.exp-- // = ½(t² + x) return t.Quo(u, t) // = ½(t² + x)/t } @@ -133,11 +131,13 @@ func (z *Float) sqrtInverse(x *Float) { ng := func(t *Float) *Float { u.prec = t.prec v.prec = t.prec - u.Mul(t, t) // u = t² - u.Mul(x, u) // = xt² - v.Sub(three, u) // v = 3 - xt² - u.Mul(t, v) // u = t(3 - xt²) - return t.Mul(half, u) // = ½t(3 - xt²) + u.Mul(t, t) // u = t² + u.Mul(x, u) // = xt² + v.Sub(three, u) // v = 3 - xt² + u.Mul(t, v) // u = t(3 - xt²) + u.exp-- // = ½t(3 - xt²) + return t.Set(u) + } xf, _ := x.Float64() diff --git a/src/math/bits/bits.go b/src/math/bits/bits.go index 989baacc13fc1..b06c363348449 100644 --- a/src/math/bits/bits.go +++ b/src/math/bits/bits.go @@ -8,6 +8,8 @@ // functions for the predeclared unsigned integer types. package bits +import _ "unsafe" // for go:linkname + const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 // UintSize is the size of a uint in bits. 
@@ -63,7 +65,7 @@ func TrailingZeros8(x uint8) int { } // TrailingZeros16 returns the number of trailing zero bits in x; the result is 16 for x == 0. -func TrailingZeros16(x uint16) (n int) { +func TrailingZeros16(x uint16) int { if x == 0 { return 16 } @@ -328,3 +330,206 @@ func Len64(x uint64) (n int) { } return n + int(len8tab[x]) } + +// --- Add with carry --- + +// Add returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add(x, y, carry uint) (sum, carryOut uint) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Add32 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add32(x, y, carry uint32) (sum, carryOut uint32) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// --- Subtract with borrow --- + +// Sub returns the difference of x, y and borrow: diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. +func Sub(x, y, borrow uint) (diff, borrowOut uint) { + yb := y + borrow + diff = x - yb + if diff > x || yb < y { + borrowOut = 1 + } + return +} + +// Sub32 returns the difference of x, y and borrow, diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. 
+func Sub32(x, y, borrow uint32) (diff, borrowOut uint32) { + yb := y + borrow + diff = x - yb + if diff > x || yb < y { + borrowOut = 1 + } + return +} + +// Sub64 returns the difference of x, y and borrow: diff = x - y - borrow. +// The borrow input must be 0 or 1; otherwise the behavior is undefined. +// The borrowOut output is guaranteed to be 0 or 1. +func Sub64(x, y, borrow uint64) (diff, borrowOut uint64) { + yb := y + borrow + diff = x - yb + if diff > x || yb < y { + borrowOut = 1 + } + return +} + +// --- Full-width multiply --- + +// Mul returns the full-width product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul(x, y uint) (hi, lo uint) { + if UintSize == 32 { + h, l := Mul32(uint32(x), uint32(y)) + return uint(h), uint(l) + } + h, l := Mul64(uint64(x), uint64(y)) + return uint(h), uint(l) +} + +// Mul32 returns the 64-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul32(x, y uint32) (hi, lo uint32) { + tmp := uint64(x) * uint64(y) + hi, lo = uint32(tmp>>32), uint32(tmp) + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} + +// --- Full-width divide --- + +// Div returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div panics for y == 0 (division by zero) or y <= hi (quotient overflow). 
+func Div(hi, lo, y uint) (quo, rem uint) { + if UintSize == 32 { + q, r := Div32(uint32(hi), uint32(lo), uint32(y)) + return uint(q), uint(r) + } + q, r := Div64(uint64(hi), uint64(lo), uint64(y)) + return uint(q), uint(r) +} + +// Div32 returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div32 panics for y == 0 (division by zero) or y <= hi (quotient overflow). +func Div32(hi, lo, y uint32) (quo, rem uint32) { + if y != 0 && y <= hi { + panic(overflowError) + } + z := uint64(hi)<<32 | uint64(lo) + quo, rem = uint32(z/uint64(y)), uint32(z%uint64(y)) + return +} + +// Div64 returns the quotient and remainder of (hi, lo) divided by y: +// quo = (hi, lo)/y, rem = (hi, lo)%y with the dividend bits' upper +// half in parameter hi and the lower half in parameter lo. +// Div64 panics for y == 0 (division by zero) or y <= hi (quotient overflow). +func Div64(hi, lo, y uint64) (quo, rem uint64) { + const ( + two32 = 1 << 32 + mask32 = two32 - 1 + ) + if y == 0 { + panic(divideError) + } + if y <= hi { + panic(overflowError) + } + + s := uint(LeadingZeros64(y)) + y <<= s + + yn1 := y >> 32 + yn0 := y & mask32 + un32 := hi<>(64-s) + un10 := lo << s + un1 := un10 >> 32 + un0 := un10 & mask32 + q1 := un32 / yn1 + rhat := un32 - q1*yn1 + + for q1 >= two32 || q1*yn0 > two32*rhat+un1 { + q1-- + rhat += yn1 + if rhat >= two32 { + break + } + } + + un21 := un32*two32 + un1 - q1*y + q0 := un21 / yn1 + rhat = un21 - q0*yn1 + + for q0 >= two32 || q0*yn0 > two32*rhat+un0 { + q0-- + rhat += yn1 + if rhat >= two32 { + break + } + } + + return q1*two32 + q0, (un21*two32 + un0 - q0*y) >> s +} + +//go:linkname overflowError runtime.overflowError +var overflowError error + +//go:linkname divideError runtime.divideError +var divideError error diff --git a/src/math/bits/bits_test.go b/src/math/bits/bits_test.go index 5c34f6dbf7f84..1ec5107ae1294 100644 
--- a/src/math/bits/bits_test.go +++ b/src/math/bits/bits_test.go @@ -6,6 +6,7 @@ package bits_test import ( . "math/bits" + "runtime" "testing" "unsafe" ) @@ -705,6 +706,385 @@ func TestLen(t *testing.T) { } } +const ( + _M = 1< + 0(SB), RODATA, $128 TEXT ·cbrtAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·cbrtrodataL9<>+0(SB), R9 - WORD $0xB3CD0020 //lgdr %r2, %f0 + LGDR F0, R2 WORD $0xC039000F //iilf %r3,1048575 BYTE $0xFF BYTE $0xFF @@ -103,7 +103,7 @@ L2: BYTE $0x00 BYTE $0x1C MOVH $0x200, R4 - WORD $0xB3CD0022 //lgdr %r2, %f2 + LGDR F2, R2 SRAD $32, R2 L4: WORD $0xEC3239BE //risbg %r3,%r2,57,128+62,64-25 @@ -134,7 +134,7 @@ L4: ADDW R4, R1 SLW $16, R1, R1 SLD $32, R1, R1 - WORD $0xB3C10021 //ldgr %f2,%r1 + LDGR R1, F2 WFMDB V2, V2, V4 WFMDB V4, V0, V6 WFMSDB V4, V6, V2, V4 diff --git a/src/math/cmplx/isinf.go b/src/math/cmplx/isinf.go index d5a65b44b320c..6273cd3a6c16b 100644 --- a/src/math/cmplx/isinf.go +++ b/src/math/cmplx/isinf.go @@ -6,7 +6,7 @@ package cmplx import "math" -// IsInf returns true if either real(x) or imag(x) is an infinity. +// IsInf reports whether either real(x) or imag(x) is an infinity. func IsInf(x complex128) bool { if math.IsInf(real(x), 0) || math.IsInf(imag(x), 0) { return true diff --git a/src/math/cmplx/isnan.go b/src/math/cmplx/isnan.go index 05d0cce6335ef..d3382c05eefd8 100644 --- a/src/math/cmplx/isnan.go +++ b/src/math/cmplx/isnan.go @@ -6,7 +6,7 @@ package cmplx import "math" -// IsNaN returns true if either real(x) or imag(x) is NaN +// IsNaN reports whether either real(x) or imag(x) is NaN // and neither is an infinity. 
func IsNaN(x complex128) bool { switch { diff --git a/src/math/erf_s390x.s b/src/math/erf_s390x.s index 5b62bdad76975..5be5d4de16aa4 100644 --- a/src/math/erf_s390x.s +++ b/src/math/erf_s390x.s @@ -100,7 +100,7 @@ GLOBL ·erftab12067<> + 0(SB), RODATA, $16 TEXT ·erfAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·erfrodataL13<>+0(SB), R5 - WORD $0xB3CD0010 //lgdr %r1, %f0 + LGDR F0, R1 FMOVD F0, F6 SRAD $48, R1 MOVH $16383, R3 @@ -205,7 +205,7 @@ L9: FMOVD 256(R5), F4 WFMADB V1, V4, V3, V4 FDIV F6, F2 - WORD $0xB3CD0014 //lgdr %r1, %f4 + LGDR F4, R1 FSUB F3, F4 FMOVD 248(R5), F6 WFMSDB V4, V6, V1, V4 @@ -230,7 +230,7 @@ L9: BYTE $0x59 MOVD $·erftab2066<>+0(SB), R1 FMOVD 192(R5), F1 - WORD $0xB3C10033 //ldgr %f3,%r3 + LDGR R3, F3 WORD $0xED221000 //madb %f2,%f2,0(%r2,%r1) BYTE $0x20 BYTE $0x1E diff --git a/src/math/erfc_s390x.s b/src/math/erfc_s390x.s index 57710b254b38e..0cb606d6de7db 100644 --- a/src/math/erfc_s390x.s +++ b/src/math/erfc_s390x.s @@ -219,7 +219,7 @@ L9: WFMADB V0, V5, V3, V5 WFMADB V6, V7, V2, V7 L11: - WORD $0xB3CD0065 //lgdr %r6, %f5 + LGDR F5, R6 WFSDB V0, V0, V2 WORD $0xED509298 //sdb %f5,.L55-.L38(%r9) BYTE $0x00 @@ -253,7 +253,7 @@ L11: BYTE $0x30 BYTE $0x59 WFMADB V4, V0, V2, V4 - WORD $0xB3C10024 //ldgr %f2,%r4 + LDGR R4, F2 FMADD F4, F2, F2 MOVW R2, R6 CMPBLE R6, $0, L20 @@ -504,7 +504,7 @@ L37: CMPBGT R6, R7, L24 WORD $0xA5400010 //iihh %r4,16 - WORD $0xB3C10024 //ldgr %f2,%r4 + LDGR R4, F2 FMUL F2, F2 BR L1 L23: @@ -521,7 +521,7 @@ L18: CMPBGT R6, R7, L25 WORD $0xA5408010 //iihh %r4,32784 FMOVD 568(R9), F2 - WORD $0xB3C10004 //ldgr %f0,%r4 + LDGR R4, F0 FMADD F2, F0, F2 BR L1 L25: diff --git a/src/math/example_test.go b/src/math/example_test.go index a1f764bcdaabc..25d6975903bcb 100644 --- a/src/math/example_test.go +++ b/src/math/example_test.go @@ -113,3 +113,25 @@ func ExamplePow10() { fmt.Printf("%.1f", c) // Output: 100.0 } + +func ExampleRound() { + p := math.Round(10.5) + fmt.Printf("%.1f\n", p) + + n := math.Round(-10.5) + 
fmt.Printf("%.1f\n", n) + // Output: + // 11.0 + // -11.0 +} + +func ExampleRoundToEven() { + u := math.RoundToEven(11.5) + fmt.Printf("%.1f\n", u) + + d := math.RoundToEven(12.5) + fmt.Printf("%.1f\n", d) + // Output: + // 12.0 + // 12.0 +} diff --git a/src/math/exp_s390x.s b/src/math/exp_s390x.s index 613ec24136491..cef1ce7684815 100644 --- a/src/math/exp_s390x.s +++ b/src/math/exp_s390x.s @@ -84,7 +84,7 @@ L2: FMOVD 32(R5), F4 FMUL F0, F0 WFMADB V2, V4, V1, V4 - WORD $0xB3CD0016 //lgdr %r1,%f6 + LGDR F6, R1 FMOVD 24(R5), F1 WFMADB V2, V3, V1, V3 FMOVD 16(R5), F1 @@ -100,7 +100,7 @@ L2: FMADD F4, F2, F2 SLD $48, R1, R2 WFMADB V2, V0, V4, V2 - WORD $0xB3C10002 //ldgr %f0,%r2 + LDGR R2, F0 FMADD F0, F2, F0 FMOVD F0, ret+8(FP) RET @@ -135,7 +135,7 @@ L6: FMUL F6, F6 WFMADB V4, V1, V5, V1 FMOVD 48(R5), F7 - WORD $0xB3CD0013 //lgdr %r1,%f3 + LGDR F3, R1 FMOVD 24(R5), F5 WFMADB V4, V7, V5, V7 FMOVD 16(R5), F5 @@ -157,7 +157,7 @@ L6: WORD $0xEC21000F //risbgn %r2,%r1,64-64+0,64-64+0+16-1,64-0-16 BYTE $0x30 BYTE $0x59 - WORD $0xB3C10002 //ldgr %f0,%r2 + LDGR R2, F0 FMADD F0, F4, F0 MOVD $·expx4ff<>+0(SB), R3 FMOVD 0(R3), F2 @@ -173,7 +173,7 @@ L21: WORD $0xEC21000F //risbgn %r2,%r1,64-64+0,64-64+0+16-1,64-0-16 BYTE $0x30 BYTE $0x59 - WORD $0xB3C10002 //ldgr %f0,%r2 + LDGR R2, F0 FMADD F0, F4, F0 MOVD $·expx2ff<>+0(SB), R3 FMOVD 0(R3), F2 diff --git a/src/math/expm1_386.s b/src/math/expm1_386.s index c1392cd52b028..d020296ca71e3 100644 --- a/src/math/expm1_386.s +++ b/src/math/expm1_386.s @@ -8,7 +8,7 @@ TEXT ·Expm1(SB),NOSPLIT,$0 FLDLN2 // F0=log(2) = 1/log2(e) ~ 0.693147 FMOVD x+0(FP), F0 // F0=x, F1=1/log2(e) - FABS // F0=|x|, F1=1/log2(e) + FABS // F0=|x|, F1=1/log2(e) FUCOMPP F0, F1 // compare F0 to F1 FSTSW AX SAHF @@ -36,7 +36,7 @@ use_exp: FSCALE // F0=e**x, F1=int(x*log2(e)) FMOVDP F0, F1 // F0=e**x FLD1 // F0=1, F1=e**x - FSUBDP F0, F1 // F0=e**x-1 + FSUBDP F0, F1 // F0=e**x-1 FMOVDP F0, ret+8(FP) RET not_finite: diff --git a/src/math/expm1_s390x.s 
b/src/math/expm1_s390x.s index 22e5eb16a9049..c7c793b982b95 100644 --- a/src/math/expm1_s390x.s +++ b/src/math/expm1_s390x.s @@ -89,7 +89,7 @@ L2: FMADD F2, F0, F6 WFMADB V0, V5, V3, V5 WFMDB V0, V0, V2 - WORD $0xB3CD0011 //lgdr %r1,%f1 + LGDR F1, R1 WFMADB V6, V2, V5, V6 FMOVD 40(R5), F3 FMOVD 32(R5), F5 @@ -108,7 +108,7 @@ L2: FMADD F4, F0, F0 SLD $48, R1, R2 WFMSDB V2, V0, V4, V0 - WORD $0xB3C10042 //ldgr %f4,%r2 + LDGR R2, F4 WORD $0xB3130000 //lcdbr %f0,%f0 FSUB F4, F6 WFMSDB V0, V4, V6, V0 @@ -155,7 +155,7 @@ L6: WFMADB V1, V16, V3, V1 FMOVD 16(R5), F6 FMADD F4, F1, F6 - WORD $0xB3CD0015 //lgdr %r1,%f5 + LGDR F5, R1 WORD $0xB3130066 //lcdbr %f6,%f6 WORD $0xEC3139BC //risbg %r3,%r1,57,128+60,3 BYTE $0x03 @@ -171,7 +171,7 @@ L6: WORD $0xEC21000F //risbgn %r2,%r1,64-64+0,64-64+0+16-1,64-0-16 BYTE $0x30 BYTE $0x59 - WORD $0xB3C10002 //ldgr %f0,%r2 + LDGR R2, F0 FMADD F0, F4, F0 MOVD $·expm1x4ff<>+0(SB), R3 FMOVD 0(R5), F4 @@ -189,7 +189,7 @@ L21: WORD $0xEC21000F //risbgn %r2,%r1,64-64+0,64-64+0+16-1,64-0-16 BYTE $0x30 BYTE $0x59 - WORD $0xB3C10002 //ldgr %f0,%r2 + LDGR R2, F0 FMADD F0, F4, F0 MOVD $·expm1x2ff<>+0(SB), R3 FMOVD 0(R5), F4 diff --git a/src/math/export_test.go b/src/math/export_test.go index 368308e1e5d3f..53d9205b9d169 100644 --- a/src/math/export_test.go +++ b/src/math/export_test.go @@ -9,3 +9,6 @@ var ExpGo = exp var Exp2Go = exp2 var HypotGo = hypot var SqrtGo = sqrt +var TrigReduce = trigReduce + +const ReduceThreshold = reduceThreshold diff --git a/src/math/huge_test.go b/src/math/huge_test.go new file mode 100644 index 0000000000000..0b45dbf5b12ec --- /dev/null +++ b/src/math/huge_test.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Disabled for s390x because it uses assembly routines that are not +// accurate for huge arguments. + +// +build !s390x + +package math_test + +import ( + . 
"math" + "testing" +) + +// Inputs to test trig_reduce +var trigHuge = []float64{ + 1 << 120, + 1 << 240, + 1 << 480, + 1234567891234567 << 180, + 1234567891234567 << 300, + MaxFloat64, +} + +// Results for trigHuge[i] calculated with https://github.com/robpike/ivy +// using 4096 bits of working precision. Values requiring less than +// 102 decimal digits (1 << 120, 1 << 240, 1 << 480, 1234567891234567 << 180) +// were confirmed via https://keisan.casio.com/ +var cosHuge = []float64{ + -0.92587902285483787, + 0.93601042593353793, + -0.28282777640193788, + -0.14616431394103619, + -0.79456058210671406, + -0.99998768942655994, +} + +var sinHuge = []float64{ + 0.37782010936075202, + -0.35197227524865778, + 0.95917070894368716, + 0.98926032637023618, + -0.60718488235646949, + 0.00496195478918406, +} + +var tanHuge = []float64{ + -0.40806638884180424, + -0.37603456702698076, + -3.39135965054779932, + -6.76813854009065030, + 0.76417695016604922, + -0.00496201587444489, +} + +// Check that trig values of huge angles return accurate results. +// This confirms that argument reduction works for very large values +// up to MaxFloat64. 
+func TestHugeCos(t *testing.T) { + for i := 0; i < len(trigHuge); i++ { + f1 := cosHuge[i] + f2 := Cos(trigHuge[i]) + if !close(f1, f2) { + t.Errorf("Cos(%g) = %g, want %g", trigHuge[i], f2, f1) + } + } +} + +func TestHugeSin(t *testing.T) { + for i := 0; i < len(trigHuge); i++ { + f1 := sinHuge[i] + f2 := Sin(trigHuge[i]) + if !close(f1, f2) { + t.Errorf("Sin(%g) = %g, want %g", trigHuge[i], f2, f1) + } + } +} + +func TestHugeSinCos(t *testing.T) { + for i := 0; i < len(trigHuge); i++ { + f1, g1 := sinHuge[i], cosHuge[i] + f2, g2 := Sincos(trigHuge[i]) + if !close(f1, f2) || !close(g1, g2) { + t.Errorf("Sincos(%g) = %g, %g, want %g, %g", trigHuge[i], f2, g2, f1, g1) + } + } +} + +func TestHugeTan(t *testing.T) { + for i := 0; i < len(trigHuge); i++ { + f1 := tanHuge[i] + f2 := Tan(trigHuge[i]) + if !close(f1, f2) { + t.Errorf("Tan(%g) = %g, want %g", trigHuge[i], f2, f1) + } + } +} diff --git a/src/math/log1p.go b/src/math/log1p.go index b128a1620c880..c4ec61b2259eb 100644 --- a/src/math/log1p.go +++ b/src/math/log1p.go @@ -151,12 +151,13 @@ func log1p(x float64) float64 { u = 1.0 + x iu = Float64bits(u) k = int((iu >> 52) - 1023) + // correction term if k > 0 { c = 1.0 - (u - x) } else { - c = x - (u - 1.0) // correction term - c /= u + c = x - (u - 1.0) } + c /= u } else { u = x iu = Float64bits(u) diff --git a/src/math/log1p_s390x.s b/src/math/log1p_s390x.s index c7e986033f90c..ba4933d5b0cb8 100644 --- a/src/math/log1p_s390x.s +++ b/src/math/log1p_s390x.s @@ -96,7 +96,7 @@ TEXT ·log1pAsm(SB), NOSPLIT, $0-16 MOVD $·log1pc5<>+0(SB), R1 VLEG $0, 0(R1), V16 MOVD R2, R5 - WORD $0xB3CD0034 //lgdr %r3,%f4 + LGDR F4, R3 WORD $0xC0190006 //iilf %r1,425983 BYTE $0x7F BYTE $0xFF @@ -118,7 +118,7 @@ TEXT ·log1pAsm(SB), NOSPLIT, $0-16 MOVD $·log1pxzero<>+0(SB), R1 FMOVD 0(R1), F2 BVS LEXITTAGlog1p - WORD $0xB3130044 + WORD $0xB3130044 // lcdbr %f4,%f4 WFCEDBS V2, V4, V6 BEQ L9 WFCHDBS V4, V2, V2 @@ -129,11 +129,11 @@ TEXT ·log1pAsm(SB), NOSPLIT, $0-16 RET L8: - WORD 
$0xB3C10022 //ldgr %f2,%r2 + LDGR R2, F2 FSUB F4, F3 FMADD F2, F4, F1 MOVD $·log1pc4<>+0(SB), R2 - WORD $0xB3130041 + WORD $0xB3130041 // lcdbr %f4,%f1 FMOVD 0(R2), F7 FSUB F3, F0 MOVD $·log1pc3<>+0(SB), R2 @@ -164,7 +164,7 @@ L8: FMOVD 0(R3), F2 WFMADB V0, V6, V1, V0 MOVD $·log1pyout<>+0(SB), R1 - WORD $0xB3C10065 //ldgr %f6,%r5 + LDGR R5, F6 FMOVD 0(R1), F4 WFMSDB V2, V6, V4, V2 MOVD $·log1pxl2<>+0(SB), R1 diff --git a/src/math/log_s390x.s b/src/math/log_s390x.s index 3e24ca79bb6f5..7bcfdfcffa727 100644 --- a/src/math/log_s390x.s +++ b/src/math/log_s390x.s @@ -63,7 +63,7 @@ TEXT ·logAsm(SB), NOSPLIT, $0-16 FMOVD x+0(FP), F0 MOVD $·logrodataL21<>+0(SB), R9 MOVH $0x8006, R4 - WORD $0xB3CD0010 //lgdr %r1,%f0 + LGDR F0, R1 MOVD $0x3FF0000000000000, R6 SRAD $48, R1, R1 MOVD $0x40F03E8000000000, R8 @@ -91,7 +91,7 @@ L7: BLEU L3 L15: FMUL F2, F0 - WORD $0xB3CD0010 //lgdr %r1,%f0 + LGDR F0, R1 SRAD $48, R1, R1 SUBW R1, R0, R2 SUBW R1, R12, R3 @@ -114,7 +114,7 @@ L2: MOVH $0x7FEF, R1 CMPW R5, R1 BGT L1 - WORD $0xB3C10026 //ldgr %f2,%r6 + LDGR R6, F2 FMUL F2, F0 WORD $0xEC4439BB //risbg %r4,%r4,57,128+59,3 BYTE $0x03 @@ -148,14 +148,14 @@ L2: WFMADB V6, V4, V1, V4 FMOVD 8(R4), F1 WFMADB V0, V2, V4, V2 - WORD $0xB3C10048 //ldgr %f4,%r8 + LDGR R8, F4 WFMADB V6, V2, V0, V2 WORD $0xED401000 //msdb %f1,%f4,0(%r1) BYTE $0x10 BYTE $0x1F MOVD ·logxl2<>+0(SB), R1 WORD $0xB3130001 //lcdbr %f0,%f1 - WORD $0xB3C10041 //ldgr %f4,%r1 + LDGR R1, F4 WFMADB V0, V4, V2, V0 L1: FMOVD F0, ret+8(FP) diff --git a/src/math/mod.go b/src/math/mod.go index e1a414e5f9cba..7efc018a5da2c 100644 --- a/src/math/mod.go +++ b/src/math/mod.go @@ -24,16 +24,12 @@ func mod(x, y float64) float64 { if y == 0 || IsInf(x, 0) || IsNaN(x) || IsNaN(y) { return NaN() } - if y < 0 { - y = -y - } + y = Abs(y) yfr, yexp := Frexp(y) - sign := false r := x if x < 0 { r = -x - sign = true } for r >= y { @@ -43,7 +39,7 @@ func mod(x, y float64) float64 { } r = r - Ldexp(y, rexp-yexp) } - if sign { + if x < 0 { r = -r } 
return r diff --git a/src/math/pow.go b/src/math/pow.go index 336193bce12bf..2219a906b8d08 100644 --- a/src/math/pow.go +++ b/src/math/pow.go @@ -83,13 +83,7 @@ func pow(x, y float64) float64 { return 1 / Sqrt(x) } - absy := y - flip := false - if absy < 0 { - absy = -absy - flip = true - } - yi, yf := Modf(absy) + yi, yf := Modf(Abs(y)) if yf != 0 && x < 0 { return NaN() } @@ -147,9 +141,9 @@ func pow(x, y float64) float64 { } // ans = a1*2**ae - // if flip { ans = 1 / ans } + // if y < 0 { ans = 1 / ans } // but in the opposite order - if flip { + if y < 0 { a1 = 1 / a1 ae = -ae } diff --git a/src/math/pow_s390x.s b/src/math/pow_s390x.s index fd1961756161f..754b119e24918 100644 --- a/src/math/pow_s390x.s +++ b/src/math/pow_s390x.s @@ -297,7 +297,7 @@ Normal: FMOVD x+0(FP), F0 FMOVD y+8(FP), F2 MOVD $·powrodataL51<>+0(SB), R9 - WORD $0xB3CD0030 //lgdr %r3,%f0 + LGDR F0, R3 WORD $0xC0298009 //iilf %r2,2148095317 BYTE $0x55 BYTE $0x55 @@ -340,7 +340,7 @@ L2: BYTE $0x24 FMOVD 0(R2), F6 FSUBS F1, F3 - WORD $0xB3C10018 //ldgr %f1,%r8 + LDGR R8, F1 WFMSDB V4, V1, V6, V4 FMOVD 152(R9), F6 WFMDB V4, V4, V7 @@ -387,7 +387,7 @@ L2: WFMSDB V2, V3, V5, V3 VLEG $0, 48(R9), V18 WFADB V3, V5, V6 - WORD $0xB3CD0023 //lgdr %r2,%f3 + LGDR F3, R2 WFMSDB V2, V16, V6, V16 FMOVD 40(R9), F1 WFMADB V2, V4, V16, V4 @@ -410,8 +410,8 @@ L2: BYTE $0x30 BYTE $0x59 WFMADB V4, V1, V3, V4 - WORD $0xB3CD0026 //lgdr %r2,%f6 - WORD $0xB3C10015 //ldgr %f1,%r5 + LGDR F6, R2 + LDGR R5, F1 SRAD $48, R2, R2 FMADD F1, F4, F1 RLL $16, R2, R2 @@ -452,7 +452,7 @@ L11: WORD $0xEC1520BF //risbgn %r1,%r5,64-32,128+63,64+0+32 BYTE $0x60 BYTE $0x59 - WORD $0xB3CD0026 //lgdr %r2,%f6 + LGDR F6, R2 MOVD $powiadd<>+0(SB), R3 WORD $0xEC223CBC //risbg %r2,%r2,60,128+60,64-60 BYTE $0x04 @@ -461,7 +461,7 @@ L11: WORD $0xEC51001F //risbgn %r5,%r1,64-64+0,64-64+0+32-1,64-0-32 BYTE $0x20 BYTE $0x59 - WORD $0xB3C10015 //ldgr %f1,%r5 + LDGR R5, F1 FMADD F1, F4, F1 MOVD $powxscale<>+0(SB), R1 WORD $0xED121000 //mdb 
%f1,0(%r2,%r1) @@ -486,7 +486,7 @@ L3: WORD $0xC0298009 //iilf %r2,2148095317 BYTE $0x55 BYTE $0x55 - WORD $0xB3CD0034 //lgdr %r3,%f4 + LGDR F4, R3 WORD $0xEC3320BF //risbgn %r3,%r3,64-32,128+63,64+0+32 BYTE $0x60 BYTE $0x59 @@ -566,11 +566,11 @@ L47: BVS L49 L16: MOVD ·pow_xnan<>+0(SB), R1 - WORD $0xB3C10001 //ldgr %f0,%r1 + LDGR R1, F0 WFMDB V4, V0, V1 BR L1 L48: - WORD $0xB3CD0030 //lgdr %r3,%f0 + LGDR F0, R3 WORD $0xEC1320BF //risbgn %r1,%r3,64-32,128+63,64+0+32 BYTE $0x60 BYTE $0x59 diff --git a/src/math/signbit.go b/src/math/signbit.go index 670cc1a66799b..f6e61d660e27d 100644 --- a/src/math/signbit.go +++ b/src/math/signbit.go @@ -4,7 +4,7 @@ package math -// Signbit returns true if x is negative or negative zero. +// Signbit reports whether x is negative or negative zero. func Signbit(x float64) bool { return Float64bits(x)&(1<<63) != 0 } diff --git a/src/math/sin.go b/src/math/sin.go index 929cac34ecaa6..cc8b1366ad37b 100644 --- a/src/math/sin.go +++ b/src/math/sin.go @@ -118,10 +118,9 @@ func Cos(x float64) float64 func cos(x float64) float64 { const ( - PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, - PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, - M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi + PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, + PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, ) // special cases switch { @@ -133,15 +132,23 @@ func cos(x float64) float64 { sign := false x = Abs(x) - j := int64(x * M4PI) // integer part of x/(Pi/4), as integer for tests on the phase angle - y := float64(j) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y++ + var j uint64 + var y, z float64 + if x >= reduceThreshold { + j, z = trigReduce(x) + } else { + j = uint64(x * (4 / Pi)) // integer 
part of x/(Pi/4), as integer for tests on the phase angle + y = float64(j) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y++ + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic } - j &= 7 // octant modulo 2Pi radians (360 degrees) + if j > 3 { j -= 4 sign = !sign @@ -150,7 +157,6 @@ func cos(x float64) float64 { sign = !sign } - z := ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic zz := z * z if j == 1 || j == 2 { y = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5]) @@ -173,10 +179,9 @@ func Sin(x float64) float64 func sin(x float64) float64 { const ( - PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, - PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, - M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi + PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, + PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, ) // special cases switch { @@ -193,22 +198,27 @@ func sin(x float64) float64 { sign = true } - j := int64(x * M4PI) // integer part of x/(Pi/4), as integer for tests on the phase angle - y := float64(j) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y++ + var j uint64 + var y, z float64 + if x >= reduceThreshold { + j, z = trigReduce(x) + } else { + j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle + y = float64(j) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y++ + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic } - j &= 7 // octant modulo 2Pi radians 
(360 degrees) // reflect in x axis if j > 3 { sign = !sign j -= 4 } - - z := ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic zz := z * z if j == 1 || j == 2 { y = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5]) diff --git a/src/math/sin_386.s b/src/math/sin_386.s index 9d605a1e38b99..cf7679d1889cb 100644 --- a/src/math/sin_386.s +++ b/src/math/sin_386.s @@ -6,42 +6,8 @@ // func Cos(x float64) float64 TEXT ·Cos(SB),NOSPLIT,$0 - FMOVD x+0(FP), F0 // F0=x - FCOS // F0=cos(x) if -2**63 < x < 2**63 - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE 3(PC) // jump if x outside range - FMOVDP F0, ret+8(FP) - RET - FLDPI // F0=Pi, F1=x - FADDD F0, F0 // F0=2*Pi, F1=x - FXCHD F0, F1 // F0=x, F1=2*Pi - FPREM1 // F0=reduced_x, F1=2*Pi - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE -3(PC) // jump if reduction incomplete - FMOVDP F0, F1 // F0=reduced_x - FCOS // F0=cos(reduced_x) - FMOVDP F0, ret+8(FP) - RET - + JMP ·cos(SB) + // func Sin(x float64) float64 TEXT ·Sin(SB),NOSPLIT,$0 - FMOVD x+0(FP), F0 // F0=x - FSIN // F0=sin(x) if -2**63 < x < 2**63 - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE 3(PC) // jump if x outside range - FMOVDP F0, ret+8(FP) - RET - FLDPI // F0=Pi, F1=x - FADDD F0, F0 // F0=2*Pi, F1=x - FXCHD F0, F1 // F0=x, F1=2*Pi - FPREM1 // F0=reduced_x, F1=2*Pi - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE -3(PC) // jump if reduction incomplete - FMOVDP F0, F1 // F0=reduced_x - FSIN // F0=sin(reduced_x) - FMOVDP F0, ret+8(FP) - RET + JMP ·sin(SB) diff --git a/src/math/sincos.go b/src/math/sincos.go index 3ae193a3b253c..c002db6b3cd16 100644 --- a/src/math/sincos.go +++ b/src/math/sincos.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !386 - package math // Coefficients _sin[] and _cos[] are found in pkg/math/sin.go. 
@@ -16,10 +14,9 @@ package math // Sincos(NaN) = NaN, NaN func Sincos(x float64) (sin, cos float64) { const ( - PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, - PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, - M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi + PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, + PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, ) // special cases switch { @@ -36,14 +33,21 @@ func Sincos(x float64) (sin, cos float64) { sinSign = true } - j := int64(x * M4PI) // integer part of x/(Pi/4), as integer for tests on the phase angle - y := float64(j) // integer part of x/(Pi/4), as float + var j uint64 + var y, z float64 + if x >= reduceThreshold { + j, z = trigReduce(x) + } else { + j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle + y = float64(j) // integer part of x/(Pi/4), as float - if j&1 == 1 { // map zeros to origin - j++ - y++ + if j&1 == 1 { // map zeros to origin + j++ + y++ + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + z = ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic } - j &= 7 // octant modulo 2Pi radians (360 degrees) if j > 3 { // reflect in x axis j -= 4 sinSign, cosSign = !sinSign, !cosSign @@ -52,7 +56,6 @@ func Sincos(x float64) (sin, cos float64) { cosSign = !cosSign } - z := ((x - y*PI4A) - y*PI4B) - y*PI4C // Extended precision modular arithmetic zz := z * z cos = 1.0 - 0.5*zz + zz*zz*((((((_cos[0]*zz)+_cos[1])*zz+_cos[2])*zz+_cos[3])*zz+_cos[4])*zz+_cos[5]) sin = z + z*zz*((((((_sin[0]*zz)+_sin[1])*zz+_sin[2])*zz+_sin[3])*zz+_sin[4])*zz+_sin[5]) diff --git a/src/math/sincos_386.go b/src/math/sincos_386.go deleted file mode 100644 index 38bb050572036..0000000000000 --- a/src/math/sincos_386.go +++ /dev/null @@ 
-1,13 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package math - -// Sincos returns Sin(x), Cos(x). -// -// Special cases are: -// Sincos(±0) = ±0, 1 -// Sincos(±Inf) = NaN, NaN -// Sincos(NaN) = NaN, NaN -func Sincos(x float64) (sin, cos float64) diff --git a/src/math/sincos_386.s b/src/math/sincos_386.s deleted file mode 100644 index f700a4f9bfb5c..0000000000000 --- a/src/math/sincos_386.s +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// func Sincos(x float64) (sin, cos float64) -TEXT ·Sincos(SB),NOSPLIT,$0 - FMOVD x+0(FP), F0 // F0=x - FSINCOS // F0=cos(x), F1=sin(x) if -2**63 < x < 2**63 - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE 4(PC) // jump if x outside range - FMOVDP F0, cos+16(FP) // F0=sin(x) - FMOVDP F0, sin+8(FP) - RET - FLDPI // F0=Pi, F1=x - FADDD F0, F0 // F0=2*Pi, F1=x - FXCHD F0, F1 // F0=x, F1=2*Pi - FPREM1 // F0=reduced_x, F1=2*Pi - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE -3(PC) // jump if reduction incomplete - FMOVDP F0, F1 // F0=reduced_x - FSINCOS // F0=cos(reduced_x), F1=sin(reduced_x) - FMOVDP F0, cos+16(FP) // F0=sin(reduced_x) - FMOVDP F0, sin+8(FP) - RET diff --git a/src/math/sinh.go b/src/math/sinh.go index 39e7c2047a9ec..573a37e35fa2a 100644 --- a/src/math/sinh.go +++ b/src/math/sinh.go @@ -43,7 +43,7 @@ func sinh(x float64) float64 { } var temp float64 - switch true { + switch { case x > 21: temp = Exp(x) * 0.5 diff --git a/src/math/sqrt_386.s b/src/math/sqrt_386.s index 402d1527851a0..5a5c33a79ac20 100644 --- a/src/math/sqrt_386.s +++ b/src/math/sqrt_386.s @@ -4,7 +4,7 @@ #include "textflag.h" -// func Sqrt(x float64) float64 +// func Sqrt(x float64) float64 TEXT ·Sqrt(SB),NOSPLIT,$0 FMOVD x+0(FP),F0 FSQRT diff 
--git a/src/math/sqrt_arm.s b/src/math/sqrt_arm.s index deb67125535ac..ffc7d1026d16f 100644 --- a/src/math/sqrt_arm.s +++ b/src/math/sqrt_arm.s @@ -4,7 +4,7 @@ #include "textflag.h" -// func Sqrt(x float64) float64 +// func Sqrt(x float64) float64 TEXT ·Sqrt(SB),NOSPLIT,$0 MOVB runtime·goarm(SB), R11 CMP $5, R11 diff --git a/src/math/tan.go b/src/math/tan.go index aa2fb37e81b95..0d5394cf264d3 100644 --- a/src/math/tan.go +++ b/src/math/tan.go @@ -83,10 +83,9 @@ func Tan(x float64) float64 func tan(x float64) float64 { const ( - PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, - PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, - M4PI = 1.273239544735162542821171882678754627704620361328125 // 4/pi + PI4A = 7.85398125648498535156E-1 // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B = 3.77489470793079817668E-8 // 0x3e64442d00000000, + PI4C = 2.69515142907905952645E-15 // 0x3ce8469898cc5170, ) // special cases switch { @@ -102,17 +101,22 @@ func tan(x float64) float64 { x = -x sign = true } + var j uint64 + var y, z float64 + if x >= reduceThreshold { + j, z = trigReduce(x) + } else { + j = uint64(x * (4 / Pi)) // integer part of x/(Pi/4), as integer for tests on the phase angle + y = float64(j) // integer part of x/(Pi/4), as float - j := int64(x * M4PI) // integer part of x/(Pi/4), as integer for tests on the phase angle - y := float64(j) // integer part of x/(Pi/4), as float + /* map zeros and singularities to origin */ + if j&1 == 1 { + j++ + y++ + } - /* map zeros and singularities to origin */ - if j&1 == 1 { - j++ - y++ + z = ((x - y*PI4A) - y*PI4B) - y*PI4C } - - z := ((x - y*PI4A) - y*PI4B) - y*PI4C zz := z * z if zz > 1e-14 { diff --git a/src/math/tan_386.s b/src/math/tan_386.s index cb65a3f703d4b..4e44c2692d2d8 100644 --- a/src/math/tan_386.s +++ b/src/math/tan_386.s @@ -6,23 +6,4 @@ // func Tan(x float64) float64 TEXT ·Tan(SB),NOSPLIT,$0 - FMOVD 
x+0(FP), F0 // F0=x - FPTAN // F0=1, F1=tan(x) if -2**63 < x < 2**63 - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE 4(PC) // jump if x outside range - FMOVDP F0, F0 // F0=tan(x) - FMOVDP F0, ret+8(FP) - RET - FLDPI // F0=Pi, F1=x - FADDD F0, F0 // F0=2*Pi, F1=x - FXCHD F0, F1 // F0=x, F1=2*Pi - FPREM1 // F0=reduced_x, F1=2*Pi - FSTSW AX // AX=status word - ANDW $0x0400, AX - JNE -3(PC) // jump if reduction incomplete - FMOVDP F0, F1 // F0=reduced_x - FPTAN // F0=1, F1=tan(reduced_x) - FMOVDP F0, F0 // F0=tan(reduced_x) - FMOVDP F0, ret+8(FP) - RET + JMP ·tan(SB) diff --git a/src/math/tan_s390x.s b/src/math/tan_s390x.s index 7b05ba053e3e0..b6e2295874e6e 100644 --- a/src/math/tan_s390x.s +++ b/src/math/tan_s390x.s @@ -68,7 +68,7 @@ L2: WFMADB V4, V3, V2, V4 FMUL F2, F2 VLEG $0, 48(R5), V18 - WORD $0xB3CD0016 //lgdr %r1,%f6 + LGDR F6, R1 FMOVD 40(R5), F5 FMOVD 32(R5), F3 FMADD F1, F2, F3 @@ -82,7 +82,7 @@ L2: WFLCDB V4, V16 WFMADB V2, V5, V18, V5 WFMADB V1, V0, V7, V0 - WORD $0xA7110001 //tmll %r1,1 + TMLL R1, $1 WFMADB V1, V5, V3, V1 BNE L12 WFDDB V0, V1, V0 diff --git a/src/math/trig_reduce.go b/src/math/trig_reduce.go new file mode 100644 index 0000000000000..6f8eaba9b9184 --- /dev/null +++ b/src/math/trig_reduce.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math + +import ( + "math/bits" +) + +// reduceThreshold is the maximum value where the reduction using Pi/4 +// in 3 float64 parts still gives accurate results. Above this +// threshold Payne-Hanek range reduction must be used. +const reduceThreshold = (1 << 52) / (4 / Pi) + +// trigReduce implements Payne-Hanek range reduction by Pi/4 +// for x > 0. It returns the integer part mod 8 (j) and +// the fractional part (z) of x / (Pi/4). +// The implementation is based on: +// "ARGUMENT REDUCTION FOR HUGE ARGUMENTS: Good to the Last Bit" +// K. C. 
Ng et al, March 24, 1992 +// The simulated multi-precision calculation of x*B uses 64-bit integer arithmetic. +func trigReduce(x float64) (j uint64, z float64) { + const PI4 = Pi / 4 + if x < PI4 { + return 0, x + } + // Extract out the integer and exponent such that, + // x = ix * 2 ** exp. + ix := Float64bits(x) + exp := int(ix>>shift&mask) - bias - shift + ix &^= mask << shift + ix |= 1 << shift + // Use the exponent to extract the 3 appropriate uint64 digits from mPi4, + // B ~ (z0, z1, z2), such that the product leading digit has the exponent -61. + // Note, exp >= -53 since x >= PI4 and exp < 971 for maximum float64. + digit, bitshift := uint(exp+61)/64, uint(exp+61)%64 + z0 := (mPi4[digit] << bitshift) | (mPi4[digit+1] >> (64 - bitshift)) + z1 := (mPi4[digit+1] << bitshift) | (mPi4[digit+2] >> (64 - bitshift)) + z2 := (mPi4[digit+2] << bitshift) | (mPi4[digit+3] >> (64 - bitshift)) + // Multiply mantissa by the digits and extract the upper two digits (hi, lo). + z2hi, _ := bits.Mul64(z2, ix) + z1hi, z1lo := bits.Mul64(z1, ix) + z0lo := z0 * ix + lo, c := bits.Add64(z1lo, z2hi, 0) + hi, _ := bits.Add64(z0lo, z1hi, c) + // The top 3 bits are j. + j = hi >> 61 + // Extract the fraction and find its magnitude. + hi = hi<<3 | lo>>61 + lz := uint(bits.LeadingZeros64(hi)) + e := uint64(bias - (lz + 1)) + // Clear implicit mantissa bit and shift into place. + hi = (hi << (lz + 1)) | (lo >> (64 - (lz + 1))) + hi >>= 64 - shift + // Include the exponent and convert to a float. + hi |= e << shift + z = Float64frombits(hi) + // Map zeros to origin. + if j&1 == 1 { + j++ + j &= 7 + z-- + } + // Multiply the fractional part by pi/4. + return j, z * PI4 +} + +// mPi4 is the binary digits of 4/pi as a uint64 array, +// that is, 4/pi = Sum mPi4[i]*2^(-64*i) +// 19 64-bit digits and the leading one bit give 1217 bits +// of precision to handle the largest possible float64 exponent. 
+var mPi4 = [...]uint64{ + 0x0000000000000001, + 0x45f306dc9c882a53, + 0xf84eafa3ea69bb81, + 0xb6c52b3278872083, + 0xfca2c757bd778ac3, + 0x6e48dc74849ba5c0, + 0x0c925dd413a32439, + 0xfc3bd63962534e7d, + 0xd1046bea5d768909, + 0xd338e04d68befc82, + 0x7323ac7306a673e9, + 0x3908bf177bf25076, + 0x3ff12fffbc0b301f, + 0xde5e2316b414da3e, + 0xda6cfd9e4f96136e, + 0x9e8c7ecd3cbfd45a, + 0xea4f758fd7cbe2f6, + 0x7a0e73ef14a525d4, + 0xd7f6bf623f1aba10, + 0xac06608df8f6d757, +} diff --git a/src/math/unsafe.go b/src/math/unsafe.go index 5ae67420f4dc0..e59f50ca62e5c 100644 --- a/src/math/unsafe.go +++ b/src/math/unsafe.go @@ -6,16 +6,24 @@ package math import "unsafe" -// Float32bits returns the IEEE 754 binary representation of f. +// Float32bits returns the IEEE 754 binary representation of f, +// with the sign bit of f and the result in the same bit position. +// Float32bits(Float32frombits(x)) == x. func Float32bits(f float32) uint32 { return *(*uint32)(unsafe.Pointer(&f)) } -// Float32frombits returns the floating point number corresponding -// to the IEEE 754 binary representation b. +// Float32frombits returns the floating-point number corresponding +// to the IEEE 754 binary representation b, with the sign bit of b +// and the result in the same bit position. +// Float32frombits(Float32bits(x)) == x. func Float32frombits(b uint32) float32 { return *(*float32)(unsafe.Pointer(&b)) } -// Float64bits returns the IEEE 754 binary representation of f. +// Float64bits returns the IEEE 754 binary representation of f, +// with the sign bit of f and the result in the same bit position, +// and Float64bits(Float64frombits(x)) == x. func Float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) } -// Float64frombits returns the floating point number corresponding -// the IEEE 754 binary representation b. 
+// Float64frombits returns the floating-point number corresponding +// to the IEEE 754 binary representation b, with the sign bit of b +// and the result in the same bit position. +// Float64frombits(Float64bits(x)) == x. func Float64frombits(b uint64) float64 { return *(*float64)(unsafe.Pointer(&b)) } diff --git a/src/mime/mediatype.go b/src/mime/mediatype.go index ea2bbac1891be..05390773a8afa 100644 --- a/src/mime/mediatype.go +++ b/src/mime/mediatype.go @@ -56,7 +56,8 @@ func FormatMediaType(t string, param map[string]string) string { b.WriteByte('"') offset := 0 - for index, character := range value { + for index := 0; index < len(value); index++ { + character := value[index] if character == '"' || character == '\\' { b.WriteString(value[offset:index]) offset = index @@ -280,7 +281,7 @@ func consumeValue(v string) (value, rest string) { // and intended as a literal backslash. This makes Go servers deal better // with MSIE without affecting the way they handle conforming MIME // generators. - if r == '\\' && i+1 < len(v) && !isTokenChar(rune(v[i+1])) { + if r == '\\' && i+1 < len(v) && isTSpecial(rune(v[i+1])) { buffer.WriteByte(v[i+1]) i++ continue diff --git a/src/mime/mediatype_test.go b/src/mime/mediatype_test.go index 88d742f0aab5f..945a8189e171c 100644 --- a/src/mime/mediatype_test.go +++ b/src/mime/mediatype_test.go @@ -40,6 +40,8 @@ func TestConsumeValue(t *testing.T) { {`"\\" rest`, "\\", " rest"}, {`"My \" value"end`, "My \" value", "end"}, {`"\" rest`, "", `"\" rest`}, + {`"C:\dev\go\robots.txt"`, `C:\dev\go\robots.txt`, ""}, + {`"C:\新建文件件\中文第二次测试.mp4"`, `C:\新建文件件\中文第二次测试.mp4`, ""}, } for _, test := range tests { value, rest := consumeValue(test[0]) @@ -393,6 +395,7 @@ func TestParseMediaType(t *testing.T) { // Microsoft browers in intranet mode do not think they need to escape \ in file name. 
{`form-data; name="file"; filename="C:\dev\go\robots.txt"`, "form-data", m("name", "file", "filename", `C:\dev\go\robots.txt`)}, + {`form-data; name="file"; filename="C:\新建文件件\中文第二次测试.mp4"`, "form-data", m("name", "file", "filename", `C:\新建文件件\中文第二次测试.mp4`)}, } for _, test := range tests { mt, params, err := ParseMediaType(test.in) @@ -478,6 +481,8 @@ var formatTests = []formatTest{ {"noslash", map[string]string{"X": "Y"}, "noslash; x=Y"}, // e.g. Content-Disposition values (RFC 2183); issue 11289 {"foo bar/baz", nil, ""}, {"foo/bar baz", nil, ""}, + {"attachment", map[string]string{"filename": "ĄĄŽŽČČŠŠ"}, ""}, + {"attachment", map[string]string{"filename": "ÁÁÊÊÇÇÎÎ"}, ""}, {"foo/BAR", nil, "foo/bar"}, {"foo/BAR", map[string]string{"X": "Y"}, "foo/bar; x=Y"}, {"foo/BAR", map[string]string{"space": "With space"}, `foo/bar; space="With space"`}, diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go index 2d6a830cb669b..105a82c417063 100644 --- a/src/mime/multipart/formdata_test.go +++ b/src/mime/multipart/formdata_test.go @@ -13,7 +13,7 @@ import ( ) func TestReadForm(t *testing.T) { - b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1)) + b := strings.NewReader(strings.ReplaceAll(message, "\n", "\r\n")) r := NewReader(b, boundary) f, err := r.ReadForm(25) if err != nil { @@ -39,7 +39,7 @@ func TestReadForm(t *testing.T) { } func TestReadFormWithNamelessFile(t *testing.T) { - b := strings.NewReader(strings.Replace(messageWithFileWithoutName, "\n", "\r\n", -1)) + b := strings.NewReader(strings.ReplaceAll(messageWithFileWithoutName, "\n", "\r\n")) r := NewReader(b, boundary) f, err := r.ReadForm(25) if err != nil { @@ -54,7 +54,7 @@ func TestReadFormWithNamelessFile(t *testing.T) { func TestReadFormWithTextContentType(t *testing.T) { // From https://github.com/golang/go/issues/24041 - b := strings.NewReader(strings.Replace(messageWithTextContentType, "\n", "\r\n", -1)) + b := 
strings.NewReader(strings.ReplaceAll(messageWithTextContentType, "\n", "\r\n")) r := NewReader(b, boundary) f, err := r.ReadForm(25) if err != nil { @@ -184,7 +184,7 @@ Content-Disposition: form-data; name="largetext" --MyBoundary-- ` - testBody := strings.Replace(message, "\n", "\r\n", -1) + testBody := strings.ReplaceAll(message, "\n", "\r\n") testCases := []struct { name string maxMemory int64 diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go index 0993fb7e91d5a..a222409d3ce64 100644 --- a/src/mime/multipart/multipart.go +++ b/src/mime/multipart/multipart.go @@ -21,6 +21,7 @@ import ( "mime" "mime/quotedprintable" "net/textproto" + "strings" ) var emptyParams = make(map[string]string) @@ -135,7 +136,7 @@ func newPart(mr *Reader) (*Part, error) { } bp.r = partReader{bp} const cte = "Content-Transfer-Encoding" - if bp.Header.Get(cte) == "quoted-printable" { + if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") { bp.Header.Del(cte) bp.r = quotedprintable.NewReader(bp.r) } diff --git a/src/mime/multipart/multipart_test.go b/src/mime/multipart/multipart_test.go index abe1cc8e77cc7..5a8102b82236c 100644 --- a/src/mime/multipart/multipart_test.go +++ b/src/mime/multipart/multipart_test.go @@ -105,7 +105,7 @@ never read data useless trailer ` - testBody = strings.Replace(testBody, "\n", sep, -1) + testBody = strings.ReplaceAll(testBody, "\n", sep) return strings.Replace(testBody, "[longline]", longLine, 1) } @@ -151,7 +151,7 @@ func testMultipart(t *testing.T, r io.Reader, onlyNewlines bool) { adjustNewlines := func(s string) string { if onlyNewlines { - return strings.Replace(s, "\r\n", "\n", -1) + return strings.ReplaceAll(s, "\r\n", "\n") } return s } @@ -299,7 +299,7 @@ foo-bar: baz Oh no, premature EOF! 
` - body := strings.Replace(testBody, "\n", "\r\n", -1) + body := strings.ReplaceAll(testBody, "\n", "\r\n") bodyReader := strings.NewReader(body) r := NewReader(bodyReader, "MyBoundary") @@ -419,8 +419,16 @@ func TestLineContinuation(t *testing.T) { } func TestQuotedPrintableEncoding(t *testing.T) { + for _, cte := range []string{"quoted-printable", "Quoted-PRINTABLE"} { + t.Run(cte, func(t *testing.T) { + testQuotedPrintableEncoding(t, cte) + }) + } +} + +func testQuotedPrintableEncoding(t *testing.T, cte string) { // From https://golang.org/issue/4411 - body := "--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=text\r\nContent-Transfer-Encoding: quoted-printable\r\n\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words\r\n--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=submit\r\n\r\nSubmit\r\n--0016e68ee29c5d515f04cedf6733--" + body := "--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; name=text\r\nContent-Transfer-Encoding: " + cte + "\r\n\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words words words words wor=\r\nds words words words words words words words words words words words words =\r\nwords words words words words words words words words\r\n--0016e68ee29c5d515f04cedf6733\r\nContent-Type: text/plain; charset=ISO-8859-1\r\nContent-Disposition: form-data; 
name=submit\r\n\r\nSubmit\r\n--0016e68ee29c5d515f04cedf6733--" r := NewReader(strings.NewReader(body), "0016e68ee29c5d515f04cedf6733") part, err := r.NextPart() if err != nil { diff --git a/src/mime/multipart/writer.go b/src/mime/multipart/writer.go index 3dd0c8fb1368a..d1ff151a7d1d6 100644 --- a/src/mime/multipart/writer.go +++ b/src/mime/multipart/writer.go @@ -72,7 +72,13 @@ func (w *Writer) SetBoundary(boundary string) error { // FormDataContentType returns the Content-Type for an HTTP // multipart/form-data with this Writer's Boundary. func (w *Writer) FormDataContentType() string { - return "multipart/form-data; boundary=" + w.boundary + b := w.boundary + // We must quote the boundary if it contains any of the + // tspecials characters defined by RFC 2045, or space. + if strings.ContainsAny(b, `()<>@,;:\"/[]?= `) { + b = `"` + b + `"` + } + return "multipart/form-data; boundary=" + b } func randomBoundary() string { diff --git a/src/mime/multipart/writer_test.go b/src/mime/multipart/writer_test.go index 8b1bcd68d870a..b89b093fff6ff 100644 --- a/src/mime/multipart/writer_test.go +++ b/src/mime/multipart/writer_test.go @@ -7,6 +7,7 @@ package multipart import ( "bytes" "io/ioutil" + "mime" "net/textproto" "strings" "testing" @@ -94,6 +95,7 @@ func TestWriterSetBoundary(t *testing.T) { {"my-separator", true}, {"with space", true}, {"badspace ", false}, + {"(boundary)", true}, } for i, tt := range tests { var b bytes.Buffer @@ -107,6 +109,17 @@ func TestWriterSetBoundary(t *testing.T) { if got != tt.b { t.Errorf("boundary = %q; want %q", got, tt.b) } + + ct := w.FormDataContentType() + mt, params, err := mime.ParseMediaType(ct) + if err != nil { + t.Errorf("could not parse Content-Type %q: %v", ct, err) + } else if mt != "multipart/form-data" { + t.Errorf("unexpected media type %q; want %q", mt, "multipart/form-data") + } else if b := params["boundary"]; b != tt.b { + t.Errorf("unexpected boundary parameter %q; want %q", b, tt.b) + } + w.Close() wantSub := 
"\r\n--" + tt.b + "--\r\n" if got := b.String(); !strings.Contains(got, wantSub) { diff --git a/src/mime/type_unix.go b/src/mime/type_unix.go index 6549c0f5e9aca..dfc1f88b2ae6d 100644 --- a/src/mime/type_unix.go +++ b/src/mime/type_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package mime diff --git a/src/naclmake.bash b/src/naclmake.bash index 74fd802f41784..5e6c3ce05e69c 100755 --- a/src/naclmake.bash +++ b/src/naclmake.bash @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2016 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. diff --git a/src/nacltest.bash b/src/nacltest.bash index 3e929a14a4568..dc245b484cb4c 100755 --- a/src/nacltest.bash +++ b/src/nacltest.bash @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2014 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. diff --git a/src/net/addrselect.go b/src/net/addrselect.go index 1ab9fc53261c4..7c0dfe261c4c1 100644 --- a/src/net/addrselect.go +++ b/src/net/addrselect.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Minimal RFC 6724 address selection. 
diff --git a/src/net/cgo_stub.go b/src/net/cgo_stub.go index 51259722aec8e..041f8af12966a 100644 --- a/src/net/cgo_stub.go +++ b/src/net/cgo_stub.go @@ -24,7 +24,7 @@ func cgoLookupPort(ctx context.Context, network, service string) (port int, err return 0, nil, false } -func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, completed bool) { +func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) { return nil, nil, false } diff --git a/src/net/cgo_unix.go b/src/net/cgo_unix.go index 3db867a080e1f..b7cbcfe77a9d3 100644 --- a/src/net/cgo_unix.go +++ b/src/net/cgo_unix.go @@ -49,7 +49,7 @@ type reverseLookupResult struct { } func cgoLookupHost(ctx context.Context, name string) (hosts []string, err error, completed bool) { - addrs, err, completed := cgoLookupIP(ctx, name) + addrs, err, completed := cgoLookupIP(ctx, "ip", name) for _, addr := range addrs { hosts = append(hosts, addr.String()) } @@ -69,13 +69,11 @@ func cgoLookupPort(ctx context.Context, network, service string) (port int, err default: return 0, &DNSError{Err: "unknown network", Name: network + "/" + service}, true } - if len(network) >= 4 { - switch network[3] { - case '4': - hints.ai_family = C.AF_INET - case '6': - hints.ai_family = C.AF_INET6 - } + switch ipVersion(network) { + case '4': + hints.ai_family = C.AF_INET + case '6': + hints.ai_family = C.AF_INET6 } if ctx.Done() == nil { port, err := cgoLookupServicePort(&hints, network, service) @@ -135,13 +133,20 @@ func cgoPortLookup(result chan<- portLookupResult, hints *C.struct_addrinfo, net result <- portLookupResult{port, err} } -func cgoLookupIPCNAME(name string) (addrs []IPAddr, cname string, err error) { +func cgoLookupIPCNAME(network, name string) (addrs []IPAddr, cname string, err error) { acquireThread() defer releaseThread() var hints C.struct_addrinfo hints.ai_flags = cgoAddrInfoFlags hints.ai_socktype = C.SOCK_STREAM + hints.ai_family = C.AF_UNSPEC + switch 
ipVersion(network) { + case '4': + hints.ai_family = C.AF_INET + case '6': + hints.ai_family = C.AF_INET6 + } h := make([]byte, len(name)+1) copy(h, name) @@ -197,18 +202,18 @@ func cgoLookupIPCNAME(name string) (addrs []IPAddr, cname string, err error) { return addrs, cname, nil } -func cgoIPLookup(result chan<- ipLookupResult, name string) { - addrs, cname, err := cgoLookupIPCNAME(name) +func cgoIPLookup(result chan<- ipLookupResult, network, name string) { + addrs, cname, err := cgoLookupIPCNAME(network, name) result <- ipLookupResult{addrs, cname, err} } -func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, completed bool) { +func cgoLookupIP(ctx context.Context, network, name string) (addrs []IPAddr, err error, completed bool) { if ctx.Done() == nil { - addrs, _, err = cgoLookupIPCNAME(name) + addrs, _, err = cgoLookupIPCNAME(network, name) return addrs, err, true } result := make(chan ipLookupResult, 1) - go cgoIPLookup(result, name) + go cgoIPLookup(result, network, name) select { case r := <-result: return r.addrs, r.err, true @@ -219,11 +224,11 @@ func cgoLookupIP(ctx context.Context, name string) (addrs []IPAddr, err error, c func cgoLookupCNAME(ctx context.Context, name string) (cname string, err error, completed bool) { if ctx.Done() == nil { - _, cname, err = cgoLookupIPCNAME(name) + _, cname, err = cgoLookupIPCNAME("ip", name) return cname, err, true } result := make(chan ipLookupResult, 1) - go cgoIPLookup(result, name) + go cgoIPLookup(result, "ip", name) select { case r := <-result: return r.cname, r.err, true diff --git a/src/net/cgo_unix_test.go b/src/net/cgo_unix_test.go index b476a6d62686e..c3eab5b3b2a5e 100644 --- a/src/net/cgo_unix_test.go +++ b/src/net/cgo_unix_test.go @@ -15,7 +15,7 @@ import ( func TestCgoLookupIP(t *testing.T) { defer dnsWaitGroup.Wait() ctx := context.Background() - _, err, ok := cgoLookupIP(ctx, "localhost") + _, err, ok := cgoLookupIP(ctx, "ip", "localhost") if !ok { t.Errorf("cgoLookupIP must 
not be a placeholder") } @@ -28,7 +28,7 @@ func TestCgoLookupIPWithCancel(t *testing.T) { defer dnsWaitGroup.Wait() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - _, err, ok := cgoLookupIP(ctx, "localhost") + _, err, ok := cgoLookupIP(ctx, "ip", "localhost") if !ok { t.Errorf("cgoLookupIP must not be a placeholder") } diff --git a/src/net/conf.go b/src/net/conf.go index 71ed1360c5637..971b1a399a1bb 100644 --- a/src/net/conf.go +++ b/src/net/conf.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net import ( + "internal/bytealg" "os" "runtime" "sync" @@ -132,7 +133,7 @@ func (c *conf) hostLookupOrder(r *Resolver, hostname string) (ret hostLookupOrde if c.forceCgoLookupHost || c.resolv.unknownOpt || c.goos == "android" { return fallbackOrder } - if byteIndex(hostname, '\\') != -1 || byteIndex(hostname, '%') != -1 { + if bytealg.IndexByteString(hostname, '\\') != -1 || bytealg.IndexByteString(hostname, '%') != -1 { // Don't deal with special form hostnames with backslashes // or '%'. return fallbackOrder @@ -301,7 +302,7 @@ func goDebugNetDNS() (dnsMode string, debugLevel int) { dnsMode = s } } - if i := byteIndex(goDebug, '+'); i != -1 { + if i := bytealg.IndexByteString(goDebug, '+'); i != -1 { parsePart(goDebug[:i]) parsePart(goDebug[i+1:]) return diff --git a/src/net/dial.go b/src/net/dial.go index b1a5ca7cd53f5..1dd8690739ef6 100644 --- a/src/net/dial.go +++ b/src/net/dial.go @@ -44,22 +44,31 @@ type Dialer struct { // If nil, a local address is automatically chosen. LocalAddr Addr - // DualStack enables RFC 6555-compliant "Happy Eyeballs" - // dialing when the network is "tcp" and the host in the - // address parameter resolves to both IPv4 and IPv6 addresses. 
- // This allows a client to tolerate networks where one address - // family is silently broken. + // DualStack previously enabled RFC 6555 Fast Fallback + // support, also known as "Happy Eyeballs", in which IPv4 is + // tried soon if IPv6 appears to be misconfigured and + // hanging. + // + // Deprecated: Fast Fallback is enabled by default. To + // disable, set FallbackDelay to a negative value. DualStack bool // FallbackDelay specifies the length of time to wait before - // spawning a fallback connection, when DualStack is enabled. + // spawning a RFC 6555 Fast Fallback connection. That is, this + // is the amount of time to wait for IPv6 to succeed before + // assuming that IPv6 is misconfigured and falling back to + // IPv4. + // // If zero, a default delay of 300ms is used. + // A negative value disables Fast Fallback support. FallbackDelay time.Duration // KeepAlive specifies the keep-alive period for an active // network connection. - // If zero, keep-alives are not enabled. Network protocols + // If zero, keep-alives are enabled if supported by the protocol + // and operating system. Network protocols or operating systems // that do not support keep-alives ignore this field. + // If negative, keep-alives are disabled. KeepAlive time.Duration // Resolver optionally specifies an alternate resolver to use. 
@@ -81,6 +90,8 @@ type Dialer struct { Control func(network, address string, c syscall.RawConn) error } +func (d *Dialer) dualStack() bool { return d.FallbackDelay >= 0 } + func minNonzeroTime(a, b time.Time) time.Time { if a.IsZero() { return b @@ -393,7 +404,7 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn } var primaries, fallbacks addrList - if d.DualStack && network == "tcp" { + if d.dualStack() && network == "tcp" { primaries, fallbacks = addrs.partition(isIPv4) } else { primaries = addrs @@ -409,10 +420,14 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn return nil, err } - if tc, ok := c.(*TCPConn); ok && d.KeepAlive > 0 { + if tc, ok := c.(*TCPConn); ok && d.KeepAlive >= 0 { setKeepAlive(tc.fd, true) - setKeepAlivePeriod(tc.fd, d.KeepAlive) - testHookSetKeepAlive() + ka := d.KeepAlive + if d.KeepAlive == 0 { + ka = 15 * time.Second + } + setKeepAlivePeriod(tc.fd, ka) + testHookSetKeepAlive(ka) } return c, nil } diff --git a/src/net/dial_test.go b/src/net/dial_test.go index 00a84d17d6059..3a2c59a2d1d6b 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -318,9 +318,9 @@ func TestDialParallel(t *testing.T) { expectElapsedMin := tt.expectElapsed - 95*time.Millisecond expectElapsedMax := tt.expectElapsed + 95*time.Millisecond - if !(elapsed >= expectElapsedMin) { + if elapsed < expectElapsedMin { t.Errorf("#%d: got %v; want >= %v", i, elapsed, expectElapsedMin) - } else if !(elapsed <= expectElapsedMax) { + } else if elapsed > expectElapsedMax { t.Errorf("#%d: got %v; want <= %v", i, elapsed, expectElapsedMax) } @@ -346,7 +346,7 @@ func TestDialParallel(t *testing.T) { } } -func lookupSlowFast(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { +func lookupSlowFast(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { switch host { case "slow6loopback4": // 
Returns a slow IPv6 address, and a local IPv4 address. @@ -355,7 +355,7 @@ func lookupSlowFast(ctx context.Context, fn func(context.Context, string) ([]IPA {IP: ParseIP("127.0.0.1")}, }, nil default: - return fn(ctx, host) + return fn(ctx, network, host) } } @@ -418,10 +418,10 @@ func TestDialerFallbackDelay(t *testing.T) { } expectMin := tt.expectElapsed - 1*time.Millisecond expectMax := tt.expectElapsed + 95*time.Millisecond - if !(elapsed >= expectMin) { + if elapsed < expectMin { t.Errorf("#%d: got %v; want >= %v", i, elapsed, expectMin) } - if !(elapsed <= expectMax) { + if elapsed > expectMax { t.Errorf("#%d: got %v; want <= %v", i, elapsed, expectMax) } } @@ -729,22 +729,29 @@ func TestDialerKeepAlive(t *testing.T) { if err := ls.buildup(handler); err != nil { t.Fatal(err) } - defer func() { testHookSetKeepAlive = func() {} }() + defer func() { testHookSetKeepAlive = func(time.Duration) {} }() - for _, keepAlive := range []bool{false, true} { - got := false - testHookSetKeepAlive = func() { got = true } - var d Dialer - if keepAlive { - d.KeepAlive = 30 * time.Second - } + tests := []struct { + ka time.Duration + expected time.Duration + }{ + {-1, -1}, + {0, 15 * time.Second}, + {5 * time.Second, 5 * time.Second}, + {30 * time.Second, 30 * time.Second}, + } + + for _, test := range tests { + var got time.Duration = -1 + testHookSetKeepAlive = func(d time.Duration) { got = d } + d := Dialer{KeepAlive: test.ka} c, err := d.Dial("tcp", ls.Listener.Addr().String()) if err != nil { t.Fatal(err) } c.Close() - if got != keepAlive { - t.Errorf("Dialer.KeepAlive = %v: SetKeepAlive called = %v, want %v", d.KeepAlive, got, !got) + if got != test.expected { + t.Errorf("Dialer.KeepAlive = %v: SetKeepAlive set to %v, want %v", d.KeepAlive, got, test.expected) } } } diff --git a/src/net/dial_unix_test.go b/src/net/dial_unix_test.go index 0adc10d0bdbea..3cfc9d81b8e82 100644 --- a/src/net/dial_unix_test.go +++ b/src/net/dial_unix_test.go @@ -2,7 +2,7 @@ // Use of this source 
code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/src/net/dnsclient.go b/src/net/dnsclient.go index 2e4bffaab8b85..4fdf60ff4e35a 100644 --- a/src/net/dnsclient.go +++ b/src/net/dnsclient.go @@ -8,7 +8,7 @@ import ( "math/rand" "sort" - "golang_org/x/net/dns/dnsmessage" + "internal/x/net/dns/dnsmessage" ) // reverseaddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP @@ -27,10 +27,10 @@ func reverseaddr(addr string) (arpa string, err error) { // Add it, in reverse, to the buffer for i := len(ip) - 1; i >= 0; i-- { v := ip[i] - buf = append(buf, hexDigit[v&0xF]) - buf = append(buf, '.') - buf = append(buf, hexDigit[v>>4]) - buf = append(buf, '.') + buf = append(buf, hexDigit[v&0xF], + '.', + hexDigit[v>>4], + '.') } // Append "ip6.arpa." and return (buf already has the final .) buf = append(buf, "ip6.arpa."...) @@ -75,7 +75,7 @@ func isDomainName(s string) bool { } last := byte('.') - ok := false // Ok once we've seen a letter. + nonNumeric := false // true once we've seen a letter or hyphen partlen := 0 for i := 0; i < len(s); i++ { c := s[i] @@ -83,7 +83,7 @@ func isDomainName(s string) bool { default: return false case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_': - ok = true + nonNumeric = true partlen++ case '0' <= c && c <= '9': // fine @@ -94,6 +94,7 @@ func isDomainName(s string) bool { return false } partlen++ + nonNumeric = true case c == '.': // Byte before dot cannot be dot, dash. if last == '.' 
|| last == '-' { @@ -110,7 +111,7 @@ func isDomainName(s string) bool { return false } - return ok + return nonNumeric } // absDomainName returns an absolute domain name which ends with a diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index 2fee3346e92dc..86ce92dc43784 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -2,14 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // DNS client: see RFC 1035. // Has to be linked into package net for Dial. // TODO(rsc): // Could potentially handle many outstanding lookups faster. -// Could have a small cache. // Random UDP source port (net.Dial should do that for us). // Random request IDs. @@ -24,7 +23,21 @@ import ( "sync" "time" - "golang_org/x/net/dns/dnsmessage" + "internal/x/net/dns/dnsmessage" +) + +var ( + errLameReferral = errors.New("lame referral") + errCannotUnmarshalDNSMessage = errors.New("cannot unmarshal DNS message") + errCannotMarshalDNSMessage = errors.New("cannot marshal DNS message") + errServerMisbehaving = errors.New("server misbehaving") + errInvalidDNSResponse = errors.New("invalid DNS response") + errNoAnswerFromDNSServer = errors.New("no answer from DNS server") + + // errServerTemporarlyMisbehaving is like errServerMisbehaving, except + // that when it gets translated to a DNSError, the IsTemporary field + // gets set to true. 
+ errServerTemporarlyMisbehaving = errors.New("server misbehaving") ) func newRequest(q dnsmessage.Question) (id uint16, udpReq, tcpReq []byte, err error) { @@ -105,14 +118,14 @@ func dnsStreamRoundTrip(c Conn, id uint16, query dnsmessage.Question, b []byte) var p dnsmessage.Parser h, err := p.Start(b[:n]) if err != nil { - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("cannot unmarshal DNS message") + return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage } q, err := p.Question() if err != nil { - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("cannot unmarshal DNS message") + return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotUnmarshalDNSMessage } if !checkResponse(id, query, h, q) { - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("invalid DNS response") + return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse } return p, h, nil } @@ -122,7 +135,7 @@ func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Que q.Class = dnsmessage.ClassINET id, udpReq, tcpReq, err := newRequest(q) if err != nil { - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("cannot marshal DNS message") + return dnsmessage.Parser{}, dnsmessage.Header{}, errCannotMarshalDNSMessage } for _, network := range []string{"udp", "tcp"} { ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout)) @@ -147,31 +160,31 @@ func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Que return dnsmessage.Parser{}, dnsmessage.Header{}, mapErr(err) } if err := p.SkipQuestion(); err != dnsmessage.ErrSectionDone { - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("invalid DNS response") + return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse } if h.Truncated { // see RFC 5966 continue } return p, h, nil } - return dnsmessage.Parser{}, dnsmessage.Header{}, errors.New("no answer from DNS server") + return dnsmessage.Parser{}, 
dnsmessage.Header{}, errNoAnswerFromDNSServer } // checkHeader performs basic sanity checks on the header. func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header, name, server string) error { + if h.RCode == dnsmessage.RCodeNameError { + return errNoSuchHost + } + _, err := p.AnswerHeader() if err != nil && err != dnsmessage.ErrSectionDone { - return &DNSError{ - Err: "cannot unmarshal DNS message", - Name: name, - Server: server, - } + return errCannotUnmarshalDNSMessage } // libresolv continues to the next server when it receives // an invalid referral response. See golang.org/issue/15434. if h.RCode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone { - return &DNSError{Err: "lame referral", Name: name, Server: server} + return errLameReferral } if h.RCode != dnsmessage.RCodeSuccess && h.RCode != dnsmessage.RCodeNameError { @@ -180,11 +193,10 @@ func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header, name, server string) // a name error and we didn't get success, // the server is behaving incorrectly or // having temporary trouble. 
- err := &DNSError{Err: "server misbehaving", Name: name, Server: server} if h.RCode == dnsmessage.RCodeServerFailure { - err.IsTemporary = true + return errServerTemporarlyMisbehaving } - return err + return errServerMisbehaving } return nil @@ -194,28 +206,16 @@ func skipToAnswer(p *dnsmessage.Parser, qtype dnsmessage.Type, name, server stri for { h, err := p.AnswerHeader() if err == dnsmessage.ErrSectionDone { - return &DNSError{ - Err: errNoSuchHost.Error(), - Name: name, - Server: server, - } + return errNoSuchHost } if err != nil { - return &DNSError{ - Err: "cannot unmarshal DNS message", - Name: name, - Server: server, - } + return errCannotUnmarshalDNSMessage } if h.Type == qtype { return nil } if err := p.SkipAnswer(); err != nil { - return &DNSError{ - Err: "cannot unmarshal DNS message", - Name: name, - Server: server, - } + return errCannotUnmarshalDNSMessage } } } @@ -229,7 +229,7 @@ func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, n, err := dnsmessage.NewName(name) if err != nil { - return dnsmessage.Parser{}, "", errors.New("cannot marshal DNS message") + return dnsmessage.Parser{}, "", errCannotMarshalDNSMessage } q := dnsmessage.Question{ Name: n, @@ -243,38 +243,62 @@ func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, p, h, err := r.exchange(ctx, server, q, cfg.timeout) if err != nil { - lastErr = &DNSError{ + dnsErr := &DNSError{ Err: err.Error(), Name: name, Server: server, } if nerr, ok := err.(Error); ok && nerr.Timeout() { - lastErr.(*DNSError).IsTimeout = true + dnsErr.IsTimeout = true } // Set IsTemporary for socket-level errors. Note that this flag // may also be used to indicate a SERVFAIL response. if _, ok := err.(*OpError); ok { - lastErr.(*DNSError).IsTemporary = true + dnsErr.IsTemporary = true } + lastErr = dnsErr continue } - // The name does not exist, so trying another server won't help. - // - // TODO: indicate this in a more obvious way, such as a field on DNSError? 
- if h.RCode == dnsmessage.RCodeNameError { - return dnsmessage.Parser{}, "", &DNSError{Err: errNoSuchHost.Error(), Name: name, Server: server} - } - - lastErr = checkHeader(&p, h, name, server) - if lastErr != nil { + if err := checkHeader(&p, h, name, server); err != nil { + dnsErr := &DNSError{ + Err: err.Error(), + Name: name, + Server: server, + } + if err == errServerTemporarlyMisbehaving { + dnsErr.IsTemporary = true + } + if err == errNoSuchHost { + // The name does not exist, so trying + // another server won't help. + // + // TODO: indicate this in a more + // obvious way, such as a field on + // DNSError? + return p, server, dnsErr + } + lastErr = dnsErr continue } - lastErr = skipToAnswer(&p, qtype, name, server) - if lastErr == nil { + err = skipToAnswer(&p, qtype, name, server) + if err == nil { return p, server, nil } + lastErr = &DNSError{ + Err: err.Error(), + Name: name, + Server: server, + } + if err == errNoSuchHost { + // The name does not exist, so trying another + // server won't help. + // + // TODO: indicate this in a more obvious way, + // such as a field on DNSError? + return p, server, lastErr + } } } return dnsmessage.Parser{}, "", lastErr diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go index bb014b903ab93..be04a44c14bea 100644 --- a/src/net/dnsclient_unix_test.go +++ b/src/net/dnsclient_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net @@ -20,7 +20,7 @@ import ( "testing" "time" - "golang_org/x/net/dns/dnsmessage" + "internal/x/net/dns/dnsmessage" ) var goResolver = Resolver{PreferGo: true} @@ -1427,28 +1427,35 @@ func TestDNSGoroutineRace(t *testing.T) { } } +func lookupWithFake(fake fakeDNSServer, name string, typ dnsmessage.Type) error { + r := Resolver{PreferGo: true, Dial: fake.DialContext} + + resolvConf.mu.RLock() + conf := resolvConf.dnsConfig + resolvConf.mu.RUnlock() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, _, err := r.tryOneName(ctx, conf, name, typ) + return err +} + // Issue 8434: verify that Temporary returns true on an error when rcode // is SERVFAIL func TestIssue8434(t *testing.T) { - msg := dnsmessage.Message{ - Header: dnsmessage.Header{ - RCode: dnsmessage.RCodeServerFailure, + err := lookupWithFake(fakeDNSServer{ + rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + return dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.ID, + Response: true, + RCode: dnsmessage.RCodeServerFailure, + }, + Questions: q.Questions, + }, nil }, - } - b, err := msg.Pack() - if err != nil { - t.Fatal("Pack failed:", err) - } - var p dnsmessage.Parser - h, err := p.Start(b) - if err != nil { - t.Fatal("Start failed:", err) - } - if err := p.SkipAllQuestions(); err != nil { - t.Fatal("SkipAllQuestions failed:", err) - } - - err = checkHeader(&p, h, "golang.org", "foo:53") + }, "golang.org.", dnsmessage.TypeALL) if err == nil { t.Fatal("expected an error") } @@ -1464,50 +1471,76 @@ func TestIssue8434(t *testing.T) { } } -// Issue 12778: verify that NXDOMAIN without RA bit errors as -// "no such host" and not "server misbehaving" +// TestNoSuchHost verifies that tryOneName works correctly when the domain does +// not exist. 
+// +// Issue 12778: verify that NXDOMAIN without RA bit errors as "no such host" +// and not "server misbehaving" // // Issue 25336: verify that NXDOMAIN errors fail fast. -func TestIssue12778(t *testing.T) { - lookups := 0 - fake := fakeDNSServer{ - rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { - lookups++ - return dnsmessage.Message{ - Header: dnsmessage.Header{ - ID: q.ID, - Response: true, - RCode: dnsmessage.RCodeNameError, - RecursionAvailable: false, - }, - Questions: q.Questions, - }, nil +// +// Issue 27525: verify that empty answers fail fast. +func TestNoSuchHost(t *testing.T) { + tests := []struct { + name string + f func(string, string, dnsmessage.Message, time.Time) (dnsmessage.Message, error) + }{ + { + "NXDOMAIN", + func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + return dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.ID, + Response: true, + RCode: dnsmessage.RCodeNameError, + RecursionAvailable: false, + }, + Questions: q.Questions, + }, nil + }, + }, + { + "no answers", + func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + return dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.ID, + Response: true, + RCode: dnsmessage.RCodeSuccess, + RecursionAvailable: false, + Authoritative: true, + }, + Questions: q.Questions, + }, nil + }, }, } - r := Resolver{PreferGo: true, Dial: fake.DialContext} - - resolvConf.mu.RLock() - conf := resolvConf.dnsConfig - resolvConf.mu.RUnlock() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - _, _, err := r.tryOneName(ctx, conf, ".", dnsmessage.TypeALL) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lookups := 0 + err := lookupWithFake(fakeDNSServer{ + rh: func(n, s string, q dnsmessage.Message, d time.Time) (dnsmessage.Message, error) { + lookups++ + return test.f(n, s, q, d) + }, + }, ".", dnsmessage.TypeALL) - if lookups != 1 { - 
t.Errorf("got %d lookups, wanted 1", lookups) - } + if lookups != 1 { + t.Errorf("got %d lookups, wanted 1", lookups) + } - if err == nil { - t.Fatal("expected an error") - } - de, ok := err.(*DNSError) - if !ok { - t.Fatalf("err = %#v; wanted a *net.DNSError", err) - } - if de.Err != errNoSuchHost.Error() { - t.Fatalf("Err = %#v; wanted %q", de.Err, errNoSuchHost.Error()) + if err == nil { + t.Fatal("expected an error") + } + de, ok := err.(*DNSError) + if !ok { + t.Fatalf("err = %#v; wanted a *net.DNSError", err) + } + if de.Err != errNoSuchHost.Error() { + t.Fatalf("Err = %#v; wanted %q", de.Err, errNoSuchHost.Error()) + } + }) } } @@ -1535,3 +1568,56 @@ func TestDNSDialTCP(t *testing.T) { t.Fatal("exhange failed:", err) } } + +// Issue 27763: verify that two strings in one TXT record are concatenated. +func TestTXTRecordTwoStrings(t *testing.T) { + fake := fakeDNSServer{ + rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + r := dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.Header.ID, + Response: true, + RCode: dnsmessage.RCodeSuccess, + }, + Questions: q.Questions, + Answers: []dnsmessage.Resource{ + { + Header: dnsmessage.ResourceHeader{ + Name: q.Questions[0].Name, + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.TXTResource{ + TXT: []string{"string1 ", "string2"}, + }, + }, + { + Header: dnsmessage.ResourceHeader{ + Name: q.Questions[0].Name, + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + Body: &dnsmessage.TXTResource{ + TXT: []string{"onestring"}, + }, + }, + }, + } + return r, nil + }, + } + r := Resolver{PreferGo: true, Dial: fake.DialContext} + txt, err := r.lookupTXT(context.Background(), "golang.org") + if err != nil { + t.Fatal("LookupTXT failed:", err) + } + if want := 2; len(txt) != want { + t.Fatalf("len(txt), got %d, want %d", len(txt), want) + } + if want := "string1 string2"; txt[0] != want { + t.Errorf("txt[0], got %q, want %q", txt[0], want) + } 
+ if want := "onestring"; txt[1] != want { + t.Errorf("txt[1], got %q, want %q", txt[1], want) + } +} diff --git a/src/net/dnsconfig_unix.go b/src/net/dnsconfig_unix.go index 707fd6f6fe841..842d408e5625b 100644 --- a/src/net/dnsconfig_unix.go +++ b/src/net/dnsconfig_unix.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Read system DNS config from /etc/resolv.conf package net import ( + "internal/bytealg" "os" "sync/atomic" "time" @@ -155,7 +156,7 @@ func dnsDefaultSearch() []string { // best effort return nil } - if i := byteIndex(hn, '.'); i >= 0 && i < len(hn)-1 { + if i := bytealg.IndexByteString(hn, '.'); i >= 0 && i < len(hn)-1 { return []string{ensureRooted(hn[i+1:])} } return nil diff --git a/src/net/dnsconfig_unix_test.go b/src/net/dnsconfig_unix_test.go index 37bdeb04c87ad..0797559d1a208 100644 --- a/src/net/dnsconfig_unix_test.go +++ b/src/net/dnsconfig_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/src/net/dnsname_test.go b/src/net/dnsname_test.go index 806d8756cb5c5..2964982311ce4 100644 --- a/src/net/dnsname_test.go +++ b/src/net/dnsname_test.go @@ -22,6 +22,7 @@ var dnsNameTests = []dnsNameTest{ {"foo.com", true}, {"1foo.com", true}, {"26.0.0.73.com", true}, + {"10-0-0-1", true}, {"fo-o.com", true}, {"fo1o.com", true}, {"foo1.com", true}, diff --git a/src/net/error_posix.go b/src/net/error_posix.go index 0000700809e5c..70efa4c66fbca 100644 --- a/src/net/error_posix.go +++ b/src/net/error_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net diff --git a/src/net/error_test.go b/src/net/error_test.go index e09670e5ce0e2..2819986c0cd7c 100644 --- a/src/net/error_test.go +++ b/src/net/error_test.go @@ -144,7 +144,7 @@ func TestDialError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "dial error test", Name: "name", Server: "server", IsTimeout: true} } sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) { @@ -293,7 +293,7 @@ func TestListenError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(_ context.Context, fn func(context.Context, string) 
([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "listen error test", Name: "name", Server: "server", IsTimeout: true} } sw.Set(socktest.FilterListen, func(so *socktest.Status) (socktest.AfterFilter, error) { @@ -353,7 +353,7 @@ func TestListenPacketError(t *testing.T) { origTestHookLookupIP := testHookLookupIP defer func() { testHookLookupIP = origTestHookLookupIP }() - testHookLookupIP = func(_ context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { + testHookLookupIP = func(_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { return nil, &DNSError{Err: "listen error test", Name: "name", Server: "server", IsTimeout: true} } diff --git a/src/net/error_unix.go b/src/net/error_unix.go index b5a5829eaa083..e6153303882fe 100644 --- a/src/net/error_unix.go +++ b/src/net/error_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js linux netbsd openbsd solaris package net diff --git a/src/net/fd_unix.go b/src/net/fd_unix.go index 055ecf0336b76..e7ab9a45fd65c 100644 --- a/src/net/fd_unix.go +++ b/src/net/fd_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package net diff --git a/src/net/file_unix.go b/src/net/file_unix.go index 676798d693165..452a079bfc5b8 100644 --- a/src/net/file_unix.go +++ b/src/net/file_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/src/net/hook.go b/src/net/hook.go index d7316ea4383f5..ea71803e22a70 100644 --- a/src/net/hook.go +++ b/src/net/hook.go @@ -4,7 +4,10 @@ package net -import "context" +import ( + "context" + "time" +) var ( // if non-nil, overrides dialTCP. @@ -13,10 +16,11 @@ var ( testHookHostsPath = "/etc/hosts" testHookLookupIP = func( ctx context.Context, - fn func(context.Context, string) ([]IPAddr, error), + fn func(context.Context, string, string) ([]IPAddr, error), + network string, host string, ) ([]IPAddr, error) { - return fn(ctx, host) + return fn(ctx, network, host) } - testHookSetKeepAlive = func() {} + testHookSetKeepAlive = func(time.Duration) {} ) diff --git a/src/net/hook_unix.go b/src/net/hook_unix.go index d672bd01b03f4..a1568319f3c2a 100644 --- a/src/net/hook_unix.go +++ b/src/net/hook_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package net diff --git a/src/net/hosts.go b/src/net/hosts.go index ebc0353a7fb6d..5c560f3756ed1 100644 --- a/src/net/hosts.go +++ b/src/net/hosts.go @@ -5,6 +5,7 @@ package net import ( + "internal/bytealg" "sync" "time" ) @@ -68,7 +69,7 @@ func readHosts() { return } for line, ok := file.readLine(); ok; line, ok = file.readLine() { - if i := byteIndex(line, '#'); i >= 0 { + if i := bytealg.IndexByteString(line, '#'); i >= 0 { // Discard comments. line = line[0:i] } diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go index da12ac34980b8..10325c2eb5ae0 100644 --- a/src/net/http/cgi/child.go +++ b/src/net/http/cgi/child.go @@ -86,7 +86,7 @@ func RequestFromMap(params map[string]string) (*http.Request, error) { if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" { continue } - r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) + r.Header.Add(strings.ReplaceAll(k[5:], "_", "-"), v) } // TODO: cookies. parsing them isn't exported, though. diff --git a/src/net/http/client.go b/src/net/http/client.go index 8f69a298e3fcf..921f86bd92dc7 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -238,7 +238,7 @@ func send(ireq *Request, rt RoundTripper, deadline time.Time) (resp *Response, d username := u.Username() password, _ := u.Password() forkReq() - req.Header = cloneHeader(ireq.Header) + req.Header = ireq.Header.clone() req.Header.Set("Authorization", "Basic "+basicAuth(username, password)) } @@ -478,10 +478,10 @@ func urlErrorOp(method string) string { // error. // // If the returned error is nil, the Response will contain a non-nil -// Body which the user is expected to close. If the Body is not -// closed, the Client's underlying RoundTripper (typically Transport) -// may not be able to re-use a persistent TCP connection to the server -// for a subsequent "keep-alive" request. 
+// Body which the user is expected to close. If the Body is not both +// read to EOF and closed, the Client's underlying RoundTripper +// (typically Transport) may not be able to re-use a persistent TCP +// connection to the server for a subsequent "keep-alive" request. // // The request Body, if non-nil, will be closed by the underlying // Transport, even on errors. @@ -833,6 +833,22 @@ func (c *Client) Head(url string) (resp *Response, err error) { return c.Do(req) } +// CloseIdleConnections closes any connections on its Transport which +// were previously connected from previous requests but are now +// sitting idle in a "keep-alive" state. It does not interrupt any +// connections currently in use. +// +// If the Client's Transport does not have a CloseIdleConnections method +// then this method does nothing. +func (c *Client) CloseIdleConnections() { + type closeIdler interface { + CloseIdleConnections() + } + if tr, ok := c.transport().(closeIdler); ok { + tr.CloseIdleConnections() + } +} + // cancelTimerBody is an io.ReadCloser that wraps rc with two features: // 1) on Read error or close, the stop func is called. 
// 2) On Read failure, if reqDidTimeout is true, the error is wrapped and diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index bfc793e638cae..1c59ce7435216 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -977,6 +977,7 @@ func TestResponseSetsTLSConnectionState(t *testing.T) { c := ts.Client() tr := c.Transport.(*Transport) tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA} + tr.TLSClientConfig.MaxVersion = tls.VersionTLS12 // to get to pick the cipher suite tr.Dial = func(netw, addr string) (net.Conn, error) { return net.Dial(netw, ts.Listener.Addr().String()) } @@ -1888,3 +1889,27 @@ func TestTransportBodyReadError(t *testing.T) { t.Errorf("close calls = %d; want 1", closeCalls) } } + +type roundTripperWithoutCloseIdle struct{} + +func (roundTripperWithoutCloseIdle) RoundTrip(*Request) (*Response, error) { panic("unused") } + +type roundTripperWithCloseIdle func() // underlying func is CloseIdleConnections func + +func (roundTripperWithCloseIdle) RoundTrip(*Request) (*Response, error) { panic("unused") } +func (f roundTripperWithCloseIdle) CloseIdleConnections() { f() } + +func TestClientCloseIdleConnections(t *testing.T) { + c := &Client{Transport: roundTripperWithoutCloseIdle{}} + c.CloseIdleConnections() // verify we don't crash at least + + closed := false + var tr RoundTripper = roundTripperWithCloseIdle(func() { + closed = true + }) + c = &Client{Transport: tr} + c.CloseIdleConnections() + if !closed { + t.Error("not closed") + } +} diff --git a/src/net/http/clientserver_test.go b/src/net/http/clientserver_test.go index c2a2548df11a2..465bae147850b 100644 --- a/src/net/http/clientserver_test.go +++ b/src/net/http/clientserver_test.go @@ -9,8 +9,11 @@ package http_test import ( "bytes" "compress/gzip" + "crypto/rand" + "crypto/sha1" "crypto/tls" "fmt" + "hash" "io" "io/ioutil" "log" @@ -249,7 +252,7 @@ type slurpResult struct { func (sr slurpResult) String() string { return 
fmt.Sprintf("body %q; err %v", sr.body, sr.err) } func (tt h12Compare) normalizeRes(t *testing.T, res *Response, wantProto string) { - if res.Proto == wantProto { + if res.Proto == wantProto || res.Proto == "HTTP/IGNORE" { res.Proto, res.ProtoMajor, res.ProtoMinor = "", 0, 0 } else { t.Errorf("got %q response; want %q", res.Proto, wantProto) @@ -1471,11 +1474,97 @@ func testWriteHeaderAfterWrite(t *testing.T, h2, hijack bool) { return } gotLog := strings.TrimSpace(errorLog.String()) - wantLog := "http: multiple response.WriteHeader calls" + wantLog := "http: superfluous response.WriteHeader call from net/http_test.testWriteHeaderAfterWrite.func1 (clientserver_test.go:" if hijack { - wantLog = "http: response.WriteHeader on hijacked connection" + wantLog = "http: response.WriteHeader on hijacked connection from net/http_test.testWriteHeaderAfterWrite.func1 (clientserver_test.go:" } - if gotLog != wantLog { + if !strings.HasPrefix(gotLog, wantLog) { t.Errorf("stderr output = %q; want %q", gotLog, wantLog) } } + +func TestBidiStreamReverseProxy(t *testing.T) { + setParallel(t) + defer afterTest(t) + backend := newClientServerTest(t, h2Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if _, err := io.Copy(w, r.Body); err != nil { + log.Printf("bidi backend copy: %v", err) + } + })) + defer backend.close() + + backURL, err := url.Parse(backend.ts.URL) + if err != nil { + t.Fatal(err) + } + rp := httputil.NewSingleHostReverseProxy(backURL) + rp.Transport = backend.tr + proxy := newClientServerTest(t, h2Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + rp.ServeHTTP(w, r) + })) + defer proxy.close() + + bodyRes := make(chan interface{}, 1) // error or hash.Hash + pr, pw := io.Pipe() + req, _ := NewRequest("PUT", proxy.ts.URL, pr) + const size = 4 << 20 + go func() { + h := sha1.New() + _, err := io.CopyN(io.MultiWriter(h, pw), rand.Reader, size) + go pw.Close() + if err != nil { + bodyRes <- err + } else { + bodyRes <- h + } + }() + res, err := 
backend.c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + hgot := sha1.New() + n, err := io.Copy(hgot, res.Body) + if err != nil { + t.Fatal(err) + } + if n != size { + t.Fatalf("got %d bytes; want %d", n, size) + } + select { + case v := <-bodyRes: + switch v := v.(type) { + default: + t.Fatalf("body copy: %v", err) + case hash.Hash: + if !bytes.Equal(v.Sum(nil), hgot.Sum(nil)) { + t.Errorf("written bytes didn't match received bytes") + } + } + case <-time.After(10 * time.Second): + t.Fatal("timeout") + } + +} + +// Always use HTTP/1.1 for WebSocket upgrades. +func TestH12_WebSocketUpgrade(t *testing.T) { + h12Compare{ + Handler: func(w ResponseWriter, r *Request) { + h := w.Header() + h.Set("Foo", "bar") + }, + ReqFunc: func(c *Client, url string) (*Response, error) { + req, _ := NewRequest("GET", url, nil) + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "WebSocket") + return c.Do(req) + }, + EarlyCheckResponse: func(proto string, res *Response) { + if res.Proto != "HTTP/1.1" { + t.Errorf("%s: expected HTTP/1.1, got %q", proto, res.Proto) + } + res.Proto = "HTTP/IGNORE" // skip later checks that Proto must be 1.1 vs 2.0 + }, + }.run(t) +} diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index b1a6cef6f700f..63f62214db825 100644 --- a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -36,10 +36,10 @@ type Cookie struct { Unparsed []string // Raw text of unparsed attribute-value pairs } -// SameSite allows a server define a cookie attribute making it impossible to -// the browser send this cookie along with cross-site requests. The main goal -// is mitigate the risk of cross-origin information leakage, and provides some -// protection against cross-site request forgery attacks. +// SameSite allows a server to define a cookie attribute making it impossible for +// the browser to send this cookie along with cross-site requests. 
The main +// goal is to mitigate the risk of cross-origin information leakage, and provide +// some protection against cross-site request forgery attacks. // // See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details. type SameSite int @@ -263,7 +263,7 @@ func readCookies(h Header, filter string) []*Cookie { return cookies } -// validCookieDomain returns whether v is a valid cookie domain-value. +// validCookieDomain reports whether v is a valid cookie domain-value. func validCookieDomain(v string) bool { if isCookieDomainName(v) { return true @@ -274,13 +274,13 @@ func validCookieDomain(v string) bool { return false } -// validCookieExpires returns whether v is a valid cookie expires-value. +// validCookieExpires reports whether v is a valid cookie expires-value. func validCookieExpires(t time.Time) bool { // IETF RFC 6265 Section 5.1.1.5, the year must not be less than 1601 return t.Year() >= 1601 } -// isCookieDomainName returns whether s is a valid domain name or a valid +// isCookieDomainName reports whether s is a valid domain name or a valid // domain name with a leading dot '.'. It is almost a direct copy of // package net's isDomainName. func isCookieDomainName(s string) bool { diff --git a/src/net/http/example_filesystem_test.go b/src/net/http/example_filesystem_test.go new file mode 100644 index 0000000000000..e1fd42d049489 --- /dev/null +++ b/src/net/http/example_filesystem_test.go @@ -0,0 +1,71 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "log" + "net/http" + "os" + "strings" +) + +// containsDotFile reports whether name contains a path element starting with a period. +// The name is assumed to be a delimited by forward slashes, as guaranteed +// by the http.FileSystem interface. 
+func containsDotFile(name string) bool { + parts := strings.Split(name, "/") + for _, part := range parts { + if strings.HasPrefix(part, ".") { + return true + } + } + return false +} + +// dotFileHidingFile is the http.File use in dotFileHidingFileSystem. +// It is used to wrap the Readdir method of http.File so that we can +// remove files and directories that start with a period from its output. +type dotFileHidingFile struct { + http.File +} + +// Readdir is a wrapper around the Readdir method of the embedded File +// that filters out all files that start with a period in their name. +func (f dotFileHidingFile) Readdir(n int) (fis []os.FileInfo, err error) { + files, err := f.File.Readdir(n) + for _, file := range files { // Filters out the dot files + if !strings.HasPrefix(file.Name(), ".") { + fis = append(fis, file) + } + } + return +} + +// dotFileHidingFileSystem is an http.FileSystem that hides +// hidden "dot files" from being served. +type dotFileHidingFileSystem struct { + http.FileSystem +} + +// Open is a wrapper around the Open method of the embedded FileSystem +// that serves a 403 permission error when name has a file or directory +// with whose name starts with a period in its path. +func (fs dotFileHidingFileSystem) Open(name string) (http.File, error) { + if containsDotFile(name) { // If dot file, return 403 response + return nil, os.ErrPermission + } + + file, err := fs.FileSystem.Open(name) + if err != nil { + return nil, err + } + return dotFileHidingFile{file}, err +} + +func ExampleFileServer_dotFileHiding() { + fs := dotFileHidingFileSystem{http.Dir(".")} + http.Handle("/", http.FileServer(fs)) + log.Fatal(http.ListenAndServe(":8080", nil)) +} diff --git a/src/net/http/example_handle_test.go b/src/net/http/example_handle_test.go new file mode 100644 index 0000000000000..10a62f64c2f48 --- /dev/null +++ b/src/net/http/example_handle_test.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "fmt" + "log" + "net/http" + "sync" +) + +type countHandler struct { + mu sync.Mutex // guards n + n int +} + +func (h *countHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.mu.Lock() + defer h.mu.Unlock() + h.n++ + fmt.Fprintf(w, "count is %d\n", h.n) +} + +func ExampleHandle() { + http.Handle("/count", new(countHandler)) + log.Fatal(http.ListenAndServe(":8080", nil)) +} diff --git a/src/net/http/example_test.go b/src/net/http/example_test.go index 53fb0bbb4e104..2a09f5f6c6965 100644 --- a/src/net/http/example_test.go +++ b/src/net/http/example_test.go @@ -159,3 +159,35 @@ func ExampleListenAndServe() { http.HandleFunc("/hello", helloHandler) log.Fatal(http.ListenAndServe(":8080", nil)) } + +func ExampleHandleFunc() { + h1 := func(w http.ResponseWriter, _ *http.Request) { + io.WriteString(w, "Hello from a HandleFunc #1!\n") + } + h2 := func(w http.ResponseWriter, _ *http.Request) { + io.WriteString(w, "Hello from a HandleFunc #2!\n") + } + + http.HandleFunc("/", h1) + http.HandleFunc("/endpoint", h2) + + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +func newPeopleHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "This is the people handler.") + }) +} + +func ExampleNotFoundHandler() { + mux := http.NewServeMux() + + // Create sample handler to returns 404 + mux.Handle("/resources", http.NotFoundHandler()) + + // Create sample handler that returns 200 + mux.Handle("/resources/people/", newPeopleHandler()) + + log.Fatal(http.ListenAndServe(":8080", mux)) +} diff --git a/src/net/http/export_test.go b/src/net/http/export_test.go index bc0db53a2c600..b6965c239e95e 100644 --- a/src/net/http/export_test.go +++ b/src/net/http/export_test.go @@ -155,7 +155,7 @@ func (t *Transport) IdleConnStrsForTesting_h2() []string { func (t *Transport) 
IdleConnCountForTesting(scheme, addr string) int { t.idleMu.Lock() defer t.idleMu.Unlock() - key := connectMethodKey{"", scheme, addr} + key := connectMethodKey{"", scheme, addr, false} cacheKey := key.String() for k, conns := range t.idleConn { if k.String() == cacheKey { @@ -178,12 +178,12 @@ func (t *Transport) IsIdleForTesting() bool { } func (t *Transport) RequestIdleConnChForTesting() { - t.getIdleConnCh(connectMethod{nil, "http", "example.com"}) + t.getIdleConnCh(connectMethod{nil, "http", "example.com", false}) } func (t *Transport) PutIdleTestConn(scheme, addr string) bool { c, _ := net.Pipe() - key := connectMethodKey{"", scheme, addr} + key := connectMethodKey{"", scheme, addr, false} select { case <-t.incHostConnCount(key): default: @@ -242,3 +242,5 @@ func ExportSetH2GoawayTimeout(d time.Duration) (restore func()) { http2goAwayTimeout = d return func() { http2goAwayTimeout = old } } + +func (r *Request) ExportIsReplayable() bool { return r.isReplayable() } diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go index 255d215f3cfe0..762e88b05ff30 100644 --- a/src/net/http/fs_test.go +++ b/src/net/http/fs_test.go @@ -583,16 +583,23 @@ func TestFileServerZeroByte(t *testing.T) { ts := httptest.NewServer(FileServer(Dir("."))) defer ts.Close() - res, err := Get(ts.URL + "/..\x00") + c, err := net.Dial("tcp", ts.Listener.Addr().String()) if err != nil { t.Fatal(err) } - b, err := ioutil.ReadAll(res.Body) + defer c.Close() + _, err = fmt.Fprintf(c, "GET /..\x00 HTTP/1.0\r\n\r\n") + if err != nil { + t.Fatal(err) + } + var got bytes.Buffer + bufr := bufio.NewReader(io.TeeReader(c, &got)) + res, err := ReadResponse(bufr, nil) if err != nil { - t.Fatal("reading Body:", err) + t.Fatal("ReadResponse: ", err) } if res.StatusCode == 200 { - t.Errorf("got status 200; want an error. Body is:\n%s", string(b)) + t.Errorf("got status 200; want an error. 
Body is:\n%s", got.Bytes()) } } diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index 12cf65f109c13..f714cbb9a138d 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -44,9 +44,9 @@ import ( "sync" "time" - "golang_org/x/net/http/httpguts" - "golang_org/x/net/http2/hpack" - "golang_org/x/net/idna" + "internal/x/net/http/httpguts" + "internal/x/net/http2/hpack" + "internal/x/net/idna" ) // A list of the possible cipher suite ids. Taken from @@ -954,75 +954,6 @@ func (p http2noDialClientConnPool) GetClientConn(req *Request, addr string) (*ht return p.getClientConn(req, addr, http2noDialOnMiss) } -func http2configureTransport(t1 *Transport) (*http2Transport, error) { - connPool := new(http2clientConnPool) - t2 := &http2Transport{ - ConnPool: http2noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) RoundTripper { - addr := http2authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return http2erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. 
(since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// converting panics into errors. -func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -// (The field is exported so it can be accessed via reflect from net/http; tested -// by TestNoDialH2RoundTripperType) -type http2noDialH2RoundTripper struct{ *http2Transport } - -func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) { - res, err := rt.http2Transport.RoundTrip(req) - if http2isNoCachedConnError(err) { - return nil, ErrSkipAltProtocol - } - return res, err -} - // Buffer chunks are allocated from a pool to reduce pressure on GC. // The maximum wasted space per dataBuffer is 2x the largest size class, // which happens when the dataBuffer has multiple chunks and there is @@ -2788,7 +2719,7 @@ func (fr *http2Framer) maxHeaderStringLen() int { } // readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame +// merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFrame, error) { if fr.AllowIllegalReads { @@ -2924,181 +2855,23 @@ func http2summarizeFrame(f http2Frame) string { return buf.String() } -func http2traceHasWroteHeaderField(trace *http2clientTrace) bool { +func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return trace != nil && trace.WroteHeaderField != nil } -func http2traceWroteHeaderField(trace *http2clientTrace, k, v string) { +func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { if trace != nil && trace.WroteHeaderField != nil { trace.WroteHeaderField(k, []string{v}) } } -func http2traceGot1xxResponseFunc(trace *http2clientTrace) func(int, textproto.MIMEHeader) error { +func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse } return nil } -func http2transportExpectContinueTimeout(t1 *Transport) time.Duration { - return t1.ExpectContinueTimeout -} - -type http2contextContext interface { - context.Context -} - -var http2errCanceled = context.Canceled - -func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx http2contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, ServerContextKey, hs) - } - return -} - -func http2contextWithCancel(ctx http2contextContext) (_ http2contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func http2requestWithContext(req *Request, ctx http2contextContext) *Request { - return req.WithContext(ctx) -} - -type http2clientTrace httptrace.ClientTrace - -func http2reqContext(r *Request) context.Context { return r.Context() } - -func (t *http2Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func 
http2setResponseUncompressed(res *Response) { res.Uncompressed = true } - -func http2traceGetConn(req *Request, hostPort string) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GetConn == nil { - return - } - trace.GetConn(hostPort) -} - -func http2traceGotConn(req *Request, cc *http2ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func http2traceWroteHeaders(trace *http2clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func http2traceGot100Continue(trace *http2clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func http2traceWait100Continue(trace *http2clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func http2traceWroteRequest(trace *http2clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func http2traceFirstResponseByte(trace *http2clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func http2requestTrace(req *Request) *http2clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*http2clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *http2ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} - -// Shutdown gracefully closes the client connection, waiting for running streams to complete. 
-func (cc *http2ClientConn) Shutdown(ctx context.Context) error { - return cc.shutdown(ctx) -} - -func http2cloneTLSConfig(c *tls.Config) *tls.Config { - c2 := c.Clone() - c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264 - return c2 -} - -var _ Pusher = (*http2responseWriter)(nil) - -// Push implements http.Pusher. -func (w *http2responseWriter) Push(target string, opts *PushOptions) error { - internalOpts := http2pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func http2configureServer18(h1 *Server, h2 *http2Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func http2shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != ErrAbortHandler -} - -func http2reqGetBody(req *Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func http2reqBodyIsNoBody(body io.ReadCloser) bool { - return body == NoBody -} - -func http2go18httpNoBody() io.ReadCloser { return NoBody } // for tests only - -func http2configureServer19(s *Server, conf *http2Server) error { - s.RegisterOnShutdown(conf.state.startGracefulShutdown) - return nil -} - var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type http2goroutineLock uint64 @@ -3252,12 +3025,17 @@ func http2cutoff64(base int) uint64 { } var ( - http2commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - http2commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case + http2commonBuildOnce sync.Once + http2commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + http2commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case ) -func init() { - for _, v := range []string{ +func http2buildCommonHeaderMapsOnce() { + 
http2commonBuildOnce.Do(http2buildCommonHeaderMaps) +} + +func http2buildCommonHeaderMaps() { + common := []string{ "accept", "accept-charset", "accept-encoding", @@ -3305,7 +3083,10 @@ func init() { "vary", "via", "www-authenticate", - } { + } + http2commonLowerHeader = make(map[string]string, len(common)) + http2commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { chk := CanonicalHeaderKey(v) http2commonLowerHeader[chk] = v http2commonCanonHeader[v] = chk @@ -3313,6 +3094,7 @@ func init() { } func http2lowerHeader(v string) string { + http2buildCommonHeaderMapsOnce() if s, ok := http2commonLowerHeader[v]; ok { return s } @@ -3488,19 +3270,12 @@ func http2validWireHeaderFieldName(v string) bool { return true } -var http2httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := StatusText(i); v != "" { - http2httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - func http2httpCodeString(code int) string { - if s, ok := http2httpCodeStringCommon[code]; ok { - return s + switch code { + case 200: + return "200" + case 404: + return "404" } return strconv.Itoa(code) } @@ -3993,12 +3768,14 @@ func http2ConfigureServer(s *Server, conf *http2Server) error { conf = new(http2Server) } conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})} - if err := http2configureServer18(s, conf); err != nil { - return err - } - if err := http2configureServer19(s, conf); err != nil { - return err + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) @@ -4219,6 +3996,15 @@ func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) { sc.serve() } +func http2serverConnBaseContext(c net.Conn, opts *http2ServeConnOpts) (ctx context.Context, 
cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, ServerContextKey, hs) + } + return +} + func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. @@ -4234,7 +4020,7 @@ type http2serverConn struct { conn net.Conn bw *http2bufferedWriter // writing to conn handler Handler - baseCtx http2contextContext + baseCtx context.Context framer *http2Framer doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan http2readFrameResult // written by serverConn.readFrames @@ -4314,7 +4100,7 @@ type http2stream struct { id uint32 body *http2pipe // non-nil if expecting DATA frames cw http2closeWaiter // closed wait stream transitions to closed state - ctx http2contextContext + ctx context.Context cancelCtx func() // owned by serverConn's serve loop: @@ -4450,6 +4236,7 @@ func (sc *http2serverConn) condlogf(err error, format string, args ...interface{ func (sc *http2serverConn) canonicalHeader(v string) string { sc.serveG.check() + http2buildCommonHeaderMapsOnce() cv, ok := http2commonCanonHeader[v] if ok { return cv @@ -4898,7 +4685,7 @@ func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) { // errHandlerPanicked is the error given to any callers blocked in a read from // Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. +// main ServeHTTP goroutine, this will show up rarely. var http2errHandlerPanicked = errors.New("http2: handler panicked") // wroteFrame is called on the serve goroutine with the result of @@ -5370,12 +5157,6 @@ func (sc *http2serverConn) processData(f *http2DataFrame) error { // type PROTOCOL_ERROR." 
return http2ConnectionError(http2ErrCodeProtocol) } - // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in - // "open" or "half-closed (local)" state, the recipient MUST respond with a - // stream error (Section 5.4.2) of type STREAM_CLOSED. - if state == http2stateClosed { - return http2streamError(id, http2ErrCodeStreamClosed) - } if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -5670,7 +5451,7 @@ func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState panic("internal error: cannot create stream with id 0") } - ctx, cancelCtx := http2contextWithCancel(sc.baseCtx) + ctx, cancelCtx := context.WithCancel(sc.baseCtx) st := &http2stream{ sc: sc, id: id, @@ -5836,7 +5617,7 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re Body: body, Trailer: trailer, } - req = http2requestWithContext(req, st.ctx) + req = req.WithContext(st.ctx) rws := http2responseWriterStatePool.Get().(*http2responseWriterState) bwSave := rws.bw @@ -5864,7 +5645,7 @@ func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, han stream: rw.rws.stream, }) // Same as net/http: - if http2shouldLogPanic(e) { + if e != nil && e != ErrAbortHandler { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] @@ -6426,14 +6207,9 @@ var ( http2ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") ) -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. 
-type http2pushOptions struct { - Method string - Header Header -} +var _ Pusher = (*http2responseWriter)(nil) -func (w *http2responseWriter) push(target string, opts http2pushOptions) error { +func (w *http2responseWriter) Push(target string, opts *PushOptions) error { st := w.rws.stream sc := st.sc sc.serveG.checkNotOn() @@ -6444,6 +6220,10 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error { return http2ErrRecursivePush } + if opts == nil { + opts = new(PushOptions) + } + // Default options. if opts.Method == "" { opts.Method = "GET" @@ -6739,6 +6519,16 @@ type http2Transport struct { // to mean no limit. MaxHeaderListSize uint32 + // StrictMaxConcurrentStreams controls whether the server's + // SETTINGS_MAX_CONCURRENT_STREAMS should be respected + // globally. If false, new TCP connections are created to the + // server as needed to keep each under the per-connection + // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the + // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as + // a global limit and callers of RoundTrip block when needed, + // waiting for their turn. + StrictMaxConcurrentStreams bool + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -6762,16 +6552,56 @@ func (t *http2Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -var http2errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. +// It returns an error if t1 has already been HTTP/2-enabled. 
func http2ConfigureTransport(t1 *Transport) error { - _, err := http2configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + _, err := http2configureTransport(t1) return err } +func http2configureTransport(t1 *Transport) (*http2Transport, error) { + connPool := new(http2clientConnPool) + t2 := &http2Transport{ + ConnPool: http2noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := http2registerHTTPSProtocol(t1, http2noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !http2strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) RoundTripper { + addr := http2authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return http2erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. 
(since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + func (t *http2Transport) connPool() http2ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -6836,7 +6666,7 @@ type http2ClientConn struct { type http2clientStream struct { cc *http2ClientConn req *Request - trace *http2clientTrace // or nil + trace *httptrace.ClientTrace // or nil ID uint32 resc chan http2resAndError bufPipe http2pipe // buffered pipe with the flow-controlled response payload @@ -6870,7 +6700,7 @@ type http2clientStream struct { // channel to be signaled. A non-nil error is returned only if the request was // canceled. func http2awaitRequestCancel(req *Request, done <-chan struct{}) error { - ctx := http2reqContext(req) + ctx := req.Context() if req.Cancel == nil && ctx.Done() == nil { return nil } @@ -7046,8 +6876,8 @@ func (t *http2Transport) RoundTripOpt(req *Request, opt http2RoundTripOpt) (*Res select { case <-time.After(time.Second * time.Duration(backoff)): continue - case <-http2reqContext(req).Done(): - return nil, http2reqContext(req).Err() + case <-req.Context().Done(): + return nil, req.Context().Err() } } } @@ -7084,16 +6914,15 @@ func http2shouldRetryRequest(req *Request, err error, afterBodyWrite bool) (*Req } // If the Body is nil (or http.NoBody), it's safe to reuse // this request and its Body. - if req.Body == nil || http2reqBodyIsNoBody(req.Body) { + if req.Body == nil || req.Body == NoBody { return req, nil } // If the request body can be reset back to its original // state via the optional req.GetBody, do that. - getBody := http2reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody != nil { + if req.GetBody != nil { // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
- body, err := getBody() + body, err := req.GetBody() if err != nil { return nil, err } @@ -7139,7 +6968,7 @@ func (t *http2Transport) dialClientConn(addr string, singleUse bool) (*http2Clie func (t *http2Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { - *cfg = *http2cloneTLSConfig(t.TLSClientConfig) + *cfg = *t.TLSClientConfig.Clone() } if !http2strSliceContains(cfg.NextProtos, http2NextProtoTLS) { cfg.NextProtos = append([]string{http2NextProtoTLS}, cfg.NextProtos...) @@ -7190,7 +7019,7 @@ func (t *http2Transport) expectContinueTimeout() time.Duration { if t.t1 == nil { return 0 } - return http2transportExpectContinueTimeout(t.t1) + return t.t1.ExpectContinueTimeout } func (t *http2Transport) NewClientConn(c net.Conn) (*http2ClientConn, error) { @@ -7315,8 +7144,19 @@ func (cc *http2ClientConn) idleStateLocked() (st http2clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { return } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && - int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32 + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. + maxConcurrentOkay = true + } else { + maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) + } + + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest return } @@ -7356,8 +7196,7 @@ func (cc *http2ClientConn) closeIfIdle() { var http2shutdownEnterWaitStateHook = func() {} // Shutdown gracefully close the client connection, waiting for running streams to complete. 
-// Public implementation is in go17.go and not_go17.go -func (cc *http2ClientConn) shutdown(ctx http2contextContext) error { +func (cc *http2ClientConn) Shutdown(ctx context.Context) error { if err := cc.sendGoAway(); err != nil { return err } @@ -7527,7 +7366,7 @@ func http2checkConnHeaders(req *Request) error { // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. func http2actualContentLength(req *Request) int64 { - if req.Body == nil || http2reqBodyIsNoBody(req.Body) { + if req.Body == nil || req.Body == NoBody { return 0 } if req.ContentLength != 0 { @@ -7597,7 +7436,7 @@ func (cc *http2ClientConn) roundTrip(req *Request) (res *Response, gotErrAfterRe cs := cc.newStream() cs.req = req - cs.trace = http2requestTrace(req) + cs.trace = httptrace.ContextClientTrace(req.Context()) cs.requestedGzip = requestedGzip bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 @@ -7635,7 +7474,7 @@ func (cc *http2ClientConn) roundTrip(req *Request) (res *Response, gotErrAfterRe readLoopResCh := cs.resc bodyWritten := false - ctx := http2reqContext(req) + ctx := req.Context() handleReadLoopResponse := func(re http2resAndError) (*Response, bool, error) { res := re.res @@ -7705,6 +7544,7 @@ func (cc *http2ClientConn) roundTrip(req *Request) (res *Response, gotErrAfterRe default: } if err != nil { + cc.forgetStreamID(cs.ID) return nil, cs.getStartedWrite(), err } bodyWritten = true @@ -7826,6 +7666,7 @@ func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Clos sawEOF = true err = nil } else if err != nil { + cc.writeStreamReset(cs.ID, http2ErrCodeCancel, err) return err } @@ -8061,7 +7902,7 @@ func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trail return nil, http2errRequestHeaderListSize } - trace := http2requestTrace(req) + trace := httptrace.ContextClientTrace(req.Context()) traceHeaders := http2traceHasWroteHeaderField(trace) // Header list size is ok. Write the headers. 
@@ -8484,7 +8325,7 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http res.Header.Del("Content-Length") res.ContentLength = -1 res.Body = &http2gzipReader{body: res.Body} - http2setResponseUncompressed(res) + res.Uncompressed = true } return res, nil } @@ -8861,8 +8702,7 @@ func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) er } // Ping sends a PING frame to the server and waits for the ack. -// Public implementation is in go17.go and not_go17.go -func (cc *http2ClientConn) ping(ctx http2contextContext) error { +func (cc *http2ClientConn) Ping(ctx context.Context) error { c := make(chan struct{}) // Generate a random payload var p [8]byte @@ -9097,6 +8937,94 @@ func http2isConnectionCloseRequest(req *Request) bool { return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") } +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converting panics into errors. +func http2registerHTTPSProtocol(t *Transport, rt http2noDialH2RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. 
+// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type http2noDialH2RoundTripper struct{ *http2Transport } + +func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) { + res, err := rt.http2Transport.RoundTrip(req) + if http2isNoCachedConnError(err) { + return nil, ErrSkipAltProtocol + } + return res, err +} + +func (t *http2Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func http2traceGetConn(req *Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func http2traceGotConn(req *Request, cc *http2ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func http2traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func http2traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func http2traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func http2traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func http2traceFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} + // writeFramer 
is implemented by any type that is used to write frames. type http2writeFramer interface { writeFrame(http2writeContext) error @@ -9283,7 +9211,7 @@ func (w *http2writeResHeaders) staysWithinBuffer(max int) bool { // TODO: this is a common one. It'd be nice to return true // here and get into the fast path if we could be clever and // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and + // upper bound that usually fires. (Maybe if w.h and // w.trailers are nil, so we don't need to enumerate it.) // Otherwise I'm afraid that just calculating the length to // answer this question would be slower than the ~2µs benefit. @@ -9413,7 +9341,7 @@ func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error { } // encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. +// is encoded only if k is in keys. func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) { if keys == nil { sorter := http2sorterPool.Get().(*http2sorter) diff --git a/src/net/http/header.go b/src/net/http/header.go index 461ae9368ac15..b699e7ef8ffb1 100644 --- a/src/net/http/header.go +++ b/src/net/http/header.go @@ -14,30 +14,35 @@ import ( "time" ) -var raceEnabled = false // set by race.go - // A Header represents the key-value pairs in an HTTP header. +// +// The keys should be in canonical form, as returned by +// CanonicalHeaderKey. type Header map[string][]string // Add adds the key, value pair to the header. // It appends to any existing values associated with key. +// The key is case insensitive; it is canonicalized by +// CanonicalHeaderKey. func (h Header) Add(key, value string) { textproto.MIMEHeader(h).Add(key, value) } -// Set sets the header entries associated with key to -// the single element value. It replaces any existing -// values associated with key. +// Set sets the header entries associated with key to the +// single element value. 
It replaces any existing values +// associated with key. The key is case insensitive; it is +// canonicalized by textproto.CanonicalMIMEHeaderKey. +// To use non-canonical keys, assign to the map directly. func (h Header) Set(key, value string) { textproto.MIMEHeader(h).Set(key, value) } -// Get gets the first value associated with the given key. -// It is case insensitive; textproto.CanonicalMIMEHeaderKey is used -// to canonicalize the provided key. -// If there are no values associated with the key, Get returns "". -// To access multiple values of a key, or to use non-canonical keys, -// access the map directly. +// Get gets the first value associated with the given key. If +// there are no values associated with the key, Get returns "". +// It is case insensitive; textproto.CanonicalMIMEHeaderKey is +// used to canonicalize the provided key. To access multiple +// values of a key, or to use non-canonical keys, access the +// map directly. func (h Header) Get(key string) string { return textproto.MIMEHeader(h).Get(key) } @@ -50,7 +55,16 @@ func (h Header) get(key string) string { return "" } +// has reports whether h has the provided key defined, even if it's +// set to 0-length slice. +func (h Header) has(key string) bool { + _, ok := h[key] + return ok +} + // Del deletes the values associated with key. +// The key is case insensitive; it is canonicalized by +// CanonicalHeaderKey. func (h Header) Del(key string) { textproto.MIMEHeader(h).Del(key) } @@ -95,10 +109,6 @@ func ParseTime(text string) (t time.Time, err error) { var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") -type writeStringer interface { - WriteString(string) (int, error) -} - // stringWriter implements WriteString on a Writer. 
type stringWriter struct { w io.Writer @@ -154,7 +164,7 @@ func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error { } func (h Header) writeSubset(w io.Writer, exclude map[string]bool, trace *httptrace.ClientTrace) error { - ws, ok := w.(writeStringer) + ws, ok := w.(io.StringWriter) if !ok { ws = stringWriter{w} } @@ -231,13 +241,3 @@ func hasToken(v, token string) bool { func isTokenBoundary(b byte) bool { return b == ' ' || b == ',' || b == '\t' } - -func cloneHeader(h Header) Header { - h2 := make(Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} diff --git a/src/net/http/header_test.go b/src/net/http/header_test.go index bbd35c485a4a3..48158d313aeba 100644 --- a/src/net/http/header_test.go +++ b/src/net/http/header_test.go @@ -6,6 +6,7 @@ package http import ( "bytes" + "internal/race" "runtime" "testing" "time" @@ -196,7 +197,7 @@ func TestHeaderWriteSubsetAllocs(t *testing.T) { if testing.Short() { t.Skip("skipping alloc test in short mode") } - if raceEnabled { + if race.Enabled { t.Skip("skipping test under race detector") } if runtime.GOMAXPROCS(0) > 1 { diff --git a/src/net/http/http.go b/src/net/http/http.go index ce0eceb1de303..e5d59e14120ba 100644 --- a/src/net/http/http.go +++ b/src/net/http/http.go @@ -11,7 +11,7 @@ import ( "time" "unicode/utf8" - "golang_org/x/net/http/httpguts" + "internal/x/net/http/httpguts" ) // maxInt64 is the effective "infinite" value for the Server and @@ -59,6 +59,17 @@ func isASCII(s string) bool { return true } +// stringContainsCTLByte reports whether s contains any ASCII control character. +func stringContainsCTLByte(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b < ' ' || b == 0x7f { + return true + } + } + return false +} + func hexEscapeNonASCII(s string) string { newLen := 0 for i := 0; i < len(s); i++ { @@ -135,6 +146,10 @@ type Pusher interface { // data that may trigger a request for URL X. 
This avoids a race where the // client issues requests for X before receiving the PUSH_PROMISE for X. // + // Push will run in a separate goroutine making the order of arrival + // non-deterministic. Any required synchronization needs to be implemented + // by the caller. + // // Push returns ErrNotSupported if the client has disabled push or if push // is not supported on the underlying connection. Push(target string, opts *PushOptions) error diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go index 67f90b837698a..f2c3c0757bacf 100644 --- a/src/net/http/httptest/recorder.go +++ b/src/net/http/httptest/recorder.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "golang_org/x/net/http/httpguts" + "internal/x/net/http/httpguts" ) // ResponseRecorder is an implementation of http.ResponseWriter that diff --git a/src/net/http/httptest/server.go b/src/net/http/httptest/server.go index ebafc9999c603..b4e2e9266e685 100644 --- a/src/net/http/httptest/server.go +++ b/src/net/http/httptest/server.go @@ -53,10 +53,10 @@ type Server struct { } func newLocalListener() net.Listener { - if *serve != "" { - l, err := net.Listen("tcp", *serve) + if serveFlag != "" { + l, err := net.Listen("tcp", serveFlag) if err != nil { - panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err)) + panic(fmt.Sprintf("httptest: failed to listen on %v: %v", serveFlag, err)) } return l } @@ -73,7 +73,25 @@ func newLocalListener() net.Listener { // this flag lets you run // go test -run=BrokenTest -httptest.serve=127.0.0.1:8000 // to start the broken server so you can interact with it manually. -var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks") +// We only register this flag if it looks like the caller knows about it +// and is trying to use it as we don't want to pollute flags and this +// isn't really part of our API. Don't depend on this. 
+var serveFlag string + +func init() { + if strSliceContainsPrefix(os.Args, "-httptest.serve=") || strSliceContainsPrefix(os.Args, "--httptest.serve=") { + flag.StringVar(&serveFlag, "httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks.") + } +} + +func strSliceContainsPrefix(v []string, pre string) bool { + for _, s := range v { + if strings.HasPrefix(s, pre) { + return true + } + } + return false +} // NewServer starts and returns a new Server. // The caller should call Close when finished, to shut it down. @@ -107,7 +125,7 @@ func (s *Server) Start() { s.URL = "http://" + s.Listener.Addr().String() s.wrap() s.goServe() - if *serve != "" { + if serveFlag != "" { fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL) select {} } diff --git a/src/net/http/httputil/dump_test.go b/src/net/http/httputil/dump_test.go index 5703a7fb866a0..63312dd885690 100644 --- a/src/net/http/httputil/dump_test.go +++ b/src/net/http/httputil/dump_test.go @@ -370,7 +370,7 @@ func TestDumpResponse(t *testing.T) { } got := string(gotb) got = strings.TrimSpace(got) - got = strings.Replace(got, "\r", "", -1) + got = strings.ReplaceAll(got, "\r", "") if got != tt.want { t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want) diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go index 1dddaa95a7707..4e10bf399711b 100644 --- a/src/net/http/httputil/reverseproxy.go +++ b/src/net/http/httputil/reverseproxy.go @@ -8,6 +8,7 @@ package httputil import ( "context" + "fmt" "io" "log" "net" @@ -16,11 +17,9 @@ import ( "strings" "sync" "time" -) -// onExitFlushLoop is a callback set by tests to detect the state of the -// flushLoop() goroutine. 
-var onExitFlushLoop func() + "internal/x/net/http/httpguts" +) // ReverseProxy is an HTTP Handler that takes an incoming request and // sends it to another server, proxying the response back to the @@ -42,6 +41,12 @@ type ReverseProxy struct { // to flush to the client while copying the // response body. // If zero, no periodic flushing is done. + // A negative value means to flush immediately + // after each write to the client. + // The FlushInterval is ignored when ReverseProxy + // recognizes a response as a streaming response; + // for such responses, writes are flushed to the client + // immediately. FlushInterval time.Duration // ErrorLog specifies an optional logger for errors @@ -166,6 +171,20 @@ func (p *ReverseProxy) getErrorHandler() func(http.ResponseWriter, *http.Request return p.defaultErrorHandler } +// modifyResponse conditionally runs the optional ModifyResponse hook +// and reports whether the request should proceed. +func (p *ReverseProxy) modifyResponse(rw http.ResponseWriter, res *http.Response, req *http.Request) bool { + if p.ModifyResponse == nil { + return true + } + if err := p.ModifyResponse(res); err != nil { + res.Body.Close() + p.getErrorHandler()(rw, req, err) + return false + } + return true +} + func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { transport := p.Transport if transport == nil { @@ -197,6 +216,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { p.Director(outreq) outreq.Close = false + reqUpType := upgradeType(outreq.Header) removeConnectionHeaders(outreq.Header) // Remove hop-by-hop headers to the backend. Especially @@ -219,6 +239,13 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { outreq.Header.Del(h) } + // After stripping all the hop-by-hop connection headers above, add back any + // necessary for protocol upgrades, such as for websockets. 
+ if reqUpType != "" { + outreq.Header.Set("Connection", "Upgrade") + outreq.Header.Set("Upgrade", reqUpType) + } + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { // If we aren't the first proxy retain prior // X-Forwarded-For information as a comma+space @@ -235,18 +262,23 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { return } + // Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc) + if res.StatusCode == http.StatusSwitchingProtocols { + if !p.modifyResponse(rw, res, outreq) { + return + } + p.handleUpgradeResponse(rw, outreq, res) + return + } + removeConnectionHeaders(res.Header) for _, h := range hopHeaders { res.Header.Del(h) } - if p.ModifyResponse != nil { - if err := p.ModifyResponse(res); err != nil { - res.Body.Close() - p.getErrorHandler()(rw, outreq, err) - return - } + if !p.modifyResponse(rw, res, outreq) { + return } copyHeader(rw.Header(), res.Header) @@ -263,15 +295,8 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { } rw.WriteHeader(res.StatusCode) - if len(res.Trailer) > 0 { - // Force chunking if we saw a response trailer. - // This prevents net/http from calculating the length for short - // bodies and adding a Content-Length. - if fl, ok := rw.(http.Flusher); ok { - fl.Flush() - } - } - err = p.copyResponse(rw, res.Body) + + err = p.copyResponse(rw, res.Body, p.flushInterval(req, res)) if err != nil { defer res.Body.Close() // Since we're streaming the response, if we run into an error all we can do @@ -285,6 +310,15 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { } res.Body.Close() // close now, instead of defer, to populate res.Trailer + if len(res.Trailer) > 0 { + // Force chunking if we saw a response trailer. + // This prevents net/http from calculating the length for short + // bodies and adding a Content-Length. 
+ if fl, ok := rw.(http.Flusher); ok { + fl.Flush() + } + } + if len(res.Trailer) == announcedTrailers { copyHeader(rw.Header(), res.Trailer) return @@ -332,15 +366,28 @@ func removeConnectionHeaders(h http.Header) { } } -func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) error { - if p.FlushInterval != 0 { +// flushInterval returns the p.FlushInterval value, conditionally +// overriding its value for a specific request/response. +func (p *ReverseProxy) flushInterval(req *http.Request, res *http.Response) time.Duration { + resCT := res.Header.Get("Content-Type") + + // For Server-Sent Events responses, flush immediately. + // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream + if resCT == "text/event-stream" { + return -1 // negative means immediately + } + + // TODO: more specific cases? e.g. res.ContentLength == -1? + return p.FlushInterval +} + +func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error { + if flushInterval != 0 { if wf, ok := dst.(writeFlusher); ok { mlw := &maxLatencyWriter{ dst: wf, - latency: p.FlushInterval, - done: make(chan bool), + latency: flushInterval, } - go mlw.flushLoop() defer mlw.stop() dst = mlw } @@ -403,34 +450,115 @@ type writeFlusher interface { type maxLatencyWriter struct { dst writeFlusher - latency time.Duration + latency time.Duration // non-zero; negative means to flush immediately - mu sync.Mutex // protects Write + Flush - done chan bool + mu sync.Mutex // protects t, flushPending, and dst.Flush + t *time.Timer + flushPending bool } -func (m *maxLatencyWriter) Write(p []byte) (int, error) { +func (m *maxLatencyWriter) Write(p []byte) (n int, err error) { m.mu.Lock() defer m.mu.Unlock() - return m.dst.Write(p) + n, err = m.dst.Write(p) + if m.latency < 0 { + m.dst.Flush() + return + } + if m.flushPending { + return + } + if m.t == nil { + m.t = time.AfterFunc(m.latency, m.delayedFlush) + } else { + m.t.Reset(m.latency) + } + 
m.flushPending = true + return } -func (m *maxLatencyWriter) flushLoop() { - t := time.NewTicker(m.latency) - defer t.Stop() - for { - select { - case <-m.done: - if onExitFlushLoop != nil { - onExitFlushLoop() - } - return - case <-t.C: - m.mu.Lock() - m.dst.Flush() - m.mu.Unlock() - } +func (m *maxLatencyWriter) delayedFlush() { + m.mu.Lock() + defer m.mu.Unlock() + if !m.flushPending { // if stop was called but AfterFunc already started this goroutine + return } + m.dst.Flush() + m.flushPending = false } -func (m *maxLatencyWriter) stop() { m.done <- true } +func (m *maxLatencyWriter) stop() { + m.mu.Lock() + defer m.mu.Unlock() + m.flushPending = false + if m.t != nil { + m.t.Stop() + } +} + +func upgradeType(h http.Header) string { + if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") { + return "" + } + return strings.ToLower(h.Get("Upgrade")) +} + +func (p *ReverseProxy) handleUpgradeResponse(rw http.ResponseWriter, req *http.Request, res *http.Response) { + reqUpType := upgradeType(req.Header) + resUpType := upgradeType(res.Header) + if reqUpType != resUpType { + p.getErrorHandler()(rw, req, fmt.Errorf("backend tried to switch protocol %q when %q was requested", resUpType, reqUpType)) + return + } + + copyHeader(res.Header, rw.Header()) + + hj, ok := rw.(http.Hijacker) + if !ok { + p.getErrorHandler()(rw, req, fmt.Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw)) + return + } + backConn, ok := res.Body.(io.ReadWriteCloser) + if !ok { + p.getErrorHandler()(rw, req, fmt.Errorf("internal error: 101 switching protocols response with non-writable body")) + return + } + defer backConn.Close() + conn, brw, err := hj.Hijack() + if err != nil { + p.getErrorHandler()(rw, req, fmt.Errorf("Hijack failed on protocol switch: %v", err)) + return + } + defer conn.Close() + res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above + if err := res.Write(brw); err != nil { + p.getErrorHandler()(rw, 
req, fmt.Errorf("response write: %v", err)) + return + } + if err := brw.Flush(); err != nil { + p.getErrorHandler()(rw, req, fmt.Errorf("response flush: %v", err)) + return + } + errc := make(chan error, 1) + spc := switchProtocolCopier{user: conn, backend: backConn} + go spc.copyToBackend(errc) + go spc.copyFromBackend(errc) + <-errc + return +} + +// switchProtocolCopier exists so goroutines proxying data back and +// forth have nice names in stacks. +type switchProtocolCopier struct { + user, backend io.ReadWriter +} + +func (c switchProtocolCopier) copyFromBackend(errc chan<- error) { + _, err := io.Copy(c.user, c.backend) + errc <- err +} + +func (c switchProtocolCopier) copyToBackend(errc chan<- error) { + _, err := io.Copy(c.backend, c.user) + errc <- err +} diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go index 2f75b4e34ec99..5edefa08e55a3 100644 --- a/src/net/http/httputil/reverseproxy_test.go +++ b/src/net/http/httputil/reverseproxy_test.go @@ -153,15 +153,20 @@ func TestReverseProxy(t *testing.T) { func TestReverseProxyStripHeadersPresentInConnection(t *testing.T) { const fakeConnectionToken = "X-Fake-Connection-Token" const backendResponse = "I am the backend" + + // someConnHeader is some arbitrary header to be declared as a hop-by-hop header + // in the Request's Connection header. 
+ const someConnHeader = "X-Some-Conn-Header" + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if c := r.Header.Get(fakeConnectionToken); c != "" { t.Errorf("handler got header %q = %q; want empty", fakeConnectionToken, c) } - if c := r.Header.Get("Upgrade"); c != "" { - t.Errorf("handler got header %q = %q; want empty", "Upgrade", c) + if c := r.Header.Get(someConnHeader); c != "" { + t.Errorf("handler got header %q = %q; want empty", someConnHeader, c) } - w.Header().Set("Connection", "Upgrade, "+fakeConnectionToken) - w.Header().Set("Upgrade", "should be deleted") + w.Header().Set("Connection", someConnHeader+", "+fakeConnectionToken) + w.Header().Set(someConnHeader, "should be deleted") w.Header().Set(fakeConnectionToken, "should be deleted") io.WriteString(w, backendResponse) })) @@ -173,15 +178,15 @@ func TestReverseProxyStripHeadersPresentInConnection(t *testing.T) { proxyHandler := NewSingleHostReverseProxy(backendURL) frontend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { proxyHandler.ServeHTTP(w, r) - if c := r.Header.Get("Upgrade"); c != "original value" { - t.Errorf("handler modified header %q = %q; want %q", "Upgrade", c, "original value") + if c := r.Header.Get(someConnHeader); c != "original value" { + t.Errorf("handler modified header %q = %q; want %q", someConnHeader, c, "original value") } })) defer frontend.Close() getReq, _ := http.NewRequest("GET", frontend.URL, nil) - getReq.Header.Set("Connection", "Upgrade, "+fakeConnectionToken) - getReq.Header.Set("Upgrade", "original value") + getReq.Header.Set("Connection", someConnHeader+", "+fakeConnectionToken) + getReq.Header.Set(someConnHeader, "original value") getReq.Header.Set(fakeConnectionToken, "should be deleted") res, err := frontend.Client().Do(getReq) if err != nil { @@ -195,8 +200,8 @@ func TestReverseProxyStripHeadersPresentInConnection(t *testing.T) { if got, want := string(bodyBytes), 
backendResponse; got != want { t.Errorf("got body %q; want %q", got, want) } - if c := res.Header.Get("Upgrade"); c != "" { - t.Errorf("handler got header %q = %q; want empty", "Upgrade", c) + if c := res.Header.Get(someConnHeader); c != "" { + t.Errorf("handler got header %q = %q; want empty", someConnHeader, c) } if c := res.Header.Get(fakeConnectionToken); c != "" { t.Errorf("handler got header %q = %q; want empty", fakeConnectionToken, c) @@ -297,10 +302,6 @@ func TestReverseProxyFlushInterval(t *testing.T) { proxyHandler := NewSingleHostReverseProxy(backendURL) proxyHandler.FlushInterval = time.Microsecond - done := make(chan bool) - onExitFlushLoop = func() { done <- true } - defer func() { onExitFlushLoop = nil }() - frontend := httptest.NewServer(proxyHandler) defer frontend.Close() @@ -314,13 +315,6 @@ func TestReverseProxyFlushInterval(t *testing.T) { if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected { t.Errorf("got body %q; expected %q", bodyBytes, expected) } - - select { - case <-done: - // OK - case <-time.After(5 * time.Second): - t.Error("maxLatencyWriter flushLoop() never exited") - } } func TestReverseProxyCancelation(t *testing.T) { @@ -946,3 +940,184 @@ func TestReverseProxy_PanicBodyError(t *testing.T) { req, _ := http.NewRequest("GET", "http://foo.tld/", nil) rproxy.ServeHTTP(httptest.NewRecorder(), req) } + +func TestSelectFlushInterval(t *testing.T) { + tests := []struct { + name string + p *ReverseProxy + req *http.Request + res *http.Response + want time.Duration + }{ + { + name: "default", + res: &http.Response{}, + p: &ReverseProxy{FlushInterval: 123}, + want: 123, + }, + { + name: "server-sent events overrides non-zero", + res: &http.Response{ + Header: http.Header{ + "Content-Type": {"text/event-stream"}, + }, + }, + p: &ReverseProxy{FlushInterval: 123}, + want: -1, + }, + { + name: "server-sent events overrides zero", + res: &http.Response{ + Header: http.Header{ + "Content-Type": {"text/event-stream"}, + }, + 
}, + p: &ReverseProxy{FlushInterval: 0}, + want: -1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.p.flushInterval(tt.req, tt.res) + if got != tt.want { + t.Errorf("flushLatency = %v; want %v", got, tt.want) + } + }) + } +} + +func TestReverseProxyWebSocket(t *testing.T) { + backendServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if upgradeType(r.Header) != "websocket" { + t.Error("unexpected backend request") + http.Error(w, "unexpected request", 400) + return + } + c, _, err := w.(http.Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + defer c.Close() + io.WriteString(c, "HTTP/1.1 101 Switching Protocols\r\nConnection: upgrade\r\nUpgrade: WebSocket\r\n\r\n") + bs := bufio.NewScanner(c) + if !bs.Scan() { + t.Errorf("backend failed to read line from client: %v", bs.Err()) + return + } + fmt.Fprintf(c, "backend got %q\n", bs.Text()) + })) + defer backendServer.Close() + + backURL, _ := url.Parse(backendServer.URL) + rproxy := NewSingleHostReverseProxy(backURL) + rproxy.ErrorLog = log.New(ioutil.Discard, "", 0) // quiet for tests + rproxy.ModifyResponse = func(res *http.Response) error { + res.Header.Add("X-Modified", "true") + return nil + } + + handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("X-Header", "X-Value") + rproxy.ServeHTTP(rw, req) + }) + + frontendProxy := httptest.NewServer(handler) + defer frontendProxy.Close() + + req, _ := http.NewRequest("GET", frontendProxy.URL, nil) + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "websocket") + + c := frontendProxy.Client() + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 101 { + t.Fatalf("status = %v; want 101", res.Status) + } + + got := res.Header.Get("X-Header") + want := "X-Value" + if got != want { + t.Errorf("Header(XHeader) = %q; want %q", got, want) + } + + if upgradeType(res.Header) != "websocket" { 
+ t.Fatalf("not websocket upgrade; got %#v", res.Header) + } + rwc, ok := res.Body.(io.ReadWriteCloser) + if !ok { + t.Fatalf("response body is of type %T; does not implement ReadWriteCloser", res.Body) + } + defer rwc.Close() + + if got, want := res.Header.Get("X-Modified"), "true"; got != want { + t.Errorf("response X-Modified header = %q; want %q", got, want) + } + + io.WriteString(rwc, "Hello\n") + bs := bufio.NewScanner(rwc) + if !bs.Scan() { + t.Fatalf("Scan: %v", bs.Err()) + } + got = bs.Text() + want = `backend got "Hello"` + if got != want { + t.Errorf("got %#q, want %#q", got, want) + } +} + +func TestUnannouncedTrailer(t *testing.T) { + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + w.Header().Set(http.TrailerPrefix+"X-Unannounced-Trailer", "unannounced_trailer_value") + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + proxyHandler.ErrorLog = log.New(ioutil.Discard, "", 0) // quiet for tests + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + frontendClient := frontend.Client() + + res, err := frontendClient.Get(frontend.URL) + if err != nil { + t.Fatalf("Get: %v", err) + } + + ioutil.ReadAll(res.Body) + + if g, w := res.Trailer.Get("X-Unannounced-Trailer"), "unannounced_trailer_value"; g != w { + t.Errorf("Trailer(X-Unannounced-Trailer) = %q; want %q", g, w) + } + +} + +func TestSingleJoinSlash(t *testing.T) { + tests := []struct { + slasha string + slashb string + expected string + }{ + {"https://www.google.com/", "/favicon.ico", "https://www.google.com/favicon.ico"}, + {"https://www.google.com", "/favicon.ico", "https://www.google.com/favicon.ico"}, + {"https://www.google.com", "favicon.ico", "https://www.google.com/favicon.ico"}, + {"https://www.google.com", "", "https://www.google.com/"}, + {"", 
"favicon.ico", "/favicon.ico"}, + } + for _, tt := range tests { + if got := singleJoiningSlash(tt.slasha, tt.slashb); got != tt.expected { + t.Errorf("singleJoiningSlash(%s,%s) want %s got %s", + tt.slasha, + tt.slashb, + tt.expected, + got) + } + } +} diff --git a/src/net/http/proxy_test.go b/src/net/http/proxy_test.go index eef0ca82f8c9a..feb7047a58e55 100644 --- a/src/net/http/proxy_test.go +++ b/src/net/http/proxy_test.go @@ -35,7 +35,7 @@ func TestCacheKeys(t *testing.T) { } proxy = u } - cm := connectMethod{proxy, tt.scheme, tt.addr} + cm := connectMethod{proxy, tt.scheme, tt.addr, false} if got := cm.key().String(); got != tt.key { t.Fatalf("{%q, %q, %q} cache key = %q; want %q", tt.proxy, tt.scheme, tt.addr, got, tt.key) } diff --git a/src/net/http/readrequest_test.go b/src/net/http/readrequest_test.go index 18eed345a8422..517a8189e1580 100644 --- a/src/net/http/readrequest_test.go +++ b/src/net/http/readrequest_test.go @@ -438,7 +438,7 @@ func TestReadRequest(t *testing.T) { // reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters, // ending in \r\n\r\n func reqBytes(req string) []byte { - return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n") + return []byte(strings.ReplaceAll(strings.TrimSpace(req), "\n", "\r\n") + "\r\n\r\n") } var badRequestTests = []struct { diff --git a/src/net/http/request.go b/src/net/http/request.go index a40b0a3cb8385..dcad2b6fab366 100644 --- a/src/net/http/request.go +++ b/src/net/http/request.go @@ -26,7 +26,7 @@ import ( "strings" "sync" - "golang_org/x/net/idna" + "internal/x/net/idna" ) const ( @@ -53,8 +53,9 @@ var ( // available. ErrNotSupported = &ProtocolError{"feature not supported"} - // ErrUnexpectedTrailer is returned by the Transport when a server - // replies with a Trailer header, but without a chunked reply. + // Deprecated: ErrUnexpectedTrailer is no longer returned by + // anything in the net/http package. 
Callers should not + // compare errors against this variable. ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} // ErrMissingBoundary is returned by Request.MultipartReader when the @@ -105,7 +106,7 @@ var reqWriteExcludeHeader = map[string]bool{ // documentation for Request.Write and RoundTripper. type Request struct { // Method specifies the HTTP method (GET, POST, PUT, etc.). - // For client requests an empty string means GET. + // For client requests, an empty string means GET. // // Go's HTTP client does not support sending a request with // the CONNECT method. See the documentation on Transport for @@ -115,7 +116,7 @@ type Request struct { // URL specifies either the URI being requested (for server // requests) or the URL to access (for client requests). // - // For server requests the URL is parsed from the URI + // For server requests, the URL is parsed from the URI // supplied on the Request-Line as stored in RequestURI. For // most requests, fields other than Path and RawQuery will be // empty. (See RFC 7230, Section 5.3) @@ -128,7 +129,7 @@ type Request struct { // The protocol version for incoming server requests. // - // For client requests these fields are ignored. The HTTP + // For client requests, these fields are ignored. The HTTP // client code always uses either HTTP/1.1 or HTTP/2. // See the docs on Transport for details. Proto string // "HTTP/1.0" @@ -170,11 +171,11 @@ type Request struct { // Body is the request's body. // - // For client requests a nil body means the request has no + // For client requests, a nil body means the request has no // body, such as a GET request. The HTTP Client's Transport // is responsible for calling the Close method. // - // For server requests the Request Body is always non-nil + // For server requests, the Request Body is always non-nil // but will return EOF immediately when no body is present. // The Server will close the request body. 
The ServeHTTP // Handler does not need to. @@ -185,13 +186,14 @@ type Request struct { // reading the body more than once. Use of GetBody still // requires setting Body. // - // For server requests it is unused. + // For server requests, it is unused. GetBody func() (io.ReadCloser, error) // ContentLength records the length of the associated content. // The value -1 indicates that the length is unknown. // Values >= 0 indicate that the given number of bytes may // be read from Body. + // // For client requests, a value of 0 with a non-nil Body is // also treated as unknown. ContentLength int64 @@ -215,7 +217,7 @@ type Request struct { // Transport.DisableKeepAlives were set. Close bool - // For server requests Host specifies the host on which the URL + // For server requests, Host specifies the host on which the URL // is sought. Per RFC 7230, section 5.4, this is either the value // of the "Host" header or the host name given in the URL itself. // It may be of the form "host:port". For international domain @@ -228,7 +230,7 @@ type Request struct { // ServeMux supports patterns registered to particular host // names and thus protects its registered Handlers. // - // For client requests Host optionally overrides the Host + // For client requests, Host optionally overrides the Host // header to send. If empty, the Request.Write method uses // the value of URL.Host. Host may contain an international // domain name. @@ -255,14 +257,14 @@ type Request struct { // Trailer specifies additional headers that are sent after the request // body. // - // For server requests the Trailer map initially contains only the + // For server requests, the Trailer map initially contains only the // trailer keys, with nil values. (The client declares which trailers it // will later send.) While the handler is reading from Body, it must // not reference Trailer. 
After reading from Body returns EOF, Trailer // can be read again and will contain non-nil values, if they were sent // by the client. // - // For client requests Trailer must be initialized to a map containing + // For client requests, Trailer must be initialized to a map containing // the trailer keys to later send. The values may be nil or their final // values. The ContentLength must be 0 or -1, to send a chunked request. // After the HTTP request is sent the map values can be updated while @@ -544,8 +546,16 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF } else if r.Method == "CONNECT" && r.URL.Path == "" { // CONNECT requests normally give just the host and port, not a full URL. ruri = host + if r.URL.Opaque != "" { + ruri = r.URL.Opaque + } + } + if stringContainsCTLByte(ruri) { + return errors.New("net/http: can't write control character in Request.URL") } - // TODO(bradfitz): escape at least newlines in ruri? + // TODO: validate r.Method too? At least it's less likely to + // come from an attacker (more likely to be a constant in + // code). // Wrap the writer in a bufio Writer if it's not already buffered. // Don't always call NewWriter, as that forces a bytes.Buffer @@ -574,7 +584,7 @@ func (r *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, waitF // Use the defaultUserAgent unless the Header contains one, which // may be blank to not send the header. 
userAgent := defaultUserAgent - if _, ok := r.Header["User-Agent"]; ok { + if r.Header.has("User-Agent") { userAgent = r.Header.Get("User-Agent") } if userAgent != "" { @@ -1325,6 +1335,9 @@ func (r *Request) wantsHttp10KeepAlive() bool { } func (r *Request) wantsClose() bool { + if r.Close { + return true + } return hasToken(r.Header.get("Connection"), "close") } @@ -1340,6 +1353,12 @@ func (r *Request) isReplayable() bool { case "GET", "HEAD", "OPTIONS", "TRACE": return true } + // The Idempotency-Key, while non-standard, is widely used to + // mean a POST or other request is idempotent. See + // https://golang.org/issue/19943#issuecomment-421092421 + if r.Header.has("Idempotency-Key") || r.Header.has("X-Idempotency-Key") { + return true + } } return false } @@ -1370,3 +1389,10 @@ func requestMethodUsuallyLacksBody(method string) bool { } return false } + +// requiresHTTP1 reports whether this request requires being sent on +// an HTTP/1 connection. +func (r *Request) requiresHTTP1() bool { + return hasToken(r.Header.Get("Connection"), "upgrade") && + strings.EqualFold(r.Header.Get("Upgrade"), "websocket") +} diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go index 7a83ae5b1cef3..e8005571df975 100644 --- a/src/net/http/request_test.go +++ b/src/net/http/request_test.go @@ -878,7 +878,7 @@ func testMissingFile(t *testing.T, req *Request) { } func newTestMultipartRequest(t *testing.T) *Request { - b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1)) + b := strings.NewReader(strings.ReplaceAll(message, "\n", "\r\n")) req, err := NewRequest("POST", "/", b) if err != nil { t.Fatal("NewRequest:", err) @@ -970,8 +970,8 @@ Content-Disposition: form-data; name="textb" ` func benchmarkReadRequest(b *testing.B, request string) { - request = request + "\n" // final \n - request = strings.Replace(request, "\n", "\r\n", -1) // expand \n to \r\n + request = request + "\n" // final \n + request = strings.ReplaceAll(request, "\n", "\r\n") // 
expand \n to \r\n b.SetBytes(int64(len(request))) r := bufio.NewReader(&infiniteReader{buf: []byte(request)}) b.ReportAllocs() diff --git a/src/net/http/requestwrite_test.go b/src/net/http/requestwrite_test.go index eb65b9f736f5b..b110b57b1ab92 100644 --- a/src/net/http/requestwrite_test.go +++ b/src/net/http/requestwrite_test.go @@ -512,6 +512,81 @@ var reqWriteTests = []reqWriteTest{ "User-Agent: Go-http-client/1.1\r\n" + "\r\n", }, + + // CONNECT without Opaque + 21: { + Req: Request{ + Method: "CONNECT", + URL: &url.URL{ + Scheme: "https", // of proxy.com + Host: "proxy.com", + }, + }, + // What we used to do, locking that behavior in: + WantWrite: "CONNECT proxy.com HTTP/1.1\r\n" + + "Host: proxy.com\r\n" + + "User-Agent: Go-http-client/1.1\r\n" + + "\r\n", + }, + + // CONNECT with Opaque + 22: { + Req: Request{ + Method: "CONNECT", + URL: &url.URL{ + Scheme: "https", // of proxy.com + Host: "proxy.com", + Opaque: "backend:443", + }, + }, + WantWrite: "CONNECT backend:443 HTTP/1.1\r\n" + + "Host: proxy.com\r\n" + + "User-Agent: Go-http-client/1.1\r\n" + + "\r\n", + }, + + // Verify that a nil header value doesn't get written. 
+ 23: { + Req: Request{ + Method: "GET", + URL: mustParseURL("/foo"), + Header: Header{ + "X-Foo": []string{"X-Bar"}, + "X-Idempotency-Key": nil, + }, + }, + + WantWrite: "GET /foo HTTP/1.1\r\n" + + "Host: \r\n" + + "User-Agent: Go-http-client/1.1\r\n" + + "X-Foo: X-Bar\r\n\r\n", + }, + 24: { + Req: Request{ + Method: "GET", + URL: mustParseURL("/foo"), + Header: Header{ + "X-Foo": []string{"X-Bar"}, + "X-Idempotency-Key": []string{}, + }, + }, + + WantWrite: "GET /foo HTTP/1.1\r\n" + + "Host: \r\n" + + "User-Agent: Go-http-client/1.1\r\n" + + "X-Foo: X-Bar\r\n\r\n", + }, + + 25: { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Host: "www.example.com", + RawQuery: "new\nline", // or any CTL + }, + }, + WantError: errors.New("net/http: can't write control character in Request.URL"), + }, } func TestRequestWrite(t *testing.T) { diff --git a/src/net/http/response.go b/src/net/http/response.go index bf1e13c8ae2f1..f906ce829b4aa 100644 --- a/src/net/http/response.go +++ b/src/net/http/response.go @@ -12,6 +12,7 @@ import ( "crypto/tls" "errors" "fmt" + "internal/x/net/http/httpguts" "io" "net/textproto" "net/url" @@ -63,6 +64,10 @@ type Response struct { // // The Body is automatically dechunked if the server replied // with a "chunked" Transfer-Encoding. + // + // As of Go 1.12, the Body will also implement io.Writer + // on a successful "101 Switching Protocols" response, + // as used by WebSockets and HTTP/2's "h2c" mode. Body io.ReadCloser // ContentLength records the length of the associated content. The @@ -333,3 +338,23 @@ func (r *Response) closeBody() { r.Body.Close() } } + +// bodyIsWritable reports whether the Body supports writing. The +// Transport returns Writable bodies for 101 Switching Protocols +// responses. +// The Transport uses this method to determine whether a persistent +// connection is done being managed from its perspective.
Once we +// return a writable response body to a user, the net/http package is +// done managing that connection. +func (r *Response) bodyIsWritable() bool { + _, ok := r.Body.(io.Writer) + return ok +} + +// isProtocolSwitch reports whether r is a response to a successful +// protocol upgrade. +func (r *Response) isProtocolSwitch() bool { + return r.StatusCode == StatusSwitchingProtocols && + r.Header.Get("Upgrade") != "" && + httpguts.HeaderValuesContainsToken(r.Header["Connection"], "Upgrade") +} diff --git a/src/net/http/response_test.go b/src/net/http/response_test.go index c28b0cba89e87..c46f13f798833 100644 --- a/src/net/http/response_test.go +++ b/src/net/http/response_test.go @@ -157,6 +157,34 @@ var respTests = []respTest{ "Body here\ncontinued", }, + // Trailer header but no TransferEncoding + { + "HTTP/1.0 200 OK\r\n" + + "Trailer: Content-MD5, Content-Sources\r\n" + + "Content-Length: 10\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{ + "Connection": {"close"}, + "Content-Length": {"10"}, + "Trailer": []string{"Content-MD5, Content-Sources"}, + }, + Close: true, + ContentLength: 10, + }, + + "Body here\n", + }, + // Chunked response with Content-Length. 
{ "HTTP/1.1 200 OK\r\n" + diff --git a/src/net/http/roundtrip_js.go b/src/net/http/roundtrip_js.go index 16b7b891c86b1..1e38b908d387e 100644 --- a/src/net/http/roundtrip_js.go +++ b/src/net/http/roundtrip_js.go @@ -93,7 +93,7 @@ func (t *Transport) RoundTrip(req *Request) (*Response, error) { respCh = make(chan *Response, 1) errCh = make(chan error, 1) ) - success := js.NewCallback(func(args []js.Value) { + success := js.FuncOf(func(this js.Value, args []js.Value) interface{} { result := args[0] header := Header{} // https://developer.mozilla.org/en-US/docs/Web/API/Headers/entries @@ -116,7 +116,9 @@ func (t *Transport) RoundTrip(req *Request) (*Response, error) { b := result.Get("body") var body io.ReadCloser - if b != js.Undefined() { + // The body is undefined when the browser does not support streaming response bodies (Firefox), + // and null in certain error cases, i.e. when the request is blocked because of CORS settings. + if b != js.Undefined() && b != js.Null() { body = &streamReader{stream: b.Call("getReader")} } else { // Fall back to using ArrayBuffer @@ -135,14 +137,17 @@ func (t *Transport) RoundTrip(req *Request) (*Response, error) { }: case <-req.Context().Done(): } + + return nil }) defer success.Release() - failure := js.NewCallback(func(args []js.Value) { + failure := js.FuncOf(func(this js.Value, args []js.Value) interface{} { err := fmt.Errorf("net/http: fetch() failed: %s", args[0].String()) select { case errCh <- err: case <-req.Context().Done(): } + return nil }) defer failure.Release() respPromise.Call("then", success, failure) @@ -185,26 +190,28 @@ func (r *streamReader) Read(p []byte) (n int, err error) { bCh = make(chan []byte, 1) errCh = make(chan error, 1) ) - success := js.NewCallback(func(args []js.Value) { + success := js.FuncOf(func(this js.Value, args []js.Value) interface{} { result := args[0] if result.Get("done").Bool() { errCh <- io.EOF - return + return nil } value := make([]byte, result.Get("value").Get("byteLength").Int()) 
a := js.TypedArrayOf(value) a.Call("set", result.Get("value")) a.Release() bCh <- value + return nil }) defer success.Release() - failure := js.NewCallback(func(args []js.Value) { + failure := js.FuncOf(func(this js.Value, args []js.Value) interface{} { // Assumes it's a TypeError. See // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypeError // for more information on this type. See // https://streams.spec.whatwg.org/#byob-reader-read for the spec on // the read method. errCh <- errors.New(args[0].Get("message").String()) + return nil }) defer failure.Release() r.stream.Call("read").Call("then", success, failure) @@ -251,7 +258,7 @@ func (r *arrayReader) Read(p []byte) (n int, err error) { bCh = make(chan []byte, 1) errCh = make(chan error, 1) ) - success := js.NewCallback(func(args []js.Value) { + success := js.FuncOf(func(this js.Value, args []js.Value) interface{} { // Wrap the input ArrayBuffer with a Uint8Array uint8arrayWrapper := js.Global().Get("Uint8Array").New(args[0]) value := make([]byte, uint8arrayWrapper.Get("byteLength").Int()) @@ -259,14 +266,16 @@ func (r *arrayReader) Read(p []byte) (n int, err error) { a.Call("set", uint8arrayWrapper) a.Release() bCh <- value + return nil }) defer success.Release() - failure := js.NewCallback(func(args []js.Value) { + failure := js.FuncOf(func(this js.Value, args []js.Value) interface{} { // Assumes it's a TypeError. See // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypeError // for more information on this type. // See https://fetch.spec.whatwg.org/#concept-body-consume-body for reasons this might error. 
errCh <- errors.New(args[0].Get("message").String()) + return nil }) defer failure.Release() r.arrayPromise.Call("then", success, failure) diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index a4385419d04b1..6eb0088a96375 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -130,7 +130,7 @@ func (c *testConn) Close() error { // reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters, // ending in \r\n\r\n func reqBytes(req string) []byte { - return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n") + return []byte(strings.ReplaceAll(strings.TrimSpace(req), "\n", "\r\n") + "\r\n\r\n") } type handlerTest struct { @@ -1556,6 +1556,32 @@ func TestServeTLS(t *testing.T) { } } +// Test that the HTTPS server nicely rejects plaintext HTTP/1.x requests. +func TestTLSServerRejectHTTPRequests(t *testing.T) { + setParallel(t) + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + t.Error("unexpected HTTPS request") + })) + var errBuf bytes.Buffer + ts.Config.ErrorLog = log.New(&errBuf, "", 0) + defer ts.Close() + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + io.WriteString(conn, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n") + slurp, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + const wantPrefix = "HTTP/1.0 400 Bad Request\r\n" + if !strings.HasPrefix(string(slurp), wantPrefix) { + t.Errorf("response = %q; wanted prefix %q", slurp, wantPrefix) + } +} + // Issue 15908 func TestAutomaticHTTP2_Serve_NoTLSConfig(t *testing.T) { testAutomaticHTTP2_Serve(t, nil, true) @@ -2988,7 +3014,7 @@ func testRequestBodyLimit(t *testing.T, h2 bool) { // side of their TCP connection, the server doesn't send a 400 Bad Request. 
func TestClientWriteShutdown(t *testing.T) { if runtime.GOOS == "plan9" { - t.Skip("skipping test; see https://golang.org/issue/7237") + t.Skip("skipping test; see https://golang.org/issue/17906") } defer afterTest(t) ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) @@ -4028,21 +4054,18 @@ func TestRequestBodyCloseDoesntBlock(t *testing.T) { } } -// test that ResponseWriter implements io.stringWriter. +// test that ResponseWriter implements io.StringWriter. func TestResponseWriterWriteString(t *testing.T) { okc := make(chan bool, 1) ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { - type stringWriter interface { - WriteString(s string) (n int, err error) - } - _, ok := w.(stringWriter) + _, ok := w.(io.StringWriter) okc <- ok })) ht.rawResponse("GET / HTTP/1.0") select { case ok := <-okc: if !ok { - t.Error("ResponseWriter did not implement io.stringWriter") + t.Error("ResponseWriter did not implement io.StringWriter") } default: t.Error("handler was never called") diff --git a/src/net/http/server.go b/src/net/http/server.go index c24ad750f2114..aa9c3f5d2ebd5 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -22,13 +22,14 @@ import ( "os" "path" "runtime" + "sort" "strconv" "strings" "sync" "sync/atomic" "time" - "golang_org/x/net/http/httpguts" + "internal/x/net/http/httpguts" ) // Errors used by the HTTP server. @@ -1093,13 +1094,34 @@ func checkWriteHeaderCode(code int) { } } +// relevantCaller searches the call stack for the first function outside of net/http. +// The purpose of this function is to provide more helpful error messages. 
+func relevantCaller() runtime.Frame { + pc := make([]uintptr, 16) + n := runtime.Callers(1, pc) + frames := runtime.CallersFrames(pc[:n]) + var frame runtime.Frame + for { + frame, more := frames.Next() + if !strings.HasPrefix(frame.Function, "net/http.") { + return frame + } + if !more { + break + } + } + return frame +} + func (w *response) WriteHeader(code int) { if w.conn.hijacked() { - w.conn.server.logf("http: response.WriteHeader on hijacked connection") + caller := relevantCaller() + w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) return } if w.wroteHeader { - w.conn.server.logf("http: multiple response.WriteHeader calls") + caller := relevantCaller() + w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) return } checkWriteHeaderCode(code) @@ -1368,7 +1390,7 @@ func (cw *chunkWriter) writeHeader(p []byte) { } } - if _, ok := header["Date"]; !ok { + if !header.has("Date") { setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) } @@ -1529,7 +1551,8 @@ func (w *response) WriteString(data string) (n int, err error) { func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { if w.conn.hijacked() { if lenData > 0 { - w.conn.server.logf("http: response.Write on hijacked connection") + caller := relevantCaller() + w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) } return 0, ErrHijacked } @@ -1760,6 +1783,14 @@ func (c *conn) serve(ctx context.Context) { c.rwc.SetWriteDeadline(time.Now().Add(d)) } if err := tlsConn.Handshake(); err != nil { + // If the handshake failed due to the client not speaking + // TLS, assume they're speaking plaintext HTTP and write a + // 400 response on the TLS conn's underlying net.Conn. 
+ if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) { + io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n") + re.Conn.Close() + return + } c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) return } @@ -2149,7 +2180,8 @@ func RedirectHandler(url string, code int) Handler { type ServeMux struct { mu sync.RWMutex m map[string]muxEntry - hosts bool // whether any patterns contain hostnames + es []muxEntry // slice of entries sorted from longest to shortest. + hosts bool // whether any patterns contain hostnames } type muxEntry struct { @@ -2165,19 +2197,6 @@ var DefaultServeMux = &defaultServeMux var defaultServeMux ServeMux -// Does path match pattern? -func pathMatch(pattern, path string) bool { - if len(pattern) == 0 { - // should not happen - return false - } - n := len(pattern) - if pattern[n-1] != '/' { - return pattern == path - } - return len(path) >= n && path[0:n] == pattern -} - // cleanPath returns the canonical path for p, eliminating . and .. elements. func cleanPath(p string) string { if p == "" { @@ -2222,19 +2241,14 @@ func (mux *ServeMux) match(path string) (h Handler, pattern string) { return v.h, v.pattern } - // Check for longest valid match. - var n = 0 - for k, v := range mux.m { - if !pathMatch(k, path) { - continue - } - if h == nil || len(k) > n { - n = len(k) - h = v.h - pattern = v.pattern + // Check for longest valid match. mux.es contains all patterns + // that end in / sorted from longest to shortest. + for _, e := range mux.es { + if strings.HasPrefix(path, e.pattern) { + return e.h, e.pattern } } - return + return nil, "" } // redirectToPathSlash determines if the given path needs appending "/" to it. 
@@ -2380,13 +2394,32 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) { if mux.m == nil { mux.m = make(map[string]muxEntry) } - mux.m[pattern] = muxEntry{h: handler, pattern: pattern} + e := muxEntry{h: handler, pattern: pattern} + mux.m[pattern] = e + if pattern[len(pattern)-1] == '/' { + mux.es = appendSorted(mux.es, e) + } if pattern[0] != '/' { mux.hosts = true } } +func appendSorted(es []muxEntry, e muxEntry) []muxEntry { + n := len(es) + i := sort.Search(n, func(i int) bool { + return len(es[i].pattern) < len(e.pattern) + }) + if i == n { + return append(es, e) + } + // we now know that i points at where we want to insert + es = append(es, muxEntry{}) // try to grow the slice in place, any entry works. + copy(es[i+1:], es[i:]) // Move shorter entries down + es[i] = e + return es +} + // HandleFunc registers the handler function for the given pattern. func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { if handler == nil { @@ -3049,7 +3082,7 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { } // setupHTTP2_ServeTLS conditionally configures HTTP/2 on -// srv and returns whether there was an error setting it up. If it is +// srv and reports whether there was an error setting it up. If it is // not configured for policy reasons, nil is returned. func (srv *Server) setupHTTP2_ServeTLS() error { srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) @@ -3176,7 +3209,6 @@ func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { w.WriteHeader(StatusServiceUnavailable) io.WriteString(w, h.errorBody()) tw.timedOut = true - return } } @@ -3369,3 +3401,13 @@ func strSliceContains(ss []string, s string) bool { } return false } + +// tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header +// looks like it might've been a misdirected plaintext HTTP request. 
+func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { + switch string(hdr[:]) { + case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO": + return true + } + return false +} diff --git a/src/net/http/server_test.go b/src/net/http/server_test.go new file mode 100644 index 0000000000000..0132f3ba5fbd7 --- /dev/null +++ b/src/net/http/server_test.go @@ -0,0 +1,45 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Server unit tests + +package http + +import ( + "fmt" + "testing" +) + +func BenchmarkServerMatch(b *testing.B) { + fn := func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "OK") + } + mux := NewServeMux() + mux.HandleFunc("/", fn) + mux.HandleFunc("/index", fn) + mux.HandleFunc("/home", fn) + mux.HandleFunc("/about", fn) + mux.HandleFunc("/contact", fn) + mux.HandleFunc("/robots.txt", fn) + mux.HandleFunc("/products/", fn) + mux.HandleFunc("/products/1", fn) + mux.HandleFunc("/products/2", fn) + mux.HandleFunc("/products/3", fn) + mux.HandleFunc("/products/3/image.jpg", fn) + mux.HandleFunc("/admin", fn) + mux.HandleFunc("/admin/products/", fn) + mux.HandleFunc("/admin/products/create", fn) + mux.HandleFunc("/admin/products/update", fn) + mux.HandleFunc("/admin/products/delete", fn) + + paths := []string{"/", "/notfound", "/admin/", "/admin/foo", "/contact", "/products", + "/products/", "/products/3/image.jpg"} + b.StartTimer() + for i := 0; i < b.N; i++ { + if h, p := mux.match(paths[i%len(paths)]); h != nil && p == "" { + b.Error("impossible") + } + } + b.StopTimer() +} diff --git a/src/net/http/socks_bundle.go b/src/net/http/socks_bundle.go index e4314b4128306..e6640dd404df2 100644 --- a/src/net/http/socks_bundle.go +++ b/src/net/http/socks_bundle.go @@ -380,6 +380,7 @@ func (d *socksDialer) Dial(network, address string) (net.Conn, error) { return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} } if 
_, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() return nil, err } return c, nil diff --git a/src/net/http/status.go b/src/net/http/status.go index dd72d67be911d..086f3d1a71ed5 100644 --- a/src/net/http/status.go +++ b/src/net/http/status.go @@ -55,6 +55,7 @@ const ( StatusUnprocessableEntity = 422 // RFC 4918, 11.2 StatusLocked = 423 // RFC 4918, 11.3 StatusFailedDependency = 424 // RFC 4918, 11.4 + StatusTooEarly = 425 // RFC 8470, 5.2. StatusUpgradeRequired = 426 // RFC 7231, 6.5.15 StatusPreconditionRequired = 428 // RFC 6585, 3 StatusTooManyRequests = 429 // RFC 6585, 4 @@ -122,6 +123,7 @@ var statusText = map[int]string{ StatusUnprocessableEntity: "Unprocessable Entity", StatusLocked: "Locked", StatusFailedDependency: "Failed Dependency", + StatusTooEarly: "Too Early", StatusUpgradeRequired: "Upgrade Required", StatusPreconditionRequired: "Precondition Required", StatusTooManyRequests: "Too Many Requests", diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go index 2c6ba3231b0b4..e8a93e9137eb8 100644 --- a/src/net/http/transfer.go +++ b/src/net/http/transfer.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "golang_org/x/net/http/httpguts" + "internal/x/net/http/httpguts" ) // ErrLineTooLong is returned when reading request or response bodies @@ -184,6 +184,9 @@ func (t *transferWriter) shouldSendChunkedRequestBody() bool { if t.ContentLength >= 0 || t.Body == nil { // redundant checks; caller did them return false } + if t.Method == "CONNECT" { + return false + } if requestMethodUsuallyLacksBody(t.Method) { // Only probe the Request.Body for GET/HEAD/DELETE/etc // requests, because it's only those types of requests @@ -357,7 +360,11 @@ func (t *transferWriter) writeBody(w io.Writer) error { err = cw.Close() } } else if t.ContentLength == -1 { - ncopy, err = io.Copy(w, body) + dst := w + if t.Method == "CONNECT" { + dst = bufioFlushWriter{dst} + } + ncopy, err = io.Copy(dst, body) } else { ncopy, err = 
io.Copy(w, io.LimitReader(body, t.ContentLength)) if err != nil { @@ -733,6 +740,16 @@ func fixTrailer(header Header, te []string) (Header, error) { if !ok { return nil, nil } + if !chunked(te) { + // Trailer and no chunking: + // this is an invalid use case for trailer header. + // Nevertheless, no error will be returned and we + // let users decide if this is a valid HTTP message. + // The Trailer header will be kept in Response.Header + // but not populate Response.Trailer. + // See issue #27197. + return nil, nil + } header.Del("Trailer") trailer := make(Header) @@ -756,10 +773,6 @@ func fixTrailer(header Header, te []string) (Header, error) { if len(trailer) == 0 { return nil, nil } - if !chunked(te) { - // Trailer and no chunking - return nil, ErrUnexpectedTrailer - } return trailer, nil } @@ -942,7 +955,7 @@ func (b *body) Close() error { // no trailer and closing the connection next. // no point in reading to EOF. case b.doEarlyClose: - // Read up to maxPostHandlerReadBytes bytes of the body, looking for + // Read up to maxPostHandlerReadBytes bytes of the body, looking // for EOF (and trailers), so we can re-use this connection. if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes { // There was a declared Content-Length, and we have more bytes remaining @@ -1050,3 +1063,18 @@ func isKnownInMemoryReader(r io.Reader) bool { } return false } + +// bufioFlushWriter is an io.Writer wrapper that flushes all writes +// on its wrapped writer if it's a *bufio.Writer. 
+type bufioFlushWriter struct{ w io.Writer } + +func (fw bufioFlushWriter) Write(p []byte) (n int, err error) { + n, err = fw.w.Write(p) + if bw, ok := fw.w.(*bufio.Writer); n > 0 && ok { + ferr := bw.Flush() + if ferr != nil && err == nil { + err = ferr + } + } + return +} diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 40947baf87a42..a8c5efe6aaff2 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -30,8 +30,8 @@ import ( "sync/atomic" "time" - "golang_org/x/net/http/httpguts" - "golang_org/x/net/http/httpproxy" + "internal/x/net/http/httpguts" + "internal/x/net/http/httpproxy" ) // DefaultTransport is the default implementation of Transport and is @@ -85,21 +85,21 @@ func init() { // To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2 // and call ConfigureTransport. See the package docs for more about HTTP/2. // -// The Transport will send CONNECT requests to a proxy for its own use -// when processing HTTPS requests, but Transport should generally not -// be used to send a CONNECT request. That is, the Request passed to -// the RoundTrip method should not have a Method of "CONNECT", as Go's -// HTTP/1.x implementation does not support full-duplex request bodies -// being written while the response body is streamed. Go's HTTP/2 -// implementation does support full duplex, but many CONNECT proxies speak -// HTTP/1.x. -// // Responses with status codes in the 1xx range are either handled // automatically (100 expect-continue) or ignored. The one // exception is HTTP status code 101 (Switching Protocols), which is // considered a terminal status and returned by RoundTrip. To see the // ignored 1xx responses, use the httptrace trace package's // ClientTrace.Got1xxResponse. +// +// Transport only retries a request upon encountering a network error +// if the request is idempotent and either has no body or has its +// Request.GetBody defined. 
HTTP requests are considered idempotent if +// they have HTTP methods GET, HEAD, OPTIONS, or TRACE; or if their +// Header map contains an "Idempotency-Key" or "X-Idempotency-Key" +// entry. If the idempotency key value is a zero-length slice, the +// request is treated as idempotent but the header is not sent on the +// wire. type Transport struct { idleMu sync.Mutex wantIdle bool // user has requested to close all idle conns @@ -134,7 +134,7 @@ type Transport struct { // // DialContext runs concurrently with calls to RoundTrip. // A RoundTrip call that initiates a dial may end up using - // an connection dialed previously when the earlier connection + // a connection dialed previously when the earlier connection // becomes idle before the later DialContext completes. DialContext func(ctx context.Context, network, addr string) (net.Conn, error) @@ -142,7 +142,7 @@ type Transport struct { // // Dial runs concurrently with calls to RoundTrip. // A RoundTrip call that initiates a dial may end up using - // an connection dialed previously when the earlier connection + // a connection dialed previously when the earlier connection // becomes idle before the later Dial completes. // // Deprecated: Use DialContext instead, which allows the transport @@ -278,7 +278,7 @@ func (t *Transport) onceSetNextProtoDefaults() { // If they've already configured http2 with // golang.org/x/net/http2 instead of the bundled copy, try to - // get at its http2.Transport value (via the the "https" + // get at its http2.Transport value (via the "https" // altproto map) so we can call CloseIdleConnections on it if // requested.
(Issue 22891) altProto, _ := t.altProto.Load().(map[string]RoundTripper) @@ -286,6 +286,7 @@ if v := rv.Field(0); v.CanInterface() { if h2i, ok := v.Interface().(h2Transport); ok { t.h2transport = h2i + return } } } @@ -381,6 +382,19 @@ func (tr *transportRequest) setError(err error) { tr.mu.Unlock() } +// useRegisteredProtocol reports whether an alternate protocol (as registered +// with Transport.RegisterProtocol) should be respected for this request. +func (t *Transport) useRegisteredProtocol(req *Request) bool { + if req.URL.Scheme == "https" && req.requiresHTTP1() { + // If this request requires HTTP/1, don't use the + // "https" alternate protocol, which is used by the + // HTTP/2 code to take over requests if there's an + // existing cached HTTP/2 connection. + return false + } + return true +} + // roundTrip implements a RoundTripper over HTTP. func (t *Transport) roundTrip(req *Request) (*Response, error) { t.nextProtoOnce.Do(t.onceSetNextProtoDefaults) @@ -410,10 +424,12 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) { } } - altProto, _ := t.altProto.Load().(map[string]RoundTripper) - if altRT := altProto[scheme]; altRT != nil { - if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol { - return resp, err + if t.useRegisteredProtocol(req) { + altProto, _ := t.altProto.Load().(map[string]RoundTripper) + if altRT := altProto[scheme]; altRT != nil { + if resp, err := altRT.RoundTrip(req); err != ErrSkipAltProtocol { + return resp, err + } } } if !isHTTP { @@ -477,9 +493,8 @@ func (t *Transport) roundTrip(req *Request) (*Response, error) { } testHookRoundTripRetried() - // Rewind the body if we're able to. (HTTP/2 does this itself so we only - // need to do it for HTTP/1.1 connections.) - if req.GetBody != nil && pconn.alt == nil { + // Rewind the body if we're able to.
+ if req.GetBody != nil { newReq := *req var err error newReq.Body, err = req.GetBody() @@ -653,6 +668,7 @@ func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectM } } } + cm.onlyH1 = treq.requiresHTTP1() return cm, err } @@ -1155,6 +1171,9 @@ func (pconn *persistConn) addTLS(name string, trace *httptrace.ClientTrace) erro if cfg.ServerName == "" { cfg.ServerName = name } + if pconn.cacheKey.onlyH1 { + cfg.NextProtos = nil + } plainConn := pconn.conn tlsConn := tls.Client(plainConn, cfg) errc := make(chan error, 2) @@ -1361,10 +1380,11 @@ func (w persistConnWriter) Write(p []byte) (n int, err error) { // // A connect method may be of the following types: // -// Cache key form Description -// ----------------- ------------------------- +// connectMethod.key().String() Description +// ------------------------------ ------------------------- // |http|foo.com http directly to server, no proxy // |https|foo.com https directly to server, no proxy +// |https,h1|foo.com https directly to server w/o HTTP/2, no proxy // http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com // http://proxy.com|http http to proxy, http to anywhere after that // socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com @@ -1379,6 +1399,7 @@ type connectMethod struct { // then targetAddr is not included in the connect method key, because the socket can // be reused for different targetAddr values. targetAddr string + onlyH1 bool // whether to disable HTTP/2 and force HTTP/1 } func (cm *connectMethod) key() connectMethodKey { @@ -1394,6 +1415,7 @@ func (cm *connectMethod) key() connectMethodKey { proxy: proxyStr, scheme: cm.targetScheme, addr: targetAddr, + onlyH1: cm.onlyH1, } } @@ -1428,11 +1450,16 @@ func (cm *connectMethod) tlsHost() string { // a URL. type connectMethodKey struct { proxy, scheme, addr string + onlyH1 bool } func (k connectMethodKey) String() string { // Only used by tests. 
- return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr) + var h1 string + if k.onlyH1 { + h1 = ",h1" + } + return fmt.Sprintf("%s|%s%s|%s", k.proxy, k.scheme, h1, k.addr) } // persistConn wraps a connection, usually a persistent one @@ -1607,6 +1634,11 @@ func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritte return err } +// errCallerOwnsConn is an internal sentinel error used when we hand +// off a writable response.Body to the caller. We use this to prevent +// closing a net.Conn that is now owned by the caller. +var errCallerOwnsConn = errors.New("read loop ending; caller owns writable underlying conn") + func (pc *persistConn) readLoop() { closeErr := errReadLoopExiting // default value, if not changed below defer func() { @@ -1681,16 +1713,17 @@ func (pc *persistConn) readLoop() { pc.numExpectedResponses-- pc.mu.Unlock() + bodyWritable := resp.bodyIsWritable() hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0 - if resp.Close || rc.req.Close || resp.StatusCode <= 199 { + if resp.Close || rc.req.Close || resp.StatusCode <= 199 || bodyWritable { // Don't do keep-alive on error if either party requested a close // or we get an unexpected informational (1xx) response. // StatusCode 100 is already handled above. 
alive = false } - if !hasBody { + if !hasBody || bodyWritable { pc.t.setReqCanceler(rc.req, nil) // Put the idle conn back into the pool before we send the response @@ -1704,6 +1737,10 @@ func (pc *persistConn) readLoop() { pc.wroteRequest() && tryPutIdleConn(trace) + if bodyWritable { + closeErr = errCallerOwnsConn + } + select { case rc.ch <- responseAndError{res: resp}: case <-rc.callerGone: @@ -1848,6 +1885,10 @@ func (pc *persistConn) readResponse(rc requestAndChan, trace *httptrace.ClientTr } break } + if resp.isProtocolSwitch() { + resp.Body = newReadWriteCloserBody(pc.br, pc.conn) + } + resp.TLS = pc.tlsState return } @@ -1874,6 +1915,38 @@ func (pc *persistConn) waitForContinue(continueCh <-chan struct{}) func() bool { } } +func newReadWriteCloserBody(br *bufio.Reader, rwc io.ReadWriteCloser) io.ReadWriteCloser { + body := &readWriteCloserBody{ReadWriteCloser: rwc} + if br.Buffered() != 0 { + body.br = br + } + return body +} + +// readWriteCloserBody is the Response.Body type used when we want to +// give users write access to the Body through the underlying +// connection (TCP, unless using custom dialers). This is then +// the concrete type for a Response.Body on the 101 Switching +// Protocols response, as used by WebSockets, h2c, etc. +type readWriteCloserBody struct { + br *bufio.Reader // used until empty + io.ReadWriteCloser +} + +func (b *readWriteCloserBody) Read(p []byte) (n int, err error) { + if b.br != nil { + if n := b.br.Buffered(); len(p) > n { + p = p[:n] + } + n, err = b.br.Read(p) + if b.br.Buffered() == 0 { + b.br = nil + } + return n, err + } + return b.ReadWriteCloser.Read(p) +} + // nothingWrittenError wraps a write errors which ended up writing zero bytes. type nothingWrittenError struct { error @@ -2043,7 +2116,7 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. 
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: @@ -2062,7 +2135,7 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err continueCh = make(chan struct{}, 1) } - if pc.t.DisableKeepAlives { + if pc.t.DisableKeepAlives && !req.wantsClose() { req.extraHeaders().Set("Connection", "close") } @@ -2193,7 +2266,9 @@ func (pc *persistConn) closeLocked(err error) { // freelist for http2. That's done by the // alternate protocol's RoundTripper. } else { - pc.conn.Close() + if err != errCallerOwnsConn { + pc.conn.Close() + } close(pc.closech) } } @@ -2341,7 +2416,7 @@ type fakeLocker struct{} func (fakeLocker) Lock() {} func (fakeLocker) Unlock() {} -// clneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if +// cloneTLSConfig returns a shallow clone of cfg, or a new zero tls.Config if // cfg is nil. This is safe to call even if cfg is in active use by a TLS // client or server. 
func cloneTLSConfig(cfg *tls.Config) *tls.Config { diff --git a/src/net/http/transport_internal_test.go b/src/net/http/transport_internal_test.go index a5f29c97a9087..92729e65b2647 100644 --- a/src/net/http/transport_internal_test.go +++ b/src/net/http/transport_internal_test.go @@ -7,8 +7,13 @@ package http import ( + "bytes" + "crypto/tls" "errors" + "io" + "io/ioutil" "net" + "net/http/internal" "strings" "testing" ) @@ -178,3 +183,81 @@ func TestTransportShouldRetryRequest(t *testing.T) { } } } + +type roundTripFunc func(r *Request) (*Response, error) + +func (f roundTripFunc) RoundTrip(r *Request) (*Response, error) { + return f(r) +} + +// Issue 25009 +func TestTransportBodyAltRewind(t *testing.T) { + cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey) + if err != nil { + t.Fatal(err) + } + ln := newLocalListener(t) + defer ln.Close() + + go func() { + tln := tls.NewListener(ln, &tls.Config{ + NextProtos: []string{"foo"}, + Certificates: []tls.Certificate{cert}, + }) + for i := 0; i < 2; i++ { + sc, err := tln.Accept() + if err != nil { + t.Error(err) + return + } + if err := sc.(*tls.Conn).Handshake(); err != nil { + t.Error(err) + return + } + sc.Close() + } + }() + + addr := ln.Addr().String() + req, _ := NewRequest("POST", "https://example.org/", bytes.NewBufferString("request")) + roundTripped := false + tr := &Transport{ + DisableKeepAlives: true, + TLSNextProto: map[string]func(string, *tls.Conn) RoundTripper{ + "foo": func(authority string, c *tls.Conn) RoundTripper { + return roundTripFunc(func(r *Request) (*Response, error) { + n, _ := io.Copy(ioutil.Discard, r.Body) + if n == 0 { + t.Error("body length is zero") + } + if roundTripped { + return &Response{ + Body: NoBody, + StatusCode: 200, + }, nil + } + roundTripped = true + return nil, http2noCachedConnError{} + }) + }, + }, + DialTLS: func(_, _ string) (net.Conn, error) { + tc, err := tls.Dial("tcp", addr, &tls.Config{ + InsecureSkipVerify: true, + NextProtos: 
[]string{"foo"}, + }) + if err != nil { + return nil, err + } + if err := tc.Handshake(); err != nil { + return nil, err + } + return tc, nil + }, + } + c := &Client{Transport: tr} + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index aa8beb9357c88..6e075847ddeea 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "internal/nettrace" - "internal/testenv" "io" "io/ioutil" "log" @@ -42,6 +41,8 @@ import ( "sync/atomic" "testing" "time" + + "internal/x/net/http/httpguts" ) // TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close @@ -311,6 +312,58 @@ func TestTransportConnectionCloseOnRequestDisableKeepAlive(t *testing.T) { } } +// Test that Transport only sends one "Connection: close", regardless of +// how "close" was indicated. +func TestTransportRespectRequestWantsClose(t *testing.T) { + tests := []struct { + disableKeepAlives bool + close bool + }{ + {disableKeepAlives: false, close: false}, + {disableKeepAlives: false, close: true}, + {disableKeepAlives: true, close: false}, + {disableKeepAlives: true, close: true}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("DisableKeepAlive=%v,RequestClose=%v", tc.disableKeepAlives, tc.close), + func(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + c := ts.Client() + c.Transport.(*Transport).DisableKeepAlives = tc.disableKeepAlives + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + count := 0 + trace := &httptrace.ClientTrace{ + WroteHeaderField: func(key string, field []string) { + if key != "Connection" { + return + } + if httpguts.HeaderValuesContainsToken(field, "close") { + count += 1 + } + }, + } + req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) + req.Close = tc.close + res, err := c.Do(req) + if err != nil { + 
t.Fatal(err) + } + defer res.Body.Close() + if want := tc.disableKeepAlives || tc.close; count > 1 || (count == 1) != want { + t.Errorf("expecting want:%v, got 'Connection: close':%d", want, count) + } + }) + } + +} + func TestTransportIdleCacheKeys(t *testing.T) { defer afterTest(t) ts := httptest.NewServer(hostPortHandler) @@ -2726,7 +2779,6 @@ func TestTransportTLSHandshakeTimeout(t *testing.T) { // Trying to repro golang.org/issue/3514 func TestTLSServerClosesConnection(t *testing.T) { defer afterTest(t) - testenv.SkipFlaky(t, 7634) closedc := make(chan bool, 1) ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { @@ -3827,9 +3879,9 @@ func testTransportEventTrace(t *testing.T, h2 bool, noHooks bool) { } // Install a fake DNS server. - ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, network, host string) ([]net.IPAddr, error) { if host != "dns-is-faked.golang" { - t.Errorf("unexpected DNS host lookup for %q", host) + t.Errorf("unexpected DNS host lookup for %q/%q", network, host) return nil, nil } return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil @@ -4178,7 +4230,7 @@ func TestTransportMaxIdleConns(t *testing.T) { if err != nil { t.Fatal(err) } - ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, _, host string) ([]net.IPAddr, error) { return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil }) @@ -4281,7 +4333,7 @@ func testTransportIdleConnTimeout(t *testing.T, h2 bool) { } // Issue 16208: Go 1.7 crashed after Transport.IdleConnTimeout if an -// HTTP/2 connection was established but but its caller no longer +// HTTP/2 connection was 
established but its caller no longer // wanted it. (Assuming the connection cache was enabled, which it is // by default) // @@ -4418,9 +4470,9 @@ func testTransportIDNA(t *testing.T, h2 bool) { } // Install a fake DNS server. - ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) { + ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, network, host string) ([]net.IPAddr, error) { if host != punyDomain { - t.Errorf("got DNS host lookup for %q; want %q", host, punyDomain) + t.Errorf("got DNS host lookup for %q/%q; want %q", network, host, punyDomain) return nil, nil } return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil @@ -4755,7 +4807,7 @@ func TestClientTimeoutKillsConn_BeforeHeaders(t *testing.T) { } case <-time.After(timeout * 10): // If we didn't get into the Handler in 50ms, that probably means - // the builder was just slow and the the Get failed in that time + // the builder was just slow and the Get failed in that time // but never made it to the server. That's fine. We'll usually // test the part above on faster machines. t.Skip("skipping test on slow builder") @@ -4766,7 +4818,7 @@ func TestClientTimeoutKillsConn_BeforeHeaders(t *testing.T) { // conn is closed so that it's not reused. // // This is the test variant that has the server send response headers -// first, and time out during the the write of the response body. +// first, and time out during the write of the response body. 
func TestClientTimeoutKillsConn_AfterHeaders(t *testing.T) { setParallel(t) defer afterTest(t) @@ -4838,3 +4890,172 @@ func TestClientTimeoutKillsConn_AfterHeaders(t *testing.T) { t.Fatal("timeout") } } + +func TestTransportResponseBodyWritableOnProtocolSwitch(t *testing.T) { + setParallel(t) + defer afterTest(t) + done := make(chan struct{}) + defer close(done) + cst := newClientServerTest(t, h1Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + conn, _, err := w.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + io.WriteString(conn, "HTTP/1.1 101 Switching Protocols Hi\r\nConnection: upgRADe\r\nUpgrade: foo\r\n\r\nSome buffered data\n") + bs := bufio.NewScanner(conn) + bs.Scan() + fmt.Fprintf(conn, "%s\n", strings.ToUpper(bs.Text())) + <-done + })) + defer cst.close() + + req, _ := NewRequest("GET", cst.ts.URL, nil) + req.Header.Set("Upgrade", "foo") + req.Header.Set("Connection", "upgrade") + res, err := cst.c.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 101 { + t.Fatalf("expected 101 switching protocols; got %v, %v", res.Status, res.Header) + } + rwc, ok := res.Body.(io.ReadWriteCloser) + if !ok { + t.Fatalf("expected a ReadWriteCloser; got a %T", res.Body) + } + defer rwc.Close() + bs := bufio.NewScanner(rwc) + if !bs.Scan() { + t.Fatalf("expected readable input") + } + if got, want := bs.Text(), "Some buffered data"; got != want { + t.Errorf("read %q; want %q", got, want) + } + io.WriteString(rwc, "echo\n") + if !bs.Scan() { + t.Fatalf("expected another line") + } + if got, want := bs.Text(), "ECHO"; got != want { + t.Errorf("read %q; want %q", got, want) + } +} + +func TestTransportCONNECTBidi(t *testing.T) { + defer afterTest(t) + const target = "backend:443" + cst := newClientServerTest(t, h1Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "CONNECT" { + t.Errorf("unexpected method %q", r.Method) + w.WriteHeader(500) + return + } + if r.RequestURI != target { + 
t.Errorf("unexpected CONNECT target %q", r.RequestURI) + w.WriteHeader(500) + return + } + nc, brw, err := w.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + defer nc.Close() + nc.Write([]byte("HTTP/1.1 200 OK\r\n\r\n")) + // Switch to a little protocol that capitalize its input lines: + for { + line, err := brw.ReadString('\n') + if err != nil { + if err != io.EOF { + t.Error(err) + } + return + } + io.WriteString(brw, strings.ToUpper(line)) + brw.Flush() + } + })) + defer cst.close() + pr, pw := io.Pipe() + defer pw.Close() + req, err := NewRequest("CONNECT", cst.ts.URL, pr) + if err != nil { + t.Fatal(err) + } + req.URL.Opaque = target + res, err := cst.c.Do(req) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + t.Fatalf("status code = %d; want 200", res.StatusCode) + } + br := bufio.NewReader(res.Body) + for _, str := range []string{"foo", "bar", "baz"} { + fmt.Fprintf(pw, "%s\n", str) + got, err := br.ReadString('\n') + if err != nil { + t.Fatal(err) + } + got = strings.TrimSpace(got) + want := strings.ToUpper(str) + if got != want { + t.Fatalf("got %q; want %q", got, want) + } + } +} + +func TestTransportRequestReplayable(t *testing.T) { + someBody := ioutil.NopCloser(strings.NewReader("")) + tests := []struct { + name string + req *Request + want bool + }{ + { + name: "GET", + req: &Request{Method: "GET"}, + want: true, + }, + { + name: "GET_http.NoBody", + req: &Request{Method: "GET", Body: NoBody}, + want: true, + }, + { + name: "GET_body", + req: &Request{Method: "GET", Body: someBody}, + want: false, + }, + { + name: "POST", + req: &Request{Method: "POST"}, + want: false, + }, + { + name: "POST_idempotency-key", + req: &Request{Method: "POST", Header: Header{"Idempotency-Key": {"x"}}}, + want: true, + }, + { + name: "POST_x-idempotency-key", + req: &Request{Method: "POST", Header: Header{"X-Idempotency-Key": {"x"}}}, + want: true, + }, + { + name: "POST_body", + req: &Request{Method: "POST", 
Header: Header{"Idempotency-Key": {"x"}}, Body: someBody}, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.req.ExportIsReplayable() + if got != tt.want { + t.Errorf("replyable = %v; want %v", got, tt.want) + } + }) + } +} diff --git a/src/net/interface.go b/src/net/interface.go index 375a4568e3f28..58248560a25c5 100644 --- a/src/net/interface.go +++ b/src/net/interface.go @@ -13,8 +13,8 @@ import ( // BUG(mikio): On JS and NaCl, methods and functions related to // Interface are not implemented. -// BUG(mikio): On DragonFly BSD, NetBSD, OpenBSD, Plan 9 and Solaris, -// the MulticastAddrs method of Interface is not implemented. +// BUG(mikio): On AIX, DragonFly BSD, NetBSD, OpenBSD, Plan 9 and +// Solaris, the MulticastAddrs method of Interface is not implemented. var ( errInvalidInterface = errors.New("invalid network interface") @@ -102,7 +102,7 @@ func Interfaces() ([]Interface, error) { return nil, &OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: err} } if len(ift) != 0 { - zoneCache.update(ift) + zoneCache.update(ift, false) } return ift, nil } @@ -159,7 +159,7 @@ func InterfaceByName(name string) (*Interface, error) { return nil, &OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: err} } if len(ift) != 0 { - zoneCache.update(ift) + zoneCache.update(ift, false) } for _, ifi := range ift { if name == ifi.Name { @@ -187,18 +187,21 @@ var zoneCache = ipv6ZoneCache{ toName: make(map[int]string), } -func (zc *ipv6ZoneCache) update(ift []Interface) { +// update refreshes the network interface information if the cache was last +// updated more than 1 minute ago, or if force is set. It reports whether the +// cache was updated. 
+func (zc *ipv6ZoneCache) update(ift []Interface, force bool) (updated bool) { zc.Lock() defer zc.Unlock() now := time.Now() - if zc.lastFetched.After(now.Add(-60 * time.Second)) { - return + if !force && zc.lastFetched.After(now.Add(-60*time.Second)) { + return false } zc.lastFetched = now if len(ift) == 0 { var err error if ift, err = interfaceTable(0); err != nil { - return + return false } } zc.toIndex = make(map[string]int, len(ift)) @@ -209,17 +212,24 @@ func (zc *ipv6ZoneCache) update(ift []Interface) { zc.toName[ifi.Index] = ifi.Name } } + return true } func (zc *ipv6ZoneCache) name(index int) string { if index == 0 { return "" } - zoneCache.update(nil) + updated := zoneCache.update(nil, false) zoneCache.RLock() - defer zoneCache.RUnlock() name, ok := zoneCache.toName[index] - if !ok { + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + name, ok = zoneCache.toName[index] + zoneCache.RUnlock() + } + if !ok { // last resort name = uitoa(uint(index)) } return name @@ -229,11 +239,17 @@ func (zc *ipv6ZoneCache) index(name string) int { if name == "" { return 0 } - zoneCache.update(nil) + updated := zoneCache.update(nil, false) zoneCache.RLock() - defer zoneCache.RUnlock() index, ok := zoneCache.toIndex[name] - if !ok { + zoneCache.RUnlock() + if !ok && !updated { + zoneCache.update(nil, true) + zoneCache.RLock() + index, ok = zoneCache.toIndex[name] + zoneCache.RUnlock() + } + if !ok { // last resort index, _, _ = dtoi(name) } return index diff --git a/src/net/interface_aix.go b/src/net/interface_aix.go new file mode 100644 index 0000000000000..9a8b5bbdb1b22 --- /dev/null +++ b/src/net/interface_aix.go @@ -0,0 +1,183 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package net + +import ( + "internal/syscall/unix" + "syscall" + "unsafe" +) + +type rawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [120]byte +} + +type ifreq struct { + Name [16]uint8 + Ifru [16]byte +} + +const _KINFO_RT_IFLIST = (0x1 << 8) | 3 | (1 << 30) + +const _RTAX_NETMASK = 2 +const _RTAX_IFA = 5 +const _RTAX_MAX = 8 + +func getIfList() ([]byte, error) { + needed, err := syscall.Getkerninfo(_KINFO_RT_IFLIST, 0, 0, 0) + if err != nil { + return nil, err + } + tab := make([]byte, needed) + _, err = syscall.Getkerninfo(_KINFO_RT_IFLIST, uintptr(unsafe.Pointer(&tab[0])), uintptr(unsafe.Pointer(&needed)), 0) + if err != nil { + return nil, err + } + return tab[:needed], nil +} + +// If the ifindex is zero, interfaceTable returns mappings of all +// network interfaces. Otherwise it returns a mapping of a specific +// interface. +func interfaceTable(ifindex int) ([]Interface, error) { + tab, err := getIfList() + if err != nil { + return nil, err + } + + var ift []Interface + for len(tab) > 0 { + ifm := (*syscall.IfMsgHdr)(unsafe.Pointer(&tab[0])) + if ifm.Msglen == 0 { + break + } + if ifm.Type == syscall.RTM_IFINFO { + if ifindex == 0 || ifindex == int(ifm.Index) { + sdl := (*rawSockaddrDatalink)(unsafe.Pointer(&tab[syscall.SizeofIfMsghdr])) + + ifi := &Interface{Index: int(ifm.Index), Flags: linkFlags(ifm.Flags)} + ifi.Name = string(sdl.Data[:sdl.Nlen]) + ifi.HardwareAddr = sdl.Data[sdl.Nlen : sdl.Nlen+sdl.Alen] + + // Retrieve MTU + ifr := &ifreq{} + copy(ifr.Name[:], ifi.Name) + sock, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0) + if err != nil { + return nil, err + } + err = unix.Ioctl(sock, syscall.SIOCGIFMTU, uintptr(unsafe.Pointer(ifr))) + if err != nil { + return nil, err + } + ifi.MTU = int(ifr.Ifru[0])<<24 | int(ifr.Ifru[1])<<16 | int(ifr.Ifru[2])<<8 | int(ifr.Ifru[3]) + + ift = append(ift, *ifi) + if ifindex == int(ifm.Index) { + break + } + } + } + 
tab = tab[ifm.Msglen:] + } + + return ift, nil +} + +func linkFlags(rawFlags int32) Flags { + var f Flags + if rawFlags&syscall.IFF_UP != 0 { + f |= FlagUp + } + if rawFlags&syscall.IFF_BROADCAST != 0 { + f |= FlagBroadcast + } + if rawFlags&syscall.IFF_LOOPBACK != 0 { + f |= FlagLoopback + } + if rawFlags&syscall.IFF_POINTOPOINT != 0 { + f |= FlagPointToPoint + } + if rawFlags&syscall.IFF_MULTICAST != 0 { + f |= FlagMulticast + } + return f +} + +// If the ifi is nil, interfaceAddrTable returns addresses for all +// network interfaces. Otherwise it returns addresses for a specific +// interface. +func interfaceAddrTable(ifi *Interface) ([]Addr, error) { + tab, err := getIfList() + if err != nil { + return nil, err + } + + var ifat []Addr + for len(tab) > 0 { + ifm := (*syscall.IfMsgHdr)(unsafe.Pointer(&tab[0])) + if ifm.Msglen == 0 { + break + } + if ifm.Type == syscall.RTM_NEWADDR { + if ifi == nil || ifi.Index == int(ifm.Index) { + mask := ifm.Addrs + off := uint(syscall.SizeofIfMsghdr) + + var iprsa, nmrsa *syscall.RawSockaddr + for i := uint(0); i < _RTAX_MAX; i++ { + if mask&(1<= 0 { + if bytealg.IndexByteString(host, ':') >= 0 { return addrErr(hostport, tooManyColons) } } - if byteIndex(hostport[j:], '[') >= 0 { + if bytealg.IndexByteString(hostport[j:], '[') >= 0 { return addrErr(hostport, "unexpected '[' in address") } - if byteIndex(hostport[k:], ']') >= 0 { + if bytealg.IndexByteString(hostport[k:], ']') >= 0 { return addrErr(hostport, "unexpected ']' in address") } @@ -226,7 +227,7 @@ func splitHostZone(s string) (host, zone string) { func JoinHostPort(host, port string) string { // We assume that host is a literal IPv6 address if host has // colons. - if byteIndex(host, ':') >= 0 { + if bytealg.IndexByteString(host, ':') >= 0 { return "[" + host + "]:" + port } return host + ":" + port @@ -276,7 +277,7 @@ func (r *Resolver) internetAddrList(ctx context.Context, net, addr string) (addr } // Try as a literal IP address, then as a DNS name. 
- ips, err := r.LookupIPAddr(ctx, host) + ips, err := r.lookupIPAddr(ctx, net, host) if err != nil { return nil, err } diff --git a/src/net/ipsock_plan9.go b/src/net/ipsock_plan9.go index 312e4adb47deb..d226585e086f8 100644 --- a/src/net/ipsock_plan9.go +++ b/src/net/ipsock_plan9.go @@ -6,6 +6,7 @@ package net import ( "context" + "internal/bytealg" "os" "syscall" ) @@ -49,7 +50,7 @@ func probe(filename, query string) bool { // parsePlan9Addr parses address of the form [ip!]port (e.g. 127.0.0.1!80). func parsePlan9Addr(s string) (ip IP, iport int, err error) { addr := IPv4zero // address contains port only - i := byteIndex(s, '!') + i := bytealg.IndexByteString(s, '!') if i >= 0 { addr = ParseIP(s[:i]) if addr == nil { diff --git a/src/net/ipsock_posix.go b/src/net/ipsock_posix.go index cb7483cd1f5b7..947f4ef6148b6 100644 --- a/src/net/ipsock_posix.go +++ b/src/net/ipsock_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net @@ -134,7 +134,7 @@ func favoriteAddrFamily(network string, laddr, raddr sockaddr, mode string) (fam } func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string, ctrlFn func(string, string, syscall.RawConn) error) (fd *netFD, err error) { - if (runtime.GOOS == "windows" || runtime.GOOS == "openbsd" || runtime.GOOS == "nacl") && mode == "dial" && raddr.isWildcard() { + if (runtime.GOOS == "aix" || runtime.GOOS == "windows" || runtime.GOOS == "openbsd" || runtime.GOOS == "nacl") && mode == "dial" && raddr.isWildcard() { raddr = raddr.toLocal(net) } family, ipv6only := favoriteAddrFamily(net, laddr, raddr, mode) diff --git a/src/net/listen_test.go b/src/net/listen_test.go index ffce8e22ec920..6c3f70cc7bf7e 100644 --- a/src/net/listen_test.go +++ b/src/net/listen_test.go @@ -674,7 +674,7 @@ func checkMulticastListener(c *UDPConn, ip IP) error { func multicastRIBContains(ip IP) (bool, error) { switch runtime.GOOS { - case "dragonfly", "netbsd", "openbsd", "plan9", "solaris", "windows": + case "aix", "dragonfly", "netbsd", "openbsd", "plan9", "solaris", "windows": return true, nil // not implemented yet case "linux": if runtime.GOARCH == "arm" || runtime.GOARCH == "alpha" { diff --git a/src/net/lookup.go b/src/net/lookup.go index e0f21fa9a8d3f..e10889331e4f5 100644 --- a/src/net/lookup.go +++ b/src/net/lookup.go @@ -97,6 +97,19 @@ func lookupPortMap(network, service string) (port int, error error) { return 0, &AddrError{Err: "unknown port", Addr: network + "/" + service} } +// ipVersion returns the provided network's IP version: '4', '6' or 0 +// if network does not end in a '4' or '6' byte. 
+func ipVersion(network string) byte { + if network == "" { + return 0 + } + n := network[len(network)-1] + if n != '4' && n != '6' { + n = 0 + } + return n +} + // DefaultResolver is the resolver used by the package-level Lookup // functions and by Dialers without a specified Resolver. var DefaultResolver = &Resolver{} @@ -189,6 +202,39 @@ func LookupIP(host string) ([]IP, error) { // LookupIPAddr looks up host using the local resolver. // It returns a slice of that host's IPv4 and IPv6 addresses. func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, error) { + return r.lookupIPAddr(ctx, "ip", host) +} + +// onlyValuesCtx is a context that uses an underlying context +// for value lookup if the underlying context hasn't yet expired. +type onlyValuesCtx struct { + context.Context + lookupValues context.Context +} + +var _ context.Context = (*onlyValuesCtx)(nil) + +// Value performs a lookup if the original context hasn't expired. +func (ovc *onlyValuesCtx) Value(key interface{}) interface{} { + select { + case <-ovc.lookupValues.Done(): + return nil + default: + return ovc.lookupValues.Value(key) + } +} + +// withUnexpiredValuesPreserved returns a context.Context that only uses lookupCtx +// for its values, otherwise it is never canceled and has no deadline. +// If the lookup context expires, any looked up values will return nil. +// See Issue 28600. +func withUnexpiredValuesPreserved(lookupCtx context.Context) context.Context { + return &onlyValuesCtx{Context: context.Background(), lookupValues: lookupCtx} +} + +// lookupIPAddr looks up host using the local resolver and particular network. +// It returns a slice of that host's IPv4 and IPv6 addresses. +func (r *Resolver) lookupIPAddr(ctx context.Context, network, host string) ([]IPAddr, error) { // Make sure that no matter what we do later, host=="" is rejected. // parseIP, for example, does accept empty strings. 
if host == "" { @@ -205,20 +251,21 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err // can be overridden by tests. This is needed by net/http, so it // uses a context key instead of unexported variables. resolverFunc := r.lookupIP - if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string) ([]IPAddr, error)); alt != nil { + if alt, _ := ctx.Value(nettrace.LookupIPAltResolverKey{}).(func(context.Context, string, string) ([]IPAddr, error)); alt != nil { resolverFunc = alt } // We don't want a cancelation of ctx to affect the // lookupGroup operation. Otherwise if our context gets // canceled it might cause an error to be returned to a lookup - // using a completely different context. - lookupGroupCtx, lookupGroupCancel := context.WithCancel(context.Background()) + // using a completely different context. However we need to preserve + // only the values in context. See Issue 28600. + lookupGroupCtx, lookupGroupCancel := context.WithCancel(withUnexpiredValuesPreserved(ctx)) dnsWaitGroup.Add(1) ch, called := r.getLookupGroup().DoChan(host, func() (interface{}, error) { defer dnsWaitGroup.Done() - return testHookLookupIP(lookupGroupCtx, resolverFunc, host) + return testHookLookupIP(lookupGroupCtx, resolverFunc, network, host) }) if !called { dnsWaitGroup.Done() @@ -289,6 +336,13 @@ func LookupPort(network, service string) (port int, err error) { func (r *Resolver) LookupPort(ctx context.Context, network, service string) (port int, err error) { port, needsLookup := parsePort(service) if needsLookup { + switch network { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + case "": // a hint wildcard for Go 1.0 undocumented behavior + network = "ip" + default: + return 0, &AddrError{Err: "unknown network", Addr: network} + } port, err = r.lookupPort(ctx, network, service) if err != nil { return 0, err diff --git a/src/net/lookup_fake.go b/src/net/lookup_fake.go index d3d1dbc90032f..6c8a151bcac1c 100644 --- 
a/src/net/lookup_fake.go +++ b/src/net/lookup_fake.go @@ -19,7 +19,7 @@ func (*Resolver) lookupHost(ctx context.Context, host string) (addrs []string, e return nil, syscall.ENOPROTOOPT } -func (*Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (*Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { return nil, syscall.ENOPROTOOPT } diff --git a/src/net/lookup_plan9.go b/src/net/lookup_plan9.go index 5547f0b0eeb3a..70805ddf4cd05 100644 --- a/src/net/lookup_plan9.go +++ b/src/net/lookup_plan9.go @@ -7,6 +7,7 @@ package net import ( "context" "errors" + "internal/bytealg" "io" "os" ) @@ -135,7 +136,7 @@ func lookupProtocol(ctx context.Context, name string) (proto int, err error) { return 0, UnknownNetworkError(name) } s := f[1] - if n, _, ok := dtoi(s[byteIndex(s, '=')+1:]); ok { + if n, _, ok := dtoi(s[bytealg.IndexByteString(s, '=')+1:]); ok { return n, nil } return 0, UnknownNetworkError(name) @@ -158,7 +159,7 @@ loop: continue } addr := f[1] - if i := byteIndex(addr, '!'); i >= 0 { + if i := bytealg.IndexByteString(addr, '!'); i >= 0 { addr = addr[:i] // remove port } if ParseIP(addr) == nil { @@ -175,7 +176,7 @@ loop: return } -func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (r *Resolver) lookupIP(ctx context.Context, _, host string) (addrs []IPAddr, err error) { lits, err := r.lookupHost(ctx, host) if err != nil { return @@ -210,7 +211,7 @@ func (*Resolver) lookupPort(ctx context.Context, network, service string) (port return 0, unknownPortError } s := f[1] - if i := byteIndex(s, '!'); i >= 0 { + if i := bytealg.IndexByteString(s, '!'); i >= 0 { s = s[i+1:] // remove address } if n, _, ok := dtoi(s); ok { @@ -304,7 +305,7 @@ func (*Resolver) lookupTXT(ctx context.Context, name string) (txt []string, err return } for _, line := range lines { - if i := byteIndex(line, '\t'); i >= 0 { + if i := bytealg.IndexByteString(line, '\t'); i >= 0 { 
txt = append(txt, absDomainName([]byte(line[i+1:]))) } } diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 5c66dfa2603ad..85bcb2b8960b2 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -20,7 +20,7 @@ import ( "time" ) -func lookupLocalhost(ctx context.Context, fn func(context.Context, string) ([]IPAddr, error), host string) ([]IPAddr, error) { +func lookupLocalhost(ctx context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { switch host { case "localhost": return []IPAddr{ @@ -28,7 +28,7 @@ func lookupLocalhost(ctx context.Context, fn func(context.Context, string) ([]IP {IP: IPv6loopback}, }, nil default: - return fn(ctx, host) + return fn(ctx, network, host) } } @@ -207,6 +207,9 @@ var lookupGmailTXTTests = []struct { } func TestLookupGmailTXT(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/29722") + } t.Parallel() mustHaveExternalNetwork(t) @@ -237,11 +240,16 @@ func TestLookupGmailTXT(t *testing.T) { if len(txts) == 0 { t.Error("got no record") } + found := false for _, txt := range txts { - if !strings.Contains(txt, tt.txt) || (!strings.HasSuffix(txt, tt.host) && !strings.HasSuffix(txt, tt.host+".")) { - t.Errorf("got %s; want a record containing %s, %s", txt, tt.txt, tt.host) + if strings.Contains(txt, tt.txt) && (strings.HasSuffix(txt, tt.host) || strings.HasSuffix(txt, tt.host+".")) { + found = true + break } } + if !found { + t.Errorf("got %v; want a record containing %s, %s", txts, tt.txt, tt.host) + } } } @@ -618,6 +626,9 @@ func TestLookupDotsWithLocalSource(t *testing.T) { } func TestLookupDotsWithRemoteSource(t *testing.T) { + if runtime.GOOS == "darwin" { + testenv.SkipFlaky(t, 27992) + } mustHaveExternalNetwork(t) if !supportsIPv4() || !*testIPv4 { @@ -925,8 +936,8 @@ func TestLookupHostCancel(t *testing.T) { const ( google = "www.google.com" - invalidDomain = "nonexistentdomain.golang.org" - n = 
600 // this needs to be larger than threadLimit size + invalidDomain = "invalid.invalid" // RFC 2606 reserves .invalid + n = 600 // this needs to be larger than threadLimit size ) _, err := LookupHost(google) @@ -1008,3 +1019,104 @@ func TestConcurrentPreferGoResolversDial(t *testing.T) { } } } + +var ipVersionTests = []struct { + network string + version byte +}{ + {"tcp", 0}, + {"tcp4", '4'}, + {"tcp6", '6'}, + {"udp", 0}, + {"udp4", '4'}, + {"udp6", '6'}, + {"ip", 0}, + {"ip4", '4'}, + {"ip6", '6'}, + {"ip7", 0}, + {"", 0}, +} + +func TestIPVersion(t *testing.T) { + for _, tt := range ipVersionTests { + if version := ipVersion(tt.network); version != tt.version { + t.Errorf("Family for: %s. Expected: %s, Got: %s", tt.network, + string(tt.version), string(version)) + } + } +} + +// Issue 28600: The context that is used to lookup ips should always +// preserve the values from the context that was passed into LookupIPAddr. +func TestLookupIPAddrPreservesContextValues(t *testing.T) { + origTestHookLookupIP := testHookLookupIP + defer func() { testHookLookupIP = origTestHookLookupIP }() + + keyValues := []struct { + key, value interface{} + }{ + {"key-1", 12}, + {384, "value2"}, + {new(float64), 137}, + } + ctx := context.Background() + for _, kv := range keyValues { + ctx = context.WithValue(ctx, kv.key, kv.value) + } + + wantIPs := []IPAddr{ + {IP: IPv4(127, 0, 0, 1)}, + {IP: IPv6loopback}, + } + + checkCtxValues := func(ctx_ context.Context, fn func(context.Context, string, string) ([]IPAddr, error), network, host string) ([]IPAddr, error) { + for _, kv := range keyValues { + g, w := ctx_.Value(kv.key), kv.value + if !reflect.DeepEqual(g, w) { + t.Errorf("Value lookup:\n\tGot: %v\n\tWant: %v", g, w) + } + } + return wantIPs, nil + } + testHookLookupIP = checkCtxValues + + resolvers := []*Resolver{ + nil, + new(Resolver), + } + + for i, resolver := range resolvers { + gotIPs, err := resolver.LookupIPAddr(ctx, "golang.org") + if err != nil { + t.Errorf("Resolver 
#%d: unexpected error: %v", i, err) + } + if !reflect.DeepEqual(gotIPs, wantIPs) { + t.Errorf("#%d: mismatched IPAddr results\n\tGot: %v\n\tWant: %v", i, gotIPs, wantIPs) + } + } +} + +func TestWithUnexpiredValuesPreserved(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + // Insert a value into it. + key, value := "key-1", 2 + ctx = context.WithValue(ctx, key, value) + + // Now use the "values preserving context" like + // we would for LookupIPAddr. See Issue 28600. + ctx = withUnexpiredValuesPreserved(ctx) + + // Lookup before expiry. + if g, w := ctx.Value(key), value; g != w { + t.Errorf("Lookup before expiry: Got %v Want %v", g, w) + } + + // Cancel the context. + cancel() + + // Lookup after expiry should return nil + if g := ctx.Value(key); g != nil { + t.Errorf("Lookup after expiry: Got %v want nil", g) + } +} diff --git a/src/net/lookup_unix.go b/src/net/lookup_unix.go index 2c3191aca8a66..6543f121a738f 100644 --- a/src/net/lookup_unix.go +++ b/src/net/lookup_unix.go @@ -2,16 +2,17 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net import ( "context" + "internal/bytealg" "sync" "syscall" - "golang_org/x/net/dns/dnsmessage" + "internal/x/net/dns/dnsmessage" ) var onceReadProtocols sync.Once @@ -27,7 +28,7 @@ func readProtocols() { for line, ok := file.readLine(); ok; line, ok = file.readLine() { // tcp 6 TCP # transmission control protocol - if i := byteIndex(line, '#'); i >= 0 { + if i := bytealg.IndexByteString(line, '#'); i >= 0 { line = line[0:i] } f := getFields(line) @@ -86,13 +87,13 @@ func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, return r.goLookupHostOrder(ctx, host, order) } -func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) { +func (r *Resolver) lookupIP(ctx context.Context, network, host string) (addrs []IPAddr, err error) { if r.preferGo() { return r.goLookupIP(ctx, host) } order := systemConf().hostLookupOrder(r, host) if order == hostLookupCgo { - if addrs, err, ok := cgoLookupIP(ctx, host); ok { + if addrs, err, ok := cgoLookupIP(ctx, network, host); ok { return addrs, err } // cgo not available (or netgo); fall back to Go's DNS resolver @@ -299,11 +300,21 @@ func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) Server: server, } } + // Multiple strings in one TXT record need to be + // concatenated without separator to be consistent + // with previous Go resolver. + n := 0 + for _, s := range txt.TXT { + n += len(s) + } + txtJoin := make([]byte, 0, n) + for _, s := range txt.TXT { + txtJoin = append(txtJoin, s...) + } if len(txts) == 0 { - txts = txt.TXT - } else { - txts = append(txts, txt.TXT...) 
+ txts = make([]string, 0, 1) } + txts = append(txts, string(txtJoin)) } return txts, nil } diff --git a/src/net/lookup_windows.go b/src/net/lookup_windows.go index f76e0af400dcc..8a68d18a674b3 100644 --- a/src/net/lookup_windows.go +++ b/src/net/lookup_windows.go @@ -65,7 +65,7 @@ func lookupProtocol(ctx context.Context, name string) (int, error) { } func (r *Resolver) lookupHost(ctx context.Context, name string) ([]string, error) { - ips, err := r.lookupIP(ctx, name) + ips, err := r.lookupIP(ctx, "ip", name) if err != nil { return nil, err } @@ -76,14 +76,22 @@ func (r *Resolver) lookupHost(ctx context.Context, name string) ([]string, error return addrs, nil } -func (r *Resolver) lookupIP(ctx context.Context, name string) ([]IPAddr, error) { +func (r *Resolver) lookupIP(ctx context.Context, network, name string) ([]IPAddr, error) { // TODO(bradfitz,brainman): use ctx more. See TODO below. + var family int32 = syscall.AF_UNSPEC + switch ipVersion(network) { + case '4': + family = syscall.AF_INET + case '6': + family = syscall.AF_INET6 + } + getaddr := func() ([]IPAddr, error) { acquireThread() defer releaseThread() hints := syscall.AddrinfoW{ - Family: syscall.AF_UNSPEC, + Family: family, Socktype: syscall.SOCK_STREAM, Protocol: syscall.IPPROTO_IP, } diff --git a/src/net/lookup_windows_test.go b/src/net/lookup_windows_test.go index cebb2d0558976..d3748f28c3f98 100644 --- a/src/net/lookup_windows_test.go +++ b/src/net/lookup_windows_test.go @@ -150,7 +150,7 @@ func nslookup(qtype, name string) (string, error) { if err := cmd.Run(); err != nil { return "", err } - r := strings.Replace(out.String(), "\r\n", "\n", -1) + r := strings.ReplaceAll(out.String(), "\r\n", "\n") // nslookup stderr output contains also debug information such as // "Non-authoritative answer" and it doesn't return the correct errcode if strings.Contains(err.String(), "can't find") { diff --git a/src/net/mail/message.go b/src/net/mail/message.go index 5912b9033477e..554377aa1da98 100644 --- 
a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -26,6 +26,7 @@ import ( "mime" "net/textproto" "strings" + "sync" "time" "unicode/utf8" ) @@ -65,9 +66,12 @@ func ReadMessage(r io.Reader) (msg *Message, err error) { // Layouts suitable for passing to time.Parse. // These are tried in order. -var dateLayouts []string +var ( + dateLayoutsBuildOnce sync.Once + dateLayouts []string +) -func init() { +func buildDateLayouts() { // Generate layouts based on RFC 5322, section 3.3. dows := [...]string{"", "Mon, "} // day-of-week @@ -93,6 +97,7 @@ func init() { // ParseDate parses an RFC 5322 date string. func ParseDate(date string) (time.Time, error) { + dateLayoutsBuildOnce.Do(buildDateLayouts) for _, layout := range dateLayouts { t, err := time.Parse(layout, date) if err == nil { diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go index b19da52c423a7..14ac9192a4af0 100644 --- a/src/net/mail/message_test.go +++ b/src/net/mail/message_test.go @@ -668,9 +668,9 @@ func TestAddressParser(t *testing.T) { switch charset { case "iso-8859-15": - in = bytes.Replace(in, []byte("\xf6"), []byte("ö"), -1) + in = bytes.ReplaceAll(in, []byte("\xf6"), []byte("ö")) case "windows-1252": - in = bytes.Replace(in, []byte("\xe9"), []byte("é"), -1) + in = bytes.ReplaceAll(in, []byte("\xe9"), []byte("é")) } return bytes.NewReader(in), nil diff --git a/src/net/main_unix_test.go b/src/net/main_unix_test.go index 9cfbc8efc406a..34a8a104e82e6 100644 --- a/src/net/main_unix_test.go +++ b/src/net/main_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package net diff --git a/src/net/net.go b/src/net/net.go index c909986269906..77b8f69074e39 100644 --- a/src/net/net.go +++ b/src/net/net.go @@ -357,7 +357,16 @@ type PacketConn interface { SetWriteDeadline(t time.Time) error } -var listenerBacklog = maxListenerBacklog() +var listenerBacklogCache struct { + sync.Once + val int +} + +// listenerBacklog is a caching wrapper around maxListenerBacklog. +func listenerBacklog() int { + listenerBacklogCache.Do(func() { listenerBacklogCache.val = maxListenerBacklog() }) + return listenerBacklogCache.val +} // A Listener is a generic network listener for stream-oriented protocols. // diff --git a/src/net/net_test.go b/src/net/net_test.go index 692f269e0c29b..2b5845bb1580f 100644 --- a/src/net/net_test.go +++ b/src/net/net_test.go @@ -529,7 +529,7 @@ func TestNotTemporaryRead(t *testing.T) { server := func(cs *TCPConn) error { cs.SetLinger(0) // Give the client time to get stuck in a Read. 
- time.Sleep(20 * time.Millisecond) + time.Sleep(50 * time.Millisecond) cs.Close() return nil } diff --git a/src/net/net_windows_test.go b/src/net/net_windows_test.go index 8dfd3129802c2..8aa719f433afa 100644 --- a/src/net/net_windows_test.go +++ b/src/net/net_windows_test.go @@ -571,7 +571,7 @@ func TestInterfaceHardwareAddrWithGetmac(t *testing.T) { // skip these return } - addr = strings.Replace(addr, "-", ":", -1) + addr = strings.ReplaceAll(addr, "-", ":") cname := getValue("Connection Name") want[cname] = addr group = make(map[string]string) diff --git a/src/net/netgo_unix_test.go b/src/net/netgo_unix_test.go index f2244ea69c40b..c672d3e8ebfbe 100644 --- a/src/net/netgo_unix_test.go +++ b/src/net/netgo_unix_test.go @@ -16,7 +16,7 @@ func TestGoLookupIP(t *testing.T) { defer dnsWaitGroup.Wait() host := "localhost" ctx := context.Background() - _, err, ok := cgoLookupIP(ctx, host) + _, err, ok := cgoLookupIP(ctx, "ip", host) if ok { t.Errorf("cgoLookupIP must be a placeholder") } diff --git a/src/net/nss.go b/src/net/nss.go index 08c3e6a69fe55..96b9cdd12106e 100644 --- a/src/net/nss.go +++ b/src/net/nss.go @@ -2,12 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net import ( "errors" + "internal/bytealg" "io" "os" ) @@ -85,7 +86,7 @@ func parseNSSConf(r io.Reader) *nssConf { if len(line) == 0 { return nil } - colon := bytesIndexByte(line, ':') + colon := bytealg.IndexByte(line, ':') if colon == -1 { return errors.New("no colon on line") } @@ -96,7 +97,7 @@ func parseNSSConf(r io.Reader) *nssConf { if len(srcs) == 0 { break } - sp := bytesIndexByte(srcs, ' ') + sp := bytealg.IndexByte(srcs, ' ') var src string if sp == -1 { src = string(srcs) @@ -108,7 +109,7 @@ func parseNSSConf(r io.Reader) *nssConf { var criteria []nssCriterion // See if there's a criteria block in brackets. if len(srcs) > 0 && srcs[0] == '[' { - bclose := bytesIndexByte(srcs, ']') + bclose := bytealg.IndexByte(srcs, ']') if bclose == -1 { return errors.New("unclosed criterion bracket") } @@ -143,7 +144,7 @@ func parseCriteria(x []byte) (c []nssCriterion, err error) { if len(f) < 3 { return errors.New("criterion too short") } - eq := bytesIndexByte(f, '=') + eq := bytealg.IndexByte(f, '=') if eq == -1 { return errors.New("criterion lacks equal sign") } diff --git a/src/net/parse.go b/src/net/parse.go index e356cb1559660..cdb35bb826e0e 100644 --- a/src/net/parse.go +++ b/src/net/parse.go @@ -8,10 +8,10 @@ package net import ( + "internal/bytealg" "io" "os" "time" - _ "unsafe" // For go:linkname ) type file struct { @@ -80,17 +80,11 @@ func stat(name string) (mtime time.Time, size int64, err error) { return st.ModTime(), st.Size(), nil } -// byteIndex is strings.IndexByte. It returns the index of the -// first instance of c in s, or -1 if c is not present in s. -// strings.IndexByte is implemented in runtime/asm_$GOARCH.s -//go:linkname byteIndex strings.IndexByte -func byteIndex(s string, c byte) int - // Count occurrences in s of any bytes in t. 
func countAnyByte(s string, t string) int { n := 0 for i := 0; i < len(s); i++ { - if byteIndex(t, s[i]) >= 0 { + if bytealg.IndexByteString(t, s[i]) >= 0 { n++ } } @@ -103,7 +97,7 @@ func splitAtBytes(s string, t string) []string { n := 0 last := 0 for i := 0; i < len(s); i++ { - if byteIndex(t, s[i]) >= 0 { + if bytealg.IndexByteString(t, s[i]) >= 0 { if last < i { a[n] = s[last:i] n++ @@ -276,7 +270,7 @@ func isSpace(b byte) bool { // removeComment returns line, removing any '#' byte and any following // bytes. func removeComment(line []byte) []byte { - if i := bytesIndexByte(line, '#'); i != -1 { + if i := bytealg.IndexByte(line, '#'); i != -1 { return line[:i] } return line @@ -287,7 +281,7 @@ func removeComment(line []byte) []byte { // It returns the first non-nil error returned by fn. func foreachLine(x []byte, fn func(line []byte) error) error { for len(x) > 0 { - nl := bytesIndexByte(x, '\n') + nl := bytealg.IndexByte(x, '\n') if nl == -1 { return fn(x) } @@ -305,7 +299,7 @@ func foreachLine(x []byte, fn func(line []byte) error) error { func foreachField(x []byte, fn func(field []byte) error) error { x = trimSpace(x) for len(x) > 0 { - sp := bytesIndexByte(x, ' ') + sp := bytealg.IndexByte(x, ' ') if sp == -1 { return fn(x) } @@ -319,12 +313,6 @@ func foreachField(x []byte, fn func(field []byte) error) error { return nil } -// bytesIndexByte is bytes.IndexByte. It returns the index of the -// first instance of c in s, or -1 if c is not present in s. -// bytes.IndexByte is implemented in runtime/asm_$GOARCH.s -//go:linkname bytesIndexByte bytes.IndexByte -func bytesIndexByte(s []byte, c byte) int - // stringsHasSuffix is strings.HasSuffix. It reports whether s ends in // suffix. 
func stringsHasSuffix(s, suffix string) bool { diff --git a/src/net/pipe_test.go b/src/net/pipe_test.go index 84a71b756bc04..53ddc16313d97 100644 --- a/src/net/pipe_test.go +++ b/src/net/pipe_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "golang_org/x/net/nettest" + "internal/x/net/nettest" ) func TestPipe(t *testing.T) { diff --git a/src/net/platform_test.go b/src/net/platform_test.go index 8e7d9151dee76..7e9ad70d19b28 100644 --- a/src/net/platform_test.go +++ b/src/net/platform_test.go @@ -7,7 +7,9 @@ package net import ( "internal/testenv" "os" + "os/exec" "runtime" + "strconv" "strings" "testing" ) @@ -35,6 +37,16 @@ func testableNetwork(network string) bool { switch runtime.GOOS { case "android", "nacl", "plan9", "windows": return false + case "aix": + // Unix network isn't properly working on AIX 7.2 with Technical Level < 2 + out, err := exec.Command("oslevel", "-s").Output() + if err != nil { + return false + } + if tl, err := strconv.Atoi(string(out[5:7])); err != nil || tl < 2 { + return false + } + return true } // iOS does not support unix, unixgram. if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { @@ -42,7 +54,7 @@ func testableNetwork(network string) bool { } case "unixpacket": switch runtime.GOOS { - case "android", "darwin", "nacl", "plan9", "windows": + case "aix", "android", "darwin", "nacl", "plan9", "windows": return false case "netbsd": // It passes on amd64 at least. 386 fails (Issue 22927). arm is unknown. diff --git a/src/net/port_unix.go b/src/net/port_unix.go index 64c7f575c7f4d..a6153c0c206df 100644 --- a/src/net/port_unix.go +++ b/src/net/port_unix.go @@ -2,13 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux netbsd openbsd solaris nacl +// +build aix darwin dragonfly freebsd js,wasm linux netbsd openbsd solaris nacl // Read system port mappings from /etc/services package net -import "sync" +import ( + "internal/bytealg" + "sync" +) var onceReadServices sync.Once @@ -21,7 +24,7 @@ func readServices() { for line, ok := file.readLine(); ok; line, ok = file.readLine() { // "http 80/tcp www www-http # World Wide Web HTTP" - if i := byteIndex(line, '#'); i >= 0 { + if i := bytealg.IndexByteString(line, '#'); i >= 0 { line = line[:i] } f := getFields(line) diff --git a/src/net/rawconn_unix_test.go b/src/net/rawconn_unix_test.go index a720a8a4a3e27..a71b6f3041d7e 100644 --- a/src/net/rawconn_unix_test.go +++ b/src/net/rawconn_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package net diff --git a/src/net/sendfile_linux.go b/src/net/sendfile_linux.go index c537ea68b2b41..297e625d24b1d 100644 --- a/src/net/sendfile_linux.go +++ b/src/net/sendfile_linux.go @@ -32,7 +32,19 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { return 0, nil, false } - written, err = poll.SendFile(&c.pfd, int(f.Fd()), remain) + sc, err := f.SyscallConn() + if err != nil { + return 0, nil, false + } + + var werr error + err = sc.Read(func(fd uintptr) bool { + written, werr = poll.SendFile(&c.pfd, int(fd), remain) + return true + }) + if werr == nil { + werr = err + } if lr != nil { lr.N = remain - written diff --git a/src/net/sendfile_stub.go b/src/net/sendfile_stub.go index d5b8755af3e4e..6d338da454f8d 100644 --- a/src/net/sendfile_stub.go +++ b/src/net/sendfile_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin js,wasm nacl netbsd openbsd +// +build aix darwin js,wasm nacl netbsd openbsd package net diff --git a/src/net/sendfile_test.go b/src/net/sendfile_test.go index ecc00d3c2a0ca..911e6139c57e3 100644 --- a/src/net/sendfile_test.go +++ b/src/net/sendfile_test.go @@ -12,14 +12,18 @@ import ( "encoding/hex" "fmt" "io" + "io/ioutil" "os" + "runtime" + "sync" "testing" + "time" ) const ( - twain = "testdata/Mark.Twain-Tom.Sawyer.txt" - twainLen = 387851 - twainSHA256 = "461eb7cb2d57d293fc680c836464c9125e4382be3596f7d415093ae9db8fcb0e" + newton = "../testdata/Isaac.Newton-Opticks.txt" + newtonLen = 567198 + newtonSHA256 = "d4a9ac22462b35e7821a4f2706c211093da678620a8f9997989ee7cf8d507bbd" ) func TestSendfile(t *testing.T) { @@ -43,7 +47,7 @@ func TestSendfile(t *testing.T) { defer close(errc) defer conn.Close() - f, err := os.Open(twain) + f, err := os.Open(newton) if err != nil { errc <- err return @@ -58,8 +62,8 @@ func TestSendfile(t *testing.T) { return } - if sbytes != twainLen { - errc <- fmt.Errorf("sent %d bytes; expected %d", sbytes, twainLen) + if sbytes != newtonLen { + errc <- fmt.Errorf("sent %d bytes; expected %d", sbytes, newtonLen) return } }() @@ -79,11 +83,11 @@ func TestSendfile(t *testing.T) { t.Error(err) } - if rbytes != twainLen { - t.Errorf("received %d bytes; expected %d", rbytes, twainLen) + if rbytes != newtonLen { + t.Errorf("received %d bytes; expected %d", rbytes, newtonLen) } - if res := hex.EncodeToString(h.Sum(nil)); res != twainSHA256 { + if res := hex.EncodeToString(h.Sum(nil)); res != newtonSHA256 { t.Error("retrieved data hash did not match") } @@ -113,7 +117,7 @@ func TestSendfileParts(t *testing.T) { defer close(errc) defer conn.Close() - f, err := os.Open(twain) + f, err := os.Open(newton) if err != nil { errc <- err return @@ -149,3 +153,164 @@ func TestSendfileParts(t *testing.T) { t.Error(err) } } + +func TestSendfileSeeked(t *testing.T) { + ln, err := newLocalListener("tcp") + if err != nil { + t.Fatal(err) + } + 
defer ln.Close() + + const seekTo = 65 << 10 + const sendSize = 10 << 10 + + errc := make(chan error, 1) + go func(ln Listener) { + // Wait for a connection. + conn, err := ln.Accept() + if err != nil { + errc <- err + close(errc) + return + } + + go func() { + defer close(errc) + defer conn.Close() + + f, err := os.Open(newton) + if err != nil { + errc <- err + return + } + defer f.Close() + if _, err := f.Seek(seekTo, os.SEEK_SET); err != nil { + errc <- err + return + } + + _, err = io.CopyN(conn, f, sendSize) + if err != nil { + errc <- err + return + } + }() + }(ln) + + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + buf := new(bytes.Buffer) + buf.ReadFrom(c) + + if buf.Len() != sendSize { + t.Errorf("Got %d bytes; want %d", buf.Len(), sendSize) + } + + for err := range errc { + t.Error(err) + } +} + +// Test that sendfile doesn't put a pipe into blocking mode. +func TestSendfilePipe(t *testing.T) { + switch runtime.GOOS { + case "nacl", "plan9", "windows": + // These systems don't support deadlines on pipes. + t.Skipf("skipping on %s", runtime.GOOS) + } + + t.Parallel() + + ln, err := newLocalListener("tcp") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer w.Close() + defer r.Close() + + copied := make(chan bool) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + // Accept a connection and copy 1 byte from the read end of + // the pipe to the connection. This will call into sendfile. + defer wg.Done() + conn, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + defer conn.Close() + _, err = io.CopyN(conn, r, 1) + if err != nil { + t.Error(err) + return + } + // Signal the main goroutine that we've copied the byte. + close(copied) + }() + + wg.Add(1) + go func() { + // Write 1 byte to the write end of the pipe. 
+ defer wg.Done() + _, err := w.Write([]byte{'a'}) + if err != nil { + t.Error(err) + } + }() + + wg.Add(1) + go func() { + // Connect to the server started two goroutines up and + // discard any data that it writes. + defer wg.Done() + conn, err := Dial("tcp", ln.Addr().String()) + if err != nil { + t.Error(err) + return + } + defer conn.Close() + io.Copy(ioutil.Discard, conn) + }() + + // Wait for the byte to be copied, meaning that sendfile has + // been called on the pipe. + <-copied + + // Set a very short deadline on the read end of the pipe. + if err := r.SetDeadline(time.Now().Add(time.Microsecond)); err != nil { + t.Fatal(err) + } + + wg.Add(1) + go func() { + // Wait for much longer than the deadline and write a byte + // to the pipe. + defer wg.Done() + time.Sleep(50 * time.Millisecond) + w.Write([]byte{'b'}) + }() + + // If this read does not time out, the pipe was incorrectly + // put into blocking mode. + _, err = r.Read(make([]byte, 1)) + if err == nil { + t.Error("Read did not time out") + } else if !os.IsTimeout(err) { + t.Errorf("got error %v, expected a time out", err) + } + + wg.Wait() +} diff --git a/src/net/sendfile_unix_alt.go b/src/net/sendfile_unix_alt.go index 9b3ba4ee624a3..43df3bfd15eff 100644 --- a/src/net/sendfile_unix_alt.go +++ b/src/net/sendfile_unix_alt.go @@ -58,7 +58,19 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) { return 0, err, false } - written, err = poll.SendFile(&c.pfd, int(f.Fd()), pos, remain) + sc, err := f.SyscallConn() + if err != nil { + return 0, nil, false + } + + var werr error + err = sc.Read(func(fd uintptr) bool { + written, werr = poll.SendFile(&c.pfd, int(fd), pos, remain) + return true + }) + if werr == nil { + werr = err + } if lr != nil { lr.N = remain - written diff --git a/src/net/smtp/smtp_test.go b/src/net/smtp/smtp_test.go index 000cac4fcbbfe..e366ef8015878 100644 --- a/src/net/smtp/smtp_test.go +++ b/src/net/smtp/smtp_test.go @@ -540,7 +540,7 @@ func TestSendMail(t 
*testing.T) { bcmdbuf := bufio.NewWriter(&cmdbuf) l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - t.Fatalf("Unable to to create listener: %v", err) + t.Fatalf("Unable to create listener: %v", err) } defer l.Close() @@ -639,7 +639,7 @@ QUIT func TestSendMailWithAuth(t *testing.T) { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - t.Fatalf("Unable to to create listener: %v", err) + t.Fatalf("Unable to create listener: %v", err) } defer l.Close() wg := sync.WaitGroup{} @@ -880,31 +880,39 @@ func sendMail(hostPort string) error { return SendMail(hostPort, nil, from, to, []byte("Subject: test\n\nhowdy!")) } -// (copied from net/http/httptest) -// localhostCert is a PEM-encoded TLS cert with SAN IPs -// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end -// of ASN.1 time). -// generated from src/crypto/tls: -// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h -var localhostCert = []byte(`-----BEGIN CERTIFICATE----- -MIIBjjCCATigAwIBAgIQMon9v0s3pDFXvAMnPgelpzANBgkqhkiG9w0BAQsFADAS -MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw -MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB -AM0u/mNXKkhAzNsFkwKZPSpC4lZZaePQ55IyaJv3ovMM2smvthnlqaUfVKVmz7FF -wLP9csX6vGtvkZg1uWAtvfkCAwEAAaNoMGYwDgYDVR0PAQH/BAQDAgKkMBMGA1Ud -JQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhh -bXBsZS5jb22HBH8AAAGHEAAAAAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQAD -QQBOZsFVC7IwX+qibmSbt2IPHkUgXhfbq0a9MYhD6tHcj4gbDcTXh4kZCbgHCz22 -gfSj2/G2wxzopoISVDucuncj +// localhostCert is a PEM-encoded TLS cert generated from src/crypto/tls: +// go run generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,example.com \ +// --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var localhostCert = []byte(` +-----BEGIN CERTIFICATE----- +MIICFDCCAX2gAwIBAgIRAK0xjnaPuNDSreeXb+z+0u4wDQYJKoZIhvcNAQELBQAw 
+EjEQMA4GA1UEChMHQWNtZSBDbzAgFw03MDAxMDEwMDAwMDBaGA8yMDg0MDEyOTE2 +MDAwMFowEjEQMA4GA1UEChMHQWNtZSBDbzCBnzANBgkqhkiG9w0BAQEFAAOBjQAw +gYkCgYEA0nFbQQuOWsjbGtejcpWz153OlziZM4bVjJ9jYruNw5n2Ry6uYQAffhqa +JOInCmmcVe2siJglsyH9aRh6vKiobBbIUXXUU1ABd56ebAzlt0LobLlx7pZEMy30 +LqIi9E6zmL3YvdGzpYlkFRnRrqwEtWYbGBf3znO250S56CCWH2UCAwEAAaNoMGYw +DgYDVR0PAQH/BAQDAgKkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEwEB/wQF +MAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAAAAAAAAAA +AAAAAAEwDQYJKoZIhvcNAQELBQADgYEAbZtDS2dVuBYvb+MnolWnCNqvw1w5Gtgi +NmvQQPOMgM3m+oQSCPRTNGSg25e1Qbo7bgQDv8ZTnq8FgOJ/rbkyERw2JckkHpD4 +n4qcK27WkEDBtQFlPihIM8hLIuzWoi/9wygiElTy/tVL3y7fGCvY2/k1KBthtZGF +tN8URjVmyEo= -----END CERTIFICATE-----`) // localhostKey is the private key for localhostCert. -var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- -MIIBOwIBAAJBAM0u/mNXKkhAzNsFkwKZPSpC4lZZaePQ55IyaJv3ovMM2smvthnl -qaUfVKVmz7FFwLP9csX6vGtvkZg1uWAtvfkCAwEAAQJART2qkxODLUbQ2siSx7m2 -rmBLyR/7X+nLe8aPDrMOxj3heDNl4YlaAYLexbcY8d7VDfCRBKYoAOP0UCP1Vhuf -UQIhAO6PEI55K3SpNIdc2k5f0xz+9rodJCYzu51EwWX7r8ufAiEA3C9EkLiU2NuK -3L3DHCN5IlUSN1Nr/lw8NIt50Yorj2cCIQCDw1VbvCV6bDLtSSXzAA51B4ZzScE7 -sHtB5EYF9Dwm9QIhAJuCquuH4mDzVjUntXjXOQPdj7sRqVGCNWdrJwOukat7AiAy -LXLEwb77DIPoI5ZuaXQC+MnyyJj1ExC9RFcGz+bexA== +var localhostKey = []byte(` +-----BEGIN RSA PRIVATE KEY----- +MIICXgIBAAKBgQDScVtBC45ayNsa16NylbPXnc6XOJkzhtWMn2Niu43DmfZHLq5h +AB9+Gpok4icKaZxV7ayImCWzIf1pGHq8qKhsFshRddRTUAF3np5sDOW3QuhsuXHu +lkQzLfQuoiL0TrOYvdi90bOliWQVGdGurAS1ZhsYF/fOc7bnRLnoIJYfZQIDAQAB +AoGBAMst7OgpKyFV6c3JwyI/jWqxDySL3caU+RuTTBaodKAUx2ZEmNJIlx9eudLA +kucHvoxsM/eRxlxkhdFxdBcwU6J+zqooTnhu/FE3jhrT1lPrbhfGhyKnUrB0KKMM +VY3IQZyiehpxaeXAwoAou6TbWoTpl9t8ImAqAMY8hlULCUqlAkEA+9+Ry5FSYK/m +542LujIcCaIGoG1/Te6Sxr3hsPagKC2rH20rDLqXwEedSFOpSS0vpzlPAzy/6Rbb +PHTJUhNdwwJBANXkA+TkMdbJI5do9/mn//U0LfrCR9NkcoYohxfKz8JuhgRQxzF2 +6jpo3q7CdTuuRixLWVfeJzcrAyNrVcBq87cCQFkTCtOMNC7fZnCTPUv+9q1tcJyB +vNjJu3yvoEZeIeuzouX9TJE21/33FaeDdsXbRhQEj23cqR38qFHsF1qAYNMCQQDP 
+QXLEiJoClkR2orAmqjPLVhR3t2oB3INcnEjLNSq8LHyQEfXyaFfu4U9l5+fRPL2i +jiC0k/9L5dHUsF0XZothAkEA23ddgRs+Id/HxtojqqUT27B8MT/IGNrYsp4DvS/c +qgkeluku4GjxRlDMBuXk94xOBEinUs+p/hwP1Alll80Tpg== -----END RSA PRIVATE KEY-----`) diff --git a/src/net/sock_posix.go b/src/net/sock_posix.go index 677e423ffaff2..00ee61883cdd2 100644 --- a/src/net/sock_posix.go +++ b/src/net/sock_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows package net @@ -54,7 +54,7 @@ func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only if laddr != nil && raddr == nil { switch sotype { case syscall.SOCK_STREAM, syscall.SOCK_SEQPACKET: - if err := fd.listenStream(laddr, listenerBacklog, ctrlFn); err != nil { + if err := fd.listenStream(laddr, listenerBacklog(), ctrlFn); err != nil { fd.Close() return nil, err } diff --git a/src/net/sock_stub.go b/src/net/sock_stub.go index 38fc819199ccb..bbce61b68799e 100644 --- a/src/net/sock_stub.go +++ b/src/net/sock_stub.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build nacl js,wasm solaris +// +build aix nacl js,wasm solaris package net diff --git a/src/net/sockaddr_posix.go b/src/net/sockaddr_posix.go index 4b8699d1f56d3..ae83b5ab1965f 100644 --- a/src/net/sockaddr_posix.go +++ b/src/net/sockaddr_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net diff --git a/src/net/sockopt_aix.go b/src/net/sockopt_aix.go new file mode 100644 index 0000000000000..b49c4d5c7c720 --- /dev/null +++ b/src/net/sockopt_aix.go @@ -0,0 +1,36 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import ( + "os" + "syscall" +) + +func setDefaultSockopts(s, family, sotype int, ipv6only bool) error { + if family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) +} + +func setDefaultListenerSockopts(s int) error { + // Allow reuse of recently-used addresses. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)) +} + +func setDefaultMulticastSockopts(s int) error { + // Allow multicast UDP and raw IP datagram sockets to listen + // concurrently across multiple listeners. + if err := syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1); err != nil { + return os.NewSyscallError("setsockopt", err) + } + // Allow reuse of recently-used ports. 
+ return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_REUSEPORT, 1)) +} diff --git a/src/net/sockopt_posix.go b/src/net/sockopt_posix.go index e8af84f418ebe..de7255667f751 100644 --- a/src/net/sockopt_posix.go +++ b/src/net/sockopt_posix.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package net import ( + "internal/bytealg" "runtime" "syscall" ) @@ -94,7 +95,7 @@ func setIPv4MreqToInterface(mreq *syscall.IPMreq, ifi *Interface) error { } } done: - if bytesEqual(mreq.Multiaddr[:], IPv4zero.To4()) { + if bytealg.Equal(mreq.Multiaddr[:], IPv4zero.To4()) { return errNoSuchMulticastInterface } return nil diff --git a/src/net/sockoptip_bsdvar.go b/src/net/sockoptip_bsdvar.go index 956010139878f..20a6dc9549774 100644 --- a/src/net/sockoptip_bsdvar.go +++ b/src/net/sockoptip_bsdvar.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd solaris +// +build aix darwin dragonfly freebsd netbsd openbsd solaris package net diff --git a/src/net/sockoptip_posix.go b/src/net/sockoptip_posix.go index 5d3077e552dcc..b14963ff32eec 100644 --- a/src/net/sockoptip_posix.go +++ b/src/net/sockoptip_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package net diff --git a/src/net/splice_linux.go b/src/net/splice_linux.go index b055f9335185d..69c3f65770f8a 100644 --- a/src/net/splice_linux.go +++ b/src/net/splice_linux.go @@ -11,7 +11,7 @@ import ( // splice transfers data from r to c using the splice system call to minimize // copies from and to userspace. c must be a TCP connection. Currently, splice -// is only enabled if r is also a TCP connection. +// is only enabled if r is a TCP or a stream-oriented Unix connection. // // If splice returns handled == false, it has performed no work. func splice(c *netFD, r io.Reader) (written int64, err error, handled bool) { @@ -23,11 +23,20 @@ func splice(c *netFD, r io.Reader) (written int64, err error, handled bool) { return 0, nil, true } } - s, ok := r.(*TCPConn) - if !ok { + + var s *netFD + if tc, ok := r.(*TCPConn); ok { + s = tc.fd + } else if uc, ok := r.(*UnixConn); ok { + if uc.fd.net != "unix" { + return 0, nil, false + } + s = uc.fd + } else { return 0, nil, false } - written, handled, sc, err := poll.Splice(&c.pfd, &s.fd.pfd, remain) + + written, handled, sc, err := poll.Splice(&c.pfd, &s.pfd, remain) if lr != nil { lr.N -= written } diff --git a/src/net/splice_test.go b/src/net/splice_test.go index 44a5c00ba872e..4c300172c5e9e 100644 --- a/src/net/splice_test.go +++ b/src/net/splice_test.go @@ -7,243 +7,156 @@ package net import ( - "bytes" - "fmt" "io" "io/ioutil" + "log" + "os" + "os/exec" + "strconv" "sync" "testing" + "time" ) func TestSplice(t *testing.T) { - t.Run("simple", testSpliceSimple) - t.Run("multipleWrite", testSpliceMultipleWrite) - t.Run("big", testSpliceBig) - t.Run("honorsLimitedReader", testSpliceHonorsLimitedReader) - t.Run("readerAtEOF", testSpliceReaderAtEOF) - t.Run("issue25985", testSpliceIssue25985) -} - -func testSpliceSimple(t *testing.T) { - srv, err := newSpliceTestServer() - 
if err != nil { - t.Fatal(err) - } - defer srv.Close() - copyDone := srv.Copy() - msg := []byte("splice test") - if _, err := srv.Write(msg); err != nil { - t.Fatal(err) - } - got := make([]byte, len(msg)) - if _, err := io.ReadFull(srv, got); err != nil { - t.Fatal(err) - } - if !bytes.Equal(got, msg) { - t.Errorf("got %q, wrote %q", got, msg) - } - srv.CloseWrite() - srv.CloseRead() - if err := <-copyDone; err != nil { - t.Errorf("splice: %v", err) + t.Run("tcp-to-tcp", func(t *testing.T) { testSplice(t, "tcp", "tcp") }) + if !testableNetwork("unixgram") { + t.Skip("skipping unix-to-tcp tests") } + t.Run("unix-to-tcp", func(t *testing.T) { testSplice(t, "unix", "tcp") }) + t.Run("no-unixpacket", testSpliceNoUnixpacket) + t.Run("no-unixgram", testSpliceNoUnixgram) } -func testSpliceMultipleWrite(t *testing.T) { - srv, err := newSpliceTestServer() - if err != nil { - t.Fatal(err) - } - defer srv.Close() - copyDone := srv.Copy() - msg1 := []byte("splice test part 1 ") - msg2 := []byte(" splice test part 2") - if _, err := srv.Write(msg1); err != nil { - t.Fatalf("Write: %v", err) - } - if _, err := srv.Write(msg2); err != nil { - t.Fatal(err) - } - got := make([]byte, len(msg1)+len(msg2)) - if _, err := io.ReadFull(srv, got); err != nil { - t.Fatal(err) - } - want := append(msg1, msg2...) 
- if !bytes.Equal(got, want) { - t.Errorf("got %q, wrote %q", got, want) - } - srv.CloseWrite() - srv.CloseRead() - if err := <-copyDone; err != nil { - t.Errorf("splice: %v", err) - } +func testSplice(t *testing.T, upNet, downNet string) { + t.Run("simple", spliceTestCase{upNet, downNet, 128, 128, 0}.test) + t.Run("multipleWrite", spliceTestCase{upNet, downNet, 4096, 1 << 20, 0}.test) + t.Run("big", spliceTestCase{upNet, downNet, 5 << 20, 1 << 30, 0}.test) + t.Run("honorsLimitedReader", spliceTestCase{upNet, downNet, 4096, 1 << 20, 1 << 10}.test) + t.Run("updatesLimitedReaderN", spliceTestCase{upNet, downNet, 1024, 4096, 4096 + 100}.test) + t.Run("limitedReaderAtLimit", spliceTestCase{upNet, downNet, 32, 128, 128}.test) + t.Run("readerAtEOF", func(t *testing.T) { testSpliceReaderAtEOF(t, upNet, downNet) }) + t.Run("issue25985", func(t *testing.T) { testSpliceIssue25985(t, upNet, downNet) }) } -func testSpliceBig(t *testing.T) { - // The maximum amount of data that internal/poll.Splice will use in a - // splice(2) call is 4 << 20. Use a bigger size here so that we test an - // amount that doesn't fit in a single call. 
- size := 5 << 20 - srv, err := newSpliceTestServer() - if err != nil { - t.Fatal(err) - } - defer srv.Close() - big := make([]byte, size) - copyDone := srv.Copy() - type readResult struct { - b []byte - err error - } - readDone := make(chan readResult) - go func() { - got := make([]byte, len(big)) - _, err := io.ReadFull(srv, got) - readDone <- readResult{got, err} - }() - if _, err := srv.Write(big); err != nil { - t.Fatal(err) - } - res := <-readDone - if res.err != nil { - t.Fatal(res.err) - } - got := res.b - if !bytes.Equal(got, big) { - t.Errorf("input and output differ") - } - srv.CloseWrite() - srv.CloseRead() - if err := <-copyDone; err != nil { - t.Errorf("splice: %v", err) - } -} +type spliceTestCase struct { + upNet, downNet string -func testSpliceHonorsLimitedReader(t *testing.T) { - t.Run("stopsAfterN", testSpliceStopsAfterN) - t.Run("updatesN", testSpliceUpdatesN) + chunkSize, totalSize int + limitReadSize int } -func testSpliceStopsAfterN(t *testing.T) { - clientUp, serverUp, err := spliceTestSocketPair("tcp") +func (tc spliceTestCase) test(t *testing.T) { + clientUp, serverUp, err := spliceTestSocketPair(tc.upNet) if err != nil { t.Fatal(err) } - defer clientUp.Close() defer serverUp.Close() - clientDown, serverDown, err := spliceTestSocketPair("tcp") + cleanup, err := startSpliceClient(clientUp, "w", tc.chunkSize, tc.totalSize) if err != nil { t.Fatal(err) } - defer clientDown.Close() - defer serverDown.Close() - count := 128 - copyDone := make(chan error) - lr := &io.LimitedReader{ - N: int64(count), - R: serverUp, - } - go func() { - _, err := io.Copy(serverDown, lr) - serverDown.Close() - copyDone <- err - }() - msg := make([]byte, 2*count) - if _, err := clientUp.Write(msg); err != nil { - t.Fatal(err) - } - clientUp.Close() - var buf bytes.Buffer - if _, err := io.Copy(&buf, clientDown); err != nil { - t.Fatal(err) - } - if buf.Len() != count { - t.Errorf("splice transferred %d bytes, want to stop after %d", buf.Len(), count) - } - 
clientDown.Close() - if err := <-copyDone; err != nil { - t.Errorf("splice: %v", err) - } -} - -func testSpliceUpdatesN(t *testing.T) { - clientUp, serverUp, err := spliceTestSocketPair("tcp") + defer cleanup() + clientDown, serverDown, err := spliceTestSocketPair(tc.downNet) if err != nil { t.Fatal(err) } - defer clientUp.Close() - defer serverUp.Close() - clientDown, serverDown, err := spliceTestSocketPair("tcp") + defer serverDown.Close() + cleanup, err = startSpliceClient(clientDown, "r", tc.chunkSize, tc.totalSize) if err != nil { t.Fatal(err) } - defer clientDown.Close() - defer serverDown.Close() - count := 128 - copyDone := make(chan error) - lr := &io.LimitedReader{ - N: int64(100 + count), - R: serverUp, - } - go func() { - _, err := io.Copy(serverDown, lr) - copyDone <- err - }() - msg := make([]byte, count) - if _, err := clientUp.Write(msg); err != nil { - t.Fatal(err) + defer cleanup() + var ( + r io.Reader = serverUp + size = tc.totalSize + ) + if tc.limitReadSize > 0 { + if tc.limitReadSize < size { + size = tc.limitReadSize + } + + r = &io.LimitedReader{ + N: int64(tc.limitReadSize), + R: serverUp, + } + defer serverUp.Close() } - clientUp.Close() - got := make([]byte, count) - if _, err := io.ReadFull(clientDown, got); err != nil { + n, err := io.Copy(serverDown, r) + serverDown.Close() + if err != nil { t.Fatal(err) } - clientDown.Close() - if err := <-copyDone; err != nil { - t.Errorf("splice: %v", err) + if want := int64(size); want != n { + t.Errorf("want %d bytes spliced, got %d", want, n) } - wantN := int64(100) - if lr.N != wantN { - t.Errorf("lr.N = %d, want %d", lr.N, wantN) + + if tc.limitReadSize > 0 { + wantN := 0 + if tc.limitReadSize > size { + wantN = tc.limitReadSize - size + } + + if n := r.(*io.LimitedReader).N; n != int64(wantN) { + t.Errorf("r.N = %d, want %d", n, wantN) + } } } -func testSpliceReaderAtEOF(t *testing.T) { - clientUp, serverUp, err := spliceTestSocketPair("tcp") +func testSpliceReaderAtEOF(t *testing.T, upNet, 
downNet string) { + clientUp, serverUp, err := spliceTestSocketPair(upNet) if err != nil { t.Fatal(err) } defer clientUp.Close() - defer serverUp.Close() - clientDown, serverDown, err := spliceTestSocketPair("tcp") + clientDown, serverDown, err := spliceTestSocketPair(downNet) if err != nil { t.Fatal(err) } defer clientDown.Close() - defer serverDown.Close() serverUp.Close() - _, err, handled := splice(serverDown.(*TCPConn).fd, serverUp) - if !handled { - t.Errorf("closed connection: got err = %v, handled = %t, want handled = true", err, handled) - } - lr := &io.LimitedReader{ - N: 0, - R: serverUp, + + // We'd like to call net.splice here and check the handled return + // value, but we disable splice on old Linux kernels. + // + // In that case, poll.Splice and net.splice return a non-nil error + // and handled == false. We'd ideally like to see handled == true + // because the source reader is at EOF, but if we're running on an old + // kernel, and splice is disabled, we won't see EOF from net.splice, + // because we won't touch the reader at all. + // + // Trying to untangle the errors from net.splice and match them + // against the errors created by the poll package would be brittle, + // so this is a higher level test. + // + // The following ReadFrom should return immediately, regardless of + // whether splice is disabled or not. The other side should then + // get a goodbye signal. Test for the goodbye signal. 
+ msg := "bye" + go func() { + serverDown.(io.ReaderFrom).ReadFrom(serverUp) + io.WriteString(serverDown, msg) + serverDown.Close() + }() + + buf := make([]byte, 3) + _, err = io.ReadFull(clientDown, buf) + if err != nil { + t.Errorf("clientDown: %v", err) } - _, err, handled = splice(serverDown.(*TCPConn).fd, lr) - if !handled { - t.Errorf("exhausted LimitedReader: got err = %v, handled = %t, want handled = true", err, handled) + if string(buf) != msg { + t.Errorf("clientDown got %q, want %q", buf, msg) } } -func testSpliceIssue25985(t *testing.T) { - front, err := newLocalListener("tcp") +func testSpliceIssue25985(t *testing.T, upNet, downNet string) { + front, err := newLocalListener(upNet) if err != nil { t.Fatal(err) } defer front.Close() - back, err := newLocalListener("tcp") + back, err := newLocalListener(downNet) if err != nil { t.Fatal(err) } @@ -257,7 +170,7 @@ func testSpliceIssue25985(t *testing.T) { if err != nil { return } - dst, err := Dial("tcp", back.Addr().String()) + dst, err := Dial(downNet, back.Addr().String()) if err != nil { return } @@ -275,7 +188,7 @@ func testSpliceIssue25985(t *testing.T) { go proxy() - toFront, err := Dial("tcp", front.Addr().String()) + toFront, err := Dial(upNet, front.Addr().String()) if err != nil { t.Fatal(err) } @@ -297,166 +210,121 @@ func testSpliceIssue25985(t *testing.T) { wg.Wait() } -func BenchmarkTCPReadFrom(b *testing.B) { - testHookUninstaller.Do(uninstallTestHooks) - - var chunkSizes []int - for i := uint(10); i <= 20; i++ { - chunkSizes = append(chunkSizes, 1< totalSize { + buf = buf[:totalSize-count] + } + + var err error + if n, err = fn(buf); err != nil { + return + } + } +} diff --git a/src/net/sys_cloexec.go b/src/net/sys_cloexec.go index 7ab1407de92f2..e97fb21a1f449 100644 --- a/src/net/sys_cloexec.go +++ b/src/net/sys_cloexec.go @@ -5,7 +5,7 @@ // This file implements sysSocket and accept for platforms that do not // provide a fast path for setting SetNonblock and CloseOnExec. 
-// +build darwin nacl solaris +// +build aix darwin nacl solaris package net diff --git a/src/net/tcpsock_posix.go b/src/net/tcpsock_posix.go index 936a255b88d4c..64e71bf97c3cb 100644 --- a/src/net/tcpsock_posix.go +++ b/src/net/tcpsock_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net diff --git a/src/net/tcpsock_test.go b/src/net/tcpsock_test.go index c2f26b1770765..36d2ccb09a9b7 100644 --- a/src/net/tcpsock_test.go +++ b/src/net/tcpsock_test.go @@ -796,3 +796,34 @@ func TestCopyPipeIntoTCP(t *testing.T) { t.Fatal(err) } } + +func BenchmarkSetReadDeadline(b *testing.B) { + ln, err := newLocalListener("tcp") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + var serv Conn + done := make(chan error) + go func() { + var err error + serv, err = ln.Accept() + done <- err + }() + c, err := Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer c.Close() + if err := <-done; err != nil { + b.Fatal(err) + } + defer serv.Close() + c.SetWriteDeadline(time.Now().Add(2 * time.Hour)) + deadline := time.Now().Add(time.Hour) + b.ResetTimer() + for i := 0; i < b.N; i++ { + c.SetReadDeadline(deadline) + deadline = deadline.Add(1) + } +} diff --git a/src/net/tcpsockopt_posix.go b/src/net/tcpsockopt_posix.go index 9cef434b6fa26..5e00ba15647f2 100644 --- a/src/net/tcpsockopt_posix.go +++ b/src/net/tcpsockopt_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows package net diff --git a/src/net/tcpsockopt_unix.go b/src/net/tcpsockopt_unix.go index c1df6605be4f6..d5892588feaab 100644 --- a/src/net/tcpsockopt_unix.go +++ b/src/net/tcpsockopt_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build freebsd linux netbsd +// +build aix freebsd linux netbsd package net diff --git a/src/net/testdata/Mark.Twain-Tom.Sawyer.txt b/src/net/testdata/Mark.Twain-Tom.Sawyer.txt deleted file mode 100644 index c9106fd522cec..0000000000000 --- a/src/net/testdata/Mark.Twain-Tom.Sawyer.txt +++ /dev/null @@ -1,8465 +0,0 @@ -Produced by David Widger. The previous edition was updated by Jose -Menendez. - - - - - - THE ADVENTURES OF TOM SAWYER - BY - MARK TWAIN - (Samuel Langhorne Clemens) - - - - - P R E F A C E - -MOST of the adventures recorded in this book really occurred; one or -two were experiences of my own, the rest those of boys who were -schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but -not from an individual--he is a combination of the characteristics of -three boys whom I knew, and therefore belongs to the composite order of -architecture. - -The odd superstitions touched upon were all prevalent among children -and slaves in the West at the period of this story--that is to say, -thirty or forty years ago. - -Although my book is intended mainly for the entertainment of boys and -girls, I hope it will not be shunned by men and women on that account, -for part of my plan has been to try to pleasantly remind adults of what -they once were themselves, and of how they felt and thought and talked, -and what queer enterprises they sometimes engaged in. - - THE AUTHOR. - -HARTFORD, 1876. - - - - T O M S A W Y E R - - - -CHAPTER I - -"TOM!" - -No answer. - -"TOM!" - -No answer. 
- -"What's gone with that boy, I wonder? You TOM!" - -No answer. - -The old lady pulled her spectacles down and looked over them about the -room; then she put them up and looked out under them. She seldom or -never looked THROUGH them for so small a thing as a boy; they were her -state pair, the pride of her heart, and were built for "style," not -service--she could have seen through a pair of stove-lids just as well. -She looked perplexed for a moment, and then said, not fiercely, but -still loud enough for the furniture to hear: - -"Well, I lay if I get hold of you I'll--" - -She did not finish, for by this time she was bending down and punching -under the bed with the broom, and so she needed breath to punctuate the -punches with. She resurrected nothing but the cat. - -"I never did see the beat of that boy!" - -She went to the open door and stood in it and looked out among the -tomato vines and "jimpson" weeds that constituted the garden. No Tom. -So she lifted up her voice at an angle calculated for distance and -shouted: - -"Y-o-u-u TOM!" - -There was a slight noise behind her and she turned just in time to -seize a small boy by the slack of his roundabout and arrest his flight. - -"There! I might 'a' thought of that closet. What you been doing in -there?" - -"Nothing." - -"Nothing! Look at your hands. And look at your mouth. What IS that -truck?" - -"I don't know, aunt." - -"Well, I know. It's jam--that's what it is. Forty times I've said if -you didn't let that jam alone I'd skin you. Hand me that switch." - -The switch hovered in the air--the peril was desperate-- - -"My! Look behind you, aunt!" - -The old lady whirled round, and snatched her skirts out of danger. The -lad fled on the instant, scrambled up the high board-fence, and -disappeared over it. - -His aunt Polly stood surprised a moment, and then broke into a gentle -laugh. - -"Hang the boy, can't I never learn anything? 
Ain't he played me tricks -enough like that for me to be looking out for him by this time? But old -fools is the biggest fools there is. Can't learn an old dog new tricks, -as the saying is. But my goodness, he never plays them alike, two days, -and how is a body to know what's coming? He 'pears to know just how -long he can torment me before I get my dander up, and he knows if he -can make out to put me off for a minute or make me laugh, it's all down -again and I can't hit him a lick. I ain't doing my duty by that boy, -and that's the Lord's truth, goodness knows. Spare the rod and spile -the child, as the Good Book says. I'm a laying up sin and suffering for -us both, I know. He's full of the Old Scratch, but laws-a-me! he's my -own dead sister's boy, poor thing, and I ain't got the heart to lash -him, somehow. Every time I let him off, my conscience does hurt me so, -and every time I hit him my old heart most breaks. Well-a-well, man -that is born of woman is of few days and full of trouble, as the -Scripture says, and I reckon it's so. He'll play hookey this evening, * -and [* Southwestern for "afternoon"] I'll just be obleeged to make him -work, to-morrow, to punish him. It's mighty hard to make him work -Saturdays, when all the boys is having holiday, but he hates work more -than he hates anything else, and I've GOT to do some of my duty by him, -or I'll be the ruination of the child." - -Tom did play hookey, and he had a very good time. He got back home -barely in season to help Jim, the small colored boy, saw next-day's -wood and split the kindlings before supper--at least he was there in -time to tell his adventures to Jim while Jim did three-fourths of the -work. Tom's younger brother (or rather half-brother) Sid was already -through with his part of the work (picking up chips), for he was a -quiet boy, and had no adventurous, troublesome ways. 
- -While Tom was eating his supper, and stealing sugar as opportunity -offered, Aunt Polly asked him questions that were full of guile, and -very deep--for she wanted to trap him into damaging revealments. Like -many other simple-hearted souls, it was her pet vanity to believe she -was endowed with a talent for dark and mysterious diplomacy, and she -loved to contemplate her most transparent devices as marvels of low -cunning. Said she: - -"Tom, it was middling warm in school, warn't it?" - -"Yes'm." - -"Powerful warm, warn't it?" - -"Yes'm." - -"Didn't you want to go in a-swimming, Tom?" - -A bit of a scare shot through Tom--a touch of uncomfortable suspicion. -He searched Aunt Polly's face, but it told him nothing. So he said: - -"No'm--well, not very much." - -The old lady reached out her hand and felt Tom's shirt, and said: - -"But you ain't too warm now, though." And it flattered her to reflect -that she had discovered that the shirt was dry without anybody knowing -that that was what she had in her mind. But in spite of her, Tom knew -where the wind lay, now. So he forestalled what might be the next move: - -"Some of us pumped on our heads--mine's damp yet. See?" - -Aunt Polly was vexed to think she had overlooked that bit of -circumstantial evidence, and missed a trick. Then she had a new -inspiration: - -"Tom, you didn't have to undo your shirt collar where I sewed it, to -pump on your head, did you? Unbutton your jacket!" - -The trouble vanished out of Tom's face. He opened his jacket. His -shirt collar was securely sewed. - -"Bother! Well, go 'long with you. I'd made sure you'd played hookey -and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a -singed cat, as the saying is--better'n you look. THIS time." - -She was half sorry her sagacity had miscarried, and half glad that Tom -had stumbled into obedient conduct for once. - -But Sidney said: - -"Well, now, if I didn't think you sewed his collar with white thread, -but it's black." 
- -"Why, I did sew it with white! Tom!" - -But Tom did not wait for the rest. As he went out at the door he said: - -"Siddy, I'll lick you for that." - -In a safe place Tom examined two large needles which were thrust into -the lapels of his jacket, and had thread bound about them--one needle -carried white thread and the other black. He said: - -"She'd never noticed if it hadn't been for Sid. Confound it! sometimes -she sews it with white, and sometimes she sews it with black. I wish to -geeminy she'd stick to one or t'other--I can't keep the run of 'em. But -I bet you I'll lam Sid for that. I'll learn him!" - -He was not the Model Boy of the village. He knew the model boy very -well though--and loathed him. - -Within two minutes, or even less, he had forgotten all his troubles. -Not because his troubles were one whit less heavy and bitter to him -than a man's are to a man, but because a new and powerful interest bore -them down and drove them out of his mind for the time--just as men's -misfortunes are forgotten in the excitement of new enterprises. This -new interest was a valued novelty in whistling, which he had just -acquired from a negro, and he was suffering to practise it undisturbed. -It consisted in a peculiar bird-like turn, a sort of liquid warble, -produced by touching the tongue to the roof of the mouth at short -intervals in the midst of the music--the reader probably remembers how -to do it, if he has ever been a boy. Diligence and attention soon gave -him the knack of it, and he strode down the street with his mouth full -of harmony and his soul full of gratitude. He felt much as an -astronomer feels who has discovered a new planet--no doubt, as far as -strong, deep, unalloyed pleasure is concerned, the advantage was with -the boy, not the astronomer. - -The summer evenings were long. It was not dark, yet. Presently Tom -checked his whistle. A stranger was before him--a boy a shade larger -than himself. 
A new-comer of any age or either sex was an impressive -curiosity in the poor little shabby village of St. Petersburg. This boy -was well dressed, too--well dressed on a week-day. This was simply -astounding. His cap was a dainty thing, his close-buttoned blue cloth -roundabout was new and natty, and so were his pantaloons. He had shoes -on--and it was only Friday. He even wore a necktie, a bright bit of -ribbon. He had a citified air about him that ate into Tom's vitals. The -more Tom stared at the splendid marvel, the higher he turned up his -nose at his finery and the shabbier and shabbier his own outfit seemed -to him to grow. Neither boy spoke. If one moved, the other moved--but -only sidewise, in a circle; they kept face to face and eye to eye all -the time. Finally Tom said: - -"I can lick you!" - -"I'd like to see you try it." - -"Well, I can do it." - -"No you can't, either." - -"Yes I can." - -"No you can't." - -"I can." - -"You can't." - -"Can!" - -"Can't!" - -An uncomfortable pause. Then Tom said: - -"What's your name?" - -"'Tisn't any of your business, maybe." - -"Well I 'low I'll MAKE it my business." - -"Well why don't you?" - -"If you say much, I will." - -"Much--much--MUCH. There now." - -"Oh, you think you're mighty smart, DON'T you? I could lick you with -one hand tied behind me, if I wanted to." - -"Well why don't you DO it? You SAY you can do it." - -"Well I WILL, if you fool with me." - -"Oh yes--I've seen whole families in the same fix." - -"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" - -"You can lump that hat if you don't like it. I dare you to knock it -off--and anybody that'll take a dare will suck eggs." - -"You're a liar!" - -"You're another." - -"You're a fighting liar and dasn't take it up." - -"Aw--take a walk!" - -"Say--if you give me much more of your sass I'll take and bounce a -rock off'n your head." - -"Oh, of COURSE you will." - -"Well I WILL." - -"Well why don't you DO it then? 
What do you keep SAYING you will for? -Why don't you DO it? It's because you're afraid." - -"I AIN'T afraid." - -"You are." - -"I ain't." - -"You are." - -Another pause, and more eying and sidling around each other. Presently -they were shoulder to shoulder. Tom said: - -"Get away from here!" - -"Go away yourself!" - -"I won't." - -"I won't either." - -So they stood, each with a foot placed at an angle as a brace, and -both shoving with might and main, and glowering at each other with -hate. But neither could get an advantage. After struggling till both -were hot and flushed, each relaxed his strain with watchful caution, -and Tom said: - -"You're a coward and a pup. I'll tell my big brother on you, and he -can thrash you with his little finger, and I'll make him do it, too." - -"What do I care for your big brother? I've got a brother that's bigger -than he is--and what's more, he can throw him over that fence, too." -[Both brothers were imaginary.] - -"That's a lie." - -"YOUR saying so don't make it so." - -Tom drew a line in the dust with his big toe, and said: - -"I dare you to step over that, and I'll lick you till you can't stand -up. Anybody that'll take a dare will steal sheep." - -The new boy stepped over promptly, and said: - -"Now you said you'd do it, now let's see you do it." - -"Don't you crowd me now; you better look out." - -"Well, you SAID you'd do it--why don't you do it?" - -"By jingo! for two cents I WILL do it." - -The new boy took two broad coppers out of his pocket and held them out -with derision. Tom struck them to the ground. In an instant both boys -were rolling and tumbling in the dirt, gripped together like cats; and -for the space of a minute they tugged and tore at each other's hair and -clothes, punched and scratched each other's nose, and covered -themselves with dust and glory. Presently the confusion took form, and -through the fog of battle Tom appeared, seated astride the new boy, and -pounding him with his fists. "Holler 'nuff!" 
said he. - -The boy only struggled to free himself. He was crying--mainly from rage. - -"Holler 'nuff!"--and the pounding went on. - -At last the stranger got out a smothered "'Nuff!" and Tom let him up -and said: - -"Now that'll learn you. Better look out who you're fooling with next -time." - -The new boy went off brushing the dust from his clothes, sobbing, -snuffling, and occasionally looking back and shaking his head and -threatening what he would do to Tom the "next time he caught him out." -To which Tom responded with jeers, and started off in high feather, and -as soon as his back was turned the new boy snatched up a stone, threw -it and hit him between the shoulders and then turned tail and ran like -an antelope. Tom chased the traitor home, and thus found out where he -lived. He then held a position at the gate for some time, daring the -enemy to come outside, but the enemy only made faces at him through the -window and declined. At last the enemy's mother appeared, and called -Tom a bad, vicious, vulgar child, and ordered him away. So he went -away; but he said he "'lowed" to "lay" for that boy. - -He got home pretty late that night, and when he climbed cautiously in -at the window, he uncovered an ambuscade, in the person of his aunt; -and when she saw the state his clothes were in her resolution to turn -his Saturday holiday into captivity at hard labor became adamantine in -its firmness. - - - -CHAPTER II - -SATURDAY morning was come, and all the summer world was bright and -fresh, and brimming with life. There was a song in every heart; and if -the heart was young the music issued at the lips. There was cheer in -every face and a spring in every step. The locust-trees were in bloom -and the fragrance of the blossoms filled the air. Cardiff Hill, beyond -the village and above it, was green with vegetation and it lay just far -enough away to seem a Delectable Land, dreamy, reposeful, and inviting. 
- -Tom appeared on the sidewalk with a bucket of whitewash and a -long-handled brush. He surveyed the fence, and all gladness left him and -a deep melancholy settled down upon his spirit. Thirty yards of board -fence nine feet high. Life to him seemed hollow, and existence but a -burden. Sighing, he dipped his brush and passed it along the topmost -plank; repeated the operation; did it again; compared the insignificant -whitewashed streak with the far-reaching continent of unwhitewashed -fence, and sat down on a tree-box discouraged. Jim came skipping out at -the gate with a tin pail, and singing Buffalo Gals. Bringing water from -the town pump had always been hateful work in Tom's eyes, before, but -now it did not strike him so. He remembered that there was company at -the pump. White, mulatto, and negro boys and girls were always there -waiting their turns, resting, trading playthings, quarrelling, -fighting, skylarking. And he remembered that although the pump was only -a hundred and fifty yards off, Jim never got back with a bucket of -water under an hour--and even then somebody generally had to go after -him. Tom said: - -"Say, Jim, I'll fetch the water if you'll whitewash some." - -Jim shook his head and said: - -"Can't, Mars Tom. Ole missis, she tole me I got to go an' git dis -water an' not stop foolin' roun' wid anybody. She say she spec' Mars -Tom gwine to ax me to whitewash, an' so she tole me go 'long an' 'tend -to my own business--she 'lowed SHE'D 'tend to de whitewashin'." - -"Oh, never you mind what she said, Jim. That's the way she always -talks. Gimme the bucket--I won't be gone only a a minute. SHE won't -ever know." - -"Oh, I dasn't, Mars Tom. Ole missis she'd take an' tar de head off'n -me. 'Deed she would." - -"SHE! She never licks anybody--whacks 'em over the head with her -thimble--and who cares for that, I'd like to know. She talks awful, but -talk don't hurt--anyways it don't if she don't cry. Jim, I'll give you -a marvel. 
I'll give you a white alley!" - -Jim began to waver. - -"White alley, Jim! And it's a bully taw." - -"My! Dat's a mighty gay marvel, I tell you! But Mars Tom I's powerful -'fraid ole missis--" - -"And besides, if you will I'll show you my sore toe." - -Jim was only human--this attraction was too much for him. He put down -his pail, took the white alley, and bent over the toe with absorbing -interest while the bandage was being unwound. In another moment he was -flying down the street with his pail and a tingling rear, Tom was -whitewashing with vigor, and Aunt Polly was retiring from the field -with a slipper in her hand and triumph in her eye. - -But Tom's energy did not last. He began to think of the fun he had -planned for this day, and his sorrows multiplied. Soon the free boys -would come tripping along on all sorts of delicious expeditions, and -they would make a world of fun of him for having to work--the very -thought of it burnt him like fire. He got out his worldly wealth and -examined it--bits of toys, marbles, and trash; enough to buy an -exchange of WORK, maybe, but not half enough to buy so much as half an -hour of pure freedom. So he returned his straitened means to his -pocket, and gave up the idea of trying to buy the boys. At this dark -and hopeless moment an inspiration burst upon him! Nothing less than a -great, magnificent inspiration. - -He took up his brush and went tranquilly to work. Ben Rogers hove in -sight presently--the very boy, of all boys, whose ridicule he had been -dreading. Ben's gait was the hop-skip-and-jump--proof enough that his -heart was light and his anticipations high. He was eating an apple, and -giving a long, melodious whoop, at intervals, followed by a deep-toned -ding-dong-dong, ding-dong-dong, for he was personating a steamboat. 
As -he drew near, he slackened speed, took the middle of the street, leaned -far over to starboard and rounded to ponderously and with laborious -pomp and circumstance--for he was personating the Big Missouri, and -considered himself to be drawing nine feet of water. He was boat and -captain and engine-bells combined, so he had to imagine himself -standing on his own hurricane-deck giving the orders and executing them: - -"Stop her, sir! Ting-a-ling-ling!" The headway ran almost out, and he -drew up slowly toward the sidewalk. - -"Ship up to back! Ting-a-ling-ling!" His arms straightened and -stiffened down his sides. - -"Set her back on the stabboard! Ting-a-ling-ling! Chow! ch-chow-wow! -Chow!" His right hand, meantime, describing stately circles--for it was -representing a forty-foot wheel. - -"Let her go back on the labboard! Ting-a-lingling! Chow-ch-chow-chow!" -The left hand began to describe circles. - -"Stop the stabboard! Ting-a-ling-ling! Stop the labboard! Come ahead -on the stabboard! Stop her! Let your outside turn over slow! -Ting-a-ling-ling! Chow-ow-ow! Get out that head-line! LIVELY now! -Come--out with your spring-line--what're you about there! Take a turn -round that stump with the bight of it! Stand by that stage, now--let her -go! Done with the engines, sir! Ting-a-ling-ling! SH'T! S'H'T! SH'T!" -(trying the gauge-cocks). - -Tom went on whitewashing--paid no attention to the steamboat. Ben -stared a moment and then said: "Hi-YI! YOU'RE up a stump, ain't you!" - -No answer. Tom surveyed his last touch with the eye of an artist, then -he gave his brush another gentle sweep and surveyed the result, as -before. Ben ranged up alongside of him. Tom's mouth watered for the -apple, but he stuck to his work. Ben said: - -"Hello, old chap, you got to work, hey?" - -Tom wheeled suddenly and said: - -"Why, it's you, Ben! I warn't noticing." - -"Say--I'm going in a-swimming, I am. Don't you wish you could? But of -course you'd druther WORK--wouldn't you? 
Course you would!" - -Tom contemplated the boy a bit, and said: - -"What do you call work?" - -"Why, ain't THAT work?" - -Tom resumed his whitewashing, and answered carelessly: - -"Well, maybe it is, and maybe it ain't. All I know, is, it suits Tom -Sawyer." - -"Oh come, now, you don't mean to let on that you LIKE it?" - -The brush continued to move. - -"Like it? Well, I don't see why I oughtn't to like it. Does a boy get -a chance to whitewash a fence every day?" - -That put the thing in a new light. Ben stopped nibbling his apple. Tom -swept his brush daintily back and forth--stepped back to note the -effect--added a touch here and there--criticised the effect again--Ben -watching every move and getting more and more interested, more and more -absorbed. Presently he said: - -"Say, Tom, let ME whitewash a little." - -Tom considered, was about to consent; but he altered his mind: - -"No--no--I reckon it wouldn't hardly do, Ben. You see, Aunt Polly's -awful particular about this fence--right here on the street, you know ---but if it was the back fence I wouldn't mind and SHE wouldn't. Yes, -she's awful particular about this fence; it's got to be done very -careful; I reckon there ain't one boy in a thousand, maybe two -thousand, that can do it the way it's got to be done." - -"No--is that so? Oh come, now--lemme just try. Only just a little--I'd -let YOU, if you was me, Tom." - -"Ben, I'd like to, honest injun; but Aunt Polly--well, Jim wanted to -do it, but she wouldn't let him; Sid wanted to do it, and she wouldn't -let Sid. Now don't you see how I'm fixed? If you was to tackle this -fence and anything was to happen to it--" - -"Oh, shucks, I'll be just as careful. Now lemme try. Say--I'll give -you the core of my apple." - -"Well, here--No, Ben, now don't. I'm afeard--" - -"I'll give you ALL of it!" - -Tom gave up the brush with reluctance in his face, but alacrity in his -heart. 
And while the late steamer Big Missouri worked and sweated in -the sun, the retired artist sat on a barrel in the shade close by, -dangled his legs, munched his apple, and planned the slaughter of more -innocents. There was no lack of material; boys happened along every -little while; they came to jeer, but remained to whitewash. By the time -Ben was fagged out, Tom had traded the next chance to Billy Fisher for -a kite, in good repair; and when he played out, Johnny Miller bought in -for a dead rat and a string to swing it with--and so on, and so on, -hour after hour. And when the middle of the afternoon came, from being -a poor poverty-stricken boy in the morning, Tom was literally rolling -in wealth. He had besides the things before mentioned, twelve marbles, -part of a jews-harp, a piece of blue bottle-glass to look through, a -spool cannon, a key that wouldn't unlock anything, a fragment of chalk, -a glass stopper of a decanter, a tin soldier, a couple of tadpoles, six -fire-crackers, a kitten with only one eye, a brass doorknob, a -dog-collar--but no dog--the handle of a knife, four pieces of -orange-peel, and a dilapidated old window sash. - -He had had a nice, good, idle time all the while--plenty of company ---and the fence had three coats of whitewash on it! If he hadn't run out -of whitewash he would have bankrupted every boy in the village. - -Tom said to himself that it was not such a hollow world, after all. He -had discovered a great law of human action, without knowing it--namely, -that in order to make a man or a boy covet a thing, it is only -necessary to make the thing difficult to attain. If he had been a great -and wise philosopher, like the writer of this book, he would now have -comprehended that Work consists of whatever a body is OBLIGED to do, -and that Play consists of whatever a body is not obliged to do. 
And -this would help him to understand why constructing artificial flowers -or performing on a tread-mill is work, while rolling ten-pins or -climbing Mont Blanc is only amusement. There are wealthy gentlemen in -England who drive four-horse passenger-coaches twenty or thirty miles -on a daily line, in the summer, because the privilege costs them -considerable money; but if they were offered wages for the service, -that would turn it into work and then they would resign. - -The boy mused awhile over the substantial change which had taken place -in his worldly circumstances, and then wended toward headquarters to -report. - - - -CHAPTER III - -TOM presented himself before Aunt Polly, who was sitting by an open -window in a pleasant rearward apartment, which was bedroom, -breakfast-room, dining-room, and library, combined. The balmy summer -air, the restful quiet, the odor of the flowers, and the drowsing murmur -of the bees had had their effect, and she was nodding over her knitting ---for she had no company but the cat, and it was asleep in her lap. Her -spectacles were propped up on her gray head for safety. She had thought -that of course Tom had deserted long ago, and she wondered at seeing him -place himself in her power again in this intrepid way. He said: "Mayn't -I go and play now, aunt?" - -"What, a'ready? How much have you done?" - -"It's all done, aunt." - -"Tom, don't lie to me--I can't bear it." - -"I ain't, aunt; it IS all done." - -Aunt Polly placed small trust in such evidence. She went out to see -for herself; and she would have been content to find twenty per cent. -of Tom's statement true. When she found the entire fence whitewashed, -and not only whitewashed but elaborately coated and recoated, and even -a streak added to the ground, her astonishment was almost unspeakable. -She said: - -"Well, I never! There's no getting round it, you can work when you're -a mind to, Tom." 
And then she diluted the compliment by adding, "But -it's powerful seldom you're a mind to, I'm bound to say. Well, go 'long -and play; but mind you get back some time in a week, or I'll tan you." - -She was so overcome by the splendor of his achievement that she took -him into the closet and selected a choice apple and delivered it to -him, along with an improving lecture upon the added value and flavor a -treat took to itself when it came without sin through virtuous effort. -And while she closed with a happy Scriptural flourish, he "hooked" a -doughnut. - -Then he skipped out, and saw Sid just starting up the outside stairway -that led to the back rooms on the second floor. Clods were handy and -the air was full of them in a twinkling. They raged around Sid like a -hail-storm; and before Aunt Polly could collect her surprised faculties -and sally to the rescue, six or seven clods had taken personal effect, -and Tom was over the fence and gone. There was a gate, but as a general -thing he was too crowded for time to make use of it. His soul was at -peace, now that he had settled with Sid for calling attention to his -black thread and getting him into trouble. - -Tom skirted the block, and came round into a muddy alley that led by -the back of his aunt's cow-stable. He presently got safely beyond the -reach of capture and punishment, and hastened toward the public square -of the village, where two "military" companies of boys had met for -conflict, according to previous appointment. Tom was General of one of -these armies, Joe Harper (a bosom friend) General of the other. These -two great commanders did not condescend to fight in person--that being -better suited to the still smaller fry--but sat together on an eminence -and conducted the field operations by orders delivered through -aides-de-camp. Tom's army won a great victory, after a long and -hard-fought battle. 
Then the dead were counted, prisoners exchanged, -the terms of the next disagreement agreed upon, and the day for the -necessary battle appointed; after which the armies fell into line and -marched away, and Tom turned homeward alone. - -As he was passing by the house where Jeff Thatcher lived, he saw a new -girl in the garden--a lovely little blue-eyed creature with yellow hair -plaited into two long-tails, white summer frock and embroidered -pantalettes. The fresh-crowned hero fell without firing a shot. A -certain Amy Lawrence vanished out of his heart and left not even a -memory of herself behind. He had thought he loved her to distraction; -he had regarded his passion as adoration; and behold it was only a poor -little evanescent partiality. He had been months winning her; she had -confessed hardly a week ago; he had been the happiest and the proudest -boy in the world only seven short days, and here in one instant of time -she had gone out of his heart like a casual stranger whose visit is -done. - -He worshipped this new angel with furtive eye, till he saw that she -had discovered him; then he pretended he did not know she was present, -and began to "show off" in all sorts of absurd boyish ways, in order to -win her admiration. He kept up this grotesque foolishness for some -time; but by-and-by, while he was in the midst of some dangerous -gymnastic performances, he glanced aside and saw that the little girl -was wending her way toward the house. Tom came up to the fence and -leaned on it, grieving, and hoping she would tarry yet awhile longer. -She halted a moment on the steps and then moved toward the door. Tom -heaved a great sigh as she put her foot on the threshold. But his face -lit up, right away, for she tossed a pansy over the fence a moment -before she disappeared. 
- -The boy ran around and stopped within a foot or two of the flower, and -then shaded his eyes with his hand and began to look down street as if -he had discovered something of interest going on in that direction. -Presently he picked up a straw and began trying to balance it on his -nose, with his head tilted far back; and as he moved from side to side, -in his efforts, he edged nearer and nearer toward the pansy; finally -his bare foot rested upon it, his pliant toes closed upon it, and he -hopped away with the treasure and disappeared round the corner. But -only for a minute--only while he could button the flower inside his -jacket, next his heart--or next his stomach, possibly, for he was not -much posted in anatomy, and not hypercritical, anyway. - -He returned, now, and hung about the fence till nightfall, "showing -off," as before; but the girl never exhibited herself again, though Tom -comforted himself a little with the hope that she had been near some -window, meantime, and been aware of his attentions. Finally he strode -home reluctantly, with his poor head full of visions. - -All through supper his spirits were so high that his aunt wondered -"what had got into the child." He took a good scolding about clodding -Sid, and did not seem to mind it in the least. He tried to steal sugar -under his aunt's very nose, and got his knuckles rapped for it. He said: - -"Aunt, you don't whack Sid when he takes it." - -"Well, Sid don't torment a body the way you do. You'd be always into -that sugar if I warn't watching you." - -Presently she stepped into the kitchen, and Sid, happy in his -immunity, reached for the sugar-bowl--a sort of glorying over Tom which -was wellnigh unbearable. But Sid's fingers slipped and the bowl dropped -and broke. Tom was in ecstasies. In such ecstasies that he even -controlled his tongue and was silent. 
He said to himself that he would -not speak a word, even when his aunt came in, but would sit perfectly -still till she asked who did the mischief; and then he would tell, and -there would be nothing so good in the world as to see that pet model -"catch it." He was so brimful of exultation that he could hardly hold -himself when the old lady came back and stood above the wreck -discharging lightnings of wrath from over her spectacles. He said to -himself, "Now it's coming!" And the next instant he was sprawling on -the floor! The potent palm was uplifted to strike again when Tom cried -out: - -"Hold on, now, what 'er you belting ME for?--Sid broke it!" - -Aunt Polly paused, perplexed, and Tom looked for healing pity. But -when she got her tongue again, she only said: - -"Umf! Well, you didn't get a lick amiss, I reckon. You been into some -other audacious mischief when I wasn't around, like enough." - -Then her conscience reproached her, and she yearned to say something -kind and loving; but she judged that this would be construed into a -confession that she had been in the wrong, and discipline forbade that. -So she kept silence, and went about her affairs with a troubled heart. -Tom sulked in a corner and exalted his woes. He knew that in her heart -his aunt was on her knees to him, and he was morosely gratified by the -consciousness of it. He would hang out no signals, he would take notice -of none. He knew that a yearning glance fell upon him, now and then, -through a film of tears, but he refused recognition of it. He pictured -himself lying sick unto death and his aunt bending over him beseeching -one little forgiving word, but he would turn his face to the wall, and -die with that word unsaid. Ah, how would she feel then? And he pictured -himself brought home from the river, dead, with his curls all wet, and -his sore heart at rest. 
How she would throw herself upon him, and how -her tears would fall like rain, and her lips pray God to give her back -her boy and she would never, never abuse him any more! But he would lie -there cold and white and make no sign--a poor little sufferer, whose -griefs were at an end. He so worked upon his feelings with the pathos -of these dreams, that he had to keep swallowing, he was so like to -choke; and his eyes swam in a blur of water, which overflowed when he -winked, and ran down and trickled from the end of his nose. And such a -luxury to him was this petting of his sorrows, that he could not bear -to have any worldly cheeriness or any grating delight intrude upon it; -it was too sacred for such contact; and so, presently, when his cousin -Mary danced in, all alive with the joy of seeing home again after an -age-long visit of one week to the country, he got up and moved in -clouds and darkness out at one door as she brought song and sunshine in -at the other. - -He wandered far from the accustomed haunts of boys, and sought -desolate places that were in harmony with his spirit. A log raft in the -river invited him, and he seated himself on its outer edge and -contemplated the dreary vastness of the stream, wishing, the while, -that he could only be drowned, all at once and unconsciously, without -undergoing the uncomfortable routine devised by nature. Then he thought -of his flower. He got it out, rumpled and wilted, and it mightily -increased his dismal felicity. He wondered if she would pity him if she -knew? Would she cry, and wish that she had a right to put her arms -around his neck and comfort him? Or would she turn coldly away like all -the hollow world? This picture brought such an agony of pleasurable -suffering that he worked it over and over again in his mind and set it -up in new and varied lights, till he wore it threadbare. At last he -rose up sighing and departed in the darkness. 
- -About half-past nine or ten o'clock he came along the deserted street -to where the Adored Unknown lived; he paused a moment; no sound fell -upon his listening ear; a candle was casting a dull glow upon the -curtain of a second-story window. Was the sacred presence there? He -climbed the fence, threaded his stealthy way through the plants, till -he stood under that window; he looked up at it long, and with emotion; -then he laid him down on the ground under it, disposing himself upon -his back, with his hands clasped upon his breast and holding his poor -wilted flower. And thus he would die--out in the cold world, with no -shelter over his homeless head, no friendly hand to wipe the -death-damps from his brow, no loving face to bend pityingly over him -when the great agony came. And thus SHE would see him when she looked -out upon the glad morning, and oh! would she drop one little tear upon -his poor, lifeless form, would she heave one little sigh to see a bright -young life so rudely blighted, so untimely cut down? - -The window went up, a maid-servant's discordant voice profaned the -holy calm, and a deluge of water drenched the prone martyr's remains! - -The strangling hero sprang up with a relieving snort. There was a whiz -as of a missile in the air, mingled with the murmur of a curse, a sound -as of shivering glass followed, and a small, vague form went over the -fence and shot away in the gloom. - -Not long after, as Tom, all undressed for bed, was surveying his -drenched garments by the light of a tallow dip, Sid woke up; but if he -had any dim idea of making any "references to allusions," he thought -better of it and held his peace, for there was danger in Tom's eye. - -Tom turned in without the added vexation of prayers, and Sid made -mental note of the omission. - - - -CHAPTER IV - -THE sun rose upon a tranquil world, and beamed down upon the peaceful -village like a benediction. 
Breakfast over, Aunt Polly had family -worship: it began with a prayer built from the ground up of solid -courses of Scriptural quotations, welded together with a thin mortar of -originality; and from the summit of this she delivered a grim chapter -of the Mosaic Law, as from Sinai. - -Then Tom girded up his loins, so to speak, and went to work to "get -his verses." Sid had learned his lesson days before. Tom bent all his -energies to the memorizing of five verses, and he chose part of the -Sermon on the Mount, because he could find no verses that were shorter. -At the end of half an hour Tom had a vague general idea of his lesson, -but no more, for his mind was traversing the whole field of human -thought, and his hands were busy with distracting recreations. Mary -took his book to hear him recite, and he tried to find his way through -the fog: - -"Blessed are the--a--a--" - -"Poor"-- - -"Yes--poor; blessed are the poor--a--a--" - -"In spirit--" - -"In spirit; blessed are the poor in spirit, for they--they--" - -"THEIRS--" - -"For THEIRS. Blessed are the poor in spirit, for theirs is the kingdom -of heaven. Blessed are they that mourn, for they--they--" - -"Sh--" - -"For they--a--" - -"S, H, A--" - -"For they S, H--Oh, I don't know what it is!" - -"SHALL!" - -"Oh, SHALL! for they shall--for they shall--a--a--shall mourn--a--a-- -blessed are they that shall--they that--a--they that shall mourn, for -they shall--a--shall WHAT? Why don't you tell me, Mary?--what do you -want to be so mean for?" - -"Oh, Tom, you poor thick-headed thing, I'm not teasing you. I wouldn't -do that. You must go and learn it again. Don't you be discouraged, Tom, -you'll manage it--and if you do, I'll give you something ever so nice. -There, now, that's a good boy." - -"All right! What is it, Mary, tell me what it is." - -"Never you mind, Tom. You know if I say it's nice, it is nice." - -"You bet you that's so, Mary. All right, I'll tackle it again." 
- -And he did "tackle it again"--and under the double pressure of -curiosity and prospective gain he did it with such spirit that he -accomplished a shining success. Mary gave him a brand-new "Barlow" -knife worth twelve and a half cents; and the convulsion of delight that -swept his system shook him to his foundations. True, the knife would -not cut anything, but it was a "sure-enough" Barlow, and there was -inconceivable grandeur in that--though where the Western boys ever got -the idea that such a weapon could possibly be counterfeited to its -injury is an imposing mystery and will always remain so, perhaps. Tom -contrived to scarify the cupboard with it, and was arranging to begin -on the bureau, when he was called off to dress for Sunday-school. - -Mary gave him a tin basin of water and a piece of soap, and he went -outside the door and set the basin on a little bench there; then he -dipped the soap in the water and laid it down; turned up his sleeves; -poured out the water on the ground, gently, and then entered the -kitchen and began to wipe his face diligently on the towel behind the -door. But Mary removed the towel and said: - -"Now ain't you ashamed, Tom. You mustn't be so bad. Water won't hurt -you." - -Tom was a trifle disconcerted. The basin was refilled, and this time -he stood over it a little while, gathering resolution; took in a big -breath and began. When he entered the kitchen presently, with both eyes -shut and groping for the towel with his hands, an honorable testimony -of suds and water was dripping from his face. But when he emerged from -the towel, he was not yet satisfactory, for the clean territory stopped -short at his chin and his jaws, like a mask; below and beyond this line -there was a dark expanse of unirrigated soil that spread downward in -front and backward around his neck. 
Mary took him in hand, and when she -was done with him he was a man and a brother, without distinction of -color, and his saturated hair was neatly brushed, and its short curls -wrought into a dainty and symmetrical general effect. [He privately -smoothed out the curls, with labor and difficulty, and plastered his -hair close down to his head; for he held curls to be effeminate, and -his own filled his life with bitterness.] Then Mary got out a suit of -his clothing that had been used only on Sundays during two years--they -were simply called his "other clothes"--and so by that we know the -size of his wardrobe. The girl "put him to rights" after he had dressed -himself; she buttoned his neat roundabout up to his chin, turned his -vast shirt collar down over his shoulders, brushed him off and crowned -him with his speckled straw hat. He now looked exceedingly improved and -uncomfortable. He was fully as uncomfortable as he looked; for there -was a restraint about whole clothes and cleanliness that galled him. He -hoped that Mary would forget his shoes, but the hope was blighted; she -coated them thoroughly with tallow, as was the custom, and brought them -out. He lost his temper and said he was always being made to do -everything he didn't want to do. But Mary said, persuasively: - -"Please, Tom--that's a good boy." - -So he got into the shoes snarling. Mary was soon ready, and the three -children set out for Sunday-school--a place that Tom hated with his -whole heart; but Sid and Mary were fond of it. - -Sabbath-school hours were from nine to half-past ten; and then church -service. Two of the children always remained for the sermon -voluntarily, and the other always remained too--for stronger reasons. -The church's high-backed, uncushioned pews would seat about three -hundred persons; the edifice was but a small, plain affair, with a sort -of pine board tree-box on top of it for a steeple. 
At the door Tom -dropped back a step and accosted a Sunday-dressed comrade: - -"Say, Billy, got a yaller ticket?" - -"Yes." - -"What'll you take for her?" - -"What'll you give?" - -"Piece of lickrish and a fish-hook." - -"Less see 'em." - -Tom exhibited. They were satisfactory, and the property changed hands. -Then Tom traded a couple of white alleys for three red tickets, and -some small trifle or other for a couple of blue ones. He waylaid other -boys as they came, and went on buying tickets of various colors ten or -fifteen minutes longer. He entered the church, now, with a swarm of -clean and noisy boys and girls, proceeded to his seat and started a -quarrel with the first boy that came handy. The teacher, a grave, -elderly man, interfered; then turned his back a moment and Tom pulled a -boy's hair in the next bench, and was absorbed in his book when the boy -turned around; stuck a pin in another boy, presently, in order to hear -him say "Ouch!" and got a new reprimand from his teacher. Tom's whole -class were of a pattern--restless, noisy, and troublesome. When they -came to recite their lessons, not one of them knew his verses -perfectly, but had to be prompted all along. However, they worried -through, and each got his reward--in small blue tickets, each with a -passage of Scripture on it; each blue ticket was pay for two verses of -the recitation. Ten blue tickets equalled a red one, and could be -exchanged for it; ten red tickets equalled a yellow one; for ten yellow -tickets the superintendent gave a very plainly bound Bible (worth forty -cents in those easy times) to the pupil. How many of my readers would -have the industry and application to memorize two thousand verses, even -for a Dore Bible? And yet Mary had acquired two Bibles in this way--it -was the patient work of two years--and a boy of German parentage had -won four or five. 
He once recited three thousand verses without -stopping; but the strain upon his mental faculties was too great, and -he was little better than an idiot from that day forth--a grievous -misfortune for the school, for on great occasions, before company, the -superintendent (as Tom expressed it) had always made this boy come out -and "spread himself." Only the older pupils managed to keep their -tickets and stick to their tedious work long enough to get a Bible, and -so the delivery of one of these prizes was a rare and noteworthy -circumstance; the successful pupil was so great and conspicuous for -that day that on the spot every scholar's heart was fired with a fresh -ambition that often lasted a couple of weeks. It is possible that Tom's -mental stomach had never really hungered for one of those prizes, but -unquestionably his entire being had for many a day longed for the glory -and the eclat that came with it. - -In due course the superintendent stood up in front of the pulpit, with -a closed hymn-book in his hand and his forefinger inserted between its -leaves, and commanded attention. When a Sunday-school superintendent -makes his customary little speech, a hymn-book in the hand is as -necessary as is the inevitable sheet of music in the hand of a singer -who stands forward on the platform and sings a solo at a concert ---though why, is a mystery: for neither the hymn-book nor the sheet of -music is ever referred to by the sufferer. 
This superintendent was a -slim creature of thirty-five, with a sandy goatee and short sandy hair; -he wore a stiff standing-collar whose upper edge almost reached his -ears and whose sharp points curved forward abreast the corners of his -mouth--a fence that compelled a straight lookout ahead, and a turning -of the whole body when a side view was required; his chin was propped -on a spreading cravat which was as broad and as long as a bank-note, -and had fringed ends; his boot toes were turned sharply up, in the -fashion of the day, like sleigh-runners--an effect patiently and -laboriously produced by the young men by sitting with their toes -pressed against a wall for hours together. Mr. Walters was very earnest -of mien, and very sincere and honest at heart; and he held sacred -things and places in such reverence, and so separated them from worldly -matters, that unconsciously to himself his Sunday-school voice had -acquired a peculiar intonation which was wholly absent on week-days. He -began after this fashion: - -"Now, children, I want you all to sit up just as straight and pretty -as you can and give me all your attention for a minute or two. There ---that is it. That is the way good little boys and girls should do. I see -one little girl who is looking out of the window--I am afraid she -thinks I am out there somewhere--perhaps up in one of the trees making -a speech to the little birds. [Applausive titter.] I want to tell you -how good it makes me feel to see so many bright, clean little faces -assembled in a place like this, learning to do right and be good." And -so forth and so on. It is not necessary to set down the rest of the -oration. It was of a pattern which does not vary, and so it is familiar -to us all. 
- -The latter third of the speech was marred by the resumption of fights -and other recreations among certain of the bad boys, and by fidgetings -and whisperings that extended far and wide, washing even to the bases -of isolated and incorruptible rocks like Sid and Mary. But now every -sound ceased suddenly, with the subsidence of Mr. Walters' voice, and -the conclusion of the speech was received with a burst of silent -gratitude. - -A good part of the whispering had been occasioned by an event which -was more or less rare--the entrance of visitors: lawyer Thatcher, -accompanied by a very feeble and aged man; a fine, portly, middle-aged -gentleman with iron-gray hair; and a dignified lady who was doubtless -the latter's wife. The lady was leading a child. Tom had been restless -and full of chafings and repinings; conscience-smitten, too--he could -not meet Amy Lawrence's eye, he could not brook her loving gaze. But -when he saw this small new-comer his soul was all ablaze with bliss in -a moment. The next moment he was "showing off" with all his might ---cuffing boys, pulling hair, making faces--in a word, using every art -that seemed likely to fascinate a girl and win her applause. His -exaltation had but one alloy--the memory of his humiliation in this -angel's garden--and that record in sand was fast washing out, under -the waves of happiness that were sweeping over it now. - -The visitors were given the highest seat of honor, and as soon as Mr. -Walters' speech was finished, he introduced them to the school. The -middle-aged man turned out to be a prodigious personage--no less a one -than the county judge--altogether the most august creation these -children had ever looked upon--and they wondered what kind of material -he was made of--and they half wanted to hear him roar, and were half -afraid he might, too. 
He was from Constantinople, twelve miles away--so -he had travelled, and seen the world--these very eyes had looked upon -the county court-house--which was said to have a tin roof. The awe -which these reflections inspired was attested by the impressive silence -and the ranks of staring eyes. This was the great Judge Thatcher, -brother of their own lawyer. Jeff Thatcher immediately went forward, to -be familiar with the great man and be envied by the school. It would -have been music to his soul to hear the whisperings: - -"Look at him, Jim! He's a going up there. Say--look! he's a going to -shake hands with him--he IS shaking hands with him! By jings, don't you -wish you was Jeff?" - -Mr. Walters fell to "showing off," with all sorts of official -bustlings and activities, giving orders, delivering judgments, -discharging directions here, there, everywhere that he could find a -target. The librarian "showed off"--running hither and thither with his -arms full of books and making a deal of the splutter and fuss that -insect authority delights in. The young lady teachers "showed off" ---bending sweetly over pupils that were lately being boxed, lifting -pretty warning fingers at bad little boys and patting good ones -lovingly. The young gentlemen teachers "showed off" with small -scoldings and other little displays of authority and fine attention to -discipline--and most of the teachers, of both sexes, found business up -at the library, by the pulpit; and it was business that frequently had -to be done over again two or three times (with much seeming vexation). -The little girls "showed off" in various ways, and the little boys -"showed off" with such diligence that the air was thick with paper wads -and the murmur of scufflings. And above it all the great man sat and -beamed a majestic judicial smile upon all the house, and warmed himself -in the sun of his own grandeur--for he was "showing off," too. - -There was only one thing wanting to make Mr. 
Walters' ecstasy -complete, and that was a chance to deliver a Bible-prize and exhibit a -prodigy. Several pupils had a few yellow tickets, but none had enough ---he had been around among the star pupils inquiring. He would have given -worlds, now, to have that German lad back again with a sound mind. - -And now at this moment, when hope was dead, Tom Sawyer came forward -with nine yellow tickets, nine red tickets, and ten blue ones, and -demanded a Bible. This was a thunderbolt out of a clear sky. Walters -was not expecting an application from this source for the next ten -years. But there was no getting around it--here were the certified -checks, and they were good for their face. Tom was therefore elevated -to a place with the Judge and the other elect, and the great news was -announced from headquarters. It was the most stunning surprise of the -decade, and so profound was the sensation that it lifted the new hero -up to the judicial one's altitude, and the school had two marvels to -gaze upon in place of one. The boys were all eaten up with envy--but -those that suffered the bitterest pangs were those who perceived too -late that they themselves had contributed to this hated splendor by -trading tickets to Tom for the wealth he had amassed in selling -whitewashing privileges. These despised themselves, as being the dupes -of a wily fraud, a guileful snake in the grass. - -The prize was delivered to Tom with as much effusion as the -superintendent could pump up under the circumstances; but it lacked -somewhat of the true gush, for the poor fellow's instinct taught him -that there was a mystery here that could not well bear the light, -perhaps; it was simply preposterous that this boy had warehoused two -thousand sheaves of Scriptural wisdom on his premises--a dozen would -strain his capacity, without a doubt. - -Amy Lawrence was proud and glad, and she tried to make Tom see it in -her face--but he wouldn't look. 
She wondered; then she was just a grain -troubled; next a dim suspicion came and went--came again; she watched; -a furtive glance told her worlds--and then her heart broke, and she was -jealous, and angry, and the tears came and she hated everybody. Tom -most of all (she thought). - -Tom was introduced to the Judge; but his tongue was tied, his breath -would hardly come, his heart quaked--partly because of the awful -greatness of the man, but mainly because he was her parent. He would -have liked to fall down and worship him, if it were in the dark. The -Judge put his hand on Tom's head and called him a fine little man, and -asked him what his name was. The boy stammered, gasped, and got it out: - -"Tom." - -"Oh, no, not Tom--it is--" - -"Thomas." - -"Ah, that's it. I thought there was more to it, maybe. That's very -well. But you've another one I daresay, and you'll tell it to me, won't -you?" - -"Tell the gentleman your other name, Thomas," said Walters, "and say -sir. You mustn't forget your manners." - -"Thomas Sawyer--sir." - -"That's it! That's a good boy. Fine boy. Fine, manly little fellow. -Two thousand verses is a great many--very, very great many. And you -never can be sorry for the trouble you took to learn them; for -knowledge is worth more than anything there is in the world; it's what -makes great men and good men; you'll be a great man and a good man -yourself, some day, Thomas, and then you'll look back and say, It's all -owing to the precious Sunday-school privileges of my boyhood--it's all -owing to my dear teachers that taught me to learn--it's all owing to -the good superintendent, who encouraged me, and watched over me, and -gave me a beautiful Bible--a splendid elegant Bible--to keep and have -it all for my own, always--it's all owing to right bringing up! That is -what you will say, Thomas--and you wouldn't take any money for those -two thousand verses--no indeed you wouldn't. 
And now you wouldn't mind -telling me and this lady some of the things you've learned--no, I know -you wouldn't--for we are proud of little boys that learn. Now, no -doubt you know the names of all the twelve disciples. Won't you tell us -the names of the first two that were appointed?" - -Tom was tugging at a button-hole and looking sheepish. He blushed, -now, and his eyes fell. Mr. Walters' heart sank within him. He said to -himself, it is not possible that the boy can answer the simplest -question--why DID the Judge ask him? Yet he felt obliged to speak up -and say: - -"Answer the gentleman, Thomas--don't be afraid." - -Tom still hung fire. - -"Now I know you'll tell me," said the lady. "The names of the first -two disciples were--" - -"DAVID AND GOLIAH!" - -Let us draw the curtain of charity over the rest of the scene. - - - -CHAPTER V - -ABOUT half-past ten the cracked bell of the small church began to -ring, and presently the people began to gather for the morning sermon. -The Sunday-school children distributed themselves about the house and -occupied pews with their parents, so as to be under supervision. Aunt -Polly came, and Tom and Sid and Mary sat with her--Tom being placed -next the aisle, in order that he might be as far away from the open -window and the seductive outside summer scenes as possible. The crowd -filed up the aisles: the aged and needy postmaster, who had seen better -days; the mayor and his wife--for they had a mayor there, among other -unnecessaries; the justice of the peace; the widow Douglass, fair, -smart, and forty, a generous, good-hearted soul and well-to-do, her -hill mansion the only palace in the town, and the most hospitable and -much the most lavish in the matter of festivities that St. Petersburg -could boast; the bent and venerable Major and Mrs. 
Ward; lawyer -Riverson, the new notable from a distance; next the belle of the -village, followed by a troop of lawn-clad and ribbon-decked young -heart-breakers; then all the young clerks in town in a body--for they -had stood in the vestibule sucking their cane-heads, a circling wall of -oiled and simpering admirers, till the last girl had run their gantlet; -and last of all came the Model Boy, Willie Mufferson, taking as heedful -care of his mother as if she were cut glass. He always brought his -mother to church, and was the pride of all the matrons. The boys all -hated him, he was so good. And besides, he had been "thrown up to them" -so much. His white handkerchief was hanging out of his pocket behind, as -usual on Sundays--accidentally. Tom had no handkerchief, and he looked -upon boys who had as snobs. - -The congregation being fully assembled, now, the bell rang once more, -to warn laggards and stragglers, and then a solemn hush fell upon the -church which was only broken by the tittering and whispering of the -choir in the gallery. The choir always tittered and whispered all -through service. There was once a church choir that was not ill-bred, -but I have forgotten where it was, now. It was a great many years ago, -and I can scarcely remember anything about it, but I think it was in -some foreign country. - -The minister gave out the hymn, and read it through with a relish, in -a peculiar style which was much admired in that part of the country. -His voice began on a medium key and climbed steadily up till it reached -a certain point, where it bore with strong emphasis upon the topmost -word and then plunged down as if from a spring-board: - - Shall I be car-ri-ed toe the skies, on flow'ry BEDS of ease, - - Whilst others fight to win the prize, and sail thro' BLOODY seas? - -He was regarded as a wonderful reader. 
At church "sociables" he was -always called upon to read poetry; and when he was through, the ladies -would lift up their hands and let them fall helplessly in their laps, -and "wall" their eyes, and shake their heads, as much as to say, "Words -cannot express it; it is too beautiful, TOO beautiful for this mortal -earth." - -After the hymn had been sung, the Rev. Mr. Sprague turned himself into -a bulletin-board, and read off "notices" of meetings and societies and -things till it seemed that the list would stretch out to the crack of -doom--a queer custom which is still kept up in America, even in cities, -away here in this age of abundant newspapers. Often, the less there is -to justify a traditional custom, the harder it is to get rid of it. - -And now the minister prayed. A good, generous prayer it was, and went -into details: it pleaded for the church, and the little children of the -church; for the other churches of the village; for the village itself; -for the county; for the State; for the State officers; for the United -States; for the churches of the United States; for Congress; for the -President; for the officers of the Government; for poor sailors, tossed -by stormy seas; for the oppressed millions groaning under the heel of -European monarchies and Oriental despotisms; for such as have the light -and the good tidings, and yet have not eyes to see nor ears to hear -withal; for the heathen in the far islands of the sea; and closed with -a supplication that the words he was about to speak might find grace -and favor, and be as seed sown in fertile ground, yielding in time a -grateful harvest of good. Amen. - -There was a rustling of dresses, and the standing congregation sat -down. The boy whose history this book relates did not enjoy the prayer, -he only endured it--if he even did that much. 
He was restive all -through it; he kept tally of the details of the prayer, unconsciously ---for he was not listening, but he knew the ground of old, and the -clergyman's regular route over it--and when a little trifle of new -matter was interlarded, his ear detected it and his whole nature -resented it; he considered additions unfair, and scoundrelly. In the -midst of the prayer a fly had lit on the back of the pew in front of -him and tortured his spirit by calmly rubbing its hands together, -embracing its head with its arms, and polishing it so vigorously that -it seemed to almost part company with the body, and the slender thread -of a neck was exposed to view; scraping its wings with its hind legs -and smoothing them to its body as if they had been coat-tails; going -through its whole toilet as tranquilly as if it knew it was perfectly -safe. As indeed it was; for as sorely as Tom's hands itched to grab for -it they did not dare--he believed his soul would be instantly destroyed -if he did such a thing while the prayer was going on. But with the -closing sentence his hand began to curve and steal forward; and the -instant the "Amen" was out the fly was a prisoner of war. His aunt -detected the act and made him let it go. - -The minister gave out his text and droned along monotonously through -an argument that was so prosy that many a head by and by began to nod ---and yet it was an argument that dealt in limitless fire and brimstone -and thinned the predestined elect down to a company so small as to be -hardly worth the saving. Tom counted the pages of the sermon; after -church he always knew how many pages there had been, but he seldom knew -anything else about the discourse. However, this time he was really -interested for a little while. The minister made a grand and moving -picture of the assembling together of the world's hosts at the -millennium when the lion and the lamb should lie down together and a -little child should lead them. 
But the pathos, the lesson, the moral of -the great spectacle were lost upon the boy; he only thought of the -conspicuousness of the principal character before the on-looking -nations; his face lit with the thought, and he said to himself that he -wished he could be that child, if it was a tame lion. - -Now he lapsed into suffering again, as the dry argument was resumed. -Presently he bethought him of a treasure he had and got it out. It was -a large black beetle with formidable jaws--a "pinchbug," he called it. -It was in a percussion-cap box. The first thing the beetle did was to -take him by the finger. A natural fillip followed, the beetle went -floundering into the aisle and lit on its back, and the hurt finger -went into the boy's mouth. The beetle lay there working its helpless -legs, unable to turn over. Tom eyed it, and longed for it; but it was -safe out of his reach. Other people uninterested in the sermon found -relief in the beetle, and they eyed it too. Presently a vagrant poodle -dog came idling along, sad at heart, lazy with the summer softness and -the quiet, weary of captivity, sighing for change. He spied the beetle; -the drooping tail lifted and wagged. He surveyed the prize; walked -around it; smelt at it from a safe distance; walked around it again; -grew bolder, and took a closer smell; then lifted his lip and made a -gingerly snatch at it, just missing it; made another, and another; -began to enjoy the diversion; subsided to his stomach with the beetle -between his paws, and continued his experiments; grew weary at last, -and then indifferent and absent-minded. His head nodded, and little by -little his chin descended and touched the enemy, who seized it. There -was a sharp yelp, a flirt of the poodle's head, and the beetle fell a -couple of yards away, and lit on its back once more. The neighboring -spectators shook with a gentle inward joy, several faces went behind -fans and handkerchiefs, and Tom was entirely happy. 
The dog looked -foolish, and probably felt so; but there was resentment in his heart, -too, and a craving for revenge. So he went to the beetle and began a -wary attack on it again; jumping at it from every point of a circle, -lighting with his fore-paws within an inch of the creature, making even -closer snatches at it with his teeth, and jerking his head till his -ears flapped again. But he grew tired once more, after a while; tried -to amuse himself with a fly but found no relief; followed an ant -around, with his nose close to the floor, and quickly wearied of that; -yawned, sighed, forgot the beetle entirely, and sat down on it. Then -there was a wild yelp of agony and the poodle went sailing up the -aisle; the yelps continued, and so did the dog; he crossed the house in -front of the altar; he flew down the other aisle; he crossed before the -doors; he clamored up the home-stretch; his anguish grew with his -progress, till presently he was but a woolly comet moving in its orbit -with the gleam and the speed of light. At last the frantic sufferer -sheered from its course, and sprang into its master's lap; he flung it -out of the window, and the voice of distress quickly thinned away and -died in the distance. - -By this time the whole church was red-faced and suffocating with -suppressed laughter, and the sermon had come to a dead standstill. The -discourse was resumed presently, but it went lame and halting, all -possibility of impressiveness being at an end; for even the gravest -sentiments were constantly being received with a smothered burst of -unholy mirth, under cover of some remote pew-back, as if the poor -parson had said a rarely facetious thing. It was a genuine relief to -the whole congregation when the ordeal was over and the benediction -pronounced. - -Tom Sawyer went home quite cheerful, thinking to himself that there -was some satisfaction about divine service when there was a bit of -variety in it. 
He had but one marring thought; he was willing that the -dog should play with his pinchbug, but he did not think it was upright -in him to carry it off. - - - -CHAPTER VI - -MONDAY morning found Tom Sawyer miserable. Monday morning always found -him so--because it began another week's slow suffering in school. He -generally began that day with wishing he had had no intervening -holiday, it made the going into captivity and fetters again so much -more odious. - -Tom lay thinking. Presently it occurred to him that he wished he was -sick; then he could stay home from school. Here was a vague -possibility. He canvassed his system. No ailment was found, and he -investigated again. This time he thought he could detect colicky -symptoms, and he began to encourage them with considerable hope. But -they soon grew feeble, and presently died wholly away. He reflected -further. Suddenly he discovered something. One of his upper front teeth -was loose. This was lucky; he was about to begin to groan, as a -"starter," as he called it, when it occurred to him that if he came -into court with that argument, his aunt would pull it out, and that -would hurt. So he thought he would hold the tooth in reserve for the -present, and seek further. Nothing offered for some little time, and -then he remembered hearing the doctor tell about a certain thing that -laid up a patient for two or three weeks and threatened to make him -lose a finger. So the boy eagerly drew his sore toe from under the -sheet and held it up for inspection. But now he did not know the -necessary symptoms. However, it seemed well worth while to chance it, -so he fell to groaning with considerable spirit. - -But Sid slept on unconscious. - -Tom groaned louder, and fancied that he began to feel pain in the toe. - -No result from Sid. - -Tom was panting with his exertions by this time. He took a rest and -then swelled himself up and fetched a succession of admirable groans. - -Sid snored on. - -Tom was aggravated. 
He said, "Sid, Sid!" and shook him. This course -worked well, and Tom began to groan again. Sid yawned, stretched, then -brought himself up on his elbow with a snort, and began to stare at -Tom. Tom went on groaning. Sid said: - -"Tom! Say, Tom!" [No response.] "Here, Tom! TOM! What is the matter, -Tom?" And he shook him and looked in his face anxiously. - -Tom moaned out: - -"Oh, don't, Sid. Don't joggle me." - -"Why, what's the matter, Tom? I must call auntie." - -"No--never mind. It'll be over by and by, maybe. Don't call anybody." - -"But I must! DON'T groan so, Tom, it's awful. How long you been this -way?" - -"Hours. Ouch! Oh, don't stir so, Sid, you'll kill me." - -"Tom, why didn't you wake me sooner? Oh, Tom, DON'T! It makes my -flesh crawl to hear you. Tom, what is the matter?" - -"I forgive you everything, Sid. [Groan.] Everything you've ever done -to me. When I'm gone--" - -"Oh, Tom, you ain't dying, are you? Don't, Tom--oh, don't. Maybe--" - -"I forgive everybody, Sid. [Groan.] Tell 'em so, Sid. And Sid, you -give my window-sash and my cat with one eye to that new girl that's -come to town, and tell her--" - -But Sid had snatched his clothes and gone. Tom was suffering in -reality, now, so handsomely was his imagination working, and so his -groans had gathered quite a genuine tone. - -Sid flew down-stairs and said: - -"Oh, Aunt Polly, come! Tom's dying!" - -"Dying!" - -"Yes'm. Don't wait--come quick!" - -"Rubbage! I don't believe it!" - -But she fled up-stairs, nevertheless, with Sid and Mary at her heels. -And her face grew white, too, and her lip trembled. When she reached -the bedside she gasped out: - -"You, Tom! Tom, what's the matter with you?" - -"Oh, auntie, I'm--" - -"What's the matter with you--what is the matter with you, child?" - -"Oh, auntie, my sore toe's mortified!" - -The old lady sank down into a chair and laughed a little, then cried a -little, then did both together. 
This restored her and she said: - -"Tom, what a turn you did give me. Now you shut up that nonsense and -climb out of this." - -The groans ceased and the pain vanished from the toe. The boy felt a -little foolish, and he said: - -"Aunt Polly, it SEEMED mortified, and it hurt so I never minded my -tooth at all." - -"Your tooth, indeed! What's the matter with your tooth?" - -"One of them's loose, and it aches perfectly awful." - -"There, there, now, don't begin that groaning again. Open your mouth. -Well--your tooth IS loose, but you're not going to die about that. -Mary, get me a silk thread, and a chunk of fire out of the kitchen." - -Tom said: - -"Oh, please, auntie, don't pull it out. It don't hurt any more. I wish -I may never stir if it does. Please don't, auntie. I don't want to stay -home from school." - -"Oh, you don't, don't you? So all this row was because you thought -you'd get to stay home from school and go a-fishing? Tom, Tom, I love -you so, and you seem to try every way you can to break my old heart -with your outrageousness." By this time the dental instruments were -ready. The old lady made one end of the silk thread fast to Tom's tooth -with a loop and tied the other to the bedpost. Then she seized the -chunk of fire and suddenly thrust it almost into the boy's face. The -tooth hung dangling by the bedpost, now. - -But all trials bring their compensations. As Tom wended to school -after breakfast, he was the envy of every boy he met because the gap in -his upper row of teeth enabled him to expectorate in a new and -admirable way. He gathered quite a following of lads interested in the -exhibition; and one that had cut his finger and had been a centre of -fascination and homage up to this time, now found himself suddenly -without an adherent, and shorn of his glory. His heart was heavy, and -he said with a disdain which he did not feel that it wasn't anything to -spit like Tom Sawyer; but another boy said, "Sour grapes!" 
and he -wandered away a dismantled hero. - -Shortly Tom came upon the juvenile pariah of the village, Huckleberry -Finn, son of the town drunkard. Huckleberry was cordially hated and -dreaded by all the mothers of the town, because he was idle and lawless -and vulgar and bad--and because all their children admired him so, and -delighted in his forbidden society, and wished they dared to be like -him. Tom was like the rest of the respectable boys, in that he envied -Huckleberry his gaudy outcast condition, and was under strict orders -not to play with him. So he played with him every time he got a chance. -Huckleberry was always dressed in the cast-off clothes of full-grown -men, and they were in perennial bloom and fluttering with rags. His hat -was a vast ruin with a wide crescent lopped out of its brim; his coat, -when he wore one, hung nearly to his heels and had the rearward buttons -far down the back; but one suspender supported his trousers; the seat -of the trousers bagged low and contained nothing, the fringed legs -dragged in the dirt when not rolled up. - -Huckleberry came and went, at his own free will. He slept on doorsteps -in fine weather and in empty hogsheads in wet; he did not have to go to -school or to church, or call any being master or obey anybody; he could -go fishing or swimming when and where he chose, and stay as long as it -suited him; nobody forbade him to fight; he could sit up as late as he -pleased; he was always the first boy that went barefoot in the spring -and the last to resume leather in the fall; he never had to wash, nor -put on clean clothes; he could swear wonderfully. In a word, everything -that goes to make life precious that boy had. So thought every -harassed, hampered, respectable boy in St. Petersburg. - -Tom hailed the romantic outcast: - -"Hello, Huckleberry!" - -"Hello yourself, and see how you like it." - -"What's that you got?" - -"Dead cat." - -"Lemme see him, Huck. My, he's pretty stiff. Where'd you get him?" 
- -"Bought him off'n a boy." - -"What did you give?" - -"I give a blue ticket and a bladder that I got at the slaughter-house." - -"Where'd you get the blue ticket?" - -"Bought it off'n Ben Rogers two weeks ago for a hoop-stick." - -"Say--what is dead cats good for, Huck?" - -"Good for? Cure warts with." - -"No! Is that so? I know something that's better." - -"I bet you don't. What is it?" - -"Why, spunk-water." - -"Spunk-water! I wouldn't give a dern for spunk-water." - -"You wouldn't, wouldn't you? D'you ever try it?" - -"No, I hain't. But Bob Tanner did." - -"Who told you so!" - -"Why, he told Jeff Thatcher, and Jeff told Johnny Baker, and Johnny -told Jim Hollis, and Jim told Ben Rogers, and Ben told a nigger, and -the nigger told me. There now!" - -"Well, what of it? They'll all lie. Leastways all but the nigger. I -don't know HIM. But I never see a nigger that WOULDN'T lie. Shucks! Now -you tell me how Bob Tanner done it, Huck." - -"Why, he took and dipped his hand in a rotten stump where the -rain-water was." - -"In the daytime?" - -"Certainly." - -"With his face to the stump?" - -"Yes. Least I reckon so." - -"Did he say anything?" - -"I don't reckon he did. I don't know." - -"Aha! Talk about trying to cure warts with spunk-water such a blame -fool way as that! Why, that ain't a-going to do any good. You got to go -all by yourself, to the middle of the woods, where you know there's a -spunk-water stump, and just as it's midnight you back up against the -stump and jam your hand in and say: - - 'Barley-corn, barley-corn, injun-meal shorts, - Spunk-water, spunk-water, swaller these warts,' - -and then walk away quick, eleven steps, with your eyes shut, and then -turn around three times and walk home without speaking to anybody. -Because if you speak the charm's busted." - -"Well, that sounds like a good way; but that ain't the way Bob Tanner -done." 
- -"No, sir, you can bet he didn't, becuz he's the wartiest boy in this -town; and he wouldn't have a wart on him if he'd knowed how to work -spunk-water. I've took off thousands of warts off of my hands that way, -Huck. I play with frogs so much that I've always got considerable many -warts. Sometimes I take 'em off with a bean." - -"Yes, bean's good. I've done that." - -"Have you? What's your way?" - -"You take and split the bean, and cut the wart so as to get some -blood, and then you put the blood on one piece of the bean and take and -dig a hole and bury it 'bout midnight at the crossroads in the dark of -the moon, and then you burn up the rest of the bean. You see that piece -that's got the blood on it will keep drawing and drawing, trying to -fetch the other piece to it, and so that helps the blood to draw the -wart, and pretty soon off she comes." - -"Yes, that's it, Huck--that's it; though when you're burying it if you -say 'Down bean; off wart; come no more to bother me!' it's better. -That's the way Joe Harper does, and he's been nearly to Coonville and -most everywheres. But say--how do you cure 'em with dead cats?" - -"Why, you take your cat and go and get in the graveyard 'long about -midnight when somebody that was wicked has been buried; and when it's -midnight a devil will come, or maybe two or three, but you can't see -'em, you can only hear something like the wind, or maybe hear 'em talk; -and when they're taking that feller away, you heave your cat after 'em -and say, 'Devil follow corpse, cat follow devil, warts follow cat, I'm -done with ye!' That'll fetch ANY wart." - -"Sounds right. D'you ever try it, Huck?" - -"No, but old Mother Hopkins told me." - -"Well, I reckon it's so, then. Becuz they say she's a witch." - -"Say! Why, Tom, I KNOW she is. She witched pap. Pap says so his own -self. He come along one day, and he see she was a-witching him, so he -took up a rock, and if she hadn't dodged, he'd a got her. 
Well, that -very night he rolled off'n a shed wher' he was a layin drunk, and broke -his arm." - -"Why, that's awful. How did he know she was a-witching him?" - -"Lord, pap can tell, easy. Pap says when they keep looking at you -right stiddy, they're a-witching you. Specially if they mumble. Becuz -when they mumble they're saying the Lord's Prayer backards." - -"Say, Hucky, when you going to try the cat?" - -"To-night. I reckon they'll come after old Hoss Williams to-night." - -"But they buried him Saturday. Didn't they get him Saturday night?" - -"Why, how you talk! How could their charms work till midnight?--and -THEN it's Sunday. Devils don't slosh around much of a Sunday, I don't -reckon." - -"I never thought of that. That's so. Lemme go with you?" - -"Of course--if you ain't afeard." - -"Afeard! 'Tain't likely. Will you meow?" - -"Yes--and you meow back, if you get a chance. Last time, you kep' me -a-meowing around till old Hays went to throwing rocks at me and says -'Dern that cat!' and so I hove a brick through his window--but don't -you tell." - -"I won't. I couldn't meow that night, becuz auntie was watching me, -but I'll meow this time. Say--what's that?" - -"Nothing but a tick." - -"Where'd you get him?" - -"Out in the woods." - -"What'll you take for him?" - -"I don't know. I don't want to sell him." - -"All right. It's a mighty small tick, anyway." - -"Oh, anybody can run a tick down that don't belong to them. I'm -satisfied with it. It's a good enough tick for me." - -"Sho, there's ticks a plenty. I could have a thousand of 'em if I -wanted to." - -"Well, why don't you? Becuz you know mighty well you can't. This is a -pretty early tick, I reckon. It's the first one I've seen this year." - -"Say, Huck--I'll give you my tooth for him." - -"Less see it." - -Tom got out a bit of paper and carefully unrolled it. Huckleberry -viewed it wistfully. The temptation was very strong. At last he said: - -"Is it genuwyne?" 
- -Tom lifted his lip and showed the vacancy. - -"Well, all right," said Huckleberry, "it's a trade." - -Tom enclosed the tick in the percussion-cap box that had lately been -the pinchbug's prison, and the boys separated, each feeling wealthier -than before. - -When Tom reached the little isolated frame schoolhouse, he strode in -briskly, with the manner of one who had come with all honest speed. -He hung his hat on a peg and flung himself into his seat with -business-like alacrity. The master, throned on high in his great -splint-bottom arm-chair, was dozing, lulled by the drowsy hum of study. -The interruption roused him. - -"Thomas Sawyer!" - -Tom knew that when his name was pronounced in full, it meant trouble. - -"Sir!" - -"Come up here. Now, sir, why are you late again, as usual?" - -Tom was about to take refuge in a lie, when he saw two long tails of -yellow hair hanging down a back that he recognized by the electric -sympathy of love; and by that form was THE ONLY VACANT PLACE on the -girls' side of the schoolhouse. He instantly said: - -"I STOPPED TO TALK WITH HUCKLEBERRY FINN!" - -The master's pulse stood still, and he stared helplessly. The buzz of -study ceased. The pupils wondered if this foolhardy boy had lost his -mind. The master said: - -"You--you did what?" - -"Stopped to talk with Huckleberry Finn." - -There was no mistaking the words. - -"Thomas Sawyer, this is the most astounding confession I have ever -listened to. No mere ferule will answer for this offence. Take off your -jacket." - -The master's arm performed until it was tired and the stock of -switches notably diminished. Then the order followed: - -"Now, sir, go and sit with the girls! And let this be a warning to you." - -The titter that rippled around the room appeared to abash the boy, but -in reality that result was caused rather more by his worshipful awe of -his unknown idol and the dread pleasure that lay in his high good -fortune. 
He sat down upon the end of the pine bench and the girl -hitched herself away from him with a toss of her head. Nudges and winks -and whispers traversed the room, but Tom sat still, with his arms upon -the long, low desk before him, and seemed to study his book. - -By and by attention ceased from him, and the accustomed school murmur -rose upon the dull air once more. Presently the boy began to steal -furtive glances at the girl. She observed it, "made a mouth" at him and -gave him the back of her head for the space of a minute. When she -cautiously faced around again, a peach lay before her. She thrust it -away. Tom gently put it back. She thrust it away again, but with less -animosity. Tom patiently returned it to its place. Then she let it -remain. Tom scrawled on his slate, "Please take it--I got more." The -girl glanced at the words, but made no sign. Now the boy began to draw -something on the slate, hiding his work with his left hand. For a time -the girl refused to notice; but her human curiosity presently began to -manifest itself by hardly perceptible signs. The boy worked on, -apparently unconscious. The girl made a sort of noncommittal attempt to -see, but the boy did not betray that he was aware of it. At last she -gave in and hesitatingly whispered: - -"Let me see it." - -Tom partly uncovered a dismal caricature of a house with two gable -ends to it and a corkscrew of smoke issuing from the chimney. Then the -girl's interest began to fasten itself upon the work and she forgot -everything else. When it was finished, she gazed a moment, then -whispered: - -"It's nice--make a man." - -The artist erected a man in the front yard, that resembled a derrick. -He could have stepped over the house; but the girl was not -hypercritical; she was satisfied with the monster, and whispered: - -"It's a beautiful man--now make me coming along." - -Tom drew an hour-glass with a full moon and straw limbs to it and -armed the spreading fingers with a portentous fan. 
The girl said: - -"It's ever so nice--I wish I could draw." - -"It's easy," whispered Tom, "I'll learn you." - -"Oh, will you? When?" - -"At noon. Do you go home to dinner?" - -"I'll stay if you will." - -"Good--that's a whack. What's your name?" - -"Becky Thatcher. What's yours? Oh, I know. It's Thomas Sawyer." - -"That's the name they lick me by. I'm Tom when I'm good. You call me -Tom, will you?" - -"Yes." - -Now Tom began to scrawl something on the slate, hiding the words from -the girl. But she was not backward this time. She begged to see. Tom -said: - -"Oh, it ain't anything." - -"Yes it is." - -"No it ain't. You don't want to see." - -"Yes I do, indeed I do. Please let me." - -"You'll tell." - -"No I won't--deed and deed and double deed won't." - -"You won't tell anybody at all? Ever, as long as you live?" - -"No, I won't ever tell ANYbody. Now let me." - -"Oh, YOU don't want to see!" - -"Now that you treat me so, I WILL see." And she put her small hand -upon his and a little scuffle ensued, Tom pretending to resist in -earnest but letting his hand slip by degrees till these words were -revealed: "I LOVE YOU." - -"Oh, you bad thing!" And she hit his hand a smart rap, but reddened -and looked pleased, nevertheless. - -Just at this juncture the boy felt a slow, fateful grip closing on his -ear, and a steady lifting impulse. In that wise he was borne across the -house and deposited in his own seat, under a peppering fire of giggles -from the whole school. Then the master stood over him during a few -awful moments, and finally moved away to his throne without saying a -word. But although Tom's ear tingled, his heart was jubilant. - -As the school quieted down Tom made an honest effort to study, but the -turmoil within him was too great. 
In turn he took his place in the -reading class and made a botch of it; then in the geography class and -turned lakes into mountains, mountains into rivers, and rivers into -continents, till chaos was come again; then in the spelling class, and -got "turned down," by a succession of mere baby words, till he brought -up at the foot and yielded up the pewter medal which he had worn with -ostentation for months. - - - -CHAPTER VII - -THE harder Tom tried to fasten his mind on his book, the more his -ideas wandered. So at last, with a sigh and a yawn, he gave it up. It -seemed to him that the noon recess would never come. The air was -utterly dead. There was not a breath stirring. It was the sleepiest of -sleepy days. The drowsing murmur of the five and twenty studying -scholars soothed the soul like the spell that is in the murmur of bees. -Away off in the flaming sunshine, Cardiff Hill lifted its soft green -sides through a shimmering veil of heat, tinted with the purple of -distance; a few birds floated on lazy wing high in the air; no other -living thing was visible but some cows, and they were asleep. Tom's -heart ached to be free, or else to have something of interest to do to -pass the dreary time. His hand wandered into his pocket and his face -lit up with a glow of gratitude that was prayer, though he did not know -it. Then furtively the percussion-cap box came out. He released the -tick and put him on the long flat desk. The creature probably glowed -with a gratitude that amounted to prayer, too, at this moment, but it -was premature: for when he started thankfully to travel off, Tom turned -him aside with a pin and made him take a new direction. - -Tom's bosom friend sat next him, suffering just as Tom had been, and -now he was deeply and gratefully interested in this entertainment in an -instant. This bosom friend was Joe Harper. The two boys were sworn -friends all the week, and embattled enemies on Saturdays. 
Joe took a -pin out of his lapel and began to assist in exercising the prisoner. -The sport grew in interest momently. Soon Tom said that they were -interfering with each other, and neither getting the fullest benefit of -the tick. So he put Joe's slate on the desk and drew a line down the -middle of it from top to bottom. - -"Now," said he, "as long as he is on your side you can stir him up and -I'll let him alone; but if you let him get away and get on my side, -you're to leave him alone as long as I can keep him from crossing over." - -"All right, go ahead; start him up." - -The tick escaped from Tom, presently, and crossed the equator. Joe -harassed him awhile, and then he got away and crossed back again. This -change of base occurred often. While one boy was worrying the tick with -absorbing interest, the other would look on with interest as strong, -the two heads bowed together over the slate, and the two souls dead to -all things else. At last luck seemed to settle and abide with Joe. The -tick tried this, that, and the other course, and got as excited and as -anxious as the boys themselves, but time and again just as he would -have victory in his very grasp, so to speak, and Tom's fingers would be -twitching to begin, Joe's pin would deftly head him off, and keep -possession. At last Tom could stand it no longer. The temptation was -too strong. So he reached out and lent a hand with his pin. Joe was -angry in a moment. Said he: - -"Tom, you let him alone." - -"I only just want to stir him up a little, Joe." - -"No, sir, it ain't fair; you just let him alone." - -"Blame it, I ain't going to stir him much." - -"Let him alone, I tell you." - -"I won't!" - -"You shall--he's on my side of the line." - -"Look here, Joe Harper, whose is that tick?" - -"I don't care whose tick he is--he's on my side of the line, and you -sha'n't touch him." - -"Well, I'll just bet I will, though. He's my tick and I'll do what I -blame please with him, or die!" 
- -A tremendous whack came down on Tom's shoulders, and its duplicate on -Joe's; and for the space of two minutes the dust continued to fly from -the two jackets and the whole school to enjoy it. The boys had been too -absorbed to notice the hush that had stolen upon the school awhile -before when the master came tiptoeing down the room and stood over -them. He had contemplated a good part of the performance before he -contributed his bit of variety to it. - -When school broke up at noon, Tom flew to Becky Thatcher, and -whispered in her ear: - -"Put on your bonnet and let on you're going home; and when you get to -the corner, give the rest of 'em the slip, and turn down through the -lane and come back. I'll go the other way and come it over 'em the same -way." - -So the one went off with one group of scholars, and the other with -another. In a little while the two met at the bottom of the lane, and -when they reached the school they had it all to themselves. Then they -sat together, with a slate before them, and Tom gave Becky the pencil -and held her hand in his, guiding it, and so created another surprising -house. When the interest in art began to wane, the two fell to talking. -Tom was swimming in bliss. He said: - -"Do you love rats?" - -"No! I hate them!" - -"Well, I do, too--LIVE ones. But I mean dead ones, to swing round your -head with a string." - -"No, I don't care for rats much, anyway. What I like is chewing-gum." - -"Oh, I should say so! I wish I had some now." - -"Do you? I've got some. I'll let you chew it awhile, but you must give -it back to me." - -That was agreeable, so they chewed it turn about, and dangled their -legs against the bench in excess of contentment. - -"Was you ever at a circus?" said Tom. - -"Yes, and my pa's going to take me again some time, if I'm good." - -"I been to the circus three or four times--lots of times. Church ain't -shucks to a circus. There's things going on at a circus all the time. 
-I'm going to be a clown in a circus when I grow up." - -"Oh, are you! That will be nice. They're so lovely, all spotted up." - -"Yes, that's so. And they get slathers of money--most a dollar a day, -Ben Rogers says. Say, Becky, was you ever engaged?" - -"What's that?" - -"Why, engaged to be married." - -"No." - -"Would you like to?" - -"I reckon so. I don't know. What is it like?" - -"Like? Why it ain't like anything. You only just tell a boy you won't -ever have anybody but him, ever ever ever, and then you kiss and that's -all. Anybody can do it." - -"Kiss? What do you kiss for?" - -"Why, that, you know, is to--well, they always do that." - -"Everybody?" - -"Why, yes, everybody that's in love with each other. Do you remember -what I wrote on the slate?" - -"Ye--yes." - -"What was it?" - -"I sha'n't tell you." - -"Shall I tell YOU?" - -"Ye--yes--but some other time." - -"No, now." - -"No, not now--to-morrow." - -"Oh, no, NOW. Please, Becky--I'll whisper it, I'll whisper it ever so -easy." - -Becky hesitating, Tom took silence for consent, and passed his arm -about her waist and whispered the tale ever so softly, with his mouth -close to her ear. And then he added: - -"Now you whisper it to me--just the same." - -She resisted, for a while, and then said: - -"You turn your face away so you can't see, and then I will. But you -mustn't ever tell anybody--WILL you, Tom? Now you won't, WILL you?" - -"No, indeed, indeed I won't. Now, Becky." - -He turned his face away. She bent timidly around till her breath -stirred his curls and whispered, "I--love--you!" - -Then she sprang away and ran around and around the desks and benches, -with Tom after her, and took refuge in a corner at last, with her -little white apron to her face. Tom clasped her about her neck and -pleaded: - -"Now, Becky, it's all done--all over but the kiss. Don't you be afraid -of that--it ain't anything at all. Please, Becky." And he tugged at her -apron and the hands. 
- -By and by she gave up, and let her hands drop; her face, all glowing -with the struggle, came up and submitted. Tom kissed the red lips and -said: - -"Now it's all done, Becky. And always after this, you know, you ain't -ever to love anybody but me, and you ain't ever to marry anybody but -me, ever never and forever. Will you?" - -"No, I'll never love anybody but you, Tom, and I'll never marry -anybody but you--and you ain't to ever marry anybody but me, either." - -"Certainly. Of course. That's PART of it. And always coming to school -or when we're going home, you're to walk with me, when there ain't -anybody looking--and you choose me and I choose you at parties, because -that's the way you do when you're engaged." - -"It's so nice. I never heard of it before." - -"Oh, it's ever so gay! Why, me and Amy Lawrence--" - -The big eyes told Tom his blunder and he stopped, confused. - -"Oh, Tom! Then I ain't the first you've ever been engaged to!" - -The child began to cry. Tom said: - -"Oh, don't cry, Becky, I don't care for her any more." - -"Yes, you do, Tom--you know you do." - -Tom tried to put his arm about her neck, but she pushed him away and -turned her face to the wall, and went on crying. Tom tried again, with -soothing words in his mouth, and was repulsed again. Then his pride was -up, and he strode away and went outside. He stood about, restless and -uneasy, for a while, glancing at the door, every now and then, hoping -she would repent and come to find him. But she did not. Then he began -to feel badly and fear that he was in the wrong. It was a hard struggle -with him to make new advances, now, but he nerved himself to it and -entered. She was still standing back there in the corner, sobbing, with -her face to the wall. Tom's heart smote him. He went to her and stood a -moment, not knowing exactly how to proceed. Then he said hesitatingly: - -"Becky, I--I don't care for anybody but you." - -No reply--but sobs. - -"Becky"--pleadingly. 
"Becky, won't you say something?" - -More sobs. - -Tom got out his chiefest jewel, a brass knob from the top of an -andiron, and passed it around her so that she could see it, and said: - -"Please, Becky, won't you take it?" - -She struck it to the floor. Then Tom marched out of the house and over -the hills and far away, to return to school no more that day. Presently -Becky began to suspect. She ran to the door; he was not in sight; she -flew around to the play-yard; he was not there. Then she called: - -"Tom! Come back, Tom!" - -She listened intently, but there was no answer. She had no companions -but silence and loneliness. So she sat down to cry again and upbraid -herself; and by this time the scholars began to gather again, and she -had to hide her griefs and still her broken heart and take up the cross -of a long, dreary, aching afternoon, with none among the strangers -about her to exchange sorrows with. - - - -CHAPTER VIII - -TOM dodged hither and thither through lanes until he was well out of -the track of returning scholars, and then fell into a moody jog. He -crossed a small "branch" two or three times, because of a prevailing -juvenile superstition that to cross water baffled pursuit. Half an hour -later he was disappearing behind the Douglas mansion on the summit of -Cardiff Hill, and the schoolhouse was hardly distinguishable away off -in the valley behind him. He entered a dense wood, picked his pathless -way to the centre of it, and sat down on a mossy spot under a spreading -oak. There was not even a zephyr stirring; the dead noonday heat had -even stilled the songs of the birds; nature lay in a trance that was -broken by no sound but the occasional far-off hammering of a -woodpecker, and this seemed to render the pervading silence and sense -of loneliness the more profound. The boy's soul was steeped in -melancholy; his feelings were in happy accord with his surroundings. 
He -sat long with his elbows on his knees and his chin in his hands, -meditating. It seemed to him that life was but a trouble, at best, and -he more than half envied Jimmy Hodges, so lately released; it must be -very peaceful, he thought, to lie and slumber and dream forever and -ever, with the wind whispering through the trees and caressing the -grass and the flowers over the grave, and nothing to bother and grieve -about, ever any more. If he only had a clean Sunday-school record he -could be willing to go, and be done with it all. Now as to this girl. -What had he done? Nothing. He had meant the best in the world, and been -treated like a dog--like a very dog. She would be sorry some day--maybe -when it was too late. Ah, if he could only die TEMPORARILY! - -But the elastic heart of youth cannot be compressed into one -constrained shape long at a time. Tom presently began to drift -insensibly back into the concerns of this life again. What if he turned -his back, now, and disappeared mysteriously? What if he went away--ever -so far away, into unknown countries beyond the seas--and never came -back any more! How would she feel then! The idea of being a clown -recurred to him now, only to fill him with disgust. For frivolity and -jokes and spotted tights were an offense, when they intruded themselves -upon a spirit that was exalted into the vague august realm of the -romantic. No, he would be a soldier, and return after long years, all -war-worn and illustrious. No--better still, he would join the Indians, -and hunt buffaloes and go on the warpath in the mountain ranges and the -trackless great plains of the Far West, and away in the future come -back a great chief, bristling with feathers, hideous with paint, and -prance into Sunday-school, some drowsy summer morning, with a -bloodcurdling war-whoop, and sear the eyeballs of all his companions -with unappeasable envy. But no, there was something gaudier even than -this. He would be a pirate! That was it! 
NOW his future lay plain -before him, and glowing with unimaginable splendor. How his name would -fill the world, and make people shudder! How gloriously he would go -plowing the dancing seas, in his long, low, black-hulled racer, the -Spirit of the Storm, with his grisly flag flying at the fore! And at -the zenith of his fame, how he would suddenly appear at the old village -and stalk into church, brown and weather-beaten, in his black velvet -doublet and trunks, his great jack-boots, his crimson sash, his belt -bristling with horse-pistols, his crime-rusted cutlass at his side, his -slouch hat with waving plumes, his black flag unfurled, with the skull -and crossbones on it, and hear with swelling ecstasy the whisperings, -"It's Tom Sawyer the Pirate!--the Black Avenger of the Spanish Main!" - -Yes, it was settled; his career was determined. He would run away from -home and enter upon it. He would start the very next morning. Therefore -he must now begin to get ready. He would collect his resources -together. He went to a rotten log near at hand and began to dig under -one end of it with his Barlow knife. He soon struck wood that sounded -hollow. He put his hand there and uttered this incantation impressively: - -"What hasn't come here, come! What's here, stay here!" - -Then he scraped away the dirt, and exposed a pine shingle. He took it -up and disclosed a shapely little treasure-house whose bottom and sides -were of shingles. In it lay a marble. Tom's astonishment was boundless! -He scratched his head with a perplexed air, and said: - -"Well, that beats anything!" - -Then he tossed the marble away pettishly, and stood cogitating. The -truth was, that a superstition of his had failed, here, which he and -all his comrades had always looked upon as infallible. 
If you buried a -marble with certain necessary incantations, and left it alone a -fortnight, and then opened the place with the incantation he had just -used, you would find that all the marbles you had ever lost had -gathered themselves together there, meantime, no matter how widely they -had been separated. But now, this thing had actually and unquestionably -failed. Tom's whole structure of faith was shaken to its foundations. -He had many a time heard of this thing succeeding but never of its -failing before. It did not occur to him that he had tried it several -times before, himself, but could never find the hiding-places -afterward. He puzzled over the matter some time, and finally decided -that some witch had interfered and broken the charm. He thought he -would satisfy himself on that point; so he searched around till he -found a small sandy spot with a little funnel-shaped depression in it. -He laid himself down and put his mouth close to this depression and -called-- - -"Doodle-bug, doodle-bug, tell me what I want to know! Doodle-bug, -doodle-bug, tell me what I want to know!" - -The sand began to work, and presently a small black bug appeared for a -second and then darted under again in a fright. - -"He dasn't tell! So it WAS a witch that done it. I just knowed it." - -He well knew the futility of trying to contend against witches, so he -gave up discouraged. But it occurred to him that he might as well have -the marble he had just thrown away, and therefore he went and made a -patient search for it. But he could not find it. Now he went back to -his treasure-house and carefully placed himself just as he had been -standing when he tossed the marble away; then he took another marble -from his pocket and tossed it in the same way, saying: - -"Brother, go find your brother!" - -He watched where it stopped, and went there and looked. But it must -have fallen short or gone too far; so he tried twice more. The last -repetition was successful. 
The two marbles lay within a foot of each -other. - -Just here the blast of a toy tin trumpet came faintly down the green -aisles of the forest. Tom flung off his jacket and trousers, turned a -suspender into a belt, raked away some brush behind the rotten log, -disclosing a rude bow and arrow, a lath sword and a tin trumpet, and in -a moment had seized these things and bounded away, barelegged, with -fluttering shirt. He presently halted under a great elm, blew an -answering blast, and then began to tiptoe and look warily out, this way -and that. He said cautiously--to an imaginary company: - -"Hold, my merry men! Keep hid till I blow." - -Now appeared Joe Harper, as airily clad and elaborately armed as Tom. -Tom called: - -"Hold! Who comes here into Sherwood Forest without my pass?" - -"Guy of Guisborne wants no man's pass. Who art thou that--that--" - -"Dares to hold such language," said Tom, prompting--for they talked -"by the book," from memory. - -"Who art thou that dares to hold such language?" - -"I, indeed! I am Robin Hood, as thy caitiff carcase soon shall know." - -"Then art thou indeed that famous outlaw? Right gladly will I dispute -with thee the passes of the merry wood. Have at thee!" - -They took their lath swords, dumped their other traps on the ground, -struck a fencing attitude, foot to foot, and began a grave, careful -combat, "two up and two down." Presently Tom said: - -"Now, if you've got the hang, go it lively!" - -So they "went it lively," panting and perspiring with the work. By and -by Tom shouted: - -"Fall! fall! Why don't you fall?" - -"I sha'n't! Why don't you fall yourself? You're getting the worst of -it." - -"Why, that ain't anything. I can't fall; that ain't the way it is in -the book. The book says, 'Then with one back-handed stroke he slew poor -Guy of Guisborne.' You're to turn around and let me hit you in the -back." - -There was no getting around the authorities, so Joe turned, received -the whack and fell. 
- -"Now," said Joe, getting up, "you got to let me kill YOU. That's fair." - -"Why, I can't do that, it ain't in the book." - -"Well, it's blamed mean--that's all." - -"Well, say, Joe, you can be Friar Tuck or Much the miller's son, and -lam me with a quarter-staff; or I'll be the Sheriff of Nottingham and -you be Robin Hood a little while and kill me." - -This was satisfactory, and so these adventures were carried out. Then -Tom became Robin Hood again, and was allowed by the treacherous nun to -bleed his strength away through his neglected wound. And at last Joe, -representing a whole tribe of weeping outlaws, dragged him sadly forth, -gave his bow into his feeble hands, and Tom said, "Where this arrow -falls, there bury poor Robin Hood under the greenwood tree." Then he -shot the arrow and fell back and would have died, but he lit on a -nettle and sprang up too gaily for a corpse. - -The boys dressed themselves, hid their accoutrements, and went off -grieving that there were no outlaws any more, and wondering what modern -civilization could claim to have done to compensate for their loss. -They said they would rather be outlaws a year in Sherwood Forest than -President of the United States forever. - - - -CHAPTER IX - -AT half-past nine, that night, Tom and Sid were sent to bed, as usual. -They said their prayers, and Sid was soon asleep. Tom lay awake and -waited, in restless impatience. When it seemed to him that it must be -nearly daylight, he heard the clock strike ten! This was despair. He -would have tossed and fidgeted, as his nerves demanded, but he was -afraid he might wake Sid. So he lay still, and stared up into the dark. -Everything was dismally still. By and by, out of the stillness, little, -scarcely perceptible noises began to emphasize themselves. The ticking -of the clock began to bring itself into notice. Old beams began to -crack mysteriously. The stairs creaked faintly. Evidently spirits were -abroad. 
A measured, muffled snore issued from Aunt Polly's chamber. And -now the tiresome chirping of a cricket that no human ingenuity could -locate, began. Next the ghastly ticking of a deathwatch in the wall at -the bed's head made Tom shudder--it meant that somebody's days were -numbered. Then the howl of a far-off dog rose on the night air, and was -answered by a fainter howl from a remoter distance. Tom was in an -agony. At last he was satisfied that time had ceased and eternity -begun; he began to doze, in spite of himself; the clock chimed eleven, -but he did not hear it. And then there came, mingling with his -half-formed dreams, a most melancholy caterwauling. The raising of a -neighboring window disturbed him. A cry of "Scat! you devil!" and the -crash of an empty bottle against the back of his aunt's woodshed -brought him wide awake, and a single minute later he was dressed and -out of the window and creeping along the roof of the "ell" on all -fours. He "meow'd" with caution once or twice, as he went; then jumped -to the roof of the woodshed and thence to the ground. Huckleberry Finn -was there, with his dead cat. The boys moved off and disappeared in the -gloom. At the end of half an hour they were wading through the tall -grass of the graveyard. - -It was a graveyard of the old-fashioned Western kind. It was on a -hill, about a mile and a half from the village. It had a crazy board -fence around it, which leaned inward in places, and outward the rest of -the time, but stood upright nowhere. Grass and weeds grew rank over the -whole cemetery. All the old graves were sunken in, there was not a -tombstone on the place; round-topped, worm-eaten boards staggered over -the graves, leaning for support and finding none. "Sacred to the memory -of" So-and-So had been painted on them once, but it could no longer -have been read, on the most of them, now, even if there had been light. 
- -A faint wind moaned through the trees, and Tom feared it might be the -spirits of the dead, complaining at being disturbed. The boys talked -little, and only under their breath, for the time and the place and the -pervading solemnity and silence oppressed their spirits. They found the -sharp new heap they were seeking, and ensconced themselves within the -protection of three great elms that grew in a bunch within a few feet -of the grave. - -Then they waited in silence for what seemed a long time. The hooting -of a distant owl was all the sound that troubled the dead stillness. -Tom's reflections grew oppressive. He must force some talk. So he said -in a whisper: - -"Hucky, do you believe the dead people like it for us to be here?" - -Huckleberry whispered: - -"I wisht I knowed. It's awful solemn like, AIN'T it?" - -"I bet it is." - -There was a considerable pause, while the boys canvassed this matter -inwardly. Then Tom whispered: - -"Say, Hucky--do you reckon Hoss Williams hears us talking?" - -"O' course he does. Least his sperrit does." - -Tom, after a pause: - -"I wish I'd said Mister Williams. But I never meant any harm. -Everybody calls him Hoss." - -"A body can't be too partic'lar how they talk 'bout these-yer dead -people, Tom." - -This was a damper, and conversation died again. - -Presently Tom seized his comrade's arm and said: - -"Sh!" - -"What is it, Tom?" And the two clung together with beating hearts. - -"Sh! There 'tis again! Didn't you hear it?" - -"I--" - -"There! Now you hear it." - -"Lord, Tom, they're coming! They're coming, sure. What'll we do?" - -"I dono. Think they'll see us?" - -"Oh, Tom, they can see in the dark, same as cats. I wisht I hadn't -come." - -"Oh, don't be afeard. I don't believe they'll bother us. We ain't -doing any harm. If we keep perfectly still, maybe they won't notice us -at all." - -"I'll try to, Tom, but, Lord, I'm all of a shiver." - -"Listen!" - -The boys bent their heads together and scarcely breathed. 
A muffled -sound of voices floated up from the far end of the graveyard. - -"Look! See there!" whispered Tom. "What is it?" - -"It's devil-fire. Oh, Tom, this is awful." - -Some vague figures approached through the gloom, swinging an -old-fashioned tin lantern that freckled the ground with innumerable -little spangles of light. Presently Huckleberry whispered with a -shudder: - -"It's the devils sure enough. Three of 'em! Lordy, Tom, we're goners! -Can you pray?" - -"I'll try, but don't you be afeard. They ain't going to hurt us. 'Now -I lay me down to sleep, I--'" - -"Sh!" - -"What is it, Huck?" - -"They're HUMANS! One of 'em is, anyway. One of 'em's old Muff Potter's -voice." - -"No--'tain't so, is it?" - -"I bet I know it. Don't you stir nor budge. He ain't sharp enough to -notice us. Drunk, the same as usual, likely--blamed old rip!" - -"All right, I'll keep still. Now they're stuck. Can't find it. Here -they come again. Now they're hot. Cold again. Hot again. Red hot! -They're p'inted right, this time. Say, Huck, I know another o' them -voices; it's Injun Joe." - -"That's so--that murderin' half-breed! I'd druther they was devils a -dern sight. What kin they be up to?" - -The whisper died wholly out, now, for the three men had reached the -grave and stood within a few feet of the boys' hiding-place. - -"Here it is," said the third voice; and the owner of it held the -lantern up and revealed the face of young Doctor Robinson. - -Potter and Injun Joe were carrying a handbarrow with a rope and a -couple of shovels on it. They cast down their load and began to open -the grave. The doctor put the lantern at the head of the grave and came -and sat down with his back against one of the elm trees. He was so -close the boys could have touched him. - -"Hurry, men!" he said, in a low voice; "the moon might come out at any -moment." - -They growled a response and went on digging. 
For some time there was -no noise but the grating sound of the spades discharging their freight -of mould and gravel. It was very monotonous. Finally a spade struck -upon the coffin with a dull woody accent, and within another minute or -two the men had hoisted it out on the ground. They pried off the lid -with their shovels, got out the body and dumped it rudely on the -ground. The moon drifted from behind the clouds and exposed the pallid -face. The barrow was got ready and the corpse placed on it, covered -with a blanket, and bound to its place with the rope. Potter took out a -large spring-knife and cut off the dangling end of the rope and then -said: - -"Now the cussed thing's ready, Sawbones, and you'll just out with -another five, or here she stays." - -"That's the talk!" said Injun Joe. - -"Look here, what does this mean?" said the doctor. "You required your -pay in advance, and I've paid you." - -"Yes, and you done more than that," said Injun Joe, approaching the -doctor, who was now standing. "Five years ago you drove me away from -your father's kitchen one night, when I come to ask for something to -eat, and you said I warn't there for any good; and when I swore I'd get -even with you if it took a hundred years, your father had me jailed for -a vagrant. Did you think I'd forget? The Injun blood ain't in me for -nothing. And now I've GOT you, and you got to SETTLE, you know!" - -He was threatening the doctor, with his fist in his face, by this -time. The doctor struck out suddenly and stretched the ruffian on the -ground. Potter dropped his knife, and exclaimed: - -"Here, now, don't you hit my pard!" and the next moment he had -grappled with the doctor and the two were struggling with might and -main, trampling the grass and tearing the ground with their heels. -Injun Joe sprang to his feet, his eyes flaming with passion, snatched -up Potter's knife, and went creeping, catlike and stooping, round and -round about the combatants, seeking an opportunity. 
All at once the -doctor flung himself free, seized the heavy headboard of Williams' -grave and felled Potter to the earth with it--and in the same instant -the half-breed saw his chance and drove the knife to the hilt in the -young man's breast. He reeled and fell partly upon Potter, flooding him -with his blood, and in the same moment the clouds blotted out the -dreadful spectacle and the two frightened boys went speeding away in -the dark. - -Presently, when the moon emerged again, Injun Joe was standing over -the two forms, contemplating them. The doctor murmured inarticulately, -gave a long gasp or two and was still. The half-breed muttered: - -"THAT score is settled--damn you." - -Then he robbed the body. After which he put the fatal knife in -Potter's open right hand, and sat down on the dismantled coffin. Three ---four--five minutes passed, and then Potter began to stir and moan. His -hand closed upon the knife; he raised it, glanced at it, and let it -fall, with a shudder. Then he sat up, pushing the body from him, and -gazed at it, and then around him, confusedly. His eyes met Joe's. - -"Lord, how is this, Joe?" he said. - -"It's a dirty business," said Joe, without moving. - -"What did you do it for?" - -"I! I never done it!" - -"Look here! That kind of talk won't wash." - -Potter trembled and grew white. - -"I thought I'd got sober. I'd no business to drink to-night. But it's -in my head yet--worse'n when we started here. I'm all in a muddle; -can't recollect anything of it, hardly. Tell me, Joe--HONEST, now, old -feller--did I do it? Joe, I never meant to--'pon my soul and honor, I -never meant to, Joe. Tell me how it was, Joe. Oh, it's awful--and him -so young and promising." 
- -"Why, you two was scuffling, and he fetched you one with the headboard -and you fell flat; and then up you come, all reeling and staggering -like, and snatched the knife and jammed it into him, just as he fetched -you another awful clip--and here you've laid, as dead as a wedge til -now." - -"Oh, I didn't know what I was a-doing. I wish I may die this minute if -I did. It was all on account of the whiskey and the excitement, I -reckon. I never used a weepon in my life before, Joe. I've fought, but -never with weepons. They'll all say that. Joe, don't tell! Say you -won't tell, Joe--that's a good feller. I always liked you, Joe, and -stood up for you, too. Don't you remember? You WON'T tell, WILL you, -Joe?" And the poor creature dropped on his knees before the stolid -murderer, and clasped his appealing hands. - -"No, you've always been fair and square with me, Muff Potter, and I -won't go back on you. There, now, that's as fair as a man can say." - -"Oh, Joe, you're an angel. I'll bless you for this the longest day I -live." And Potter began to cry. - -"Come, now, that's enough of that. This ain't any time for blubbering. -You be off yonder way and I'll go this. Move, now, and don't leave any -tracks behind you." - -Potter started on a trot that quickly increased to a run. The -half-breed stood looking after him. He muttered: - -"If he's as much stunned with the lick and fuddled with the rum as he -had the look of being, he won't think of the knife till he's gone so -far he'll be afraid to come back after it to such a place by himself ---chicken-heart!" - -Two or three minutes later the murdered man, the blanketed corpse, the -lidless coffin, and the open grave were under no inspection but the -moon's. The stillness was complete again, too. - - - -CHAPTER X - -THE two boys flew on and on, toward the village, speechless with -horror. They glanced backward over their shoulders from time to time, -apprehensively, as if they feared they might be followed. 
Every stump -that started up in their path seemed a man and an enemy, and made them -catch their breath; and as they sped by some outlying cottages that lay -near the village, the barking of the aroused watch-dogs seemed to give -wings to their feet. - -"If we can only get to the old tannery before we break down!" -whispered Tom, in short catches between breaths. "I can't stand it much -longer." - -Huckleberry's hard pantings were his only reply, and the boys fixed -their eyes on the goal of their hopes and bent to their work to win it. -They gained steadily on it, and at last, breast to breast, they burst -through the open door and fell grateful and exhausted in the sheltering -shadows beyond. By and by their pulses slowed down, and Tom whispered: - -"Huckleberry, what do you reckon'll come of this?" - -"If Doctor Robinson dies, I reckon hanging'll come of it." - -"Do you though?" - -"Why, I KNOW it, Tom." - -Tom thought a while, then he said: - -"Who'll tell? We?" - -"What are you talking about? S'pose something happened and Injun Joe -DIDN'T hang? Why, he'd kill us some time or other, just as dead sure as -we're a laying here." - -"That's just what I was thinking to myself, Huck." - -"If anybody tells, let Muff Potter do it, if he's fool enough. He's -generally drunk enough." - -Tom said nothing--went on thinking. Presently he whispered: - -"Huck, Muff Potter don't know it. How can he tell?" - -"What's the reason he don't know it?" - -"Because he'd just got that whack when Injun Joe done it. D'you reckon -he could see anything? D'you reckon he knowed anything?" - -"By hokey, that's so, Tom!" - -"And besides, look-a-here--maybe that whack done for HIM!" - -"No, 'taint likely, Tom. He had liquor in him; I could see that; and -besides, he always has. Well, when pap's full, you might take and belt -him over the head with a church and you couldn't phase him. He says so, -his own self. So it's the same with Muff Potter, of course. 
But if a -man was dead sober, I reckon maybe that whack might fetch him; I dono." - -After another reflective silence, Tom said: - -"Hucky, you sure you can keep mum?" - -"Tom, we GOT to keep mum. You know that. That Injun devil wouldn't -make any more of drownding us than a couple of cats, if we was to -squeak 'bout this and they didn't hang him. Now, look-a-here, Tom, less -take and swear to one another--that's what we got to do--swear to keep -mum." - -"I'm agreed. It's the best thing. Would you just hold hands and swear -that we--" - -"Oh no, that wouldn't do for this. That's good enough for little -rubbishy common things--specially with gals, cuz THEY go back on you -anyway, and blab if they get in a huff--but there orter be writing -'bout a big thing like this. And blood." - -Tom's whole being applauded this idea. It was deep, and dark, and -awful; the hour, the circumstances, the surroundings, were in keeping -with it. He picked up a clean pine shingle that lay in the moonlight, -took a little fragment of "red keel" out of his pocket, got the moon on -his work, and painfully scrawled these lines, emphasizing each slow -down-stroke by clamping his tongue between his teeth, and letting up -the pressure on the up-strokes. [See next page.] - - "Huck Finn and - Tom Sawyer swears - they will keep mum - about This and They - wish They may Drop - down dead in Their - Tracks if They ever - Tell and Rot." - -Huckleberry was filled with admiration of Tom's facility in writing, -and the sublimity of his language. He at once took a pin from his lapel -and was going to prick his flesh, but Tom said: - -"Hold on! Don't do that. A pin's brass. It might have verdigrease on -it." - -"What's verdigrease?" - -"It's p'ison. That's what it is. You just swaller some of it once ---you'll see." - -So Tom unwound the thread from one of his needles, and each boy -pricked the ball of his thumb and squeezed out a drop of blood. 
In -time, after many squeezes, Tom managed to sign his initials, using the -ball of his little finger for a pen. Then he showed Huckleberry how to -make an H and an F, and the oath was complete. They buried the shingle -close to the wall, with some dismal ceremonies and incantations, and -the fetters that bound their tongues were considered to be locked and -the key thrown away. - -A figure crept stealthily through a break in the other end of the -ruined building, now, but they did not notice it. - -"Tom," whispered Huckleberry, "does this keep us from EVER telling ---ALWAYS?" - -"Of course it does. It don't make any difference WHAT happens, we got -to keep mum. We'd drop down dead--don't YOU know that?" - -"Yes, I reckon that's so." - -They continued to whisper for some little time. Presently a dog set up -a long, lugubrious howl just outside--within ten feet of them. The boys -clasped each other suddenly, in an agony of fright. - -"Which of us does he mean?" gasped Huckleberry. - -"I dono--peep through the crack. Quick!" - -"No, YOU, Tom!" - -"I can't--I can't DO it, Huck!" - -"Please, Tom. There 'tis again!" - -"Oh, lordy, I'm thankful!" whispered Tom. "I know his voice. It's Bull -Harbison." * - -[* If Mr. Harbison owned a slave named Bull, Tom would have spoken of -him as "Harbison's Bull," but a son or a dog of that name was "Bull -Harbison."] - -"Oh, that's good--I tell you, Tom, I was most scared to death; I'd a -bet anything it was a STRAY dog." - -The dog howled again. The boys' hearts sank once more. - -"Oh, my! that ain't no Bull Harbison!" whispered Huckleberry. "DO, Tom!" - -Tom, quaking with fear, yielded, and put his eye to the crack. His -whisper was hardly audible when he said: - -"Oh, Huck, IT S A STRAY DOG!" - -"Quick, Tom, quick! Who does he mean?" - -"Huck, he must mean us both--we're right together." - -"Oh, Tom, I reckon we're goners. I reckon there ain't no mistake 'bout -where I'LL go to. I been so wicked." - -"Dad fetch it! 
This comes of playing hookey and doing everything a -feller's told NOT to do. I might a been good, like Sid, if I'd a tried ---but no, I wouldn't, of course. But if ever I get off this time, I lay -I'll just WALLER in Sunday-schools!" And Tom began to snuffle a little. - -"YOU bad!" and Huckleberry began to snuffle too. "Consound it, Tom -Sawyer, you're just old pie, 'longside o' what I am. Oh, LORDY, lordy, -lordy, I wisht I only had half your chance." - -Tom choked off and whispered: - -"Look, Hucky, look! He's got his BACK to us!" - -Hucky looked, with joy in his heart. - -"Well, he has, by jingoes! Did he before?" - -"Yes, he did. But I, like a fool, never thought. Oh, this is bully, -you know. NOW who can he mean?" - -The howling stopped. Tom pricked up his ears. - -"Sh! What's that?" he whispered. - -"Sounds like--like hogs grunting. No--it's somebody snoring, Tom." - -"That IS it! Where 'bouts is it, Huck?" - -"I bleeve it's down at 'tother end. Sounds so, anyway. Pap used to -sleep there, sometimes, 'long with the hogs, but laws bless you, he -just lifts things when HE snores. Besides, I reckon he ain't ever -coming back to this town any more." - -The spirit of adventure rose in the boys' souls once more. - -"Hucky, do you das't to go if I lead?" - -"I don't like to, much. Tom, s'pose it's Injun Joe!" - -Tom quailed. But presently the temptation rose up strong again and the -boys agreed to try, with the understanding that they would take to -their heels if the snoring stopped. So they went tiptoeing stealthily -down, the one behind the other. When they had got to within five steps -of the snorer, Tom stepped on a stick, and it broke with a sharp snap. -The man moaned, writhed a little, and his face came into the moonlight. -It was Muff Potter. The boys' hearts had stood still, and their hopes -too, when the man moved, but their fears passed away now. 
They tiptoed -out, through the broken weather-boarding, and stopped at a little -distance to exchange a parting word. That long, lugubrious howl rose on -the night air again! They turned and saw the strange dog standing -within a few feet of where Potter was lying, and FACING Potter, with -his nose pointing heavenward. - -"Oh, geeminy, it's HIM!" exclaimed both boys, in a breath. - -"Say, Tom--they say a stray dog come howling around Johnny Miller's -house, 'bout midnight, as much as two weeks ago; and a whippoorwill -come in and lit on the banisters and sung, the very same evening; and -there ain't anybody dead there yet." - -"Well, I know that. And suppose there ain't. Didn't Gracie Miller fall -in the kitchen fire and burn herself terrible the very next Saturday?" - -"Yes, but she ain't DEAD. And what's more, she's getting better, too." - -"All right, you wait and see. She's a goner, just as dead sure as Muff -Potter's a goner. That's what the niggers say, and they know all about -these kind of things, Huck." - -Then they separated, cogitating. When Tom crept in at his bedroom -window the night was almost spent. He undressed with excessive caution, -and fell asleep congratulating himself that nobody knew of his -escapade. He was not aware that the gently-snoring Sid was awake, and -had been so for an hour. - -When Tom awoke, Sid was dressed and gone. There was a late look in the -light, a late sense in the atmosphere. He was startled. Why had he not -been called--persecuted till he was up, as usual? The thought filled -him with bodings. Within five minutes he was dressed and down-stairs, -feeling sore and drowsy. The family were still at table, but they had -finished breakfast. There was no voice of rebuke; but there were -averted eyes; there was a silence and an air of solemnity that struck a -chill to the culprit's heart. 
He sat down and tried to seem gay, but it -was up-hill work; it roused no smile, no response, and he lapsed into -silence and let his heart sink down to the depths. - -After breakfast his aunt took him aside, and Tom almost brightened in -the hope that he was going to be flogged; but it was not so. His aunt -wept over him and asked him how he could go and break her old heart so; -and finally told him to go on, and ruin himself and bring her gray -hairs with sorrow to the grave, for it was no use for her to try any -more. This was worse than a thousand whippings, and Tom's heart was -sorer now than his body. He cried, he pleaded for forgiveness, promised -to reform over and over again, and then received his dismissal, feeling -that he had won but an imperfect forgiveness and established but a -feeble confidence. - -He left the presence too miserable to even feel revengeful toward Sid; -and so the latter's prompt retreat through the back gate was -unnecessary. He moped to school gloomy and sad, and took his flogging, -along with Joe Harper, for playing hookey the day before, with the air -of one whose heart was busy with heavier woes and wholly dead to -trifles. Then he betook himself to his seat, rested his elbows on his -desk and his jaws in his hands, and stared at the wall with the stony -stare of suffering that has reached the limit and can no further go. -His elbow was pressing against some hard substance. After a long time -he slowly and sadly changed his position, and took up this object with -a sigh. It was in a paper. He unrolled it. A long, lingering, colossal -sigh followed, and his heart broke. It was his brass andiron knob! - -This final feather broke the camel's back. - - - -CHAPTER XI - -CLOSE upon the hour of noon the whole village was suddenly electrified -with the ghastly news. No need of the as yet undreamed-of telegraph; -the tale flew from man to man, from group to group, from house to -house, with little less than telegraphic speed. 
Of course the -schoolmaster gave holiday for that afternoon; the town would have -thought strangely of him if he had not. - -A gory knife had been found close to the murdered man, and it had been -recognized by somebody as belonging to Muff Potter--so the story ran. -And it was said that a belated citizen had come upon Potter washing -himself in the "branch" about one or two o'clock in the morning, and -that Potter had at once sneaked off--suspicious circumstances, -especially the washing which was not a habit with Potter. It was also -said that the town had been ransacked for this "murderer" (the public -are not slow in the matter of sifting evidence and arriving at a -verdict), but that he could not be found. Horsemen had departed down -all the roads in every direction, and the Sheriff "was confident" that -he would be captured before night. - -All the town was drifting toward the graveyard. Tom's heartbreak -vanished and he joined the procession, not because he would not a -thousand times rather go anywhere else, but because an awful, -unaccountable fascination drew him on. Arrived at the dreadful place, -he wormed his small body through the crowd and saw the dismal -spectacle. It seemed to him an age since he was there before. Somebody -pinched his arm. He turned, and his eyes met Huckleberry's. Then both -looked elsewhere at once, and wondered if anybody had noticed anything -in their mutual glance. But everybody was talking, and intent upon the -grisly spectacle before them. - -"Poor fellow!" "Poor young fellow!" "This ought to be a lesson to -grave robbers!" "Muff Potter'll hang for this if they catch him!" This -was the drift of remark; and the minister said, "It was a judgment; His -hand is here." - -Now Tom shivered from head to heel; for his eye fell upon the stolid -face of Injun Joe. At this moment the crowd began to sway and struggle, -and voices shouted, "It's him! it's him! he's coming himself!" - -"Who? Who?" from twenty voices. - -"Muff Potter!" 
- -"Hallo, he's stopped!--Look out, he's turning! Don't let him get away!" - -People in the branches of the trees over Tom's head said he wasn't -trying to get away--he only looked doubtful and perplexed. - -"Infernal impudence!" said a bystander; "wanted to come and take a -quiet look at his work, I reckon--didn't expect any company." - -The crowd fell apart, now, and the Sheriff came through, -ostentatiously leading Potter by the arm. The poor fellow's face was -haggard, and his eyes showed the fear that was upon him. When he stood -before the murdered man, he shook as with a palsy, and he put his face -in his hands and burst into tears. - -"I didn't do it, friends," he sobbed; "'pon my word and honor I never -done it." - -"Who's accused you?" shouted a voice. - -This shot seemed to carry home. Potter lifted his face and looked -around him with a pathetic hopelessness in his eyes. He saw Injun Joe, -and exclaimed: - -"Oh, Injun Joe, you promised me you'd never--" - -"Is that your knife?" and it was thrust before him by the Sheriff. - -Potter would have fallen if they had not caught him and eased him to -the ground. Then he said: - -"Something told me 't if I didn't come back and get--" He shuddered; -then waved his nerveless hand with a vanquished gesture and said, "Tell -'em, Joe, tell 'em--it ain't any use any more." - -Then Huckleberry and Tom stood dumb and staring, and heard the -stony-hearted liar reel off his serene statement, they expecting every -moment that the clear sky would deliver God's lightnings upon his head, -and wondering to see how long the stroke was delayed. And when he had -finished and still stood alive and whole, their wavering impulse to -break their oath and save the poor betrayed prisoner's life faded and -vanished away, for plainly this miscreant had sold himself to Satan and -it would be fatal to meddle with the property of such a power as that. - -"Why didn't you leave? What did you want to come here for?" somebody -said. 
- -"I couldn't help it--I couldn't help it," Potter moaned. "I wanted to -run away, but I couldn't seem to come anywhere but here." And he fell -to sobbing again. - -Injun Joe repeated his statement, just as calmly, a few minutes -afterward on the inquest, under oath; and the boys, seeing that the -lightnings were still withheld, were confirmed in their belief that Joe -had sold himself to the devil. He was now become, to them, the most -balefully interesting object they had ever looked upon, and they could -not take their fascinated eyes from his face. - -They inwardly resolved to watch him nights, when opportunity should -offer, in the hope of getting a glimpse of his dread master. - -Injun Joe helped to raise the body of the murdered man and put it in a -wagon for removal; and it was whispered through the shuddering crowd -that the wound bled a little! The boys thought that this happy -circumstance would turn suspicion in the right direction; but they were -disappointed, for more than one villager remarked: - -"It was within three feet of Muff Potter when it done it." - -Tom's fearful secret and gnawing conscience disturbed his sleep for as -much as a week after this; and at breakfast one morning Sid said: - -"Tom, you pitch around and talk in your sleep so much that you keep me -awake half the time." - -Tom blanched and dropped his eyes. - -"It's a bad sign," said Aunt Polly, gravely. "What you got on your -mind, Tom?" - -"Nothing. Nothing 't I know of." But the boy's hand shook so that he -spilled his coffee. - -"And you do talk such stuff," Sid said. "Last night you said, 'It's -blood, it's blood, that's what it is!' You said that over and over. And -you said, 'Don't torment me so--I'll tell!' Tell WHAT? What is it -you'll tell?" - -Everything was swimming before Tom. There is no telling what might -have happened, now, but luckily the concern passed out of Aunt Polly's -face and she came to Tom's relief without knowing it. She said: - -"Sho! 
It's that dreadful murder. I dream about it most every night -myself. Sometimes I dream it's me that done it." - -Mary said she had been affected much the same way. Sid seemed -satisfied. Tom got out of the presence as quick as he plausibly could, -and after that he complained of toothache for a week, and tied up his -jaws every night. He never knew that Sid lay nightly watching, and -frequently slipped the bandage free and then leaned on his elbow -listening a good while at a time, and afterward slipped the bandage -back to its place again. Tom's distress of mind wore off gradually and -the toothache grew irksome and was discarded. If Sid really managed to -make anything out of Tom's disjointed mutterings, he kept it to himself. - -It seemed to Tom that his schoolmates never would get done holding -inquests on dead cats, and thus keeping his trouble present to his -mind. Sid noticed that Tom never was coroner at one of these inquiries, -though it had been his habit to take the lead in all new enterprises; -he noticed, too, that Tom never acted as a witness--and that was -strange; and Sid did not overlook the fact that Tom even showed a -marked aversion to these inquests, and always avoided them when he -could. Sid marvelled, but said nothing. However, even inquests went out -of vogue at last, and ceased to torture Tom's conscience. - -Every day or two, during this time of sorrow, Tom watched his -opportunity and went to the little grated jail-window and smuggled such -small comforts through to the "murderer" as he could get hold of. The -jail was a trifling little brick den that stood in a marsh at the edge -of the village, and no guards were afforded for it; indeed, it was -seldom occupied. These offerings greatly helped to ease Tom's -conscience. 
- -The villagers had a strong desire to tar-and-feather Injun Joe and -ride him on a rail, for body-snatching, but so formidable was his -character that nobody could be found who was willing to take the lead -in the matter, so it was dropped. He had been careful to begin both of -his inquest-statements with the fight, without confessing the -grave-robbery that preceded it; therefore it was deemed wisest not -to try the case in the courts at present. - - - -CHAPTER XII - -ONE of the reasons why Tom's mind had drifted away from its secret -troubles was, that it had found a new and weighty matter to interest -itself about. Becky Thatcher had stopped coming to school. Tom had -struggled with his pride a few days, and tried to "whistle her down the -wind," but failed. He began to find himself hanging around her father's -house, nights, and feeling very miserable. She was ill. What if she -should die! There was distraction in the thought. He no longer took an -interest in war, nor even in piracy. The charm of life was gone; there -was nothing but dreariness left. He put his hoop away, and his bat; -there was no joy in them any more. His aunt was concerned. She began to -try all manner of remedies on him. She was one of those people who are -infatuated with patent medicines and all new-fangled methods of -producing health or mending it. She was an inveterate experimenter in -these things. When something fresh in this line came out she was in a -fever, right away, to try it; not on herself, for she was never ailing, -but on anybody else that came handy. She was a subscriber for all the -"Health" periodicals and phrenological frauds; and the solemn ignorance -they were inflated with was breath to her nostrils. 
All the "rot" they -contained about ventilation, and how to go to bed, and how to get up, -and what to eat, and what to drink, and how much exercise to take, and -what frame of mind to keep one's self in, and what sort of clothing to -wear, was all gospel to her, and she never observed that her -health-journals of the current month customarily upset everything they -had recommended the month before. She was as simple-hearted and honest -as the day was long, and so she was an easy victim. She gathered -together her quack periodicals and her quack medicines, and thus armed -with death, went about on her pale horse, metaphorically speaking, with -"hell following after." But she never suspected that she was not an -angel of healing and the balm of Gilead in disguise, to the suffering -neighbors. - -The water treatment was new, now, and Tom's low condition was a -windfall to her. She had him out at daylight every morning, stood him -up in the woodshed and drowned him with a deluge of cold water; then -she scrubbed him down with a towel like a file, and so brought him to; -then she rolled him up in a wet sheet and put him away under blankets -till she sweated his soul clean and "the yellow stains of it came -through his pores"--as Tom said. - -Yet notwithstanding all this, the boy grew more and more melancholy -and pale and dejected. She added hot baths, sitz baths, shower baths, -and plunges. The boy remained as dismal as a hearse. She began to -assist the water with a slim oatmeal diet and blister-plasters. She -calculated his capacity as she would a jug's, and filled him up every -day with quack cure-alls. - -Tom had become indifferent to persecution by this time. This phase -filled the old lady's heart with consternation. This indifference must -be broken up at any cost. Now she heard of Pain-killer for the first -time. She ordered a lot at once. She tasted it and was filled with -gratitude. It was simply fire in a liquid form. 
She dropped the water -treatment and everything else, and pinned her faith to Pain-killer. She -gave Tom a teaspoonful and watched with the deepest anxiety for the -result. Her troubles were instantly at rest, her soul at peace again; -for the "indifference" was broken up. The boy could not have shown a -wilder, heartier interest, if she had built a fire under him. - -Tom felt that it was time to wake up; this sort of life might be -romantic enough, in his blighted condition, but it was getting to have -too little sentiment and too much distracting variety about it. So he -thought over various plans for relief, and finally hit pon that of -professing to be fond of Pain-killer. He asked for it so often that he -became a nuisance, and his aunt ended by telling him to help himself -and quit bothering her. If it had been Sid, she would have had no -misgivings to alloy her delight; but since it was Tom, she watched the -bottle clandestinely. She found that the medicine did really diminish, -but it did not occur to her that the boy was mending the health of a -crack in the sitting-room floor with it. - -One day Tom was in the act of dosing the crack when his aunt's yellow -cat came along, purring, eying the teaspoon avariciously, and begging -for a taste. Tom said: - -"Don't ask for it unless you want it, Peter." - -But Peter signified that he did want it. - -"You better make sure." - -Peter was sure. - -"Now you've asked for it, and I'll give it to you, because there ain't -anything mean about me; but if you find you don't like it, you mustn't -blame anybody but your own self." - -Peter was agreeable. So Tom pried his mouth open and poured down the -Pain-killer. Peter sprang a couple of yards in the air, and then -delivered a war-whoop and set off round and round the room, banging -against furniture, upsetting flower-pots, and making general havoc. 
-Next he rose on his hind feet and pranced around, in a frenzy of -enjoyment, with his head over his shoulder and his voice proclaiming -his unappeasable happiness. Then he went tearing around the house again -spreading chaos and destruction in his path. Aunt Polly entered in time -to see him throw a few double summersets, deliver a final mighty -hurrah, and sail through the open window, carrying the rest of the -flower-pots with him. The old lady stood petrified with astonishment, -peering over her glasses; Tom lay on the floor expiring with laughter. - -"Tom, what on earth ails that cat?" - -"I don't know, aunt," gasped the boy. - -"Why, I never see anything like it. What did make him act so?" - -"Deed I don't know, Aunt Polly; cats always act so when they're having -a good time." - -"They do, do they?" There was something in the tone that made Tom -apprehensive. - -"Yes'm. That is, I believe they do." - -"You DO?" - -"Yes'm." - -The old lady was bending down, Tom watching, with interest emphasized -by anxiety. Too late he divined her "drift." The handle of the telltale -teaspoon was visible under the bed-valance. Aunt Polly took it, held it -up. Tom winced, and dropped his eyes. Aunt Polly raised him by the -usual handle--his ear--and cracked his head soundly with her thimble. - -"Now, sir, what did you want to treat that poor dumb beast so, for?" - -"I done it out of pity for him--because he hadn't any aunt." - -"Hadn't any aunt!--you numskull. What has that got to do with it?" - -"Heaps. Because if he'd had one she'd a burnt him out herself! She'd a -roasted his bowels out of him 'thout any more feeling than if he was a -human!" - -Aunt Polly felt a sudden pang of remorse. This was putting the thing -in a new light; what was cruelty to a cat MIGHT be cruelty to a boy, -too. She began to soften; she felt sorry. Her eyes watered a little, -and she put her hand on Tom's head and said gently: - -"I was meaning for the best, Tom. And, Tom, it DID do you good." 
- -Tom looked up in her face with just a perceptible twinkle peeping -through his gravity. - -"I know you was meaning for the best, aunty, and so was I with Peter. -It done HIM good, too. I never see him get around so since--" - -"Oh, go 'long with you, Tom, before you aggravate me again. And you -try and see if you can't be a good boy, for once, and you needn't take -any more medicine." - -Tom reached school ahead of time. It was noticed that this strange -thing had been occurring every day latterly. And now, as usual of late, -he hung about the gate of the schoolyard instead of playing with his -comrades. He was sick, he said, and he looked it. He tried to seem to -be looking everywhere but whither he really was looking--down the road. -Presently Jeff Thatcher hove in sight, and Tom's face lighted; he gazed -a moment, and then turned sorrowfully away. When Jeff arrived, Tom -accosted him; and "led up" warily to opportunities for remark about -Becky, but the giddy lad never could see the bait. Tom watched and -watched, hoping whenever a frisking frock came in sight, and hating the -owner of it as soon as he saw she was not the right one. At last frocks -ceased to appear, and he dropped hopelessly into the dumps; he entered -the empty schoolhouse and sat down to suffer. Then one more frock -passed in at the gate, and Tom's heart gave a great bound. The next -instant he was out, and "going on" like an Indian; yelling, laughing, -chasing boys, jumping over the fence at risk of life and limb, throwing -handsprings, standing on his head--doing all the heroic things he could -conceive of, and keeping a furtive eye out, all the while, to see if -Becky Thatcher was noticing. But she seemed to be unconscious of it -all; she never looked. Could it be possible that she was not aware that -he was there? 
He carried his exploits to her immediate vicinity; came -war-whooping around, snatched a boy's cap, hurled it to the roof of the -schoolhouse, broke through a group of boys, tumbling them in every -direction, and fell sprawling, himself, under Becky's nose, almost -upsetting her--and she turned, with her nose in the air, and he heard -her say: "Mf! some people think they're mighty smart--always showing -off!" - -Tom's cheeks burned. He gathered himself up and sneaked off, crushed -and crestfallen. - - - -CHAPTER XIII - -TOM'S mind was made up now. He was gloomy and desperate. He was a -forsaken, friendless boy, he said; nobody loved him; when they found -out what they had driven him to, perhaps they would be sorry; he had -tried to do right and get along, but they would not let him; since -nothing would do them but to be rid of him, let it be so; and let them -blame HIM for the consequences--why shouldn't they? What right had the -friendless to complain? Yes, they had forced him to it at last: he -would lead a life of crime. There was no choice. - -By this time he was far down Meadow Lane, and the bell for school to -"take up" tinkled faintly upon his ear. He sobbed, now, to think he -should never, never hear that old familiar sound any more--it was very -hard, but it was forced on him; since he was driven out into the cold -world, he must submit--but he forgave them. Then the sobs came thick -and fast. - -Just at this point he met his soul's sworn comrade, Joe Harper ---hard-eyed, and with evidently a great and dismal purpose in his heart. -Plainly here were "two souls with but a single thought." Tom, wiping -his eyes with his sleeve, began to blubber out something about a -resolution to escape from hard usage and lack of sympathy at home by -roaming abroad into the great world never to return; and ended by -hoping that Joe would not forget him. 
- -But it transpired that this was a request which Joe had just been -going to make of Tom, and had come to hunt him up for that purpose. His -mother had whipped him for drinking some cream which he had never -tasted and knew nothing about; it was plain that she was tired of him -and wished him to go; if she felt that way, there was nothing for him -to do but succumb; he hoped she would be happy, and never regret having -driven her poor boy out into the unfeeling world to suffer and die. - -As the two boys walked sorrowing along, they made a new compact to -stand by each other and be brothers and never separate till death -relieved them of their troubles. Then they began to lay their plans. -Joe was for being a hermit, and living on crusts in a remote cave, and -dying, some time, of cold and want and grief; but after listening to -Tom, he conceded that there were some conspicuous advantages about a -life of crime, and so he consented to be a pirate. - -Three miles below St. Petersburg, at a point where the Mississippi -River was a trifle over a mile wide, there was a long, narrow, wooded -island, with a shallow bar at the head of it, and this offered well as -a rendezvous. It was not inhabited; it lay far over toward the further -shore, abreast a dense and almost wholly unpeopled forest. So Jackson's -Island was chosen. Who were to be the subjects of their piracies was a -matter that did not occur to them. Then they hunted up Huckleberry -Finn, and he joined them promptly, for all careers were one to him; he -was indifferent. They presently separated to meet at a lonely spot on -the river-bank two miles above the village at the favorite hour--which -was midnight. There was a small log raft there which they meant to -capture. Each would bring hooks and lines, and such provision as he -could steal in the most dark and mysterious way--as became outlaws. 
And -before the afternoon was done, they had all managed to enjoy the sweet -glory of spreading the fact that pretty soon the town would "hear -something." All who got this vague hint were cautioned to "be mum and -wait." - -About midnight Tom arrived with a boiled ham and a few trifles, -and stopped in a dense undergrowth on a small bluff overlooking the -meeting-place. It was starlight, and very still. The mighty river lay -like an ocean at rest. Tom listened a moment, but no sound disturbed the -quiet. Then he gave a low, distinct whistle. It was answered from under -the bluff. Tom whistled twice more; these signals were answered in the -same way. Then a guarded voice said: - -"Who goes there?" - -"Tom Sawyer, the Black Avenger of the Spanish Main. Name your names." - -"Huck Finn the Red-Handed, and Joe Harper the Terror of the Seas." Tom -had furnished these titles, from his favorite literature. - -"'Tis well. Give the countersign." - -Two hoarse whispers delivered the same awful word simultaneously to -the brooding night: - -"BLOOD!" - -Then Tom tumbled his ham over the bluff and let himself down after it, -tearing both skin and clothes to some extent in the effort. There was -an easy, comfortable path along the shore under the bluff, but it -lacked the advantages of difficulty and danger so valued by a pirate. - -The Terror of the Seas had brought a side of bacon, and had about worn -himself out with getting it there. Finn the Red-Handed had stolen a -skillet and a quantity of half-cured leaf tobacco, and had also brought -a few corn-cobs to make pipes with. But none of the pirates smoked or -"chewed" but himself. The Black Avenger of the Spanish Main said it -would never do to start without some fire. That was a wise thought; -matches were hardly known there in that day. They saw a fire -smouldering upon a great raft a hundred yards above, and they went -stealthily thither and helped themselves to a chunk. 
They made an -imposing adventure of it, saying, "Hist!" every now and then, and -suddenly halting with finger on lip; moving with hands on imaginary -dagger-hilts; and giving orders in dismal whispers that if "the foe" -stirred, to "let him have it to the hilt," because "dead men tell no -tales." They knew well enough that the raftsmen were all down at the -village laying in stores or having a spree, but still that was no -excuse for their conducting this thing in an unpiratical way. - -They shoved off, presently, Tom in command, Huck at the after oar and -Joe at the forward. Tom stood amidships, gloomy-browed, and with folded -arms, and gave his orders in a low, stern whisper: - -"Luff, and bring her to the wind!" - -"Aye-aye, sir!" - -"Steady, steady-y-y-y!" - -"Steady it is, sir!" - -"Let her go off a point!" - -"Point it is, sir!" - -As the boys steadily and monotonously drove the raft toward mid-stream -it was no doubt understood that these orders were given only for -"style," and were not intended to mean anything in particular. - -"What sail's she carrying?" - -"Courses, tops'ls, and flying-jib, sir." - -"Send the r'yals up! Lay out aloft, there, half a dozen of ye ---foretopmaststuns'l! Lively, now!" - -"Aye-aye, sir!" - -"Shake out that maintogalans'l! Sheets and braces! NOW my hearties!" - -"Aye-aye, sir!" - -"Hellum-a-lee--hard a port! Stand by to meet her when she comes! Port, -port! NOW, men! With a will! Stead-y-y-y!" - -"Steady it is, sir!" - -The raft drew beyond the middle of the river; the boys pointed her -head right, and then lay on their oars. The river was not high, so -there was not more than a two or three mile current. Hardly a word was -said during the next three-quarters of an hour. Now the raft was -passing before the distant town. Two or three glimmering lights showed -where it lay, peacefully sleeping, beyond the vague vast sweep of -star-gemmed water, unconscious of the tremendous event that was happening. 
-The Black Avenger stood still with folded arms, "looking his last" upon -the scene of his former joys and his later sufferings, and wishing -"she" could see him now, abroad on the wild sea, facing peril and death -with dauntless heart, going to his doom with a grim smile on his lips. -It was but a small strain on his imagination to remove Jackson's Island -beyond eyeshot of the village, and so he "looked his last" with a -broken and satisfied heart. The other pirates were looking their last, -too; and they all looked so long that they came near letting the -current drift them out of the range of the island. But they discovered -the danger in time, and made shift to avert it. About two o'clock in -the morning the raft grounded on the bar two hundred yards above the -head of the island, and they waded back and forth until they had landed -their freight. Part of the little raft's belongings consisted of an old -sail, and this they spread over a nook in the bushes for a tent to -shelter their provisions; but they themselves would sleep in the open -air in good weather, as became outlaws. - -They built a fire against the side of a great log twenty or thirty -steps within the sombre depths of the forest, and then cooked some -bacon in the frying-pan for supper, and used up half of the corn "pone" -stock they had brought. It seemed glorious sport to be feasting in that -wild, free way in the virgin forest of an unexplored and uninhabited -island, far from the haunts of men, and they said they never would -return to civilization. The climbing fire lit up their faces and threw -its ruddy glare upon the pillared tree-trunks of their forest temple, -and upon the varnished foliage and festooning vines. - -When the last crisp slice of bacon was gone, and the last allowance of -corn pone devoured, the boys stretched themselves out on the grass, -filled with contentment. 
They could have found a cooler place, but they -would not deny themselves such a romantic feature as the roasting -camp-fire. - -"AIN'T it gay?" said Joe. - -"It's NUTS!" said Tom. "What would the boys say if they could see us?" - -"Say? Well, they'd just die to be here--hey, Hucky!" - -"I reckon so," said Huckleberry; "anyways, I'm suited. I don't want -nothing better'n this. I don't ever get enough to eat, gen'ally--and -here they can't come and pick at a feller and bullyrag him so." - -"It's just the life for me," said Tom. "You don't have to get up, -mornings, and you don't have to go to school, and wash, and all that -blame foolishness. You see a pirate don't have to do ANYTHING, Joe, -when he's ashore, but a hermit HE has to be praying considerable, and -then he don't have any fun, anyway, all by himself that way." - -"Oh yes, that's so," said Joe, "but I hadn't thought much about it, -you know. I'd a good deal rather be a pirate, now that I've tried it." - -"You see," said Tom, "people don't go much on hermits, nowadays, like -they used to in old times, but a pirate's always respected. And a -hermit's got to sleep on the hardest place he can find, and put -sackcloth and ashes on his head, and stand out in the rain, and--" - -"What does he put sackcloth and ashes on his head for?" inquired Huck. - -"I dono. But they've GOT to do it. Hermits always do. You'd have to do -that if you was a hermit." - -"Dern'd if I would," said Huck. - -"Well, what would you do?" - -"I dono. But I wouldn't do that." - -"Why, Huck, you'd HAVE to. How'd you get around it?" - -"Why, I just wouldn't stand it. I'd run away." - -"Run away! Well, you WOULD be a nice old slouch of a hermit. You'd be -a disgrace." - -The Red-Handed made no response, being better employed. 
He had
-finished gouging out a cob, and now he fitted a weed stem to it, loaded
-it with tobacco, and was pressing a coal to the charge and blowing a
-cloud of fragrant smoke--he was in the full bloom of luxurious
-contentment. The other pirates envied him this majestic vice, and
-secretly resolved to acquire it shortly. Presently Huck said:
-
-"What does pirates have to do?"
-
-Tom said:
-
-"Oh, they have just a bully time--take ships and burn them, and get
-the money and bury it in awful places in their island where there's
-ghosts and things to watch it, and kill everybody in the ships--make
-'em walk a plank."
-
-"And they carry the women to the island," said Joe; "they don't kill
-the women."
-
-"No," assented Tom, "they don't kill the women--they're too noble. And
-the women's always beautiful, too."
-
-"And don't they wear the bulliest clothes! Oh no! All gold and silver
-and di'monds," said Joe, with enthusiasm.
-
-"Who?" said Huck.
-
-"Why, the pirates."
-
-Huck scanned his own clothing forlornly.
-
-"I reckon I ain't dressed fitten for a pirate," said he, with a
-regretful pathos in his voice; "but I ain't got none but these."
-
-But the other boys told him the fine clothes would come fast enough,
-after they should have begun their adventures. They made him understand
-that his poor rags would do to begin with, though it was customary for
-wealthy pirates to start with a proper wardrobe.
-
-Gradually their talk died out and drowsiness began to steal upon the
-eyelids of the little waifs. The pipe dropped from the fingers of the
-Red-Handed, and he slept the sleep of the conscience-free and the
-weary. The Terror of the Seas and the Black Avenger of the Spanish Main
-had more difficulty in getting to sleep. 
They said their prayers -inwardly, and lying down, since there was nobody there with authority -to make them kneel and recite aloud; in truth, they had a mind not to -say them at all, but they were afraid to proceed to such lengths as -that, lest they might call down a sudden and special thunderbolt from -heaven. Then at once they reached and hovered upon the imminent verge -of sleep--but an intruder came, now, that would not "down." It was -conscience. They began to feel a vague fear that they had been doing -wrong to run away; and next they thought of the stolen meat, and then -the real torture came. They tried to argue it away by reminding -conscience that they had purloined sweetmeats and apples scores of -times; but conscience was not to be appeased by such thin -plausibilities; it seemed to them, in the end, that there was no -getting around the stubborn fact that taking sweetmeats was only -"hooking," while taking bacon and hams and such valuables was plain -simple stealing--and there was a command against that in the Bible. So -they inwardly resolved that so long as they remained in the business, -their piracies should not again be sullied with the crime of stealing. -Then conscience granted a truce, and these curiously inconsistent -pirates fell peacefully to sleep. - - - -CHAPTER XIV - -WHEN Tom awoke in the morning, he wondered where he was. He sat up and -rubbed his eyes and looked around. Then he comprehended. It was the -cool gray dawn, and there was a delicious sense of repose and peace in -the deep pervading calm and silence of the woods. Not a leaf stirred; -not a sound obtruded upon great Nature's meditation. Beaded dewdrops -stood upon the leaves and grasses. A white layer of ashes covered the -fire, and a thin blue breath of smoke rose straight into the air. Joe -and Huck still slept. - -Now, far away in the woods a bird called; another answered; presently -the hammering of a woodpecker was heard. 
Gradually the cool dim gray of -the morning whitened, and as gradually sounds multiplied and life -manifested itself. The marvel of Nature shaking off sleep and going to -work unfolded itself to the musing boy. A little green worm came -crawling over a dewy leaf, lifting two-thirds of his body into the air -from time to time and "sniffing around," then proceeding again--for he -was measuring, Tom said; and when the worm approached him, of its own -accord, he sat as still as a stone, with his hopes rising and falling, -by turns, as the creature still came toward him or seemed inclined to -go elsewhere; and when at last it considered a painful moment with its -curved body in the air and then came decisively down upon Tom's leg and -began a journey over him, his whole heart was glad--for that meant that -he was going to have a new suit of clothes--without the shadow of a -doubt a gaudy piratical uniform. Now a procession of ants appeared, -from nowhere in particular, and went about their labors; one struggled -manfully by with a dead spider five times as big as itself in its arms, -and lugged it straight up a tree-trunk. A brown spotted lady-bug -climbed the dizzy height of a grass blade, and Tom bent down close to -it and said, "Lady-bug, lady-bug, fly away home, your house is on fire, -your children's alone," and she took wing and went off to see about it ---which did not surprise the boy, for he knew of old that this insect was -credulous about conflagrations, and he had practised upon its -simplicity more than once. A tumblebug came next, heaving sturdily at -its ball, and Tom touched the creature, to see it shut its legs against -its body and pretend to be dead. The birds were fairly rioting by this -time. 
A catbird, the Northern mocker, lit in a tree over Tom's head, -and trilled out her imitations of her neighbors in a rapture of -enjoyment; then a shrill jay swept down, a flash of blue flame, and -stopped on a twig almost within the boy's reach, cocked his head to one -side and eyed the strangers with a consuming curiosity; a gray squirrel -and a big fellow of the "fox" kind came skurrying along, sitting up at -intervals to inspect and chatter at the boys, for the wild things had -probably never seen a human being before and scarcely knew whether to -be afraid or not. All Nature was wide awake and stirring, now; long -lances of sunlight pierced down through the dense foliage far and near, -and a few butterflies came fluttering upon the scene. - -Tom stirred up the other pirates and they all clattered away with a -shout, and in a minute or two were stripped and chasing after and -tumbling over each other in the shallow limpid water of the white -sandbar. They felt no longing for the little village sleeping in the -distance beyond the majestic waste of water. A vagrant current or a -slight rise in the river had carried off their raft, but this only -gratified them, since its going was something like burning the bridge -between them and civilization. - -They came back to camp wonderfully refreshed, glad-hearted, and -ravenous; and they soon had the camp-fire blazing up again. Huck found -a spring of clear cold water close by, and the boys made cups of broad -oak or hickory leaves, and felt that water, sweetened with such a -wildwood charm as that, would be a good enough substitute for coffee. -While Joe was slicing bacon for breakfast, Tom and Huck asked him to -hold on a minute; they stepped to a promising nook in the river-bank -and threw in their lines; almost immediately they had reward. Joe had -not had time to get impatient before they were back again with some -handsome bass, a couple of sun-perch and a small catfish--provisions -enough for quite a family. 
They fried the fish with the bacon, and were -astonished; for no fish had ever seemed so delicious before. They did -not know that the quicker a fresh-water fish is on the fire after he is -caught the better he is; and they reflected little upon what a sauce -open-air sleeping, open-air exercise, bathing, and a large ingredient -of hunger make, too. - -They lay around in the shade, after breakfast, while Huck had a smoke, -and then went off through the woods on an exploring expedition. They -tramped gayly along, over decaying logs, through tangled underbrush, -among solemn monarchs of the forest, hung from their crowns to the -ground with a drooping regalia of grape-vines. Now and then they came -upon snug nooks carpeted with grass and jeweled with flowers. - -They found plenty of things to be delighted with, but nothing to be -astonished at. They discovered that the island was about three miles -long and a quarter of a mile wide, and that the shore it lay closest to -was only separated from it by a narrow channel hardly two hundred yards -wide. They took a swim about every hour, so it was close upon the -middle of the afternoon when they got back to camp. They were too -hungry to stop to fish, but they fared sumptuously upon cold ham, and -then threw themselves down in the shade to talk. But the talk soon -began to drag, and then died. The stillness, the solemnity that brooded -in the woods, and the sense of loneliness, began to tell upon the -spirits of the boys. They fell to thinking. A sort of undefined longing -crept upon them. This took dim shape, presently--it was budding -homesickness. Even Finn the Red-Handed was dreaming of his doorsteps -and empty hogsheads. But they were all ashamed of their weakness, and -none was brave enough to speak his thought. - -For some time, now, the boys had been dully conscious of a peculiar -sound in the distance, just as one sometimes is of the ticking of a -clock which he takes no distinct note of. 
But now this mysterious sound -became more pronounced, and forced a recognition. The boys started, -glanced at each other, and then each assumed a listening attitude. -There was a long silence, profound and unbroken; then a deep, sullen -boom came floating down out of the distance. - -"What is it!" exclaimed Joe, under his breath. - -"I wonder," said Tom in a whisper. - -"'Tain't thunder," said Huckleberry, in an awed tone, "becuz thunder--" - -"Hark!" said Tom. "Listen--don't talk." - -They waited a time that seemed an age, and then the same muffled boom -troubled the solemn hush. - -"Let's go and see." - -They sprang to their feet and hurried to the shore toward the town. -They parted the bushes on the bank and peered out over the water. The -little steam ferryboat was about a mile below the village, drifting -with the current. Her broad deck seemed crowded with people. There were -a great many skiffs rowing about or floating with the stream in the -neighborhood of the ferryboat, but the boys could not determine what -the men in them were doing. Presently a great jet of white smoke burst -from the ferryboat's side, and as it expanded and rose in a lazy cloud, -that same dull throb of sound was borne to the listeners again. - -"I know now!" exclaimed Tom; "somebody's drownded!" - -"That's it!" said Huck; "they done that last summer, when Bill Turner -got drownded; they shoot a cannon over the water, and that makes him -come up to the top. Yes, and they take loaves of bread and put -quicksilver in 'em and set 'em afloat, and wherever there's anybody -that's drownded, they'll float right there and stop." - -"Yes, I've heard about that," said Joe. "I wonder what makes the bread -do that." - -"Oh, it ain't the bread, so much," said Tom; "I reckon it's mostly -what they SAY over it before they start it out." - -"But they don't say anything over it," said Huck. "I've seen 'em and -they don't." - -"Well, that's funny," said Tom. "But maybe they say it to themselves. 
-
Of COURSE they do. Anybody might know that."
-
-The other boys agreed that there was reason in what Tom said, because
-an ignorant lump of bread, uninstructed by an incantation, could not be
-expected to act very intelligently when set upon an errand of such
-gravity.
-
-"By jings, I wish I was over there, now," said Joe.
-
-"I do too," said Huck. "I'd give heaps to know who it is."
-
-The boys still listened and watched. Presently a revealing thought
-flashed through Tom's mind, and he exclaimed:
-
-"Boys, I know who's drownded--it's us!"
-
-They felt like heroes in an instant. Here was a gorgeous triumph; they
-were missed; they were mourned; hearts were breaking on their account;
-tears were being shed; accusing memories of unkindness to these poor
-lost lads were rising up, and unavailing regrets and remorse were being
-indulged; and best of all, the departed were the talk of the whole
-town, and the envy of all the boys, as far as this dazzling notoriety
-was concerned. This was fine. It was worth while to be a pirate, after
-all.
-
-As twilight drew on, the ferryboat went back to her accustomed
-business and the skiffs disappeared. The pirates returned to camp. They
-were jubilant with vanity over their new grandeur and the illustrious
-trouble they were making. They caught fish, cooked supper and ate it,
-and then fell to guessing at what the village was thinking and saying
-about them; and the pictures they drew of the public distress on their
-account were gratifying to look upon--from their point of view. But
-when the shadows of night closed them in, they gradually ceased to
-talk, and sat gazing into the fire, with their minds evidently
-wandering elsewhere. The excitement was gone, now, and Tom and Joe
-could not keep back thoughts of certain persons at home who were not
-enjoying this fine frolic as much as they were. Misgivings came; they
-grew troubled and unhappy; a sigh or two escaped, unawares. 
By and by -Joe timidly ventured upon a roundabout "feeler" as to how the others -might look upon a return to civilization--not right now, but-- - -Tom withered him with derision! Huck, being uncommitted as yet, joined -in with Tom, and the waverer quickly "explained," and was glad to get -out of the scrape with as little taint of chicken-hearted homesickness -clinging to his garments as he could. Mutiny was effectually laid to -rest for the moment. - -As the night deepened, Huck began to nod, and presently to snore. Joe -followed next. Tom lay upon his elbow motionless, for some time, -watching the two intently. At last he got up cautiously, on his knees, -and went searching among the grass and the flickering reflections flung -by the camp-fire. He picked up and inspected several large -semi-cylinders of the thin white bark of a sycamore, and finally chose -two which seemed to suit him. Then he knelt by the fire and painfully -wrote something upon each of these with his "red keel"; one he rolled up -and put in his jacket pocket, and the other he put in Joe's hat and -removed it to a little distance from the owner. And he also put into the -hat certain schoolboy treasures of almost inestimable value--among them -a lump of chalk, an India-rubber ball, three fishhooks, and one of that -kind of marbles known as a "sure 'nough crystal." Then he tiptoed his -way cautiously among the trees till he felt that he was out of hearing, -and straightway broke into a keen run in the direction of the sandbar. - - - -CHAPTER XV - -A FEW minutes later Tom was in the shoal water of the bar, wading -toward the Illinois shore. Before the depth reached his middle he was -half-way over; the current would permit no more wading, now, so he -struck out confidently to swim the remaining hundred yards. He swam -quartering upstream, but still was swept downward rather faster than he -had expected. 
However, he reached the shore finally, and drifted along -till he found a low place and drew himself out. He put his hand on his -jacket pocket, found his piece of bark safe, and then struck through -the woods, following the shore, with streaming garments. Shortly before -ten o'clock he came out into an open place opposite the village, and -saw the ferryboat lying in the shadow of the trees and the high bank. -Everything was quiet under the blinking stars. He crept down the bank, -watching with all his eyes, slipped into the water, swam three or four -strokes and climbed into the skiff that did "yawl" duty at the boat's -stern. He laid himself down under the thwarts and waited, panting. - -Presently the cracked bell tapped and a voice gave the order to "cast -off." A minute or two later the skiff's head was standing high up, -against the boat's swell, and the voyage was begun. Tom felt happy in -his success, for he knew it was the boat's last trip for the night. At -the end of a long twelve or fifteen minutes the wheels stopped, and Tom -slipped overboard and swam ashore in the dusk, landing fifty yards -downstream, out of danger of possible stragglers. - -He flew along unfrequented alleys, and shortly found himself at his -aunt's back fence. He climbed over, approached the "ell," and looked in -at the sitting-room window, for a light was burning there. There sat -Aunt Polly, Sid, Mary, and Joe Harper's mother, grouped together, -talking. They were by the bed, and the bed was between them and the -door. Tom went to the door and began to softly lift the latch; then he -pressed gently and the door yielded a crack; he continued pushing -cautiously, and quaking every time it creaked, till he judged he might -squeeze through on his knees; so he put his head through and began, -warily. - -"What makes the candle blow so?" said Aunt Polly. Tom hurried up. -"Why, that door's open, I believe. Why, of course it is. No end of -strange things now. Go 'long and shut it, Sid." 
- -Tom disappeared under the bed just in time. He lay and "breathed" -himself for a time, and then crept to where he could almost touch his -aunt's foot. - -"But as I was saying," said Aunt Polly, "he warn't BAD, so to say ---only mischEEvous. Only just giddy, and harum-scarum, you know. He -warn't any more responsible than a colt. HE never meant any harm, and -he was the best-hearted boy that ever was"--and she began to cry. - -"It was just so with my Joe--always full of his devilment, and up to -every kind of mischief, but he was just as unselfish and kind as he -could be--and laws bless me, to think I went and whipped him for taking -that cream, never once recollecting that I throwed it out myself -because it was sour, and I never to see him again in this world, never, -never, never, poor abused boy!" And Mrs. Harper sobbed as if her heart -would break. - -"I hope Tom's better off where he is," said Sid, "but if he'd been -better in some ways--" - -"SID!" Tom felt the glare of the old lady's eye, though he could not -see it. "Not a word against my Tom, now that he's gone! God'll take -care of HIM--never you trouble YOURself, sir! Oh, Mrs. Harper, I don't -know how to give him up! I don't know how to give him up! He was such a -comfort to me, although he tormented my old heart out of me, 'most." - -"The Lord giveth and the Lord hath taken away--Blessed be the name of -the Lord! But it's so hard--Oh, it's so hard! Only last Saturday my -Joe busted a firecracker right under my nose and I knocked him -sprawling. Little did I know then, how soon--Oh, if it was to do over -again I'd hug him and bless him for it." - -"Yes, yes, yes, I know just how you feel, Mrs. Harper, I know just -exactly how you feel. No longer ago than yesterday noon, my Tom took -and filled the cat full of Pain-killer, and I did think the cretur -would tear the house down. And God forgive me, I cracked Tom's head -with my thimble, poor boy, poor dead boy. But he's out of all his -troubles now. 
And the last words I ever heard him say was to reproach--" - -But this memory was too much for the old lady, and she broke entirely -down. Tom was snuffling, now, himself--and more in pity of himself than -anybody else. He could hear Mary crying, and putting in a kindly word -for him from time to time. He began to have a nobler opinion of himself -than ever before. Still, he was sufficiently touched by his aunt's -grief to long to rush out from under the bed and overwhelm her with -joy--and the theatrical gorgeousness of the thing appealed strongly to -his nature, too, but he resisted and lay still. - -He went on listening, and gathered by odds and ends that it was -conjectured at first that the boys had got drowned while taking a swim; -then the small raft had been missed; next, certain boys said the -missing lads had promised that the village should "hear something" -soon; the wise-heads had "put this and that together" and decided that -the lads had gone off on that raft and would turn up at the next town -below, presently; but toward noon the raft had been found, lodged -against the Missouri shore some five or six miles below the village ---and then hope perished; they must be drowned, else hunger would have -driven them home by nightfall if not sooner. It was believed that the -search for the bodies had been a fruitless effort merely because the -drowning must have occurred in mid-channel, since the boys, being good -swimmers, would otherwise have escaped to shore. This was Wednesday -night. If the bodies continued missing until Sunday, all hope would be -given over, and the funerals would be preached on that morning. Tom -shuddered. - -Mrs. Harper gave a sobbing good-night and turned to go. Then with a -mutual impulse the two bereaved women flung themselves into each -other's arms and had a good, consoling cry, and then parted. Aunt Polly -was tender far beyond her wont, in her good-night to Sid and Mary. 
Sid -snuffled a bit and Mary went off crying with all her heart. - -Aunt Polly knelt down and prayed for Tom so touchingly, so -appealingly, and with such measureless love in her words and her old -trembling voice, that he was weltering in tears again, long before she -was through. - -He had to keep still long after she went to bed, for she kept making -broken-hearted ejaculations from time to time, tossing unrestfully, and -turning over. But at last she was still, only moaning a little in her -sleep. Now the boy stole out, rose gradually by the bedside, shaded the -candle-light with his hand, and stood regarding her. His heart was full -of pity for her. He took out his sycamore scroll and placed it by the -candle. But something occurred to him, and he lingered considering. His -face lighted with a happy solution of his thought; he put the bark -hastily in his pocket. Then he bent over and kissed the faded lips, and -straightway made his stealthy exit, latching the door behind him. - -He threaded his way back to the ferry landing, found nobody at large -there, and walked boldly on board the boat, for he knew she was -tenantless except that there was a watchman, who always turned in and -slept like a graven image. He untied the skiff at the stern, slipped -into it, and was soon rowing cautiously upstream. When he had pulled a -mile above the village, he started quartering across and bent himself -stoutly to his work. He hit the landing on the other side neatly, for -this was a familiar bit of work to him. He was moved to capture the -skiff, arguing that it might be considered a ship and therefore -legitimate prey for a pirate, but he knew a thorough search would be -made for it and that might end in revelations. So he stepped ashore and -entered the woods. - -He sat down and took a long rest, torturing himself meanwhile to keep -awake, and then started warily down the home-stretch. The night was far -spent. 
It was broad daylight before he found himself fairly abreast the -island bar. He rested again until the sun was well up and gilding the -great river with its splendor, and then he plunged into the stream. A -little later he paused, dripping, upon the threshold of the camp, and -heard Joe say: - -"No, Tom's true-blue, Huck, and he'll come back. He won't desert. He -knows that would be a disgrace to a pirate, and Tom's too proud for -that sort of thing. He's up to something or other. Now I wonder what?" - -"Well, the things is ours, anyway, ain't they?" - -"Pretty near, but not yet, Huck. The writing says they are if he ain't -back here to breakfast." - -"Which he is!" exclaimed Tom, with fine dramatic effect, stepping -grandly into camp. - -A sumptuous breakfast of bacon and fish was shortly provided, and as -the boys set to work upon it, Tom recounted (and adorned) his -adventures. They were a vain and boastful company of heroes when the -tale was done. Then Tom hid himself away in a shady nook to sleep till -noon, and the other pirates got ready to fish and explore. - - - -CHAPTER XVI - -AFTER dinner all the gang turned out to hunt for turtle eggs on the -bar. They went about poking sticks into the sand, and when they found a -soft place they went down on their knees and dug with their hands. -Sometimes they would take fifty or sixty eggs out of one hole. They -were perfectly round white things a trifle smaller than an English -walnut. They had a famous fried-egg feast that night, and another on -Friday morning. - -After breakfast they went whooping and prancing out on the bar, and -chased each other round and round, shedding clothes as they went, until -they were naked, and then continued the frolic far away up the shoal -water of the bar, against the stiff current, which latter tripped their -legs from under them from time to time and greatly increased the fun. 
-And now and then they stooped in a group and splashed water in each -other's faces with their palms, gradually approaching each other, with -averted faces to avoid the strangling sprays, and finally gripping and -struggling till the best man ducked his neighbor, and then they all -went under in a tangle of white legs and arms and came up blowing, -sputtering, laughing, and gasping for breath at one and the same time. - -When they were well exhausted, they would run out and sprawl on the -dry, hot sand, and lie there and cover themselves up with it, and by -and by break for the water again and go through the original -performance once more. Finally it occurred to them that their naked -skin represented flesh-colored "tights" very fairly; so they drew a -ring in the sand and had a circus--with three clowns in it, for none -would yield this proudest post to his neighbor. - -Next they got their marbles and played "knucks" and "ring-taw" and -"keeps" till that amusement grew stale. Then Joe and Huck had another -swim, but Tom would not venture, because he found that in kicking off -his trousers he had kicked his string of rattlesnake rattles off his -ankle, and he wondered how he had escaped cramp so long without the -protection of this mysterious charm. He did not venture again until he -had found it, and by that time the other boys were tired and ready to -rest. They gradually wandered apart, dropped into the "dumps," and fell -to gazing longingly across the wide river to where the village lay -drowsing in the sun. Tom found himself writing "BECKY" in the sand with -his big toe; he scratched it out, and was angry with himself for his -weakness. But he wrote it again, nevertheless; he could not help it. He -erased it once more and then took himself out of temptation by driving -the other boys together and joining them. - -But Joe's spirits had gone down almost beyond resurrection. He was so -homesick that he could hardly endure the misery of it. 
The tears lay -very near the surface. Huck was melancholy, too. Tom was downhearted, -but tried hard not to show it. He had a secret which he was not ready -to tell, yet, but if this mutinous depression was not broken up soon, -he would have to bring it out. He said, with a great show of -cheerfulness: - -"I bet there's been pirates on this island before, boys. We'll explore -it again. They've hid treasures here somewhere. How'd you feel to light -on a rotten chest full of gold and silver--hey?" - -But it roused only faint enthusiasm, which faded out, with no reply. -Tom tried one or two other seductions; but they failed, too. It was -discouraging work. Joe sat poking up the sand with a stick and looking -very gloomy. Finally he said: - -"Oh, boys, let's give it up. I want to go home. It's so lonesome." - -"Oh no, Joe, you'll feel better by and by," said Tom. "Just think of -the fishing that's here." - -"I don't care for fishing. I want to go home." - -"But, Joe, there ain't such another swimming-place anywhere." - -"Swimming's no good. I don't seem to care for it, somehow, when there -ain't anybody to say I sha'n't go in. I mean to go home." - -"Oh, shucks! Baby! You want to see your mother, I reckon." - -"Yes, I DO want to see my mother--and you would, too, if you had one. -I ain't any more baby than you are." And Joe snuffled a little. - -"Well, we'll let the cry-baby go home to his mother, won't we, Huck? -Poor thing--does it want to see its mother? And so it shall. You like -it here, don't you, Huck? We'll stay, won't we?" - -Huck said, "Y-e-s"--without any heart in it. - -"I'll never speak to you again as long as I live," said Joe, rising. -"There now!" And he moved moodily away and began to dress himself. - -"Who cares!" said Tom. "Nobody wants you to. Go 'long home and get -laughed at. Oh, you're a nice pirate. Huck and me ain't cry-babies. -We'll stay, won't we, Huck? Let him go if he wants to. I reckon we can -get along without him, per'aps." 
- -But Tom was uneasy, nevertheless, and was alarmed to see Joe go -sullenly on with his dressing. And then it was discomforting to see -Huck eying Joe's preparations so wistfully, and keeping up such an -ominous silence. Presently, without a parting word, Joe began to wade -off toward the Illinois shore. Tom's heart began to sink. He glanced at -Huck. Huck could not bear the look, and dropped his eyes. Then he said: - -"I want to go, too, Tom. It was getting so lonesome anyway, and now -it'll be worse. Let's us go, too, Tom." - -"I won't! You can all go, if you want to. I mean to stay." - -"Tom, I better go." - -"Well, go 'long--who's hendering you." - -Huck began to pick up his scattered clothes. He said: - -"Tom, I wisht you'd come, too. Now you think it over. We'll wait for -you when we get to shore." - -"Well, you'll wait a blame long time, that's all." - -Huck started sorrowfully away, and Tom stood looking after him, with a -strong desire tugging at his heart to yield his pride and go along too. -He hoped the boys would stop, but they still waded slowly on. It -suddenly dawned on Tom that it was become very lonely and still. He -made one final struggle with his pride, and then darted after his -comrades, yelling: - -"Wait! Wait! I want to tell you something!" - -They presently stopped and turned around. When he got to where they -were, he began unfolding his secret, and they listened moodily till at -last they saw the "point" he was driving at, and then they set up a -war-whoop of applause and said it was "splendid!" and said if he had -told them at first, they wouldn't have started away. He made a plausible -excuse; but his real reason had been the fear that not even the secret -would keep them with him any very great length of time, and so he had -meant to hold it in reserve as a last seduction. - -The lads came gayly back and went at their sports again with a will, -chattering all the time about Tom's stupendous plan and admiring the -genius of it. 
After a dainty egg and fish dinner, Tom said he wanted to
-learn to smoke, now. Joe caught at the idea and said he would like to
-try, too. So Huck made pipes and filled them. These novices had never
-smoked anything before but cigars made of grape-vine, and they "bit"
-the tongue, and were not considered manly anyway.
-
-Now they stretched themselves out on their elbows and began to puff,
-charily, and with slender confidence. The smoke had an unpleasant
-taste, and they gagged a little, but Tom said:
-
-"Why, it's just as easy! If I'd a knowed this was all, I'd a learnt
-long ago."
-
-"So would I," said Joe. "It's just nothing."
-
-"Why, many a time I've looked at people smoking, and thought well I
-wish I could do that; but I never thought I could," said Tom.
-
-"That's just the way with me, hain't it, Huck? You've heard me talk
-just that way--haven't you, Huck? I'll leave it to Huck if I haven't."
-
-"Yes--heaps of times," said Huck.
-
-"Well, I have too," said Tom; "oh, hundreds of times. Once down by the
-slaughter-house. Don't you remember, Huck? Bob Tanner was there, and
-Johnny Miller, and Jeff Thatcher, when I said it. Don't you remember,
-Huck, 'bout me saying that?"
-
-"Yes, that's so," said Huck. "That was the day after I lost a white
-alley. No, 'twas the day before."
-
-"There--I told you so," said Tom. "Huck recollects it."
-
-"I bleeve I could smoke this pipe all day," said Joe. "I don't feel
-sick."
-
-"Neither do I," said Tom. "I could smoke it all day. But I bet you
-Jeff Thatcher couldn't."
-
-"Jeff Thatcher! Why, he'd keel over just with two draws. Just let him
-try it once. HE'D see!"
-
-"I bet he would. And Johnny Miller--I wish I could see Johnny Miller
-tackle it once."
-
-"Oh, don't I!" said Joe. "Why, I bet you Johnny Miller couldn't any
-more do this than nothing. Just one little snifter would fetch HIM."
-
-"'Deed it would, Joe. Say--I wish the boys could see us now."
-
-"So do I." 
- -"Say--boys, don't say anything about it, and some time when they're -around, I'll come up to you and say, 'Joe, got a pipe? I want a smoke.' -And you'll say, kind of careless like, as if it warn't anything, you'll -say, 'Yes, I got my OLD pipe, and another one, but my tobacker ain't -very good.' And I'll say, 'Oh, that's all right, if it's STRONG -enough.' And then you'll out with the pipes, and we'll light up just as -ca'm, and then just see 'em look!" - -"By jings, that'll be gay, Tom! I wish it was NOW!" - -"So do I! And when we tell 'em we learned when we was off pirating, -won't they wish they'd been along?" - -"Oh, I reckon not! I'll just BET they will!" - -So the talk ran on. But presently it began to flag a trifle, and grow -disjointed. The silences widened; the expectoration marvellously -increased. Every pore inside the boys' cheeks became a spouting -fountain; they could scarcely bail out the cellars under their tongues -fast enough to prevent an inundation; little overflowings down their -throats occurred in spite of all they could do, and sudden retchings -followed every time. Both boys were looking very pale and miserable, -now. Joe's pipe dropped from his nerveless fingers. Tom's followed. -Both fountains were going furiously and both pumps bailing with might -and main. Joe said feebly: - -"I've lost my knife. I reckon I better go and find it." - -Tom said, with quivering lips and halting utterance: - -"I'll help you. You go over that way and I'll hunt around by the -spring. No, you needn't come, Huck--we can find it." - -So Huck sat down again, and waited an hour. Then he found it lonesome, -and went to find his comrades. They were wide apart in the woods, both -very pale, both fast asleep. But something informed him that if they -had had any trouble they had got rid of it. - -They were not talkative at supper that night. 
They had a humble look, -and when Huck prepared his pipe after the meal and was going to prepare -theirs, they said no, they were not feeling very well--something they -ate at dinner had disagreed with them. - -About midnight Joe awoke, and called the boys. There was a brooding -oppressiveness in the air that seemed to bode something. The boys -huddled themselves together and sought the friendly companionship of -the fire, though the dull dead heat of the breathless atmosphere was -stifling. They sat still, intent and waiting. The solemn hush -continued. Beyond the light of the fire everything was swallowed up in -the blackness of darkness. Presently there came a quivering glow that -vaguely revealed the foliage for a moment and then vanished. By and by -another came, a little stronger. Then another. Then a faint moan came -sighing through the branches of the forest and the boys felt a fleeting -breath upon their cheeks, and shuddered with the fancy that the Spirit -of the Night had gone by. There was a pause. Now a weird flash turned -night into day and showed every little grass-blade, separate and -distinct, that grew about their feet. And it showed three white, -startled faces, too. A deep peal of thunder went rolling and tumbling -down the heavens and lost itself in sullen rumblings in the distance. A -sweep of chilly air passed by, rustling all the leaves and snowing the -flaky ashes broadcast about the fire. Another fierce glare lit up the -forest and an instant crash followed that seemed to rend the tree-tops -right over the boys' heads. They clung together in terror, in the thick -gloom that followed. A few big rain-drops fell pattering upon the -leaves. - -"Quick! boys, go for the tent!" exclaimed Tom. - -They sprang away, stumbling over roots and among vines in the dark, no -two plunging in the same direction. A furious blast roared through the -trees, making everything sing as it went. 
One blinding flash after -another came, and peal on peal of deafening thunder. And now a -drenching rain poured down and the rising hurricane drove it in sheets -along the ground. The boys cried out to each other, but the roaring -wind and the booming thunder-blasts drowned their voices utterly. -However, one by one they straggled in at last and took shelter under -the tent, cold, scared, and streaming with water; but to have company -in misery seemed something to be grateful for. They could not talk, the -old sail flapped so furiously, even if the other noises would have -allowed them. The tempest rose higher and higher, and presently the -sail tore loose from its fastenings and went winging away on the blast. -The boys seized each others' hands and fled, with many tumblings and -bruises, to the shelter of a great oak that stood upon the river-bank. -Now the battle was at its highest. Under the ceaseless conflagration of -lightning that flamed in the skies, everything below stood out in -clean-cut and shadowless distinctness: the bending trees, the billowy -river, white with foam, the driving spray of spume-flakes, the dim -outlines of the high bluffs on the other side, glimpsed through the -drifting cloud-rack and the slanting veil of rain. Every little while -some giant tree yielded the fight and fell crashing through the younger -growth; and the unflagging thunder-peals came now in ear-splitting -explosive bursts, keen and sharp, and unspeakably appalling. The storm -culminated in one matchless effort that seemed likely to tear the island -to pieces, burn it up, drown it to the tree-tops, blow it away, and -deafen every creature in it, all at one and the same moment. It was a -wild night for homeless young heads to be out in. - -But at last the battle was done, and the forces retired with weaker -and weaker threatenings and grumblings, and peace resumed her sway. 
The -boys went back to camp, a good deal awed; but they found there was -still something to be thankful for, because the great sycamore, the -shelter of their beds, was a ruin, now, blasted by the lightnings, and -they were not under it when the catastrophe happened. - -Everything in camp was drenched, the camp-fire as well; for they were -but heedless lads, like their generation, and had made no provision -against rain. Here was matter for dismay, for they were soaked through -and chilled. They were eloquent in their distress; but they presently -discovered that the fire had eaten so far up under the great log it had -been built against (where it curved upward and separated itself from -the ground), that a handbreadth or so of it had escaped wetting; so -they patiently wrought until, with shreds and bark gathered from the -under sides of sheltered logs, they coaxed the fire to burn again. Then -they piled on great dead boughs till they had a roaring furnace, and -were glad-hearted once more. They dried their boiled ham and had a -feast, and after that they sat by the fire and expanded and glorified -their midnight adventure until morning, for there was not a dry spot to -sleep on, anywhere around. - -As the sun began to steal in upon the boys, drowsiness came over them, -and they went out on the sandbar and lay down to sleep. They got -scorched out by and by, and drearily set about getting breakfast. After -the meal they felt rusty, and stiff-jointed, and a little homesick once -more. Tom saw the signs, and fell to cheering up the pirates as well as -he could. But they cared nothing for marbles, or circus, or swimming, -or anything. He reminded them of the imposing secret, and raised a ray -of cheer. While it lasted, he got them interested in a new device. This -was to knock off being pirates, for a while, and be Indians for a -change. 
They were attracted by this idea; so it was not long before -they were stripped, and striped from head to heel with black mud, like -so many zebras--all of them chiefs, of course--and then they went -tearing through the woods to attack an English settlement. - -By and by they separated into three hostile tribes, and darted upon -each other from ambush with dreadful war-whoops, and killed and scalped -each other by thousands. It was a gory day. Consequently it was an -extremely satisfactory one. - -They assembled in camp toward supper-time, hungry and happy; but now a -difficulty arose--hostile Indians could not break the bread of -hospitality together without first making peace, and this was a simple -impossibility without smoking a pipe of peace. There was no other -process that ever they had heard of. Two of the savages almost wished -they had remained pirates. However, there was no other way; so with -such show of cheerfulness as they could muster they called for the pipe -and took their whiff as it passed, in due form. - -And behold, they were glad they had gone into savagery, for they had -gained something; they found that they could now smoke a little without -having to go and hunt for a lost knife; they did not get sick enough to -be seriously uncomfortable. They were not likely to fool away this high -promise for lack of effort. No, they practised cautiously, after -supper, with right fair success, and so they spent a jubilant evening. -They were prouder and happier in their new acquirement than they would -have been in the scalping and skinning of the Six Nations. We will -leave them to smoke and chatter and brag, since we have no further use -for them at present. - - - -CHAPTER XVII - -BUT there was no hilarity in the little town that same tranquil -Saturday afternoon. The Harpers, and Aunt Polly's family, were being -put into mourning, with great grief and many tears. 
An unusual quiet -possessed the village, although it was ordinarily quiet enough, in all -conscience. The villagers conducted their concerns with an absent air, -and talked little; but they sighed often. The Saturday holiday seemed a -burden to the children. They had no heart in their sports, and -gradually gave them up. - -In the afternoon Becky Thatcher found herself moping about the -deserted schoolhouse yard, and feeling very melancholy. But she found -nothing there to comfort her. She soliloquized: - -"Oh, if I only had a brass andiron-knob again! But I haven't got -anything now to remember him by." And she choked back a little sob. - -Presently she stopped, and said to herself: - -"It was right here. Oh, if it was to do over again, I wouldn't say -that--I wouldn't say it for the whole world. But he's gone now; I'll -never, never, never see him any more." - -This thought broke her down, and she wandered away, with tears rolling -down her cheeks. Then quite a group of boys and girls--playmates of -Tom's and Joe's--came by, and stood looking over the paling fence and -talking in reverent tones of how Tom did so-and-so the last time they -saw him, and how Joe said this and that small trifle (pregnant with -awful prophecy, as they could easily see now!)--and each speaker -pointed out the exact spot where the lost lads stood at the time, and -then added something like "and I was a-standing just so--just as I am -now, and as if you was him--I was as close as that--and he smiled, just -this way--and then something seemed to go all over me, like--awful, you -know--and I never thought what it meant, of course, but I can see now!" 
- -Then there was a dispute about who saw the dead boys last in life, and -many claimed that dismal distinction, and offered evidences, more or -less tampered with by the witness; and when it was ultimately decided -who DID see the departed last, and exchanged the last words with them, -the lucky parties took upon themselves a sort of sacred importance, and -were gaped at and envied by all the rest. One poor chap, who had no -other grandeur to offer, said with tolerably manifest pride in the -remembrance: - -"Well, Tom Sawyer he licked me once." - -But that bid for glory was a failure. Most of the boys could say that, -and so that cheapened the distinction too much. The group loitered -away, still recalling memories of the lost heroes, in awed voices. - -When the Sunday-school hour was finished, the next morning, the bell -began to toll, instead of ringing in the usual way. It was a very still -Sabbath, and the mournful sound seemed in keeping with the musing hush -that lay upon nature. The villagers began to gather, loitering a moment -in the vestibule to converse in whispers about the sad event. But there -was no whispering in the house; only the funereal rustling of dresses -as the women gathered to their seats disturbed the silence there. None -could remember when the little church had been so full before. There -was finally a waiting pause, an expectant dumbness, and then Aunt Polly -entered, followed by Sid and Mary, and they by the Harper family, all -in deep black, and the whole congregation, the old minister as well, -rose reverently and stood until the mourners were seated in the front -pew. There was another communing silence, broken at intervals by -muffled sobs, and then the minister spread his hands abroad and prayed. -A moving hymn was sung, and the text followed: "I am the Resurrection -and the Life." 
- -As the service proceeded, the clergyman drew such pictures of the -graces, the winning ways, and the rare promise of the lost lads that -every soul there, thinking he recognized these pictures, felt a pang in -remembering that he had persistently blinded himself to them always -before, and had as persistently seen only faults and flaws in the poor -boys. The minister related many a touching incident in the lives of the -departed, too, which illustrated their sweet, generous natures, and the -people could easily see, now, how noble and beautiful those episodes -were, and remembered with grief that at the time they occurred they had -seemed rank rascalities, well deserving of the cowhide. The -congregation became more and more moved, as the pathetic tale went on, -till at last the whole company broke down and joined the weeping -mourners in a chorus of anguished sobs, the preacher himself giving way -to his feelings, and crying in the pulpit. - -There was a rustle in the gallery, which nobody noticed; a moment -later the church door creaked; the minister raised his streaming eyes -above his handkerchief, and stood transfixed! First one and then -another pair of eyes followed the minister's, and then almost with one -impulse the congregation rose and stared while the three dead boys came -marching up the aisle, Tom in the lead, Joe next, and Huck, a ruin of -drooping rags, sneaking sheepishly in the rear! They had been hid in -the unused gallery listening to their own funeral sermon! - -Aunt Polly, Mary, and the Harpers threw themselves upon their restored -ones, smothered them with kisses and poured out thanksgivings, while -poor Huck stood abashed and uncomfortable, not knowing exactly what to -do or where to hide from so many unwelcoming eyes. He wavered, and -started to slink away, but Tom seized him and said: - -"Aunt Polly, it ain't fair. Somebody's got to be glad to see Huck." - -"And so they shall. I'm glad to see him, poor motherless thing!" 
And -the loving attentions Aunt Polly lavished upon him were the one thing -capable of making him more uncomfortable than he was before. - -Suddenly the minister shouted at the top of his voice: "Praise God -from whom all blessings flow--SING!--and put your hearts in it!" - -And they did. Old Hundred swelled up with a triumphant burst, and -while it shook the rafters Tom Sawyer the Pirate looked around upon the -envying juveniles about him and confessed in his heart that this was -the proudest moment of his life. - -As the "sold" congregation trooped out they said they would almost be -willing to be made ridiculous again to hear Old Hundred sung like that -once more. - -Tom got more cuffs and kisses that day--according to Aunt Polly's -varying moods--than he had earned before in a year; and he hardly knew -which expressed the most gratefulness to God and affection for himself. - - - -CHAPTER XVIII - -THAT was Tom's great secret--the scheme to return home with his -brother pirates and attend their own funerals. They had paddled over to -the Missouri shore on a log, at dusk on Saturday, landing five or six -miles below the village; they had slept in the woods at the edge of the -town till nearly daylight, and had then crept through back lanes and -alleys and finished their sleep in the gallery of the church among a -chaos of invalided benches. - -At breakfast, Monday morning, Aunt Polly and Mary were very loving to -Tom, and very attentive to his wants. There was an unusual amount of -talk. In the course of it Aunt Polly said: - -"Well, I don't say it wasn't a fine joke, Tom, to keep everybody -suffering 'most a week so you boys had a good time, but it is a pity -you could be so hard-hearted as to let me suffer so. If you could come -over on a log to go to your funeral, you could have come over and give -me a hint some way that you warn't dead, but only run off." - -"Yes, you could have done that, Tom," said Mary; "and I believe you -would if you had thought of it." 
- -"Would you, Tom?" said Aunt Polly, her face lighting wistfully. "Say, -now, would you, if you'd thought of it?" - -"I--well, I don't know. 'Twould 'a' spoiled everything." - -"Tom, I hoped you loved me that much," said Aunt Polly, with a grieved -tone that discomforted the boy. "It would have been something if you'd -cared enough to THINK of it, even if you didn't DO it." - -"Now, auntie, that ain't any harm," pleaded Mary; "it's only Tom's -giddy way--he is always in such a rush that he never thinks of -anything." - -"More's the pity. Sid would have thought. And Sid would have come and -DONE it, too. Tom, you'll look back, some day, when it's too late, and -wish you'd cared a little more for me when it would have cost you so -little." - -"Now, auntie, you know I do care for you," said Tom. - -"I'd know it better if you acted more like it." - -"I wish now I'd thought," said Tom, with a repentant tone; "but I -dreamt about you, anyway. That's something, ain't it?" - -"It ain't much--a cat does that much--but it's better than nothing. -What did you dream?" - -"Why, Wednesday night I dreamt that you was sitting over there by the -bed, and Sid was sitting by the woodbox, and Mary next to him." - -"Well, so we did. So we always do. I'm glad your dreams could take -even that much trouble about us." - -"And I dreamt that Joe Harper's mother was here." - -"Why, she was here! Did you dream any more?" - -"Oh, lots. But it's so dim, now." - -"Well, try to recollect--can't you?" - -"Somehow it seems to me that the wind--the wind blowed the--the--" - -"Try harder, Tom! The wind did blow something. Come!" - -Tom pressed his fingers on his forehead an anxious minute, and then -said: - -"I've got it now! I've got it now! It blowed the candle!" - -"Mercy on us! Go on, Tom--go on!" - -"And it seems to me that you said, 'Why, I believe that that door--'" - -"Go ON, Tom!" - -"Just let me study a moment--just a moment. Oh, yes--you said you -believed the door was open." 
- -"As I'm sitting here, I did! Didn't I, Mary! Go on!" - -"And then--and then--well I won't be certain, but it seems like as if -you made Sid go and--and--" - -"Well? Well? What did I make him do, Tom? What did I make him do?" - -"You made him--you--Oh, you made him shut it." - -"Well, for the land's sake! I never heard the beat of that in all my -days! Don't tell ME there ain't anything in dreams, any more. Sereny -Harper shall know of this before I'm an hour older. I'd like to see her -get around THIS with her rubbage 'bout superstition. Go on, Tom!" - -"Oh, it's all getting just as bright as day, now. Next you said I -warn't BAD, only mischeevous and harum-scarum, and not any more -responsible than--than--I think it was a colt, or something." - -"And so it was! Well, goodness gracious! Go on, Tom!" - -"And then you began to cry." - -"So I did. So I did. Not the first time, neither. And then--" - -"Then Mrs. Harper she began to cry, and said Joe was just the same, -and she wished she hadn't whipped him for taking cream when she'd -throwed it out her own self--" - -"Tom! The sperrit was upon you! You was a prophesying--that's what you -was doing! Land alive, go on, Tom!" - -"Then Sid he said--he said--" - -"I don't think I said anything," said Sid. - -"Yes you did, Sid," said Mary. - -"Shut your heads and let Tom go on! What did he say, Tom?" - -"He said--I THINK he said he hoped I was better off where I was gone -to, but if I'd been better sometimes--" - -"THERE, d'you hear that! It was his very words!" - -"And you shut him up sharp." - -"I lay I did! There must 'a' been an angel there. There WAS an angel -there, somewheres!" - -"And Mrs. Harper told about Joe scaring her with a firecracker, and -you told about Peter and the Painkiller--" - -"Just as true as I live!" - -"And then there was a whole lot of talk 'bout dragging the river for -us, and 'bout having the funeral Sunday, and then you and old Miss -Harper hugged and cried, and she went." 
- -"It happened just so! It happened just so, as sure as I'm a-sitting in -these very tracks. Tom, you couldn't told it more like if you'd 'a' -seen it! And then what? Go on, Tom!" - -"Then I thought you prayed for me--and I could see you and hear every -word you said. And you went to bed, and I was so sorry that I took and -wrote on a piece of sycamore bark, 'We ain't dead--we are only off -being pirates,' and put it on the table by the candle; and then you -looked so good, laying there asleep, that I thought I went and leaned -over and kissed you on the lips." - -"Did you, Tom, DID you! I just forgive you everything for that!" And -she seized the boy in a crushing embrace that made him feel like the -guiltiest of villains. - -"It was very kind, even though it was only a--dream," Sid soliloquized -just audibly. - -"Shut up, Sid! A body does just the same in a dream as he'd do if he -was awake. Here's a big Milum apple I've been saving for you, Tom, if -you was ever found again--now go 'long to school. I'm thankful to the -good God and Father of us all I've got you back, that's long-suffering -and merciful to them that believe on Him and keep His word, though -goodness knows I'm unworthy of it, but if only the worthy ones got His -blessings and had His hand to help them over the rough places, there's -few enough would smile here or ever enter into His rest when the long -night comes. Go 'long Sid, Mary, Tom--take yourselves off--you've -hendered me long enough." - -The children left for school, and the old lady to call on Mrs. Harper -and vanquish her realism with Tom's marvellous dream. Sid had better -judgment than to utter the thought that was in his mind as he left the -house. It was this: "Pretty thin--as long a dream as that, without any -mistakes in it!" - -What a hero Tom was become, now! He did not go skipping and prancing, -but moved with a dignified swagger as became a pirate who felt that the -public eye was on him. 
And indeed it was; he tried not to seem to see -the looks or hear the remarks as he passed along, but they were food -and drink to him. Smaller boys than himself flocked at his heels, as -proud to be seen with him, and tolerated by him, as if he had been the -drummer at the head of a procession or the elephant leading a menagerie -into town. Boys of his own size pretended not to know he had been away -at all; but they were consuming with envy, nevertheless. They would -have given anything to have that swarthy suntanned skin of his, and his -glittering notoriety; and Tom would not have parted with either for a -circus. - -At school the children made so much of him and of Joe, and delivered -such eloquent admiration from their eyes, that the two heroes were not -long in becoming insufferably "stuck-up." They began to tell their -adventures to hungry listeners--but they only began; it was not a thing -likely to have an end, with imaginations like theirs to furnish -material. And finally, when they got out their pipes and went serenely -puffing around, the very summit of glory was reached. - -Tom decided that he could be independent of Becky Thatcher now. Glory -was sufficient. He would live for glory. Now that he was distinguished, -maybe she would be wanting to "make up." Well, let her--she should see -that he could be as indifferent as some other people. Presently she -arrived. Tom pretended not to see her. He moved away and joined a group -of boys and girls and began to talk. Soon he observed that she was -tripping gayly back and forth with flushed face and dancing eyes, -pretending to be busy chasing schoolmates, and screaming with laughter -when she made a capture; but he noticed that she always made her -captures in his vicinity, and that she seemed to cast a conscious eye -in his direction at such times, too. 
It gratified all the vicious -vanity that was in him; and so, instead of winning him, it only "set -him up" the more and made him the more diligent to avoid betraying that -he knew she was about. Presently she gave over skylarking, and moved -irresolutely about, sighing once or twice and glancing furtively and -wistfully toward Tom. Then she observed that now Tom was talking more -particularly to Amy Lawrence than to any one else. She felt a sharp -pang and grew disturbed and uneasy at once. She tried to go away, but -her feet were treacherous, and carried her to the group instead. She -said to a girl almost at Tom's elbow--with sham vivacity: - -"Why, Mary Austin! you bad girl, why didn't you come to Sunday-school?" - -"I did come--didn't you see me?" - -"Why, no! Did you? Where did you sit?" - -"I was in Miss Peters' class, where I always go. I saw YOU." - -"Did you? Why, it's funny I didn't see you. I wanted to tell you about -the picnic." - -"Oh, that's jolly. Who's going to give it?" - -"My ma's going to let me have one." - -"Oh, goody; I hope she'll let ME come." - -"Well, she will. The picnic's for me. She'll let anybody come that I -want, and I want you." - -"That's ever so nice. When is it going to be?" - -"By and by. Maybe about vacation." - -"Oh, won't it be fun! You going to have all the girls and boys?" - -"Yes, every one that's friends to me--or wants to be"; and she glanced -ever so furtively at Tom, but he talked right along to Amy Lawrence -about the terrible storm on the island, and how the lightning tore the -great sycamore tree "all to flinders" while he was "standing within -three feet of it." - -"Oh, may I come?" said Grace Miller. - -"Yes." - -"And me?" said Sally Rogers. - -"Yes." - -"And me, too?" said Susy Harper. "And Joe?" - -"Yes." - -And so on, with clapping of joyful hands till all the group had begged -for invitations but Tom and Amy. Then Tom turned coolly away, still -talking, and took Amy with him. 
Becky's lips trembled and the tears -came to her eyes; she hid these signs with a forced gayety and went on -chattering, but the life had gone out of the picnic, now, and out of -everything else; she got away as soon as she could and hid herself and -had what her sex call "a good cry." Then she sat moody, with wounded -pride, till the bell rang. She roused up, now, with a vindictive cast -in her eye, and gave her plaited tails a shake and said she knew what -SHE'D do. - -At recess Tom continued his flirtation with Amy with jubilant -self-satisfaction. And he kept drifting about to find Becky and lacerate -her with the performance. At last he spied her, but there was a sudden -falling of his mercury. She was sitting cosily on a little bench behind -the schoolhouse looking at a picture-book with Alfred Temple--and so -absorbed were they, and their heads so close together over the book, -that they did not seem to be conscious of anything in the world besides. -Jealousy ran red-hot through Tom's veins. He began to hate himself for -throwing away the chance Becky had offered for a reconciliation. He -called himself a fool, and all the hard names he could think of. He -wanted to cry with vexation. Amy chatted happily along, as they walked, -for her heart was singing, but Tom's tongue had lost its function. He -did not hear what Amy was saying, and whenever she paused expectantly he -could only stammer an awkward assent, which was as often misplaced as -otherwise. He kept drifting to the rear of the schoolhouse, again and -again, to sear his eyeballs with the hateful spectacle there. He could -not help it. And it maddened him to see, as he thought he saw, that -Becky Thatcher never once suspected that he was even in the land of the -living. But she did see, nevertheless; and she knew she was winning her -fight, too, and was glad to see him suffer as she had suffered. - -Amy's happy prattle became intolerable. 
Tom hinted at things he had to -attend to; things that must be done; and time was fleeting. But in -vain--the girl chirped on. Tom thought, "Oh, hang her, ain't I ever -going to get rid of her?" At last he must be attending to those -things--and she said artlessly that she would be "around" when school -let out. And he hastened away, hating her for it. - -"Any other boy!" Tom thought, grating his teeth. "Any boy in the whole -town but that Saint Louis smarty that thinks he dresses so fine and is -aristocracy! Oh, all right, I licked you the first day you ever saw -this town, mister, and I'll lick you again! You just wait till I catch -you out! I'll just take and--" - -And he went through the motions of thrashing an imaginary boy ---pummelling the air, and kicking and gouging. "Oh, you do, do you? You -holler 'nough, do you? Now, then, let that learn you!" And so the -imaginary flogging was finished to his satisfaction. - -Tom fled home at noon. His conscience could not endure any more of -Amy's grateful happiness, and his jealousy could bear no more of the -other distress. Becky resumed her picture inspections with Alfred, but -as the minutes dragged along and no Tom came to suffer, her triumph -began to cloud and she lost interest; gravity and absent-mindedness -followed, and then melancholy; two or three times she pricked up her -ear at a footstep, but it was a false hope; no Tom came. At last she -grew entirely miserable and wished she hadn't carried it so far. When -poor Alfred, seeing that he was losing her, he did not know how, kept -exclaiming: "Oh, here's a jolly one! look at this!" she lost patience -at last, and said, "Oh, don't bother me! I don't care for them!" and -burst into tears, and got up and walked away. - -Alfred dropped alongside and was going to try to comfort her, but she -said: - -"Go away and leave me alone, can't you! I hate you!" 
- -So the boy halted, wondering what he could have done--for she had said -she would look at pictures all through the nooning--and she walked on, -crying. Then Alfred went musing into the deserted schoolhouse. He was -humiliated and angry. He easily guessed his way to the truth--the girl -had simply made a convenience of him to vent her spite upon Tom Sawyer. -He was far from hating Tom the less when this thought occurred to him. -He wished there was some way to get that boy into trouble without much -risk to himself. Tom's spelling-book fell under his eye. Here was his -opportunity. He gratefully opened to the lesson for the afternoon and -poured ink upon the page. - -Becky, glancing in at a window behind him at the moment, saw the act, -and moved on, without discovering herself. She started homeward, now, -intending to find Tom and tell him; Tom would be thankful and their -troubles would be healed. Before she was half way home, however, she -had changed her mind. The thought of Tom's treatment of her when she -was talking about her picnic came scorching back and filled her with -shame. She resolved to let him get whipped on the damaged -spelling-book's account, and to hate him forever, into the bargain. - - - -CHAPTER XIX - -TOM arrived at home in a dreary mood, and the first thing his aunt -said to him showed him that he had brought his sorrows to an -unpromising market: - -"Tom, I've a notion to skin you alive!" - -"Auntie, what have I done?" - -"Well, you've done enough. Here I go over to Sereny Harper, like an -old softy, expecting I'm going to make her believe all that rubbage -about that dream, when lo and behold you she'd found out from Joe that -you was over here and heard all the talk we had that night. Tom, I -don't know what is to become of a boy that will act like that. It makes -me feel so bad to think you could let me go to Sereny Harper and make -such a fool of myself and never say a word." - -This was a new aspect of the thing. 
His smartness of the morning had -seemed to Tom a good joke before, and very ingenious. It merely looked -mean and shabby now. He hung his head and could not think of anything -to say for a moment. Then he said: - -"Auntie, I wish I hadn't done it--but I didn't think." - -"Oh, child, you never think. You never think of anything but your own -selfishness. You could think to come all the way over here from -Jackson's Island in the night to laugh at our troubles, and you could -think to fool me with a lie about a dream; but you couldn't ever think -to pity us and save us from sorrow." - -"Auntie, I know now it was mean, but I didn't mean to be mean. I -didn't, honest. And besides, I didn't come over here to laugh at you -that night." - -"What did you come for, then?" - -"It was to tell you not to be uneasy about us, because we hadn't got -drownded." - -"Tom, Tom, I would be the thankfullest soul in this world if I could -believe you ever had as good a thought as that, but you know you never -did--and I know it, Tom." - -"Indeed and 'deed I did, auntie--I wish I may never stir if I didn't." - -"Oh, Tom, don't lie--don't do it. It only makes things a hundred times -worse." - -"It ain't a lie, auntie; it's the truth. I wanted to keep you from -grieving--that was all that made me come." - -"I'd give the whole world to believe that--it would cover up a power -of sins, Tom. I'd 'most be glad you'd run off and acted so bad. But it -ain't reasonable; because, why didn't you tell me, child?" - -"Why, you see, when you got to talking about the funeral, I just got -all full of the idea of our coming and hiding in the church, and I -couldn't somehow bear to spoil it. So I just put the bark back in my -pocket and kept mum." - -"What bark?" - -"The bark I had wrote on to tell you we'd gone pirating. I wish, now, -you'd waked up when I kissed you--I do, honest." - -The hard lines in his aunt's face relaxed and a sudden tenderness -dawned in her eyes. - -"DID you kiss me, Tom?" 
- -"Why, yes, I did." - -"Are you sure you did, Tom?" - -"Why, yes, I did, auntie--certain sure." - -"What did you kiss me for, Tom?" - -"Because I loved you so, and you laid there moaning and I was so sorry." - -The words sounded like truth. The old lady could not hide a tremor in -her voice when she said: - -"Kiss me again, Tom!--and be off with you to school, now, and don't -bother me any more." - -The moment he was gone, she ran to a closet and got out the ruin of a -jacket which Tom had gone pirating in. Then she stopped, with it in her -hand, and said to herself: - -"No, I don't dare. Poor boy, I reckon he's lied about it--but it's a -blessed, blessed lie, there's such a comfort come from it. I hope the -Lord--I KNOW the Lord will forgive him, because it was such -goodheartedness in him to tell it. But I don't want to find out it's a -lie. I won't look." - -She put the jacket away, and stood by musing a minute. Twice she put -out her hand to take the garment again, and twice she refrained. Once -more she ventured, and this time she fortified herself with the -thought: "It's a good lie--it's a good lie--I won't let it grieve me." -So she sought the jacket pocket. A moment later she was reading Tom's -piece of bark through flowing tears and saying: "I could forgive the -boy, now, if he'd committed a million sins!" - - - -CHAPTER XX - -THERE was something about Aunt Polly's manner, when she kissed Tom, -that swept away his low spirits and made him lighthearted and happy -again. He started to school and had the luck of coming upon Becky -Thatcher at the head of Meadow Lane. His mood always determined his -manner. Without a moment's hesitation he ran to her and said: - -"I acted mighty mean to-day, Becky, and I'm so sorry. I won't ever, -ever do that way again, as long as ever I live--please make up, won't -you?" - -The girl stopped and looked him scornfully in the face: - -"I'll thank you to keep yourself TO yourself, Mr. Thomas Sawyer. 
I'll -never speak to you again." - -She tossed her head and passed on. Tom was so stunned that he had not -even presence of mind enough to say "Who cares, Miss Smarty?" until the -right time to say it had gone by. So he said nothing. But he was in a -fine rage, nevertheless. He moped into the schoolyard wishing she were -a boy, and imagining how he would trounce her if she were. He presently -encountered her and delivered a stinging remark as he passed. She -hurled one in return, and the angry breach was complete. It seemed to -Becky, in her hot resentment, that she could hardly wait for school to -"take in," she was so impatient to see Tom flogged for the injured -spelling-book. If she had had any lingering notion of exposing Alfred -Temple, Tom's offensive fling had driven it entirely away. - -Poor girl, she did not know how fast she was nearing trouble herself. -The master, Mr. Dobbins, had reached middle age with an unsatisfied -ambition. The darling of his desires was, to be a doctor, but poverty -had decreed that he should be nothing higher than a village -schoolmaster. Every day he took a mysterious book out of his desk and -absorbed himself in it at times when no classes were reciting. He kept -that book under lock and key. There was not an urchin in school but was -perishing to have a glimpse of it, but the chance never came. Every boy -and girl had a theory about the nature of that book; but no two -theories were alike, and there was no way of getting at the facts in -the case. Now, as Becky was passing by the desk, which stood near the -door, she noticed that the key was in the lock! It was a precious -moment. She glanced around; found herself alone, and the next instant -she had the book in her hands. The title-page--Professor Somebody's -ANATOMY--carried no information to her mind; so she began to turn the -leaves. She came at once upon a handsomely engraved and colored -frontispiece--a human figure, stark naked. 
At that moment a shadow fell -on the page and Tom Sawyer stepped in at the door and caught a glimpse -of the picture. Becky snatched at the book to close it, and had the -hard luck to tear the pictured page half down the middle. She thrust -the volume into the desk, turned the key, and burst out crying with -shame and vexation. - -"Tom Sawyer, you are just as mean as you can be, to sneak up on a -person and look at what they're looking at." - -"How could I know you was looking at anything?" - -"You ought to be ashamed of yourself, Tom Sawyer; you know you're -going to tell on me, and oh, what shall I do, what shall I do! I'll be -whipped, and I never was whipped in school." - -Then she stamped her little foot and said: - -"BE so mean if you want to! I know something that's going to happen. -You just wait and you'll see! Hateful, hateful, hateful!"--and she -flung out of the house with a new explosion of crying. - -Tom stood still, rather flustered by this onslaught. Presently he said -to himself: - -"What a curious kind of a fool a girl is! Never been licked in school! -Shucks! What's a licking! That's just like a girl--they're so -thin-skinned and chicken-hearted. Well, of course I ain't going to tell -old Dobbins on this little fool, because there's other ways of getting -even on her, that ain't so mean; but what of it? Old Dobbins will ask -who it was tore his book. Nobody'll answer. Then he'll do just the way -he always does--ask first one and then t'other, and when he comes to the -right girl he'll know it, without any telling. Girls' faces always tell -on them. They ain't got any backbone. She'll get licked. Well, it's a -kind of a tight place for Becky Thatcher, because there ain't any way -out of it." Tom conned the thing a moment longer, and then added: "All -right, though; she'd like to see me in just such a fix--let her sweat it -out!" - -Tom joined the mob of skylarking scholars outside. In a few moments -the master arrived and school "took in." 
Tom did not feel a strong -interest in his studies. Every time he stole a glance at the girls' -side of the room Becky's face troubled him. Considering all things, he -did not want to pity her, and yet it was all he could do to help it. He -could get up no exultation that was really worthy the name. Presently -the spelling-book discovery was made, and Tom's mind was entirely full -of his own matters for a while after that. Becky roused up from her -lethargy of distress and showed good interest in the proceedings. She -did not expect that Tom could get out of his trouble by denying that he -spilt the ink on the book himself; and she was right. The denial only -seemed to make the thing worse for Tom. Becky supposed she would be -glad of that, and she tried to believe she was glad of it, but she -found she was not certain. When the worst came to the worst, she had an -impulse to get up and tell on Alfred Temple, but she made an effort and -forced herself to keep still--because, said she to herself, "he'll tell -about me tearing the picture sure. I wouldn't say a word, not to save -his life!" - -Tom took his whipping and went back to his seat not at all -broken-hearted, for he thought it was possible that he had unknowingly -upset the ink on the spelling-book himself, in some skylarking bout--he -had denied it for form's sake and because it was custom, and had stuck -to the denial from principle. - -A whole hour drifted by, the master sat nodding in his throne, the air -was drowsy with the hum of study. By and by, Mr. Dobbins straightened -himself up, yawned, then unlocked his desk, and reached for his book, -but seemed undecided whether to take it out or leave it. Most of the -pupils glanced up languidly, but there were two among them that watched -his movements with intent eyes. Mr. Dobbins fingered his book absently -for a while, then took it out and settled himself in his chair to read! -Tom shot a glance at Becky. 
He had seen a hunted and helpless rabbit -look as she did, with a gun levelled at its head. Instantly he forgot -his quarrel with her. Quick--something must be done! done in a flash, -too! But the very imminence of the emergency paralyzed his invention. -Good!--he had an inspiration! He would run and snatch the book, spring -through the door and fly. But his resolution shook for one little -instant, and the chance was lost--the master opened the volume. If Tom -only had the wasted opportunity back again! Too late. There was no help -for Becky now, he said. The next moment the master faced the school. -Every eye sank under his gaze. There was that in it which smote even -the innocent with fear. There was silence while one might count ten ---the master was gathering his wrath. Then he spoke: "Who tore this book?" - -There was not a sound. One could have heard a pin drop. The stillness -continued; the master searched face after face for signs of guilt. - -"Benjamin Rogers, did you tear this book?" - -A denial. Another pause. - -"Joseph Harper, did you?" - -Another denial. Tom's uneasiness grew more and more intense under the -slow torture of these proceedings. The master scanned the ranks of -boys--considered a while, then turned to the girls: - -"Amy Lawrence?" - -A shake of the head. - -"Gracie Miller?" - -The same sign. - -"Susan Harper, did you do this?" - -Another negative. The next girl was Becky Thatcher. Tom was trembling -from head to foot with excitement and a sense of the hopelessness of -the situation. - -"Rebecca Thatcher" [Tom glanced at her face--it was white with terror] ---"did you tear--no, look me in the face" [her hands rose in appeal] ---"did you tear this book?" - -A thought shot like lightning through Tom's brain. He sprang to his -feet and shouted--"I done it!" - -The school stared in perplexity at this incredible folly. 
Tom stood a -moment, to gather his dismembered faculties; and when he stepped -forward to go to his punishment the surprise, the gratitude, the -adoration that shone upon him out of poor Becky's eyes seemed pay -enough for a hundred floggings. Inspired by the splendor of his own -act, he took without an outcry the most merciless flaying that even Mr. -Dobbins had ever administered; and also received with indifference the -added cruelty of a command to remain two hours after school should be -dismissed--for he knew who would wait for him outside till his -captivity was done, and not count the tedious time as loss, either. - -Tom went to bed that night planning vengeance against Alfred Temple; -for with shame and repentance Becky had told him all, not forgetting -her own treachery; but even the longing for vengeance had to give way, -soon, to pleasanter musings, and he fell asleep at last with Becky's -latest words lingering dreamily in his ear-- - -"Tom, how COULD you be so noble!" - - - -CHAPTER XXI - -VACATION was approaching. The schoolmaster, always severe, grew -severer and more exacting than ever, for he wanted the school to make a -good showing on "Examination" day. His rod and his ferule were seldom -idle now--at least among the smaller pupils. Only the biggest boys, and -young ladies of eighteen and twenty, escaped lashing. Mr. Dobbins' -lashings were very vigorous ones, too; for although he carried, under -his wig, a perfectly bald and shiny head, he had only reached middle -age, and there was no sign of feebleness in his muscle. As the great -day approached, all the tyranny that was in him came to the surface; he -seemed to take a vindictive pleasure in punishing the least -shortcomings. The consequence was, that the smaller boys spent their -days in terror and suffering and their nights in plotting revenge. They -threw away no opportunity to do the master a mischief. But he kept -ahead all the time. 
The retribution that followed every vengeful -success was so sweeping and majestic that the boys always retired from -the field badly worsted. At last they conspired together and hit upon a -plan that promised a dazzling victory. They swore in the sign-painter's -boy, told him the scheme, and asked his help. He had his own reasons -for being delighted, for the master boarded in his father's family and -had given the boy ample cause to hate him. The master's wife would go -on a visit to the country in a few days, and there would be nothing to -interfere with the plan; the master always prepared himself for great -occasions by getting pretty well fuddled, and the sign-painter's boy -said that when the dominie had reached the proper condition on -Examination Evening he would "manage the thing" while he napped in his -chair; then he would have him awakened at the right time and hurried -away to school. - -In the fulness of time the interesting occasion arrived. At eight in -the evening the schoolhouse was brilliantly lighted, and adorned with -wreaths and festoons of foliage and flowers. The master sat throned in -his great chair upon a raised platform, with his blackboard behind him. -He was looking tolerably mellow. Three rows of benches on each side and -six rows in front of him were occupied by the dignitaries of the town -and by the parents of the pupils. To his left, back of the rows of -citizens, was a spacious temporary platform upon which were seated the -scholars who were to take part in the exercises of the evening; rows of -small boys, washed and dressed to an intolerable state of discomfort; -rows of gawky big boys; snowbanks of girls and young ladies clad in -lawn and muslin and conspicuously conscious of their bare arms, their -grandmothers' ancient trinkets, their bits of pink and blue ribbon and -the flowers in their hair. All the rest of the house was filled with -non-participating scholars. - -The exercises began. 
A very little boy stood up and sheepishly -recited, "You'd scarce expect one of my age to speak in public on the -stage," etc.--accompanying himself with the painfully exact and -spasmodic gestures which a machine might have used--supposing the -machine to be a trifle out of order. But he got through safely, though -cruelly scared, and got a fine round of applause when he made his -manufactured bow and retired. - -A little shamefaced girl lisped, "Mary had a little lamb," etc., -performed a compassion-inspiring curtsy, got her meed of applause, and -sat down flushed and happy. - -Tom Sawyer stepped forward with conceited confidence and soared into -the unquenchable and indestructible "Give me liberty or give me death" -speech, with fine fury and frantic gesticulation, and broke down in the -middle of it. A ghastly stage-fright seized him, his legs quaked under -him and he was like to choke. True, he had the manifest sympathy of the -house but he had the house's silence, too, which was even worse than -its sympathy. The master frowned, and this completed the disaster. Tom -struggled awhile and then retired, utterly defeated. There was a weak -attempt at applause, but it died early. - -"The Boy Stood on the Burning Deck" followed; also "The Assyrian Came -Down," and other declamatory gems. Then there were reading exercises, -and a spelling fight. The meagre Latin class recited with honor. The -prime feature of the evening was in order, now--original "compositions" -by the young ladies. Each in her turn stepped forward to the edge of -the platform, cleared her throat, held up her manuscript (tied with -dainty ribbon), and proceeded to read, with labored attention to -"expression" and punctuation. The themes were the same that had been -illuminated upon similar occasions by their mothers before them, their -grandmothers, and doubtless all their ancestors in the female line -clear back to the Crusades. 
"Friendship" was one; "Memories of Other -Days"; "Religion in History"; "Dream Land"; "The Advantages of -Culture"; "Forms of Political Government Compared and Contrasted"; -"Melancholy"; "Filial Love"; "Heart Longings," etc., etc. - -A prevalent feature in these compositions was a nursed and petted -melancholy; another was a wasteful and opulent gush of "fine language"; -another was a tendency to lug in by the ears particularly prized words -and phrases until they were worn entirely out; and a peculiarity that -conspicuously marked and marred them was the inveterate and intolerable -sermon that wagged its crippled tail at the end of each and every one -of them. No matter what the subject might be, a brain-racking effort -was made to squirm it into some aspect or other that the moral and -religious mind could contemplate with edification. The glaring -insincerity of these sermons was not sufficient to compass the -banishment of the fashion from the schools, and it is not sufficient -to-day; it never will be sufficient while the world stands, perhaps. -There is no school in all our land where the young ladies do not feel -obliged to close their compositions with a sermon; and you will find -that the sermon of the most frivolous and the least religious girl in -the school is always the longest and the most relentlessly pious. But -enough of this. Homely truth is unpalatable. - -Let us return to the "Examination." The first composition that was -read was one entitled "Is this, then, Life?" Perhaps the reader can -endure an extract from it: - - "In the common walks of life, with what delightful - emotions does the youthful mind look forward to some - anticipated scene of festivity! Imagination is busy - sketching rose-tinted pictures of joy. In fancy, the - voluptuous votary of fashion sees herself amid the - festive throng, 'the observed of all observers.' 
Her - graceful form, arrayed in snowy robes, is whirling - through the mazes of the joyous dance; her eye is - brightest, her step is lightest in the gay assembly. - - "In such delicious fancies time quickly glides by, - and the welcome hour arrives for her entrance into - the Elysian world, of which she has had such bright - dreams. How fairy-like does everything appear to - her enchanted vision! Each new scene is more charming - than the last. But after a while she finds that - beneath this goodly exterior, all is vanity, the - flattery which once charmed her soul, now grates - harshly upon her ear; the ball-room has lost its - charms; and with wasted health and imbittered heart, - she turns away with the conviction that earthly - pleasures cannot satisfy the longings of the soul!" - -And so forth and so on. There was a buzz of gratification from time to -time during the reading, accompanied by whispered ejaculations of "How -sweet!" "How eloquent!" "So true!" etc., and after the thing had closed -with a peculiarly afflicting sermon the applause was enthusiastic. - -Then arose a slim, melancholy girl, whose face had the "interesting" -paleness that comes of pills and indigestion, and read a "poem." Two -stanzas of it will do: - - "A MISSOURI MAIDEN'S FAREWELL TO ALABAMA - - "Alabama, good-bye! I love thee well! - But yet for a while do I leave thee now! - Sad, yes, sad thoughts of thee my heart doth swell, - And burning recollections throng my brow! - For I have wandered through thy flowery woods; - Have roamed and read near Tallapoosa's stream; - Have listened to Tallassee's warring floods, - And wooed on Coosa's side Aurora's beam. - - "Yet shame I not to bear an o'er-full heart, - Nor blush to turn behind my tearful eyes; - 'Tis from no stranger land I now must part, - 'Tis to no strangers left I yield these sighs. 
- Welcome and home were mine within this State, - Whose vales I leave--whose spires fade fast from me - And cold must be mine eyes, and heart, and tete, - When, dear Alabama! they turn cold on thee!" - -There were very few there who knew what "tete" meant, but the poem was -very satisfactory, nevertheless. - -Next appeared a dark-complexioned, black-eyed, black-haired young -lady, who paused an impressive moment, assumed a tragic expression, and -began to read in a measured, solemn tone: - - "A VISION - - "Dark and tempestuous was night. Around the - throne on high not a single star quivered; but - the deep intonations of the heavy thunder - constantly vibrated upon the ear; whilst the - terrific lightning revelled in angry mood - through the cloudy chambers of heaven, seeming - to scorn the power exerted over its terror by - the illustrious Franklin! Even the boisterous - winds unanimously came forth from their mystic - homes, and blustered about as if to enhance by - their aid the wildness of the scene. - - "At such a time, so dark, so dreary, for human - sympathy my very spirit sighed; but instead thereof, - - "'My dearest friend, my counsellor, my comforter - and guide--My joy in grief, my second bliss - in joy,' came to my side. She moved like one of - those bright beings pictured in the sunny walks - of fancy's Eden by the romantic and young, a - queen of beauty unadorned save by her own - transcendent loveliness. So soft was her step, it - failed to make even a sound, and but for the - magical thrill imparted by her genial touch, as - other unobtrusive beauties, she would have glided - away un-perceived--unsought. A strange sadness - rested upon her features, like icy tears upon - the robe of December, as she pointed to the - contending elements without, and bade me contemplate - the two beings presented." 
- -This nightmare occupied some ten pages of manuscript and wound up with -a sermon so destructive of all hope to non-Presbyterians that it took -the first prize. This composition was considered to be the very finest -effort of the evening. The mayor of the village, in delivering the -prize to the author of it, made a warm speech in which he said that it -was by far the most "eloquent" thing he had ever listened to, and that -Daniel Webster himself might well be proud of it. - -It may be remarked, in passing, that the number of compositions in -which the word "beauteous" was over-fondled, and human experience -referred to as "life's page," was up to the usual average. - -Now the master, mellow almost to the verge of geniality, put his chair -aside, turned his back to the audience, and began to draw a map of -America on the blackboard, to exercise the geography class upon. But he -made a sad business of it with his unsteady hand, and a smothered -titter rippled over the house. He knew what the matter was, and set -himself to right it. He sponged out lines and remade them; but he only -distorted them more than ever, and the tittering was more pronounced. -He threw his entire attention upon his work, now, as if determined not -to be put down by the mirth. He felt that all eyes were fastened upon -him; he imagined he was succeeding, and yet the tittering continued; it -even manifestly increased. And well it might. There was a garret above, -pierced with a scuttle over his head; and down through this scuttle -came a cat, suspended around the haunches by a string; she had a rag -tied about her head and jaws to keep her from mewing; as she slowly -descended she curved upward and clawed at the string, she swung -downward and clawed at the intangible air. 
The tittering rose higher -and higher--the cat was within six inches of the absorbed teacher's -head--down, down, a little lower, and she grabbed his wig with her -desperate claws, clung to it, and was snatched up into the garret in an -instant with her trophy still in her possession! And how the light did -blaze abroad from the master's bald pate--for the sign-painter's boy -had GILDED it! - -That broke up the meeting. The boys were avenged. Vacation had come. - - NOTE:--The pretended "compositions" quoted in - this chapter are taken without alteration from a - volume entitled "Prose and Poetry, by a Western - Lady"--but they are exactly and precisely after - the schoolgirl pattern, and hence are much - happier than any mere imitations could be. - - - -CHAPTER XXII - -TOM joined the new order of Cadets of Temperance, being attracted by -the showy character of their "regalia." He promised to abstain from -smoking, chewing, and profanity as long as he remained a member. Now he -found out a new thing--namely, that to promise not to do a thing is the -surest way in the world to make a body want to go and do that very -thing. Tom soon found himself tormented with a desire to drink and -swear; the desire grew to be so intense that nothing but the hope of a -chance to display himself in his red sash kept him from withdrawing -from the order. Fourth of July was coming; but he soon gave that up ---gave it up before he had worn his shackles over forty-eight hours--and -fixed his hopes upon old Judge Frazer, justice of the peace, who was -apparently on his deathbed and would have a big public funeral, since -he was so high an official. During three days Tom was deeply concerned -about the Judge's condition and hungry for news of it. Sometimes his -hopes ran high--so high that he would venture to get out his regalia -and practise before the looking-glass. But the Judge had a most -discouraging way of fluctuating. 
At last he was pronounced upon the -mend--and then convalescent. Tom was disgusted; and felt a sense of -injury, too. He handed in his resignation at once--and that night the -Judge suffered a relapse and died. Tom resolved that he would never -trust a man like that again. - -The funeral was a fine thing. The Cadets paraded in a style calculated -to kill the late member with envy. Tom was a free boy again, however ---there was something in that. He could drink and swear, now--but found -to his surprise that he did not want to. The simple fact that he could, -took the desire away, and the charm of it. - -Tom presently wondered to find that his coveted vacation was beginning -to hang a little heavily on his hands. - -He attempted a diary--but nothing happened during three days, and so -he abandoned it. - -The first of all the negro minstrel shows came to town, and made a -sensation. Tom and Joe Harper got up a band of performers and were -happy for two days. - -Even the Glorious Fourth was in some sense a failure, for it rained -hard, there was no procession in consequence, and the greatest man in -the world (as Tom supposed), Mr. Benton, an actual United States -Senator, proved an overwhelming disappointment--for he was not -twenty-five feet high, nor even anywhere in the neighborhood of it. - -A circus came. The boys played circus for three days afterward in -tents made of rag carpeting--admission, three pins for boys, two for -girls--and then circusing was abandoned. - -A phrenologist and a mesmerizer came--and went again and left the -village duller and drearier than ever. - -There were some boys-and-girls' parties, but they were so few and so -delightful that they only made the aching voids between ache the harder. - -Becky Thatcher was gone to her Constantinople home to stay with her -parents during vacation--so there was no bright side to life anywhere. - -The dreadful secret of the murder was a chronic misery. It was a very -cancer for permanency and pain. 
- -Then came the measles. - -During two long weeks Tom lay a prisoner, dead to the world and its -happenings. He was very ill, he was interested in nothing. When he got -upon his feet at last and moved feebly down-town, a melancholy change -had come over everything and every creature. There had been a -"revival," and everybody had "got religion," not only the adults, but -even the boys and girls. Tom went about, hoping against hope for the -sight of one blessed sinful face, but disappointment crossed him -everywhere. He found Joe Harper studying a Testament, and turned sadly -away from the depressing spectacle. He sought Ben Rogers, and found him -visiting the poor with a basket of tracts. He hunted up Jim Hollis, who -called his attention to the precious blessing of his late measles as a -warning. Every boy he encountered added another ton to his depression; -and when, in desperation, he flew for refuge at last to the bosom of -Huckleberry Finn and was received with a Scriptural quotation, his -heart broke and he crept home and to bed realizing that he alone of all -the town was lost, forever and forever. - -And that night there came on a terrific storm, with driving rain, -awful claps of thunder and blinding sheets of lightning. He covered his -head with the bedclothes and waited in a horror of suspense for his -doom; for he had not the shadow of a doubt that all this hubbub was -about him. He believed he had taxed the forbearance of the powers above -to the extremity of endurance and that this was the result. It might -have seemed to him a waste of pomp and ammunition to kill a bug with a -battery of artillery, but there seemed nothing incongruous about the -getting up such an expensive thunderstorm as this to knock the turf -from under an insect like himself. - -By and by the tempest spent itself and died without accomplishing its -object. The boy's first impulse was to be grateful, and reform. His -second was to wait--for there might not be any more storms. 
- -The next day the doctors were back; Tom had relapsed. The three weeks -he spent on his back this time seemed an entire age. When he got abroad -at last he was hardly grateful that he had been spared, remembering how -lonely was his estate, how companionless and forlorn he was. He drifted -listlessly down the street and found Jim Hollis acting as judge in a -juvenile court that was trying a cat for murder, in the presence of her -victim, a bird. He found Joe Harper and Huck Finn up an alley eating a -stolen melon. Poor lads! they--like Tom--had suffered a relapse. - - - -CHAPTER XXIII - -AT last the sleepy atmosphere was stirred--and vigorously: the murder -trial came on in the court. It became the absorbing topic of village -talk immediately. Tom could not get away from it. Every reference to -the murder sent a shudder to his heart, for his troubled conscience and -fears almost persuaded him that these remarks were put forth in his -hearing as "feelers"; he did not see how he could be suspected of -knowing anything about the murder, but still he could not be -comfortable in the midst of this gossip. It kept him in a cold shiver -all the time. He took Huck to a lonely place to have a talk with him. -It would be some relief to unseal his tongue for a little while; to -divide his burden of distress with another sufferer. Moreover, he -wanted to assure himself that Huck had remained discreet. - -"Huck, have you ever told anybody about--that?" - -"'Bout what?" - -"You know what." - -"Oh--'course I haven't." - -"Never a word?" - -"Never a solitary word, so help me. What makes you ask?" - -"Well, I was afeard." - -"Why, Tom Sawyer, we wouldn't be alive two days if that got found out. -YOU know that." - -Tom felt more comfortable. After a pause: - -"Huck, they couldn't anybody get you to tell, could they?" - -"Get me to tell? Why, if I wanted that half-breed devil to drownd me -they could get me to tell. They ain't no different way." - -"Well, that's all right, then. 
I reckon we're safe as long as we keep -mum. But let's swear again, anyway. It's more surer." - -"I'm agreed." - -So they swore again with dread solemnities. - -"What is the talk around, Huck? I've heard a power of it." - -"Talk? Well, it's just Muff Potter, Muff Potter, Muff Potter all the -time. It keeps me in a sweat, constant, so's I want to hide som'ers." - -"That's just the same way they go on round me. I reckon he's a goner. -Don't you feel sorry for him, sometimes?" - -"Most always--most always. He ain't no account; but then he hain't -ever done anything to hurt anybody. Just fishes a little, to get money -to get drunk on--and loafs around considerable; but lord, we all do -that--leastways most of us--preachers and such like. But he's kind of -good--he give me half a fish, once, when there warn't enough for two; -and lots of times he's kind of stood by me when I was out of luck." - -"Well, he's mended kites for me, Huck, and knitted hooks on to my -line. I wish we could get him out of there." - -"My! we couldn't get him out, Tom. And besides, 'twouldn't do any -good; they'd ketch him again." - -"Yes--so they would. But I hate to hear 'em abuse him so like the -dickens when he never done--that." - -"I do too, Tom. Lord, I hear 'em say he's the bloodiest looking -villain in this country, and they wonder he wasn't ever hung before." - -"Yes, they talk like that, all the time. I've heard 'em say that if he -was to get free they'd lynch him." - -"And they'd do it, too." - -The boys had a long talk, but it brought them little comfort. As the -twilight drew on, they found themselves hanging about the neighborhood -of the little isolated jail, perhaps with an undefined hope that -something would happen that might clear away their difficulties. But -nothing happened; there seemed to be no angels or fairies interested in -this luckless captive. - -The boys did as they had often done before--went to the cell grating -and gave Potter some tobacco and matches. 
He was on the ground floor -and there were no guards. - -His gratitude for their gifts had always smote their consciences -before--it cut deeper than ever, this time. They felt cowardly and -treacherous to the last degree when Potter said: - -"You've been mighty good to me, boys--better'n anybody else in this -town. And I don't forget it, I don't. Often I says to myself, says I, -'I used to mend all the boys' kites and things, and show 'em where the -good fishin' places was, and befriend 'em what I could, and now they've -all forgot old Muff when he's in trouble; but Tom don't, and Huck -don't--THEY don't forget him, says I, 'and I don't forget them.' Well, -boys, I done an awful thing--drunk and crazy at the time--that's the -only way I account for it--and now I got to swing for it, and it's -right. Right, and BEST, too, I reckon--hope so, anyway. Well, we won't -talk about that. I don't want to make YOU feel bad; you've befriended -me. But what I want to say, is, don't YOU ever get drunk--then you won't -ever get here. Stand a little furder west--so--that's it; it's a prime -comfort to see faces that's friendly when a body's in such a muck of -trouble, and there don't none come here but yourn. Good friendly -faces--good friendly faces. Git up on one another's backs and let me -touch 'em. That's it. Shake hands--yourn'll come through the bars, but -mine's too big. Little hands, and weak--but they've helped Muff Potter -a power, and they'd help him more if they could." - -Tom went home miserable, and his dreams that night were full of -horrors. The next day and the day after, he hung about the court-room, -drawn by an almost irresistible impulse to go in, but forcing himself -to stay out. Huck was having the same experience. They studiously -avoided each other. Each wandered away, from time to time, but the same -dismal fascination always brought them back presently. 
Tom kept his -ears open when idlers sauntered out of the court-room, but invariably -heard distressing news--the toils were closing more and more -relentlessly around poor Potter. At the end of the second day the -village talk was to the effect that Injun Joe's evidence stood firm and -unshaken, and that there was not the slightest question as to what the -jury's verdict would be. - -Tom was out late, that night, and came to bed through the window. He -was in a tremendous state of excitement. It was hours before he got to -sleep. All the village flocked to the court-house the next morning, for -this was to be the great day. Both sexes were about equally represented -in the packed audience. After a long wait the jury filed in and took -their places; shortly afterward, Potter, pale and haggard, timid and -hopeless, was brought in, with chains upon him, and seated where all -the curious eyes could stare at him; no less conspicuous was Injun Joe, -stolid as ever. There was another pause, and then the judge arrived and -the sheriff proclaimed the opening of the court. The usual whisperings -among the lawyers and gathering together of papers followed. These -details and accompanying delays worked up an atmosphere of preparation -that was as impressive as it was fascinating. - -Now a witness was called who testified that he found Muff Potter -washing in the brook, at an early hour of the morning that the murder -was discovered, and that he immediately sneaked away. After some -further questioning, counsel for the prosecution said: - -"Take the witness." - -The prisoner raised his eyes for a moment, but dropped them again when -his own counsel said: - -"I have no questions to ask him." - -The next witness proved the finding of the knife near the corpse. -Counsel for the prosecution said: - -"Take the witness." - -"I have no questions to ask him," Potter's lawyer replied. - -A third witness swore he had often seen the knife in Potter's -possession. - -"Take the witness." 
- -Counsel for Potter declined to question him. The faces of the audience -began to betray annoyance. Did this attorney mean to throw away his -client's life without an effort? - -Several witnesses deposed concerning Potter's guilty behavior when -brought to the scene of the murder. They were allowed to leave the -stand without being cross-questioned. - -Every detail of the damaging circumstances that occurred in the -graveyard upon that morning which all present remembered so well was -brought out by credible witnesses, but none of them were cross-examined -by Potter's lawyer. The perplexity and dissatisfaction of the house -expressed itself in murmurs and provoked a reproof from the bench. -Counsel for the prosecution now said: - -"By the oaths of citizens whose simple word is above suspicion, we -have fastened this awful crime, beyond all possibility of question, -upon the unhappy prisoner at the bar. We rest our case here." - -A groan escaped from poor Potter, and he put his face in his hands and -rocked his body softly to and fro, while a painful silence reigned in -the court-room. Many men were moved, and many women's compassion -testified itself in tears. Counsel for the defence rose and said: - -"Your honor, in our remarks at the opening of this trial, we -foreshadowed our purpose to prove that our client did this fearful deed -while under the influence of a blind and irresponsible delirium -produced by drink. We have changed our mind. We shall not offer that -plea." [Then to the clerk:] "Call Thomas Sawyer!" - -A puzzled amazement awoke in every face in the house, not even -excepting Potter's. Every eye fastened itself with wondering interest -upon Tom as he rose and took his place upon the stand. The boy looked -wild enough, for he was badly scared. The oath was administered. - -"Thomas Sawyer, where were you on the seventeenth of June, about the -hour of midnight?" - -Tom glanced at Injun Joe's iron face and his tongue failed him. 
The -audience listened breathless, but the words refused to come. After a -few moments, however, the boy got a little of his strength back, and -managed to put enough of it into his voice to make part of the house -hear: - -"In the graveyard!" - -"A little bit louder, please. Don't be afraid. You were--" - -"In the graveyard." - -A contemptuous smile flitted across Injun Joe's face. - -"Were you anywhere near Horse Williams' grave?" - -"Yes, sir." - -"Speak up--just a trifle louder. How near were you?" - -"Near as I am to you." - -"Were you hidden, or not?" - -"I was hid." - -"Where?" - -"Behind the elms that's on the edge of the grave." - -Injun Joe gave a barely perceptible start. - -"Any one with you?" - -"Yes, sir. I went there with--" - -"Wait--wait a moment. Never mind mentioning your companion's name. We -will produce him at the proper time. Did you carry anything there with -you?" - -Tom hesitated and looked confused. - -"Speak out, my boy--don't be diffident. The truth is always -respectable. What did you take there?" - -"Only a--a--dead cat." - -There was a ripple of mirth, which the court checked. - -"We will produce the skeleton of that cat. Now, my boy, tell us -everything that occurred--tell it in your own way--don't skip anything, -and don't be afraid." - -Tom began--hesitatingly at first, but as he warmed to his subject his -words flowed more and more easily; in a little while every sound ceased -but his own voice; every eye fixed itself upon him; with parted lips -and bated breath the audience hung upon his words, taking no note of -time, rapt in the ghastly fascinations of the tale. The strain upon -pent emotion reached its climax when the boy said: - -"--and as the doctor fetched the board around and Muff Potter fell, -Injun Joe jumped with the knife and--" - -Crash! Quick as lightning the half-breed sprang for a window, tore his -way through all opposers, and was gone! 
- - - -CHAPTER XXIV - -TOM was a glittering hero once more--the pet of the old, the envy of -the young. His name even went into immortal print, for the village -paper magnified him. There were some that believed he would be -President, yet, if he escaped hanging. - -As usual, the fickle, unreasoning world took Muff Potter to its bosom -and fondled him as lavishly as it had abused him before. But that sort -of conduct is to the world's credit; therefore it is not well to find -fault with it. - -Tom's days were days of splendor and exultation to him, but his nights -were seasons of horror. Injun Joe infested all his dreams, and always -with doom in his eye. Hardly any temptation could persuade the boy to -stir abroad after nightfall. Poor Huck was in the same state of -wretchedness and terror, for Tom had told the whole story to the lawyer -the night before the great day of the trial, and Huck was sore afraid -that his share in the business might leak out, yet, notwithstanding -Injun Joe's flight had saved him the suffering of testifying in court. -The poor fellow had got the attorney to promise secrecy, but what of -that? Since Tom's harassed conscience had managed to drive him to the -lawyer's house by night and wring a dread tale from lips that had been -sealed with the dismalest and most formidable of oaths, Huck's -confidence in the human race was well-nigh obliterated. - -Daily Muff Potter's gratitude made Tom glad he had spoken; but nightly -he wished he had sealed up his tongue. - -Half the time Tom was afraid Injun Joe would never be captured; the -other half he was afraid he would be. He felt sure he never could draw -a safe breath again until that man was dead and he had seen the corpse. - -Rewards had been offered, the country had been scoured, but no Injun -Joe was found. One of those omniscient and awe-inspiring marvels, a -detective, came up from St. 
Louis, moused around, shook his head, -looked wise, and made that sort of astounding success which members of -that craft usually achieve. That is to say, he "found a clew." But you -can't hang a "clew" for murder, and so after that detective had got -through and gone home, Tom felt just as insecure as he was before. - -The slow days drifted on, and each left behind it a slightly lightened -weight of apprehension. - - - -CHAPTER XXV - -THERE comes a time in every rightly-constructed boy's life when he has -a raging desire to go somewhere and dig for hidden treasure. This -desire suddenly came upon Tom one day. He sallied out to find Joe -Harper, but failed of success. Next he sought Ben Rogers; he had gone -fishing. Presently he stumbled upon Huck Finn the Red-Handed. Huck -would answer. Tom took him to a private place and opened the matter to -him confidentially. Huck was willing. Huck was always willing to take a -hand in any enterprise that offered entertainment and required no -capital, for he had a troublesome superabundance of that sort of time -which is not money. "Where'll we dig?" said Huck. - -"Oh, most anywhere." - -"Why, is it hid all around?" - -"No, indeed it ain't. It's hid in mighty particular places, Huck ---sometimes on islands, sometimes in rotten chests under the end of a -limb of an old dead tree, just where the shadow falls at midnight; but -mostly under the floor in ha'nted houses." - -"Who hides it?" - -"Why, robbers, of course--who'd you reckon? Sunday-school -sup'rintendents?" - -"I don't know. If 'twas mine I wouldn't hide it; I'd spend it and have -a good time." - -"So would I. But robbers don't do that way. They always hide it and -leave it there." - -"Don't they come after it any more?" - -"No, they think they will, but they generally forget the marks, or -else they die. 
Anyway, it lays there a long time and gets rusty; and by -and by somebody finds an old yellow paper that tells how to find the -marks--a paper that's got to be ciphered over about a week because it's -mostly signs and hy'roglyphics." - -"Hyro--which?" - -"Hy'roglyphics--pictures and things, you know, that don't seem to mean -anything." - -"Have you got one of them papers, Tom?" - -"No." - -"Well then, how you going to find the marks?" - -"I don't want any marks. They always bury it under a ha'nted house or -on an island, or under a dead tree that's got one limb sticking out. -Well, we've tried Jackson's Island a little, and we can try it again -some time; and there's the old ha'nted house up the Still-House branch, -and there's lots of dead-limb trees--dead loads of 'em." - -"Is it under all of them?" - -"How you talk! No!" - -"Then how you going to know which one to go for?" - -"Go for all of 'em!" - -"Why, Tom, it'll take all summer." - -"Well, what of that? Suppose you find a brass pot with a hundred -dollars in it, all rusty and gray, or rotten chest full of di'monds. -How's that?" - -Huck's eyes glowed. - -"That's bully. Plenty bully enough for me. Just you gimme the hundred -dollars and I don't want no di'monds." - -"All right. But I bet you I ain't going to throw off on di'monds. Some -of 'em's worth twenty dollars apiece--there ain't any, hardly, but's -worth six bits or a dollar." - -"No! Is that so?" - -"Cert'nly--anybody'll tell you so. Hain't you ever seen one, Huck?" - -"Not as I remember." - -"Oh, kings have slathers of them." - -"Well, I don' know no kings, Tom." - -"I reckon you don't. But if you was to go to Europe you'd see a raft -of 'em hopping around." - -"Do they hop?" - -"Hop?--your granny! No!" - -"Well, what did you say they did, for?" - -"Shucks, I only meant you'd SEE 'em--not hopping, of course--what do -they want to hop for?--but I mean you'd just see 'em--scattered around, -you know, in a kind of a general way. 
Like that old humpbacked Richard." - -"Richard? What's his other name?" - -"He didn't have any other name. Kings don't have any but a given name." - -"No?" - -"But they don't." - -"Well, if they like it, Tom, all right; but I don't want to be a king -and have only just a given name, like a nigger. But say--where you -going to dig first?" - -"Well, I don't know. S'pose we tackle that old dead-limb tree on the -hill t'other side of Still-House branch?" - -"I'm agreed." - -So they got a crippled pick and a shovel, and set out on their -three-mile tramp. They arrived hot and panting, and threw themselves -down in the shade of a neighboring elm to rest and have a smoke. - -"I like this," said Tom. - -"So do I." - -"Say, Huck, if we find a treasure here, what you going to do with your -share?" - -"Well, I'll have pie and a glass of soda every day, and I'll go to -every circus that comes along. I bet I'll have a gay time." - -"Well, ain't you going to save any of it?" - -"Save it? What for?" - -"Why, so as to have something to live on, by and by." - -"Oh, that ain't any use. Pap would come back to thish-yer town some -day and get his claws on it if I didn't hurry up, and I tell you he'd -clean it out pretty quick. What you going to do with yourn, Tom?" - -"I'm going to buy a new drum, and a sure-'nough sword, and a red -necktie and a bull pup, and get married." - -"Married!" - -"That's it." - -"Tom, you--why, you ain't in your right mind." - -"Wait--you'll see." - -"Well, that's the foolishest thing you could do. Look at pap and my -mother. Fight! Why, they used to fight all the time. I remember, mighty -well." - -"That ain't anything. The girl I'm going to marry won't fight." - -"Tom, I reckon they're all alike. They'll all comb a body. Now you -better think 'bout this awhile. I tell you you better. What's the name -of the gal?" - -"It ain't a gal at all--it's a girl." - -"It's all the same, I reckon; some says gal, some says girl--both's -right, like enough. 
Anyway, what's her name, Tom?" - -"I'll tell you some time--not now." - -"All right--that'll do. Only if you get married I'll be more lonesomer -than ever." - -"No you won't. You'll come and live with me. Now stir out of this and -we'll go to digging." - -They worked and sweated for half an hour. No result. They toiled -another half-hour. Still no result. Huck said: - -"Do they always bury it as deep as this?" - -"Sometimes--not always. Not generally. I reckon we haven't got the -right place." - -So they chose a new spot and began again. The labor dragged a little, -but still they made progress. They pegged away in silence for some -time. Finally Huck leaned on his shovel, swabbed the beaded drops from -his brow with his sleeve, and said: - -"Where you going to dig next, after we get this one?" - -"I reckon maybe we'll tackle the old tree that's over yonder on -Cardiff Hill back of the widow's." - -"I reckon that'll be a good one. But won't the widow take it away from -us, Tom? It's on her land." - -"SHE take it away! Maybe she'd like to try it once. Whoever finds one -of these hid treasures, it belongs to him. It don't make any difference -whose land it's on." - -That was satisfactory. The work went on. By and by Huck said: - -"Blame it, we must be in the wrong place again. What do you think?" - -"It is mighty curious, Huck. I don't understand it. Sometimes witches -interfere. I reckon maybe that's what's the trouble now." - -"Shucks! Witches ain't got no power in the daytime." - -"Well, that's so. I didn't think of that. Oh, I know what the matter -is! What a blamed lot of fools we are! You got to find out where the -shadow of the limb falls at midnight, and that's where you dig!" - -"Then consound it, we've fooled away all this work for nothing. Now -hang it all, we got to come back in the night. It's an awful long way. -Can you get out?" - -"I bet I will. 
We've got to do it to-night, too, because if somebody -sees these holes they'll know in a minute what's here and they'll go -for it." - -"Well, I'll come around and maow to-night." - -"All right. Let's hide the tools in the bushes." - -The boys were there that night, about the appointed time. They sat in -the shadow waiting. It was a lonely place, and an hour made solemn by -old traditions. Spirits whispered in the rustling leaves, ghosts lurked -in the murky nooks, the deep baying of a hound floated up out of the -distance, an owl answered with his sepulchral note. The boys were -subdued by these solemnities, and talked little. By and by they judged -that twelve had come; they marked where the shadow fell, and began to -dig. Their hopes commenced to rise. Their interest grew stronger, and -their industry kept pace with it. The hole deepened and still deepened, -but every time their hearts jumped to hear the pick strike upon -something, they only suffered a new disappointment. It was only a stone -or a chunk. At last Tom said: - -"It ain't any use, Huck, we're wrong again." - -"Well, but we CAN'T be wrong. We spotted the shadder to a dot." - -"I know it, but then there's another thing." - -"What's that?" - -"Why, we only guessed at the time. Like enough it was too late or too -early." - -Huck dropped his shovel. - -"That's it," said he. "That's the very trouble. We got to give this -one up. We can't ever tell the right time, and besides this kind of -thing's too awful, here this time of night with witches and ghosts -a-fluttering around so. I feel as if something's behind me all the time; -and I'm afeard to turn around, becuz maybe there's others in front -a-waiting for a chance. I been creeping all over, ever since I got here." - -"Well, I've been pretty much so, too, Huck. They most always put in a -dead man when they bury a treasure under a tree, to look out for it." - -"Lordy!" - -"Yes, they do. I've always heard that." 
- -"Tom, I don't like to fool around much where there's dead people. A -body's bound to get into trouble with 'em, sure." - -"I don't like to stir 'em up, either. S'pose this one here was to -stick his skull out and say something!" - -"Don't Tom! It's awful." - -"Well, it just is. Huck, I don't feel comfortable a bit." - -"Say, Tom, let's give this place up, and try somewheres else." - -"All right, I reckon we better." - -"What'll it be?" - -Tom considered awhile; and then said: - -"The ha'nted house. That's it!" - -"Blame it, I don't like ha'nted houses, Tom. Why, they're a dern sight -worse'n dead people. Dead people might talk, maybe, but they don't come -sliding around in a shroud, when you ain't noticing, and peep over your -shoulder all of a sudden and grit their teeth, the way a ghost does. I -couldn't stand such a thing as that, Tom--nobody could." - -"Yes, but, Huck, ghosts don't travel around only at night. They won't -hender us from digging there in the daytime." - -"Well, that's so. But you know mighty well people don't go about that -ha'nted house in the day nor the night." - -"Well, that's mostly because they don't like to go where a man's been -murdered, anyway--but nothing's ever been seen around that house except -in the night--just some blue lights slipping by the windows--no regular -ghosts." - -"Well, where you see one of them blue lights flickering around, Tom, -you can bet there's a ghost mighty close behind it. It stands to -reason. Becuz you know that they don't anybody but ghosts use 'em." - -"Yes, that's so. But anyway they don't come around in the daytime, so -what's the use of our being afeard?" - -"Well, all right. We'll tackle the ha'nted house if you say so--but I -reckon it's taking chances." - -They had started down the hill by this time. 
There in the middle of -the moonlit valley below them stood the "ha'nted" house, utterly -isolated, its fences gone long ago, rank weeds smothering the very -doorsteps, the chimney crumbled to ruin, the window-sashes vacant, a -corner of the roof caved in. The boys gazed awhile, half expecting to -see a blue light flit past a window; then talking in a low tone, as -befitted the time and the circumstances, they struck far off to the -right, to give the haunted house a wide berth, and took their way -homeward through the woods that adorned the rearward side of Cardiff -Hill. - - - -CHAPTER XXVI - -ABOUT noon the next day the boys arrived at the dead tree; they had -come for their tools. Tom was impatient to go to the haunted house; -Huck was measurably so, also--but suddenly said: - -"Lookyhere, Tom, do you know what day it is?" - -Tom mentally ran over the days of the week, and then quickly lifted -his eyes with a startled look in them-- - -"My! I never once thought of it, Huck!" - -"Well, I didn't neither, but all at once it popped onto me that it was -Friday." - -"Blame it, a body can't be too careful, Huck. We might 'a' got into an -awful scrape, tackling such a thing on a Friday." - -"MIGHT! Better say we WOULD! There's some lucky days, maybe, but -Friday ain't." - -"Any fool knows that. I don't reckon YOU was the first that found it -out, Huck." - -"Well, I never said I was, did I? And Friday ain't all, neither. I had -a rotten bad dream last night--dreampt about rats." - -"No! Sure sign of trouble. Did they fight?" - -"No." - -"Well, that's good, Huck. When they don't fight it's only a sign that -there's trouble around, you know. All we got to do is to look mighty -sharp and keep out of it. We'll drop this thing for to-day, and play. -Do you know Robin Hood, Huck?" - -"No. Who's Robin Hood?" - -"Why, he was one of the greatest men that was ever in England--and the -best. He was a robber." - -"Cracky, I wisht I was. Who did he rob?" 
- -"Only sheriffs and bishops and rich people and kings, and such like. -But he never bothered the poor. He loved 'em. He always divided up with -'em perfectly square." - -"Well, he must 'a' been a brick." - -"I bet you he was, Huck. Oh, he was the noblest man that ever was. -They ain't any such men now, I can tell you. He could lick any man in -England, with one hand tied behind him; and he could take his yew bow -and plug a ten-cent piece every time, a mile and a half." - -"What's a YEW bow?" - -"I don't know. It's some kind of a bow, of course. And if he hit that -dime only on the edge he would set down and cry--and curse. But we'll -play Robin Hood--it's nobby fun. I'll learn you." - -"I'm agreed." - -So they played Robin Hood all the afternoon, now and then casting a -yearning eye down upon the haunted house and passing a remark about the -morrow's prospects and possibilities there. As the sun began to sink -into the west they took their way homeward athwart the long shadows of -the trees and soon were buried from sight in the forests of Cardiff -Hill. - -On Saturday, shortly after noon, the boys were at the dead tree again. -They had a smoke and a chat in the shade, and then dug a little in -their last hole, not with great hope, but merely because Tom said there -were so many cases where people had given up a treasure after getting -down within six inches of it, and then somebody else had come along and -turned it up with a single thrust of a shovel. The thing failed this -time, however, so the boys shouldered their tools and went away feeling -that they had not trifled with fortune, but had fulfilled all the -requirements that belong to the business of treasure-hunting. - -When they reached the haunted house there was something so weird and -grisly about the dead silence that reigned there under the baking sun, -and something so depressing about the loneliness and desolation of the -place, that they were afraid, for a moment, to venture in. 
Then they -crept to the door and took a trembling peep. They saw a weed-grown, -floorless room, unplastered, an ancient fireplace, vacant windows, a -ruinous staircase; and here, there, and everywhere hung ragged and -abandoned cobwebs. They presently entered, softly, with quickened -pulses, talking in whispers, ears alert to catch the slightest sound, -and muscles tense and ready for instant retreat. - -In a little while familiarity modified their fears and they gave the -place a critical and interested examination, rather admiring their own -boldness, and wondering at it, too. Next they wanted to look up-stairs. -This was something like cutting off retreat, but they got to daring -each other, and of course there could be but one result--they threw -their tools into a corner and made the ascent. Up there were the same -signs of decay. In one corner they found a closet that promised -mystery, but the promise was a fraud--there was nothing in it. Their -courage was up now and well in hand. They were about to go down and -begin work when-- - -"Sh!" said Tom. - -"What is it?" whispered Huck, blanching with fright. - -"Sh!... There!... Hear it?" - -"Yes!... Oh, my! Let's run!" - -"Keep still! Don't you budge! They're coming right toward the door." - -The boys stretched themselves upon the floor with their eyes to -knot-holes in the planking, and lay waiting, in a misery of fear. - -"They've stopped.... No--coming.... Here they are. Don't whisper -another word, Huck. My goodness, I wish I was out of this!" - -Two men entered. Each boy said to himself: "There's the old deaf and -dumb Spaniard that's been about town once or twice lately--never saw -t'other man before." - -"T'other" was a ragged, unkempt creature, with nothing very pleasant -in his face. The Spaniard was wrapped in a serape; he had bushy white -whiskers; long white hair flowed from under his sombrero, and he wore -green goggles. 
When they came in, "t'other" was talking in a low voice; -they sat down on the ground, facing the door, with their backs to the -wall, and the speaker continued his remarks. His manner became less -guarded and his words more distinct as he proceeded: - -"No," said he, "I've thought it all over, and I don't like it. It's -dangerous." - -"Dangerous!" grunted the "deaf and dumb" Spaniard--to the vast -surprise of the boys. "Milksop!" - -This voice made the boys gasp and quake. It was Injun Joe's! There was -silence for some time. Then Joe said: - -"What's any more dangerous than that job up yonder--but nothing's come -of it." - -"That's different. Away up the river so, and not another house about. -'Twon't ever be known that we tried, anyway, long as we didn't succeed." - -"Well, what's more dangerous than coming here in the daytime!--anybody -would suspicion us that saw us." - -"I know that. But there warn't any other place as handy after that -fool of a job. I want to quit this shanty. I wanted to yesterday, only -it warn't any use trying to stir out of here, with those infernal boys -playing over there on the hill right in full view." - -"Those infernal boys" quaked again under the inspiration of this -remark, and thought how lucky it was that they had remembered it was -Friday and concluded to wait a day. They wished in their hearts they -had waited a year. - -The two men got out some food and made a luncheon. After a long and -thoughtful silence, Injun Joe said: - -"Look here, lad--you go back up the river where you belong. Wait there -till you hear from me. I'll take the chances on dropping into this town -just once more, for a look. We'll do that 'dangerous' job after I've -spied around a little and think things look well for it. Then for -Texas! We'll leg it together!" - -This was satisfactory. Both men presently fell to yawning, and Injun -Joe said: - -"I'm dead for sleep! It's your turn to watch." - -He curled down in the weeds and soon began to snore. 
His comrade -stirred him once or twice and he became quiet. Presently the watcher -began to nod; his head drooped lower and lower, both men began to snore -now. - -The boys drew a long, grateful breath. Tom whispered: - -"Now's our chance--come!" - -Huck said: - -"I can't--I'd die if they was to wake." - -Tom urged--Huck held back. At last Tom rose slowly and softly, and -started alone. But the first step he made wrung such a hideous creak -from the crazy floor that he sank down almost dead with fright. He -never made a second attempt. The boys lay there counting the dragging -moments till it seemed to them that time must be done and eternity -growing gray; and then they were grateful to note that at last the sun -was setting. - -Now one snore ceased. Injun Joe sat up, stared around--smiled grimly -upon his comrade, whose head was drooping upon his knees--stirred him -up with his foot and said: - -"Here! YOU'RE a watchman, ain't you! All right, though--nothing's -happened." - -"My! have I been asleep?" - -"Oh, partly, partly. Nearly time for us to be moving, pard. What'll we -do with what little swag we've got left?" - -"I don't know--leave it here as we've always done, I reckon. No use to -take it away till we start south. Six hundred and fifty in silver's -something to carry." - -"Well--all right--it won't matter to come here once more." - -"No--but I'd say come in the night as we used to do--it's better." - -"Yes: but look here; it may be a good while before I get the right -chance at that job; accidents might happen; 'tain't in such a very good -place; we'll just regularly bury it--and bury it deep." - -"Good idea," said the comrade, who walked across the room, knelt down, -raised one of the rearward hearth-stones and took out a bag that -jingled pleasantly. He subtracted from it twenty or thirty dollars for -himself and as much for Injun Joe, and passed the bag to the latter, -who was on his knees in the corner, now, digging with his bowie-knife. 
- -The boys forgot all their fears, all their miseries in an instant. -With gloating eyes they watched every movement. Luck!--the splendor of -it was beyond all imagination! Six hundred dollars was money enough to -make half a dozen boys rich! Here was treasure-hunting under the -happiest auspices--there would not be any bothersome uncertainty as to -where to dig. They nudged each other every moment--eloquent nudges and -easily understood, for they simply meant--"Oh, but ain't you glad NOW -we're here!" - -Joe's knife struck upon something. - -"Hello!" said he. - -"What is it?" said his comrade. - -"Half-rotten plank--no, it's a box, I believe. Here--bear a hand and -we'll see what it's here for. Never mind, I've broke a hole." - -He reached his hand in and drew it out-- - -"Man, it's money!" - -The two men examined the handful of coins. They were gold. The boys -above were as excited as themselves, and as delighted. - -Joe's comrade said: - -"We'll make quick work of this. There's an old rusty pick over amongst -the weeds in the corner the other side of the fireplace--I saw it a -minute ago." - -He ran and brought the boys' pick and shovel. Injun Joe took the pick, -looked it over critically, shook his head, muttered something to -himself, and then began to use it. The box was soon unearthed. It was -not very large; it was iron bound and had been very strong before the -slow years had injured it. The men contemplated the treasure awhile in -blissful silence. - -"Pard, there's thousands of dollars here," said Injun Joe. - -"'Twas always said that Murrel's gang used to be around here one -summer," the stranger observed. - -"I know it," said Injun Joe; "and this looks like it, I should say." - -"Now you won't need to do that job." - -The half-breed frowned. Said he: - -"You don't know me. Least you don't know all about that thing. 'Tain't -robbery altogether--it's REVENGE!" and a wicked light flamed in his -eyes. "I'll need your help in it. 
When it's finished--then Texas. Go -home to your Nance and your kids, and stand by till you hear from me." - -"Well--if you say so; what'll we do with this--bury it again?" - -"Yes. [Ravishing delight overhead.] NO! by the great Sachem, no! -[Profound distress overhead.] I'd nearly forgot. That pick had fresh -earth on it! [The boys were sick with terror in a moment.] What -business has a pick and a shovel here? What business with fresh earth -on them? Who brought them here--and where are they gone? Have you heard -anybody?--seen anybody? What! bury it again and leave them to come and -see the ground disturbed? Not exactly--not exactly. We'll take it to my -den." - -"Why, of course! Might have thought of that before. You mean Number -One?" - -"No--Number Two--under the cross. The other place is bad--too common." - -"All right. It's nearly dark enough to start." - -Injun Joe got up and went about from window to window cautiously -peeping out. Presently he said: - -"Who could have brought those tools here? Do you reckon they can be -up-stairs?" - -The boys' breath forsook them. Injun Joe put his hand on his knife, -halted a moment, undecided, and then turned toward the stairway. The -boys thought of the closet, but their strength was gone. The steps came -creaking up the stairs--the intolerable distress of the situation woke -the stricken resolution of the lads--they were about to spring for the -closet, when there was a crash of rotten timbers and Injun Joe landed -on the ground amid the debris of the ruined stairway. He gathered -himself up cursing, and his comrade said: - -"Now what's the use of all that? If it's anybody, and they're up -there, let them STAY there--who cares? If they want to jump down, now, -and get into trouble, who objects? It will be dark in fifteen minutes ---and then let them follow us if they want to. I'm willing. In my -opinion, whoever hove those things in here caught a sight of us and -took us for ghosts or devils or something. 
I'll bet they're running -yet." - -Joe grumbled awhile; then he agreed with his friend that what daylight -was left ought to be economized in getting things ready for leaving. -Shortly afterward they slipped out of the house in the deepening -twilight, and moved toward the river with their precious box. - -Tom and Huck rose up, weak but vastly relieved, and stared after them -through the chinks between the logs of the house. Follow? Not they. -They were content to reach ground again without broken necks, and take -the townward track over the hill. They did not talk much. They were too -much absorbed in hating themselves--hating the ill luck that made them -take the spade and the pick there. But for that, Injun Joe never would -have suspected. He would have hidden the silver with the gold to wait -there till his "revenge" was satisfied, and then he would have had the -misfortune to find that money turn up missing. Bitter, bitter luck that -the tools were ever brought there! - -They resolved to keep a lookout for that Spaniard when he should come -to town spying out for chances to do his revengeful job, and follow him -to "Number Two," wherever that might be. Then a ghastly thought -occurred to Tom. - -"Revenge? What if he means US, Huck!" - -"Oh, don't!" said Huck, nearly fainting. - -They talked it all over, and as they entered town they agreed to -believe that he might possibly mean somebody else--at least that he -might at least mean nobody but Tom, since only Tom had testified. - -Very, very small comfort it was to Tom to be alone in danger! Company -would be a palpable improvement, he thought. - - - -CHAPTER XXVII - -THE adventure of the day mightily tormented Tom's dreams that night. -Four times he had his hands on that rich treasure and four times it -wasted to nothingness in his fingers as sleep forsook him and -wakefulness brought back the hard reality of his misfortune. 
As he lay -in the early morning recalling the incidents of his great adventure, he -noticed that they seemed curiously subdued and far away--somewhat as if -they had happened in another world, or in a time long gone by. Then it -occurred to him that the great adventure itself must be a dream! There -was one very strong argument in favor of this idea--namely, that the -quantity of coin he had seen was too vast to be real. He had never seen -as much as fifty dollars in one mass before, and he was like all boys -of his age and station in life, in that he imagined that all references -to "hundreds" and "thousands" were mere fanciful forms of speech, and -that no such sums really existed in the world. He never had supposed -for a moment that so large a sum as a hundred dollars was to be found -in actual money in any one's possession. If his notions of hidden -treasure had been analyzed, they would have been found to consist of a -handful of real dimes and a bushel of vague, splendid, ungraspable -dollars. - -But the incidents of his adventure grew sensibly sharper and clearer -under the attrition of thinking them over, and so he presently found -himself leaning to the impression that the thing might not have been a -dream, after all. This uncertainty must be swept away. He would snatch -a hurried breakfast and go and find Huck. Huck was sitting on the -gunwale of a flatboat, listlessly dangling his feet in the water and -looking very melancholy. Tom concluded to let Huck lead up to the -subject. If he did not do it, then the adventure would be proved to -have been only a dream. - -"Hello, Huck!" - -"Hello, yourself." - -Silence, for a minute. - -"Tom, if we'd 'a' left the blame tools at the dead tree, we'd 'a' got -the money. Oh, ain't it awful!" - -"'Tain't a dream, then, 'tain't a dream! Somehow I most wish it was. -Dog'd if I don't, Huck." - -"What ain't a dream?" - -"Oh, that thing yesterday. I been half thinking it was." - -"Dream! 
If them stairs hadn't broke down you'd 'a' seen how much dream -it was! I've had dreams enough all night--with that patch-eyed Spanish -devil going for me all through 'em--rot him!" - -"No, not rot him. FIND him! Track the money!" - -"Tom, we'll never find him. A feller don't have only one chance for -such a pile--and that one's lost. I'd feel mighty shaky if I was to see -him, anyway." - -"Well, so'd I; but I'd like to see him, anyway--and track him out--to -his Number Two." - -"Number Two--yes, that's it. I been thinking 'bout that. But I can't -make nothing out of it. What do you reckon it is?" - -"I dono. It's too deep. Say, Huck--maybe it's the number of a house!" - -"Goody!... No, Tom, that ain't it. If it is, it ain't in this -one-horse town. They ain't no numbers here." - -"Well, that's so. Lemme think a minute. Here--it's the number of a -room--in a tavern, you know!" - -"Oh, that's the trick! They ain't only two taverns. We can find out -quick." - -"You stay here, Huck, till I come." - -Tom was off at once. He did not care to have Huck's company in public -places. He was gone half an hour. He found that in the best tavern, No. -2 had long been occupied by a young lawyer, and was still so occupied. -In the less ostentatious house, No. 2 was a mystery. The -tavern-keeper's young son said it was kept locked all the time, and he -never saw anybody go into it or come out of it except at night; he did -not know any particular reason for this state of things; had had some -little curiosity, but it was rather feeble; had made the most of the -mystery by entertaining himself with the idea that that room was -"ha'nted"; had noticed that there was a light in there the night before. - -"That's what I've found out, Huck. I reckon that's the very No. 2 -we're after." - -"I reckon it is, Tom. Now what you going to do?" - -"Lemme think." - -Tom thought a long time. Then he said: - -"I'll tell you. The back door of that No. 
2 is the door that comes out -into that little close alley between the tavern and the old rattle trap -of a brick store. Now you get hold of all the door-keys you can find, -and I'll nip all of auntie's, and the first dark night we'll go there -and try 'em. And mind you, keep a lookout for Injun Joe, because he -said he was going to drop into town and spy around once more for a -chance to get his revenge. If you see him, you just follow him; and if -he don't go to that No. 2, that ain't the place." - -"Lordy, I don't want to foller him by myself!" - -"Why, it'll be night, sure. He mightn't ever see you--and if he did, -maybe he'd never think anything." - -"Well, if it's pretty dark I reckon I'll track him. I dono--I dono. -I'll try." - -"You bet I'll follow him, if it's dark, Huck. Why, he might 'a' found -out he couldn't get his revenge, and be going right after that money." - -"It's so, Tom, it's so. I'll foller him; I will, by jingoes!" - -"Now you're TALKING! Don't you ever weaken, Huck, and I won't." - - - -CHAPTER XXVIII - -THAT night Tom and Huck were ready for their adventure. They hung -about the neighborhood of the tavern until after nine, one watching the -alley at a distance and the other the tavern door. Nobody entered the -alley or left it; nobody resembling the Spaniard entered or left the -tavern door. The night promised to be a fair one; so Tom went home with -the understanding that if a considerable degree of darkness came on, -Huck was to come and "maow," whereupon he would slip out and try the -keys. But the night remained clear, and Huck closed his watch and -retired to bed in an empty sugar hogshead about twelve. - -Tuesday the boys had the same ill luck. Also Wednesday. But Thursday -night promised better. Tom slipped out in good season with his aunt's -old tin lantern, and a large towel to blindfold it with. He hid the -lantern in Huck's sugar hogshead and the watch began. 
An hour before -midnight the tavern closed up and its lights (the only ones -thereabouts) were put out. No Spaniard had been seen. Nobody had -entered or left the alley. Everything was auspicious. The blackness of -darkness reigned, the perfect stillness was interrupted only by -occasional mutterings of distant thunder. - -Tom got his lantern, lit it in the hogshead, wrapped it closely in the -towel, and the two adventurers crept in the gloom toward the tavern. -Huck stood sentry and Tom felt his way into the alley. Then there was a -season of waiting anxiety that weighed upon Huck's spirits like a -mountain. He began to wish he could see a flash from the lantern--it -would frighten him, but it would at least tell him that Tom was alive -yet. It seemed hours since Tom had disappeared. Surely he must have -fainted; maybe he was dead; maybe his heart had burst under terror and -excitement. In his uneasiness Huck found himself drawing closer and -closer to the alley; fearing all sorts of dreadful things, and -momentarily expecting some catastrophe to happen that would take away -his breath. There was not much to take away, for he seemed only able to -inhale it by thimblefuls, and his heart would soon wear itself out, the -way it was beating. Suddenly there was a flash of light and Tom came -tearing by him: "Run!" said he; "run, for your life!" - -He needn't have repeated it; once was enough; Huck was making thirty -or forty miles an hour before the repetition was uttered. The boys -never stopped till they reached the shed of a deserted slaughter-house -at the lower end of the village. Just as they got within its shelter -the storm burst and the rain poured down. As soon as Tom got his breath -he said: - -"Huck, it was awful! I tried two of the keys, just as soft as I could; -but they seemed to make such a power of racket that I couldn't hardly -get my breath I was so scared. They wouldn't turn in the lock, either. 
-Well, without noticing what I was doing, I took hold of the knob, and -open comes the door! It warn't locked! I hopped in, and shook off the -towel, and, GREAT CAESAR'S GHOST!" - -"What!--what'd you see, Tom?" - -"Huck, I most stepped onto Injun Joe's hand!" - -"No!" - -"Yes! He was lying there, sound asleep on the floor, with his old -patch on his eye and his arms spread out." - -"Lordy, what did you do? Did he wake up?" - -"No, never budged. Drunk, I reckon. I just grabbed that towel and -started!" - -"I'd never 'a' thought of the towel, I bet!" - -"Well, I would. My aunt would make me mighty sick if I lost it." - -"Say, Tom, did you see that box?" - -"Huck, I didn't wait to look around. I didn't see the box, I didn't -see the cross. I didn't see anything but a bottle and a tin cup on the -floor by Injun Joe; yes, I saw two barrels and lots more bottles in the -room. Don't you see, now, what's the matter with that ha'nted room?" - -"How?" - -"Why, it's ha'nted with whiskey! Maybe ALL the Temperance Taverns have -got a ha'nted room, hey, Huck?" - -"Well, I reckon maybe that's so. Who'd 'a' thought such a thing? But -say, Tom, now's a mighty good time to get that box, if Injun Joe's -drunk." - -"It is, that! You try it!" - -Huck shuddered. - -"Well, no--I reckon not." - -"And I reckon not, Huck. Only one bottle alongside of Injun Joe ain't -enough. If there'd been three, he'd be drunk enough and I'd do it." - -There was a long pause for reflection, and then Tom said: - -"Lookyhere, Huck, less not try that thing any more till we know Injun -Joe's not in there. It's too scary. Now, if we watch every night, we'll -be dead sure to see him go out, some time or other, and then we'll -snatch that box quicker'n lightning." - -"Well, I'm agreed. I'll watch the whole night long, and I'll do it -every night, too, if you'll do the other part of the job." - -"All right, I will. 
All you got to do is to trot up Hooper Street a -block and maow--and if I'm asleep, you throw some gravel at the window -and that'll fetch me." - -"Agreed, and good as wheat!" - -"Now, Huck, the storm's over, and I'll go home. It'll begin to be -daylight in a couple of hours. You go back and watch that long, will -you?" - -"I said I would, Tom, and I will. I'll ha'nt that tavern every night -for a year! I'll sleep all day and I'll stand watch all night." - -"That's all right. Now, where you going to sleep?" - -"In Ben Rogers' hayloft. He lets me, and so does his pap's nigger man, -Uncle Jake. I tote water for Uncle Jake whenever he wants me to, and -any time I ask him he gives me a little something to eat if he can -spare it. That's a mighty good nigger, Tom. He likes me, becuz I don't -ever act as if I was above him. Sometime I've set right down and eat -WITH him. But you needn't tell that. A body's got to do things when -he's awful hungry he wouldn't want to do as a steady thing." - -"Well, if I don't want you in the daytime, I'll let you sleep. I won't -come bothering around. Any time you see something's up, in the night, -just skip right around and maow." - - - -CHAPTER XXIX - -THE first thing Tom heard on Friday morning was a glad piece of news ---Judge Thatcher's family had come back to town the night before. Both -Injun Joe and the treasure sunk into secondary importance for a moment, -and Becky took the chief place in the boy's interest. He saw her and -they had an exhausting good time playing "hi-spy" and "gully-keeper" -with a crowd of their school-mates. The day was completed and crowned -in a peculiarly satisfactory way: Becky teased her mother to appoint -the next day for the long-promised and long-delayed picnic, and she -consented. The child's delight was boundless; and Tom's not more -moderate. 
The invitations were sent out before sunset, and straightway -the young folks of the village were thrown into a fever of preparation -and pleasurable anticipation. Tom's excitement enabled him to keep -awake until a pretty late hour, and he had good hopes of hearing Huck's -"maow," and of having his treasure to astonish Becky and the picnickers -with, next day; but he was disappointed. No signal came that night. - -Morning came, eventually, and by ten or eleven o'clock a giddy and -rollicking company were gathered at Judge Thatcher's, and everything -was ready for a start. It was not the custom for elderly people to mar -the picnics with their presence. The children were considered safe -enough under the wings of a few young ladies of eighteen and a few -young gentlemen of twenty-three or thereabouts. The old steam ferryboat -was chartered for the occasion; presently the gay throng filed up the -main street laden with provision-baskets. Sid was sick and had to miss -the fun; Mary remained at home to entertain him. The last thing Mrs. -Thatcher said to Becky, was: - -"You'll not get back till late. Perhaps you'd better stay all night -with some of the girls that live near the ferry-landing, child." - -"Then I'll stay with Susy Harper, mamma." - -"Very well. And mind and behave yourself and don't be any trouble." - -Presently, as they tripped along, Tom said to Becky: - -"Say--I'll tell you what we'll do. 'Stead of going to Joe Harper's -we'll climb right up the hill and stop at the Widow Douglas'. She'll -have ice-cream! She has it most every day--dead loads of it. And she'll -be awful glad to have us." - -"Oh, that will be fun!" - -Then Becky reflected a moment and said: - -"But what will mamma say?" - -"How'll she ever know?" - -The girl turned the idea over in her mind, and said reluctantly: - -"I reckon it's wrong--but--" - -"But shucks! Your mother won't know, and so what's the harm? 
All she
-wants is that you'll be safe; and I bet you she'd 'a' said go there if
-she'd 'a' thought of it. I know she would!"
-
-The Widow Douglas' splendid hospitality was a tempting bait. It and
-Tom's persuasions presently carried the day. So it was decided to say
-nothing to anybody about the night's programme. Presently it occurred to
-Tom that maybe Huck might come this very night and give the signal. The
-thought took a deal of the spirit out of his anticipations. Still he
-could not bear to give up the fun at Widow Douglas'. And why should he
-give it up, he reasoned--the signal did not come the night before, so
-why should it be any more likely to come to-night? The sure fun of the
-evening outweighed the uncertain treasure; and, boy-like, he determined
-to yield to the stronger inclination and not allow himself to think of
-the box of money another time that day.
-
-Three miles below town the ferryboat stopped at the mouth of a woody
-hollow and tied up. The crowd swarmed ashore and soon the forest
-distances and craggy heights echoed far and near with shoutings and
-laughter. All the different ways of getting hot and tired were gone
-through with, and by-and-by the rovers straggled back to camp fortified
-with responsible appetites, and then the destruction of the good things
-began. After the feast there was a refreshing season of rest and chat
-in the shade of spreading oaks. By-and-by somebody shouted:
-
-"Who's ready for the cave?"
-
-Everybody was. Bundles of candles were procured, and straightway there
-was a general scamper up the hill. The mouth of the cave was up the
-hillside--an opening shaped like a letter A. Its massive oaken door
-stood unbarred. Within was a small chamber, chilly as an ice-house, and
-walled by Nature with solid limestone that was dewy with a cold sweat.
-It was romantic and mysterious to stand here in the deep gloom and look
-out upon the green valley shining in the sun. 
But the impressiveness of -the situation quickly wore off, and the romping began again. The moment -a candle was lighted there was a general rush upon the owner of it; a -struggle and a gallant defence followed, but the candle was soon -knocked down or blown out, and then there was a glad clamor of laughter -and a new chase. But all things have an end. By-and-by the procession -went filing down the steep descent of the main avenue, the flickering -rank of lights dimly revealing the lofty walls of rock almost to their -point of junction sixty feet overhead. This main avenue was not more -than eight or ten feet wide. Every few steps other lofty and still -narrower crevices branched from it on either hand--for McDougal's cave -was but a vast labyrinth of crooked aisles that ran into each other and -out again and led nowhere. It was said that one might wander days and -nights together through its intricate tangle of rifts and chasms, and -never find the end of the cave; and that he might go down, and down, -and still down, into the earth, and it was just the same--labyrinth -under labyrinth, and no end to any of them. No man "knew" the cave. -That was an impossible thing. Most of the young men knew a portion of -it, and it was not customary to venture much beyond this known portion. -Tom Sawyer knew as much of the cave as any one. - -The procession moved along the main avenue some three-quarters of a -mile, and then groups and couples began to slip aside into branch -avenues, fly along the dismal corridors, and take each other by -surprise at points where the corridors joined again. Parties were able -to elude each other for the space of half an hour without going beyond -the "known" ground. - -By-and-by, one group after another came straggling back to the mouth -of the cave, panting, hilarious, smeared from head to foot with tallow -drippings, daubed with clay, and entirely delighted with the success of -the day. 
Then they were astonished to find that they had been taking no -note of time and that night was about at hand. The clanging bell had -been calling for half an hour. However, this sort of close to the day's -adventures was romantic and therefore satisfactory. When the ferryboat -with her wild freight pushed into the stream, nobody cared sixpence for -the wasted time but the captain of the craft. - -Huck was already upon his watch when the ferryboat's lights went -glinting past the wharf. He heard no noise on board, for the young -people were as subdued and still as people usually are who are nearly -tired to death. He wondered what boat it was, and why she did not stop -at the wharf--and then he dropped her out of his mind and put his -attention upon his business. The night was growing cloudy and dark. Ten -o'clock came, and the noise of vehicles ceased, scattered lights began -to wink out, all straggling foot-passengers disappeared, the village -betook itself to its slumbers and left the small watcher alone with the -silence and the ghosts. Eleven o'clock came, and the tavern lights were -put out; darkness everywhere, now. Huck waited what seemed a weary long -time, but nothing happened. His faith was weakening. Was there any use? -Was there really any use? Why not give it up and turn in? - -A noise fell upon his ear. He was all attention in an instant. The -alley door closed softly. He sprang to the corner of the brick store. -The next moment two men brushed by him, and one seemed to have -something under his arm. It must be that box! So they were going to -remove the treasure. Why call Tom now? It would be absurd--the men -would get away with the box and never be found again. No, he would -stick to their wake and follow them; he would trust to the darkness for -security from discovery. So communing with himself, Huck stepped out -and glided along behind the men, cat-like, with bare feet, allowing -them to keep just far enough ahead not to be invisible. 
- -They moved up the river street three blocks, then turned to the left -up a cross-street. They went straight ahead, then, until they came to -the path that led up Cardiff Hill; this they took. They passed by the -old Welshman's house, half-way up the hill, without hesitating, and -still climbed upward. Good, thought Huck, they will bury it in the old -quarry. But they never stopped at the quarry. They passed on, up the -summit. They plunged into the narrow path between the tall sumach -bushes, and were at once hidden in the gloom. Huck closed up and -shortened his distance, now, for they would never be able to see him. -He trotted along awhile; then slackened his pace, fearing he was -gaining too fast; moved on a piece, then stopped altogether; listened; -no sound; none, save that he seemed to hear the beating of his own -heart. The hooting of an owl came over the hill--ominous sound! But no -footsteps. Heavens, was everything lost! He was about to spring with -winged feet, when a man cleared his throat not four feet from him! -Huck's heart shot into his throat, but he swallowed it again; and then -he stood there shaking as if a dozen agues had taken charge of him at -once, and so weak that he thought he must surely fall to the ground. He -knew where he was. He knew he was within five steps of the stile -leading into Widow Douglas' grounds. Very well, he thought, let them -bury it there; it won't be hard to find. - -Now there was a voice--a very low voice--Injun Joe's: - -"Damn her, maybe she's got company--there's lights, late as it is." - -"I can't see any." - -This was that stranger's voice--the stranger of the haunted house. A -deadly chill went to Huck's heart--this, then, was the "revenge" job! -His thought was, to fly. Then he remembered that the Widow Douglas had -been kind to him more than once, and maybe these men were going to -murder her. He wished he dared venture to warn her; but he knew he -didn't dare--they might come and catch him. 
He thought all this and -more in the moment that elapsed between the stranger's remark and Injun -Joe's next--which was-- - -"Because the bush is in your way. Now--this way--now you see, don't -you?" - -"Yes. Well, there IS company there, I reckon. Better give it up." - -"Give it up, and I just leaving this country forever! Give it up and -maybe never have another chance. I tell you again, as I've told you -before, I don't care for her swag--you may have it. But her husband was -rough on me--many times he was rough on me--and mainly he was the -justice of the peace that jugged me for a vagrant. And that ain't all. -It ain't a millionth part of it! He had me HORSEWHIPPED!--horsewhipped -in front of the jail, like a nigger!--with all the town looking on! -HORSEWHIPPED!--do you understand? He took advantage of me and died. But -I'll take it out of HER." - -"Oh, don't kill her! Don't do that!" - -"Kill? Who said anything about killing? I would kill HIM if he was -here; but not her. When you want to get revenge on a woman you don't -kill her--bosh! you go for her looks. You slit her nostrils--you notch -her ears like a sow!" - -"By God, that's--" - -"Keep your opinion to yourself! It will be safest for you. I'll tie -her to the bed. If she bleeds to death, is that my fault? I'll not cry, -if she does. My friend, you'll help me in this thing--for MY sake ---that's why you're here--I mightn't be able alone. If you flinch, I'll -kill you. Do you understand that? And if I have to kill you, I'll kill -her--and then I reckon nobody'll ever know much about who done this -business." - -"Well, if it's got to be done, let's get at it. The quicker the -better--I'm all in a shiver." - -"Do it NOW? And company there? Look here--I'll get suspicious of you, -first thing you know. No--we'll wait till the lights are out--there's -no hurry." 
- -Huck felt that a silence was going to ensue--a thing still more awful -than any amount of murderous talk; so he held his breath and stepped -gingerly back; planted his foot carefully and firmly, after balancing, -one-legged, in a precarious way and almost toppling over, first on one -side and then on the other. He took another step back, with the same -elaboration and the same risks; then another and another, and--a twig -snapped under his foot! His breath stopped and he listened. There was -no sound--the stillness was perfect. His gratitude was measureless. Now -he turned in his tracks, between the walls of sumach bushes--turned -himself as carefully as if he were a ship--and then stepped quickly but -cautiously along. When he emerged at the quarry he felt secure, and so -he picked up his nimble heels and flew. Down, down he sped, till he -reached the Welshman's. He banged at the door, and presently the heads -of the old man and his two stalwart sons were thrust from windows. - -"What's the row there? Who's banging? What do you want?" - -"Let me in--quick! I'll tell everything." - -"Why, who are you?" - -"Huckleberry Finn--quick, let me in!" - -"Huckleberry Finn, indeed! It ain't a name to open many doors, I -judge! But let him in, lads, and let's see what's the trouble." - -"Please don't ever tell I told you," were Huck's first words when he -got in. "Please don't--I'd be killed, sure--but the widow's been good -friends to me sometimes, and I want to tell--I WILL tell if you'll -promise you won't ever say it was me." - -"By George, he HAS got something to tell, or he wouldn't act so!" -exclaimed the old man; "out with it and nobody here'll ever tell, lad." - -Three minutes later the old man and his sons, well armed, were up the -hill, and just entering the sumach path on tiptoe, their weapons in -their hands. Huck accompanied them no further. He hid behind a great -bowlder and fell to listening. 
There was a lagging, anxious silence, -and then all of a sudden there was an explosion of firearms and a cry. - -Huck waited for no particulars. He sprang away and sped down the hill -as fast as his legs could carry him. - - - -CHAPTER XXX - -AS the earliest suspicion of dawn appeared on Sunday morning, Huck -came groping up the hill and rapped gently at the old Welshman's door. -The inmates were asleep, but it was a sleep that was set on a -hair-trigger, on account of the exciting episode of the night. A call -came from a window: - -"Who's there!" - -Huck's scared voice answered in a low tone: - -"Please let me in! It's only Huck Finn!" - -"It's a name that can open this door night or day, lad!--and welcome!" - -These were strange words to the vagabond boy's ears, and the -pleasantest he had ever heard. He could not recollect that the closing -word had ever been applied in his case before. The door was quickly -unlocked, and he entered. Huck was given a seat and the old man and his -brace of tall sons speedily dressed themselves. - -"Now, my boy, I hope you're good and hungry, because breakfast will be -ready as soon as the sun's up, and we'll have a piping hot one, too ---make yourself easy about that! I and the boys hoped you'd turn up and -stop here last night." - -"I was awful scared," said Huck, "and I run. I took out when the -pistols went off, and I didn't stop for three mile. I've come now becuz -I wanted to know about it, you know; and I come before daylight becuz I -didn't want to run across them devils, even if they was dead." - -"Well, poor chap, you do look as if you'd had a hard night of it--but -there's a bed here for you when you've had your breakfast. No, they -ain't dead, lad--we are sorry enough for that. You see we knew right -where to put our hands on them, by your description; so we crept along -on tiptoe till we got within fifteen feet of them--dark as a cellar -that sumach path was--and just then I found I was going to sneeze. 
It -was the meanest kind of luck! I tried to keep it back, but no use ---'twas bound to come, and it did come! I was in the lead with my pistol -raised, and when the sneeze started those scoundrels a-rustling to get -out of the path, I sung out, 'Fire boys!' and blazed away at the place -where the rustling was. So did the boys. But they were off in a jiffy, -those villains, and we after them, down through the woods. I judge we -never touched them. They fired a shot apiece as they started, but their -bullets whizzed by and didn't do us any harm. As soon as we lost the -sound of their feet we quit chasing, and went down and stirred up the -constables. They got a posse together, and went off to guard the river -bank, and as soon as it is light the sheriff and a gang are going to -beat up the woods. My boys will be with them presently. I wish we had -some sort of description of those rascals--'twould help a good deal. -But you couldn't see what they were like, in the dark, lad, I suppose?" - -"Oh yes; I saw them down-town and follered them." - -"Splendid! Describe them--describe them, my boy!" - -"One's the old deaf and dumb Spaniard that's ben around here once or -twice, and t'other's a mean-looking, ragged--" - -"That's enough, lad, we know the men! Happened on them in the woods -back of the widow's one day, and they slunk away. Off with you, boys, -and tell the sheriff--get your breakfast to-morrow morning!" - -The Welshman's sons departed at once. As they were leaving the room -Huck sprang up and exclaimed: - -"Oh, please don't tell ANYbody it was me that blowed on them! Oh, -please!" - -"All right if you say it, Huck, but you ought to have the credit of -what you did." - -"Oh no, no! Please don't tell!" - -When the young men were gone, the old Welshman said: - -"They won't tell--and I won't. But why don't you want it known?" 
- -Huck would not explain, further than to say that he already knew too -much about one of those men and would not have the man know that he -knew anything against him for the whole world--he would be killed for -knowing it, sure. - -The old man promised secrecy once more, and said: - -"How did you come to follow these fellows, lad? Were they looking -suspicious?" - -Huck was silent while he framed a duly cautious reply. Then he said: - -"Well, you see, I'm a kind of a hard lot,--least everybody says so, -and I don't see nothing agin it--and sometimes I can't sleep much, on -account of thinking about it and sort of trying to strike out a new way -of doing. That was the way of it last night. I couldn't sleep, and so I -come along up-street 'bout midnight, a-turning it all over, and when I -got to that old shackly brick store by the Temperance Tavern, I backed -up agin the wall to have another think. Well, just then along comes -these two chaps slipping along close by me, with something under their -arm, and I reckoned they'd stole it. One was a-smoking, and t'other one -wanted a light; so they stopped right before me and the cigars lit up -their faces and I see that the big one was the deaf and dumb Spaniard, -by his white whiskers and the patch on his eye, and t'other one was a -rusty, ragged-looking devil." - -"Could you see the rags by the light of the cigars?" - -This staggered Huck for a moment. Then he said: - -"Well, I don't know--but somehow it seems as if I did." - -"Then they went on, and you--" - -"Follered 'em--yes. That was it. I wanted to see what was up--they -sneaked along so. I dogged 'em to the widder's stile, and stood in the -dark and heard the ragged one beg for the widder, and the Spaniard -swear he'd spile her looks just as I told you and your two--" - -"What! The DEAF AND DUMB man said all that!" - -Huck had made another terrible mistake! 
He was trying his best to keep -the old man from getting the faintest hint of who the Spaniard might -be, and yet his tongue seemed determined to get him into trouble in -spite of all he could do. He made several efforts to creep out of his -scrape, but the old man's eye was upon him and he made blunder after -blunder. Presently the Welshman said: - -"My boy, don't be afraid of me. I wouldn't hurt a hair of your head -for all the world. No--I'd protect you--I'd protect you. This Spaniard -is not deaf and dumb; you've let that slip without intending it; you -can't cover that up now. You know something about that Spaniard that -you want to keep dark. Now trust me--tell me what it is, and trust me ---I won't betray you." - -Huck looked into the old man's honest eyes a moment, then bent over -and whispered in his ear: - -"'Tain't a Spaniard--it's Injun Joe!" - -The Welshman almost jumped out of his chair. In a moment he said: - -"It's all plain enough, now. When you talked about notching ears and -slitting noses I judged that that was your own embellishment, because -white men don't take that sort of revenge. But an Injun! That's a -different matter altogether." - -During breakfast the talk went on, and in the course of it the old man -said that the last thing which he and his sons had done, before going -to bed, was to get a lantern and examine the stile and its vicinity for -marks of blood. They found none, but captured a bulky bundle of-- - -"Of WHAT?" - -If the words had been lightning they could not have leaped with a more -stunning suddenness from Huck's blanched lips. His eyes were staring -wide, now, and his breath suspended--waiting for the answer. The -Welshman started--stared in return--three seconds--five seconds--ten ---then replied: - -"Of burglar's tools. Why, what's the MATTER with you?" - -Huck sank back, panting gently, but deeply, unutterably grateful. The -Welshman eyed him gravely, curiously--and presently said: - -"Yes, burglar's tools. 
That appears to relieve you a good deal. But -what did give you that turn? What were YOU expecting we'd found?" - -Huck was in a close place--the inquiring eye was upon him--he would -have given anything for material for a plausible answer--nothing -suggested itself--the inquiring eye was boring deeper and deeper--a -senseless reply offered--there was no time to weigh it, so at a venture -he uttered it--feebly: - -"Sunday-school books, maybe." - -Poor Huck was too distressed to smile, but the old man laughed loud -and joyously, shook up the details of his anatomy from head to foot, -and ended by saying that such a laugh was money in a-man's pocket, -because it cut down the doctor's bill like everything. Then he added: - -"Poor old chap, you're white and jaded--you ain't well a bit--no -wonder you're a little flighty and off your balance. But you'll come -out of it. Rest and sleep will fetch you out all right, I hope." - -Huck was irritated to think he had been such a goose and betrayed such -a suspicious excitement, for he had dropped the idea that the parcel -brought from the tavern was the treasure, as soon as he had heard the -talk at the widow's stile. He had only thought it was not the treasure, -however--he had not known that it wasn't--and so the suggestion of a -captured bundle was too much for his self-possession. But on the whole -he felt glad the little episode had happened, for now he knew beyond -all question that that bundle was not THE bundle, and so his mind was -at rest and exceedingly comfortable. In fact, everything seemed to be -drifting just in the right direction, now; the treasure must be still -in No. 2, the men would be captured and jailed that day, and he and Tom -could seize the gold that night without any trouble or any fear of -interruption. - -Just as breakfast was completed there was a knock at the door. Huck -jumped for a hiding-place, for he had no mind to be connected even -remotely with the late event. 
The Welshman admitted several ladies and -gentlemen, among them the Widow Douglas, and noticed that groups of -citizens were climbing up the hill--to stare at the stile. So the news -had spread. The Welshman had to tell the story of the night to the -visitors. The widow's gratitude for her preservation was outspoken. - -"Don't say a word about it, madam. There's another that you're more -beholden to than you are to me and my boys, maybe, but he don't allow -me to tell his name. We wouldn't have been there but for him." - -Of course this excited a curiosity so vast that it almost belittled -the main matter--but the Welshman allowed it to eat into the vitals of -his visitors, and through them be transmitted to the whole town, for he -refused to part with his secret. When all else had been learned, the -widow said: - -"I went to sleep reading in bed and slept straight through all that -noise. Why didn't you come and wake me?" - -"We judged it warn't worth while. Those fellows warn't likely to come -again--they hadn't any tools left to work with, and what was the use of -waking you up and scaring you to death? My three negro men stood guard -at your house all the rest of the night. They've just come back." - -More visitors came, and the story had to be told and retold for a -couple of hours more. - -There was no Sabbath-school during day-school vacation, but everybody -was early at church. The stirring event was well canvassed. News came -that not a sign of the two villains had been yet discovered. When the -sermon was finished, Judge Thatcher's wife dropped alongside of Mrs. -Harper as she moved down the aisle with the crowd and said: - -"Is my Becky going to sleep all day? I just expected she would be -tired to death." - -"Your Becky?" - -"Yes," with a startled look--"didn't she stay with you last night?" - -"Why, no." - -Mrs. Thatcher turned pale, and sank into a pew, just as Aunt Polly, -talking briskly with a friend, passed by. 
Aunt Polly said: - -"Good-morning, Mrs. Thatcher. Good-morning, Mrs. Harper. I've got a -boy that's turned up missing. I reckon my Tom stayed at your house last -night--one of you. And now he's afraid to come to church. I've got to -settle with him." - -Mrs. Thatcher shook her head feebly and turned paler than ever. - -"He didn't stay with us," said Mrs. Harper, beginning to look uneasy. -A marked anxiety came into Aunt Polly's face. - -"Joe Harper, have you seen my Tom this morning?" - -"No'm." - -"When did you see him last?" - -Joe tried to remember, but was not sure he could say. The people had -stopped moving out of church. Whispers passed along, and a boding -uneasiness took possession of every countenance. Children were -anxiously questioned, and young teachers. They all said they had not -noticed whether Tom and Becky were on board the ferryboat on the -homeward trip; it was dark; no one thought of inquiring if any one was -missing. One young man finally blurted out his fear that they were -still in the cave! Mrs. Thatcher swooned away. Aunt Polly fell to -crying and wringing her hands. - -The alarm swept from lip to lip, from group to group, from street to -street, and within five minutes the bells were wildly clanging and the -whole town was up! The Cardiff Hill episode sank into instant -insignificance, the burglars were forgotten, horses were saddled, -skiffs were manned, the ferryboat ordered out, and before the horror -was half an hour old, two hundred men were pouring down highroad and -river toward the cave. - -All the long afternoon the village seemed empty and dead. Many women -visited Aunt Polly and Mrs. Thatcher and tried to comfort them. They -cried with them, too, and that was still better than words. All the -tedious night the town waited for news; but when the morning dawned at -last, all the word that came was, "Send more candles--and send food." -Mrs. Thatcher was almost crazed; and Aunt Polly, also. 
Judge Thatcher -sent messages of hope and encouragement from the cave, but they -conveyed no real cheer. - -The old Welshman came home toward daylight, spattered with -candle-grease, smeared with clay, and almost worn out. He found Huck -still in the bed that had been provided for him, and delirious with -fever. The physicians were all at the cave, so the Widow Douglas came -and took charge of the patient. She said she would do her best by him, -because, whether he was good, bad, or indifferent, he was the Lord's, -and nothing that was the Lord's was a thing to be neglected. The -Welshman said Huck had good spots in him, and the widow said: - -"You can depend on it. That's the Lord's mark. He don't leave it off. -He never does. Puts it somewhere on every creature that comes from his -hands." - -Early in the forenoon parties of jaded men began to straggle into the -village, but the strongest of the citizens continued searching. All the -news that could be gained was that remotenesses of the cavern were -being ransacked that had never been visited before; that every corner -and crevice was going to be thoroughly searched; that wherever one -wandered through the maze of passages, lights were to be seen flitting -hither and thither in the distance, and shoutings and pistol-shots sent -their hollow reverberations to the ear down the sombre aisles. In one -place, far from the section usually traversed by tourists, the names -"BECKY & TOM" had been found traced upon the rocky wall with -candle-smoke, and near at hand a grease-soiled bit of ribbon. Mrs. -Thatcher recognized the ribbon and cried over it. She said it was the -last relic she should ever have of her child; and that no other memorial -of her could ever be so precious, because this one parted latest from -the living body before the awful death came. 
Some said that now and -then, in the cave, a far-away speck of light would glimmer, and then a -glorious shout would burst forth and a score of men go trooping down the -echoing aisle--and then a sickening disappointment always followed; the -children were not there; it was only a searcher's light. - -Three dreadful days and nights dragged their tedious hours along, and -the village sank into a hopeless stupor. No one had heart for anything. -The accidental discovery, just made, that the proprietor of the -Temperance Tavern kept liquor on his premises, scarcely fluttered the -public pulse, tremendous as the fact was. In a lucid interval, Huck -feebly led up to the subject of taverns, and finally asked--dimly -dreading the worst--if anything had been discovered at the Temperance -Tavern since he had been ill. - -"Yes," said the widow. - -Huck started up in bed, wild-eyed: - -"What? What was it?" - -"Liquor!--and the place has been shut up. Lie down, child--what a turn -you did give me!" - -"Only tell me just one thing--only just one--please! Was it Tom Sawyer -that found it?" - -The widow burst into tears. "Hush, hush, child, hush! I've told you -before, you must NOT talk. You are very, very sick!" - -Then nothing but liquor had been found; there would have been a great -powwow if it had been the gold. So the treasure was gone forever--gone -forever! But what could she be crying about? Curious that she should -cry. - -These thoughts worked their dim way through Huck's mind, and under the -weariness they gave him he fell asleep. The widow said to herself: - -"There--he's asleep, poor wreck. Tom Sawyer find it! Pity but somebody -could find Tom Sawyer! Ah, there ain't many left, now, that's got hope -enough, or strength enough, either, to go on searching." - - - -CHAPTER XXXI - -NOW to return to Tom and Becky's share in the picnic. 
They tripped -along the murky aisles with the rest of the company, visiting the -familiar wonders of the cave--wonders dubbed with rather -over-descriptive names, such as "The Drawing-Room," "The Cathedral," -"Aladdin's Palace," and so on. Presently the hide-and-seek frolicking -began, and Tom and Becky engaged in it with zeal until the exertion -began to grow a trifle wearisome; then they wandered down a sinuous -avenue holding their candles aloft and reading the tangled web-work of -names, dates, post-office addresses, and mottoes with which the rocky -walls had been frescoed (in candle-smoke). Still drifting along and -talking, they scarcely noticed that they were now in a part of the cave -whose walls were not frescoed. They smoked their own names under an -overhanging shelf and moved on. Presently they came to a place where a -little stream of water, trickling over a ledge and carrying a limestone -sediment with it, had, in the slow-dragging ages, formed a laced and -ruffled Niagara in gleaming and imperishable stone. Tom squeezed his -small body behind it in order to illuminate it for Becky's -gratification. He found that it curtained a sort of steep natural -stairway which was enclosed between narrow walls, and at once the -ambition to be a discoverer seized him. Becky responded to his call, -and they made a smoke-mark for future guidance, and started upon their -quest. They wound this way and that, far down into the secret depths of -the cave, made another mark, and branched off in search of novelties to -tell the upper world about. In one place they found a spacious cavern, -from whose ceiling depended a multitude of shining stalactites of the -length and circumference of a man's leg; they walked all about it, -wondering and admiring, and presently left it by one of the numerous -passages that opened into it. 
This shortly brought them to a bewitching -spring, whose basin was incrusted with a frostwork of glittering -crystals; it was in the midst of a cavern whose walls were supported by -many fantastic pillars which had been formed by the joining of great -stalactites and stalagmites together, the result of the ceaseless -water-drip of centuries. Under the roof vast knots of bats had packed -themselves together, thousands in a bunch; the lights disturbed the -creatures and they came flocking down by hundreds, squeaking and -darting furiously at the candles. Tom knew their ways and the danger of -this sort of conduct. He seized Becky's hand and hurried her into the -first corridor that offered; and none too soon, for a bat struck -Becky's light out with its wing while she was passing out of the -cavern. The bats chased the children a good distance; but the fugitives -plunged into every new passage that offered, and at last got rid of the -perilous things. Tom found a subterranean lake, shortly, which -stretched its dim length away until its shape was lost in the shadows. -He wanted to explore its borders, but concluded that it would be best -to sit down and rest awhile, first. Now, for the first time, the deep -stillness of the place laid a clammy hand upon the spirits of the -children. Becky said: - -"Why, I didn't notice, but it seems ever so long since I heard any of -the others." - -"Come to think, Becky, we are away down below them--and I don't know -how far away north, or south, or east, or whichever it is. We couldn't -hear them here." - -Becky grew apprehensive. - -"I wonder how long we've been down here, Tom? We better start back." - -"Yes, I reckon we better. P'raps we better." - -"Can you find the way, Tom? It's all a mixed-up crookedness to me." - -"I reckon I could find it--but then the bats. If they put our candles -out it will be an awful fix. Let's try some other way, so as not to go -through there." - -"Well. But I hope we won't get lost. 
It would be so awful!" and the -girl shuddered at the thought of the dreadful possibilities. - -They started through a corridor, and traversed it in silence a long -way, glancing at each new opening, to see if there was anything -familiar about the look of it; but they were all strange. Every time -Tom made an examination, Becky would watch his face for an encouraging -sign, and he would say cheerily: - -"Oh, it's all right. This ain't the one, but we'll come to it right -away!" - -But he felt less and less hopeful with each failure, and presently -began to turn off into diverging avenues at sheer random, in desperate -hope of finding the one that was wanted. He still said it was "all -right," but there was such a leaden dread at his heart that the words -had lost their ring and sounded just as if he had said, "All is lost!" -Becky clung to his side in an anguish of fear, and tried hard to keep -back the tears, but they would come. At last she said: - -"Oh, Tom, never mind the bats, let's go back that way! We seem to get -worse and worse off all the time." - -"Listen!" said he. - -Profound silence; silence so deep that even their breathings were -conspicuous in the hush. Tom shouted. The call went echoing down the -empty aisles and died out in the distance in a faint sound that -resembled a ripple of mocking laughter. - -"Oh, don't do it again, Tom, it is too horrid," said Becky. - -"It is horrid, but I better, Becky; they might hear us, you know," and -he shouted again. - -The "might" was even a chillier horror than the ghostly laughter, it -so confessed a perishing hope. The children stood still and listened; -but there was no result. Tom turned upon the back track at once, and -hurried his steps. It was but a little while before a certain -indecision in his manner revealed another fearful fact to Becky--he -could not find his way back! - -"Oh, Tom, you didn't make any marks!" - -"Becky, I was such a fool! Such a fool! I never thought we might want -to come back! 
No--I can't find the way. It's all mixed up." - -"Tom, Tom, we're lost! we're lost! We never can get out of this awful -place! Oh, why DID we ever leave the others!" - -She sank to the ground and burst into such a frenzy of crying that Tom -was appalled with the idea that she might die, or lose her reason. He -sat down by her and put his arms around her; she buried her face in his -bosom, she clung to him, she poured out her terrors, her unavailing -regrets, and the far echoes turned them all to jeering laughter. Tom -begged her to pluck up hope again, and she said she could not. He fell -to blaming and abusing himself for getting her into this miserable -situation; this had a better effect. She said she would try to hope -again, she would get up and follow wherever he might lead if only he -would not talk like that any more. For he was no more to blame than -she, she said. - -So they moved on again--aimlessly--simply at random--all they could do -was to move, keep moving. For a little while, hope made a show of -reviving--not with any reason to back it, but only because it is its -nature to revive when the spring has not been taken out of it by age -and familiarity with failure. - -By-and-by Tom took Becky's candle and blew it out. This economy meant -so much! Words were not needed. Becky understood, and her hope died -again. She knew that Tom had a whole candle and three or four pieces in -his pockets--yet he must economize. - -By-and-by, fatigue began to assert its claims; the children tried to -pay attention, for it was dreadful to think of sitting down when time -was grown to be so precious, moving, in some direction, in any -direction, was at least progress and might bear fruit; but to sit down -was to invite death and shorten its pursuit. - -At last Becky's frail limbs refused to carry her farther. She sat -down. Tom rested with her, and they talked of home, and the friends -there, and the comfortable beds and, above all, the light! 
Becky cried, -and Tom tried to think of some way of comforting her, but all his -encouragements were grown threadbare with use, and sounded like -sarcasms. Fatigue bore so heavily upon Becky that she drowsed off to -sleep. Tom was grateful. He sat looking into her drawn face and saw it -grow smooth and natural under the influence of pleasant dreams; and -by-and-by a smile dawned and rested there. The peaceful face reflected -somewhat of peace and healing into his own spirit, and his thoughts -wandered away to bygone times and dreamy memories. While he was deep in -his musings, Becky woke up with a breezy little laugh--but it was -stricken dead upon her lips, and a groan followed it. - -"Oh, how COULD I sleep! I wish I never, never had waked! No! No, I -don't, Tom! Don't look so! I won't say it again." - -"I'm glad you've slept, Becky; you'll feel rested, now, and we'll find -the way out." - -"We can try, Tom; but I've seen such a beautiful country in my dream. -I reckon we are going there." - -"Maybe not, maybe not. Cheer up, Becky, and let's go on trying." - -They rose up and wandered along, hand in hand and hopeless. They tried -to estimate how long they had been in the cave, but all they knew was -that it seemed days and weeks, and yet it was plain that this could not -be, for their candles were not gone yet. A long time after this--they -could not tell how long--Tom said they must go softly and listen for -dripping water--they must find a spring. They found one presently, and -Tom said it was time to rest again. Both were cruelly tired, yet Becky -said she thought she could go a little farther. She was surprised to -hear Tom dissent. She could not understand it. They sat down, and Tom -fastened his candle to the wall in front of them with some clay. -Thought was soon busy; nothing was said for some time. Then Becky broke -the silence: - -"Tom, I am so hungry!" - -Tom took something out of his pocket. - -"Do you remember this?" said he. - -Becky almost smiled. 
- -"It's our wedding-cake, Tom." - -"Yes--I wish it was as big as a barrel, for it's all we've got." - -"I saved it from the picnic for us to dream on, Tom, the way grown-up -people do with wedding-cake--but it'll be our--" - -She dropped the sentence where it was. Tom divided the cake and Becky -ate with good appetite, while Tom nibbled at his moiety. There was -abundance of cold water to finish the feast with. By-and-by Becky -suggested that they move on again. Tom was silent a moment. Then he -said: - -"Becky, can you bear it if I tell you something?" - -Becky's face paled, but she thought she could. - -"Well, then, Becky, we must stay here, where there's water to drink. -That little piece is our last candle!" - -Becky gave loose to tears and wailings. Tom did what he could to -comfort her, but with little effect. At length Becky said: - -"Tom!" - -"Well, Becky?" - -"They'll miss us and hunt for us!" - -"Yes, they will! Certainly they will!" - -"Maybe they're hunting for us now, Tom." - -"Why, I reckon maybe they are. I hope they are." - -"When would they miss us, Tom?" - -"When they get back to the boat, I reckon." - -"Tom, it might be dark then--would they notice we hadn't come?" - -"I don't know. But anyway, your mother would miss you as soon as they -got home." - -A frightened look in Becky's face brought Tom to his senses and he saw -that he had made a blunder. Becky was not to have gone home that night! -The children became silent and thoughtful. In a moment a new burst of -grief from Becky showed Tom that the thing in his mind had struck hers -also--that the Sabbath morning might be half spent before Mrs. Thatcher -discovered that Becky was not at Mrs. Harper's. 
- -The children fastened their eyes upon their bit of candle and watched -it melt slowly and pitilessly away; saw the half inch of wick stand -alone at last; saw the feeble flame rise and fall, climb the thin -column of smoke, linger at its top a moment, and then--the horror of -utter darkness reigned! - -How long afterward it was that Becky came to a slow consciousness that -she was crying in Tom's arms, neither could tell. All that they knew -was, that after what seemed a mighty stretch of time, both awoke out of -a dead stupor of sleep and resumed their miseries once more. Tom said -it might be Sunday, now--maybe Monday. He tried to get Becky to talk, -but her sorrows were too oppressive, all her hopes were gone. Tom said -that they must have been missed long ago, and no doubt the search was -going on. He would shout and maybe some one would come. He tried it; -but in the darkness the distant echoes sounded so hideously that he -tried it no more. - -The hours wasted away, and hunger came to torment the captives again. -A portion of Tom's half of the cake was left; they divided and ate it. -But they seemed hungrier than before. The poor morsel of food only -whetted desire. - -By-and-by Tom said: - -"SH! Did you hear that?" - -Both held their breath and listened. There was a sound like the -faintest, far-off shout. Instantly Tom answered it, and leading Becky -by the hand, started groping down the corridor in its direction. -Presently he listened again; again the sound was heard, and apparently -a little nearer. - -"It's them!" said Tom; "they're coming! Come along, Becky--we're all -right now!" - -The joy of the prisoners was almost overwhelming. Their speed was -slow, however, because pitfalls were somewhat common, and had to be -guarded against. They shortly came to one and had to stop. It might be -three feet deep, it might be a hundred--there was no passing it at any -rate. Tom got down on his breast and reached as far down as he could. -No bottom. 
They must stay there and wait until the searchers came. They -listened; evidently the distant shoutings were growing more distant! a -moment or two more and they had gone altogether. The heart-sinking -misery of it! Tom whooped until he was hoarse, but it was of no use. He -talked hopefully to Becky; but an age of anxious waiting passed and no -sounds came again. - -The children groped their way back to the spring. The weary time -dragged on; they slept again, and awoke famished and woe-stricken. Tom -believed it must be Tuesday by this time. - -Now an idea struck him. There were some side passages near at hand. It -would be better to explore some of these than bear the weight of the -heavy time in idleness. He took a kite-line from his pocket, tied it to -a projection, and he and Becky started, Tom in the lead, unwinding the -line as he groped along. At the end of twenty steps the corridor ended -in a "jumping-off place." Tom got down on his knees and felt below, and -then as far around the corner as he could reach with his hands -conveniently; he made an effort to stretch yet a little farther to the -right, and at that moment, not twenty yards away, a human hand, holding -a candle, appeared from behind a rock! Tom lifted up a glorious shout, -and instantly that hand was followed by the body it belonged to--Injun -Joe's! Tom was paralyzed; he could not move. He was vastly gratified -the next moment, to see the "Spaniard" take to his heels and get -himself out of sight. Tom wondered that Joe had not recognized his -voice and come over and killed him for testifying in court. But the -echoes must have disguised the voice. Without doubt, that was it, he -reasoned. Tom's fright weakened every muscle in his body. He said to -himself that if he had strength enough to get back to the spring he -would stay there, and nothing should tempt him to run the risk of -meeting Injun Joe again. He was careful to keep from Becky what it was -he had seen. 
He told her he had only shouted "for luck." - -But hunger and wretchedness rise superior to fears in the long run. -Another tedious wait at the spring and another long sleep brought -changes. The children awoke tortured with a raging hunger. Tom believed -that it must be Wednesday or Thursday or even Friday or Saturday, now, -and that the search had been given over. He proposed to explore another -passage. He felt willing to risk Injun Joe and all other terrors. But -Becky was very weak. She had sunk into a dreary apathy and would not be -roused. She said she would wait, now, where she was, and die--it would -not be long. She told Tom to go with the kite-line and explore if he -chose; but she implored him to come back every little while and speak -to her; and she made him promise that when the awful time came, he -would stay by her and hold her hand until all was over. - -Tom kissed her, with a choking sensation in his throat, and made a -show of being confident of finding the searchers or an escape from the -cave; then he took the kite-line in his hand and went groping down one -of the passages on his hands and knees, distressed with hunger and sick -with bodings of coming doom. - - - -CHAPTER XXXII - -TUESDAY afternoon came, and waned to the twilight. The village of St. -Petersburg still mourned. The lost children had not been found. Public -prayers had been offered up for them, and many and many a private -prayer that had the petitioner's whole heart in it; but still no good -news came from the cave. The majority of the searchers had given up the -quest and gone back to their daily avocations, saying that it was plain -the children could never be found. Mrs. Thatcher was very ill, and a -great part of the time delirious. People said it was heartbreaking to -hear her call her child, and raise her head and listen a whole minute -at a time, then lay it wearily down again with a moan. 
Aunt Polly had -drooped into a settled melancholy, and her gray hair had grown almost -white. The village went to its rest on Tuesday night, sad and forlorn. - -Away in the middle of the night a wild peal burst from the village -bells, and in a moment the streets were swarming with frantic half-clad -people, who shouted, "Turn out! turn out! they're found! they're -found!" Tin pans and horns were added to the din, the population massed -itself and moved toward the river, met the children coming in an open -carriage drawn by shouting citizens, thronged around it, joined its -homeward march, and swept magnificently up the main street roaring -huzzah after huzzah! - -The village was illuminated; nobody went to bed again; it was the -greatest night the little town had ever seen. During the first half-hour -a procession of villagers filed through Judge Thatcher's house, seized -the saved ones and kissed them, squeezed Mrs. Thatcher's hand, tried to -speak but couldn't--and drifted out raining tears all over the place. - -Aunt Polly's happiness was complete, and Mrs. Thatcher's nearly so. It -would be complete, however, as soon as the messenger dispatched with -the great news to the cave should get the word to her husband. Tom lay -upon a sofa with an eager auditory about him and told the history of -the wonderful adventure, putting in many striking additions to adorn it -withal; and closed with a description of how he left Becky and went on -an exploring expedition; how he followed two avenues as far as his -kite-line would reach; how he followed a third to the fullest stretch of -the kite-line, and was about to turn back when he glimpsed a far-off -speck that looked like daylight; dropped the line and groped toward it, -pushed his head and shoulders through a small hole, and saw the broad -Mississippi rolling by! And if it had only happened to be night he would -not have seen that speck of daylight and would not have explored that -passage any more! 
He told how he went back for Becky and broke the good -news and she told him not to fret her with such stuff, for she was -tired, and knew she was going to die, and wanted to. He described how he -labored with her and convinced her; and how she almost died for joy when -she had groped to where she actually saw the blue speck of daylight; how -he pushed his way out at the hole and then helped her out; how they sat -there and cried for gladness; how some men came along in a skiff and Tom -hailed them and told them their situation and their famished condition; -how the men didn't believe the wild tale at first, "because," said they, -"you are five miles down the river below the valley the cave is in" ---then took them aboard, rowed to a house, gave them supper, made them -rest till two or three hours after dark and then brought them home. - -Before day-dawn, Judge Thatcher and the handful of searchers with him -were tracked out, in the cave, by the twine clews they had strung -behind them, and informed of the great news. - -Three days and nights of toil and hunger in the cave were not to be -shaken off at once, as Tom and Becky soon discovered. They were -bedridden all of Wednesday and Thursday, and seemed to grow more and -more tired and worn, all the time. Tom got about, a little, on -Thursday, was down-town Friday, and nearly as whole as ever Saturday; -but Becky did not leave her room until Sunday, and then she looked as -if she had passed through a wasting illness. - -Tom learned of Huck's sickness and went to see him on Friday, but -could not be admitted to the bedroom; neither could he on Saturday or -Sunday. He was admitted daily after that, but was warned to keep still -about his adventure and introduce no exciting topic. The Widow Douglas -stayed by to see that he obeyed. 
At home Tom learned of the Cardiff -Hill event; also that the "ragged man's" body had eventually been found -in the river near the ferry-landing; he had been drowned while trying -to escape, perhaps. - -About a fortnight after Tom's rescue from the cave, he started off to -visit Huck, who had grown plenty strong enough, now, to hear exciting -talk, and Tom had some that would interest him, he thought. Judge -Thatcher's house was on Tom's way, and he stopped to see Becky. The -Judge and some friends set Tom to talking, and some one asked him -ironically if he wouldn't like to go to the cave again. Tom said he -thought he wouldn't mind it. The Judge said: - -"Well, there are others just like you, Tom, I've not the least doubt. -But we have taken care of that. Nobody will get lost in that cave any -more." - -"Why?" - -"Because I had its big door sheathed with boiler iron two weeks ago, -and triple-locked--and I've got the keys." - -Tom turned as white as a sheet. - -"What's the matter, boy! Here, run, somebody! Fetch a glass of water!" - -The water was brought and thrown into Tom's face. - -"Ah, now you're all right. What was the matter with you, Tom?" - -"Oh, Judge, Injun Joe's in the cave!" - - - -CHAPTER XXXIII - -WITHIN a few minutes the news had spread, and a dozen skiff-loads of -men were on their way to McDougal's cave, and the ferryboat, well -filled with passengers, soon followed. Tom Sawyer was in the skiff that -bore Judge Thatcher. - -When the cave door was unlocked, a sorrowful sight presented itself in -the dim twilight of the place. Injun Joe lay stretched upon the ground, -dead, with his face close to the crack of the door, as if his longing -eyes had been fixed, to the latest moment, upon the light and the cheer -of the free world outside. Tom was touched, for he knew by his own -experience how this wretch had suffered. 
His pity was moved, but -nevertheless he felt an abounding sense of relief and security, now, -which revealed to him in a degree which he had not fully appreciated -before how vast a weight of dread had been lying upon him since the day -he lifted his voice against this bloody-minded outcast. - -Injun Joe's bowie-knife lay close by, its blade broken in two. The -great foundation-beam of the door had been chipped and hacked through, -with tedious labor; useless labor, too, it was, for the native rock -formed a sill outside it, and upon that stubborn material the knife had -wrought no effect; the only damage done was to the knife itself. But if -there had been no stony obstruction there the labor would have been -useless still, for if the beam had been wholly cut away Injun Joe could -not have squeezed his body under the door, and he knew it. So he had -only hacked that place in order to be doing something--in order to pass -the weary time--in order to employ his tortured faculties. Ordinarily -one could find half a dozen bits of candle stuck around in the crevices -of this vestibule, left there by tourists; but there were none now. The -prisoner had searched them out and eaten them. He had also contrived to -catch a few bats, and these, also, he had eaten, leaving only their -claws. The poor unfortunate had starved to death. In one place, near at -hand, a stalagmite had been slowly growing up from the ground for ages, -builded by the water-drip from a stalactite overhead. The captive had -broken off the stalagmite, and upon the stump had placed a stone, -wherein he had scooped a shallow hollow to catch the precious drop -that fell once in every three minutes with the dreary regularity of a -clock-tick--a dessertspoonful once in four and twenty hours. 
That drop -was falling when the Pyramids were new; when Troy fell; when the -foundations of Rome were laid; when Christ was crucified; when the -Conqueror created the British empire; when Columbus sailed; when the -massacre at Lexington was "news." It is falling now; it will still be -falling when all these things shall have sunk down the afternoon of -history, and the twilight of tradition, and been swallowed up in the -thick night of oblivion. Has everything a purpose and a mission? Did -this drop fall patiently during five thousand years to be ready for -this flitting human insect's need? and has it another important object -to accomplish ten thousand years to come? No matter. It is many and -many a year since the hapless half-breed scooped out the stone to catch -the priceless drops, but to this day the tourist stares longest at that -pathetic stone and that slow-dropping water when he comes to see the -wonders of McDougal's cave. Injun Joe's cup stands first in the list of -the cavern's marvels; even "Aladdin's Palace" cannot rival it. - -Injun Joe was buried near the mouth of the cave; and people flocked -there in boats and wagons from the towns and from all the farms and -hamlets for seven miles around; they brought their children, and all -sorts of provisions, and confessed that they had had almost as -satisfactory a time at the funeral as they could have had at the -hanging. - -This funeral stopped the further growth of one thing--the petition to -the governor for Injun Joe's pardon. The petition had been largely -signed; many tearful and eloquent meetings had been held, and a -committee of sappy women been appointed to go in deep mourning and wail -around the governor, and implore him to be a merciful ass and trample -his duty under foot. Injun Joe was believed to have killed five -citizens of the village, but what of that? 
If he had been Satan himself -there would have been plenty of weaklings ready to scribble their names -to a pardon-petition, and drip a tear on it from their permanently -impaired and leaky water-works. - -The morning after the funeral Tom took Huck to a private place to have -an important talk. Huck had learned all about Tom's adventure from the -Welshman and the Widow Douglas, by this time, but Tom said he reckoned -there was one thing they had not told him; that thing was what he -wanted to talk about now. Huck's face saddened. He said: - -"I know what it is. You got into No. 2 and never found anything but -whiskey. Nobody told me it was you; but I just knowed it must 'a' ben -you, soon as I heard 'bout that whiskey business; and I knowed you -hadn't got the money becuz you'd 'a' got at me some way or other and -told me even if you was mum to everybody else. Tom, something's always -told me we'd never get holt of that swag." - -"Why, Huck, I never told on that tavern-keeper. YOU know his tavern -was all right the Saturday I went to the picnic. Don't you remember you -was to watch there that night?" - -"Oh yes! Why, it seems 'bout a year ago. It was that very night that I -follered Injun Joe to the widder's." - -"YOU followed him?" - -"Yes--but you keep mum. I reckon Injun Joe's left friends behind him, -and I don't want 'em souring on me and doing me mean tricks. If it -hadn't ben for me he'd be down in Texas now, all right." - -Then Huck told his entire adventure in confidence to Tom, who had only -heard of the Welshman's part of it before. - -"Well," said Huck, presently, coming back to the main question, -"whoever nipped the whiskey in No. 2, nipped the money, too, I reckon ---anyways it's a goner for us, Tom." - -"Huck, that money wasn't ever in No. 2!" - -"What!" Huck searched his comrade's face keenly. "Tom, have you got on -the track of that money again?" - -"Huck, it's in the cave!" - -Huck's eyes blazed. - -"Say it again, Tom." 
- -"The money's in the cave!" - -"Tom--honest injun, now--is it fun, or earnest?" - -"Earnest, Huck--just as earnest as ever I was in my life. Will you go -in there with me and help get it out?" - -"I bet I will! I will if it's where we can blaze our way to it and not -get lost." - -"Huck, we can do that without the least little bit of trouble in the -world." - -"Good as wheat! What makes you think the money's--" - -"Huck, you just wait till we get in there. If we don't find it I'll -agree to give you my drum and every thing I've got in the world. I -will, by jings." - -"All right--it's a whiz. When do you say?" - -"Right now, if you say it. Are you strong enough?" - -"Is it far in the cave? I ben on my pins a little, three or four days, -now, but I can't walk more'n a mile, Tom--least I don't think I could." - -"It's about five mile into there the way anybody but me would go, -Huck, but there's a mighty short cut that they don't anybody but me -know about. Huck, I'll take you right to it in a skiff. I'll float the -skiff down there, and I'll pull it back again all by myself. You -needn't ever turn your hand over." - -"Less start right off, Tom." - -"All right. We want some bread and meat, and our pipes, and a little -bag or two, and two or three kite-strings, and some of these -new-fangled things they call lucifer matches. I tell you, many's -the time I wished I had some when I was in there before." - -A trifle after noon the boys borrowed a small skiff from a citizen who -was absent, and got under way at once. When they were several miles -below "Cave Hollow," Tom said: - -"Now you see this bluff here looks all alike all the way down from the -cave hollow--no houses, no wood-yards, bushes all alike. But do you see -that white place up yonder where there's been a landslide? Well, that's -one of my marks. We'll get ashore, now." - -They landed. - -"Now, Huck, where we're a-standing you could touch that hole I got out -of with a fishing-pole. 
See if you can find it." - -Huck searched all the place about, and found nothing. Tom proudly -marched into a thick clump of sumach bushes and said: - -"Here you are! Look at it, Huck; it's the snuggest hole in this -country. You just keep mum about it. All along I've been wanting to be -a robber, but I knew I'd got to have a thing like this, and where to -run across it was the bother. We've got it now, and we'll keep it -quiet, only we'll let Joe Harper and Ben Rogers in--because of course -there's got to be a Gang, or else there wouldn't be any style about it. -Tom Sawyer's Gang--it sounds splendid, don't it, Huck?" - -"Well, it just does, Tom. And who'll we rob?" - -"Oh, most anybody. Waylay people--that's mostly the way." - -"And kill them?" - -"No, not always. Hive them in the cave till they raise a ransom." - -"What's a ransom?" - -"Money. You make them raise all they can, off'n their friends; and -after you've kept them a year, if it ain't raised then you kill them. -That's the general way. Only you don't kill the women. You shut up the -women, but you don't kill them. They're always beautiful and rich, and -awfully scared. You take their watches and things, but you always take -your hat off and talk polite. They ain't anybody as polite as robbers ---you'll see that in any book. Well, the women get to loving you, and -after they've been in the cave a week or two weeks they stop crying and -after that you couldn't get them to leave. If you drove them out they'd -turn right around and come back. It's so in all the books." - -"Why, it's real bully, Tom. I believe it's better'n to be a pirate." - -"Yes, it's better in some ways, because it's close to home and -circuses and all that." - -By this time everything was ready and the boys entered the hole, Tom -in the lead. They toiled their way to the farther end of the tunnel, -then made their spliced kite-strings fast and moved on. 
A few steps -brought them to the spring, and Tom felt a shudder quiver all through -him. He showed Huck the fragment of candle-wick perched on a lump of -clay against the wall, and described how he and Becky had watched the -flame struggle and expire. - -The boys began to quiet down to whispers, now, for the stillness and -gloom of the place oppressed their spirits. They went on, and presently -entered and followed Tom's other corridor until they reached the -"jumping-off place." The candles revealed the fact that it was not -really a precipice, but only a steep clay hill twenty or thirty feet -high. Tom whispered: - -"Now I'll show you something, Huck." - -He held his candle aloft and said: - -"Look as far around the corner as you can. Do you see that? There--on -the big rock over yonder--done with candle-smoke." - -"Tom, it's a CROSS!" - -"NOW where's your Number Two? 'UNDER THE CROSS,' hey? Right yonder's -where I saw Injun Joe poke up his candle, Huck!" - -Huck stared at the mystic sign awhile, and then said with a shaky voice: - -"Tom, less git out of here!" - -"What! and leave the treasure?" - -"Yes--leave it. Injun Joe's ghost is round about there, certain." - -"No it ain't, Huck, no it ain't. It would ha'nt the place where he -died--away out at the mouth of the cave--five mile from here." - -"No, Tom, it wouldn't. It would hang round the money. I know the ways -of ghosts, and so do you." - -Tom began to fear that Huck was right. Misgivings gathered in his -mind. But presently an idea occurred to him-- - -"Lookyhere, Huck, what fools we're making of ourselves! Injun Joe's -ghost ain't a going to come around where there's a cross!" - -The point was well taken. It had its effect. - -"Tom, I didn't think of that. But that's so. It's luck for us, that -cross is. I reckon we'll climb down there and have a hunt for that box." - -Tom went first, cutting rude steps in the clay hill as he descended. -Huck followed. 
Four avenues opened out of the small cavern which the -great rock stood in. The boys examined three of them with no result. -They found a small recess in the one nearest the base of the rock, with -a pallet of blankets spread down in it; also an old suspender, some -bacon rind, and the well-gnawed bones of two or three fowls. But there -was no money-box. The lads searched and researched this place, but in -vain. Tom said: - -"He said UNDER the cross. Well, this comes nearest to being under the -cross. It can't be under the rock itself, because that sets solid on -the ground." - -They searched everywhere once more, and then sat down discouraged. -Huck could suggest nothing. By-and-by Tom said: - -"Lookyhere, Huck, there's footprints and some candle-grease on the -clay about one side of this rock, but not on the other sides. Now, -what's that for? I bet you the money IS under the rock. I'm going to -dig in the clay." - -"That ain't no bad notion, Tom!" said Huck with animation. - -Tom's "real Barlow" was out at once, and he had not dug four inches -before he struck wood. - -"Hey, Huck!--you hear that?" - -Huck began to dig and scratch now. Some boards were soon uncovered and -removed. They had concealed a natural chasm which led under the rock. -Tom got into this and held his candle as far under the rock as he -could, but said he could not see to the end of the rift. He proposed to -explore. He stooped and passed under; the narrow way descended -gradually. He followed its winding course, first to the right, then to -the left, Huck at his heels. Tom turned a short curve, by-and-by, and -exclaimed: - -"My goodness, Huck, lookyhere!" - -It was the treasure-box, sure enough, occupying a snug little cavern, -along with an empty powder-keg, a couple of guns in leather cases, two -or three pairs of old moccasins, a leather belt, and some other rubbish -well soaked with the water-drip. - -"Got it at last!" said Huck, ploughing among the tarnished coins with -his hand. 
"My, but we're rich, Tom!" - -"Huck, I always reckoned we'd get it. It's just too good to believe, -but we HAVE got it, sure! Say--let's not fool around here. Let's snake -it out. Lemme see if I can lift the box." - -It weighed about fifty pounds. Tom could lift it, after an awkward -fashion, but could not carry it conveniently. - -"I thought so," he said; "THEY carried it like it was heavy, that day -at the ha'nted house. I noticed that. I reckon I was right to think of -fetching the little bags along." - -The money was soon in the bags and the boys took it up to the cross -rock. - -"Now less fetch the guns and things," said Huck. - -"No, Huck--leave them there. They're just the tricks to have when we -go to robbing. We'll keep them there all the time, and we'll hold our -orgies there, too. It's an awful snug place for orgies." - -"What orgies?" - -"I dono. But robbers always have orgies, and of course we've got to -have them, too. Come along, Huck, we've been in here a long time. It's -getting late, I reckon. I'm hungry, too. We'll eat and smoke when we -get to the skiff." - -They presently emerged into the clump of sumach bushes, looked warily -out, found the coast clear, and were soon lunching and smoking in the -skiff. As the sun dipped toward the horizon they pushed out and got -under way. Tom skimmed up the shore through the long twilight, chatting -cheerily with Huck, and landed shortly after dark. - -"Now, Huck," said Tom, "we'll hide the money in the loft of the -widow's woodshed, and I'll come up in the morning and we'll count it -and divide, and then we'll hunt up a place out in the woods for it -where it will be safe. Just you lay quiet here and watch the stuff till -I run and hook Benny Taylor's little wagon; I won't be gone a minute." - -He disappeared, and presently returned with the wagon, put the two -small sacks into it, threw some old rags on top of them, and started -off, dragging his cargo behind him. 
When the boys reached the -Welshman's house, they stopped to rest. Just as they were about to move -on, the Welshman stepped out and said: - -"Hallo, who's that?" - -"Huck and Tom Sawyer." - -"Good! Come along with me, boys, you are keeping everybody waiting. -Here--hurry up, trot ahead--I'll haul the wagon for you. Why, it's not -as light as it might be. Got bricks in it?--or old metal?" - -"Old metal," said Tom. - -"I judged so; the boys in this town will take more trouble and fool -away more time hunting up six bits' worth of old iron to sell to the -foundry than they would to make twice the money at regular work. But -that's human nature--hurry along, hurry along!" - -The boys wanted to know what the hurry was about. - -"Never mind; you'll see, when we get to the Widow Douglas'." - -Huck said with some apprehension--for he was long used to being -falsely accused: - -"Mr. Jones, we haven't been doing nothing." - -The Welshman laughed. - -"Well, I don't know, Huck, my boy. I don't know about that. Ain't you -and the widow good friends?" - -"Yes. Well, she's ben good friends to me, anyway." - -"All right, then. What do you want to be afraid for?" - -This question was not entirely answered in Huck's slow mind before he -found himself pushed, along with Tom, into Mrs. Douglas' drawing-room. -Mr. Jones left the wagon near the door and followed. - -The place was grandly lighted, and everybody that was of any -consequence in the village was there. The Thatchers were there, the -Harpers, the Rogerses, Aunt Polly, Sid, Mary, the minister, the editor, -and a great many more, and all dressed in their best. The widow -received the boys as heartily as any one could well receive two such -looking beings. They were covered with clay and candle-grease. Aunt -Polly blushed crimson with humiliation, and frowned and shook her head -at Tom. Nobody suffered half as much as the two boys did, however. Mr. 
-Jones said: - -"Tom wasn't at home, yet, so I gave him up; but I stumbled on him and -Huck right at my door, and so I just brought them along in a hurry." - -"And you did just right," said the widow. "Come with me, boys." - -She took them to a bedchamber and said: - -"Now wash and dress yourselves. Here are two new suits of clothes ---shirts, socks, everything complete. They're Huck's--no, no thanks, -Huck--Mr. Jones bought one and I the other. But they'll fit both of you. -Get into them. We'll wait--come down when you are slicked up enough." - -Then she left. - - - -CHAPTER XXXIV - -HUCK said: "Tom, we can slope, if we can find a rope. The window ain't -high from the ground." - -"Shucks! what do you want to slope for?" - -"Well, I ain't used to that kind of a crowd. I can't stand it. I ain't -going down there, Tom." - -"Oh, bother! It ain't anything. I don't mind it a bit. I'll take care -of you." - -Sid appeared. - -"Tom," said he, "auntie has been waiting for you all the afternoon. -Mary got your Sunday clothes ready, and everybody's been fretting about -you. Say--ain't this grease and clay, on your clothes?" - -"Now, Mr. Siddy, you jist 'tend to your own business. What's all this -blow-out about, anyway?" - -"It's one of the widow's parties that she's always having. This time -it's for the Welshman and his sons, on account of that scrape they -helped her out of the other night. And say--I can tell you something, -if you want to know." - -"Well, what?" - -"Why, old Mr. Jones is going to try to spring something on the people -here to-night, but I overheard him tell auntie to-day about it, as a -secret, but I reckon it's not much of a secret now. Everybody knows ---the widow, too, for all she tries to let on she don't. Mr. Jones was -bound Huck should be here--couldn't get along with his grand secret -without Huck, you know!" - -"Secret about what, Sid?" - -"About Huck tracking the robbers to the widow's. I reckon Mr. 
Jones -was going to make a grand time over his surprise, but I bet you it will -drop pretty flat." - -Sid chuckled in a very contented and satisfied way. - -"Sid, was it you that told?" - -"Oh, never mind who it was. SOMEBODY told--that's enough." - -"Sid, there's only one person in this town mean enough to do that, and -that's you. If you had been in Huck's place you'd 'a' sneaked down the -hill and never told anybody on the robbers. You can't do any but mean -things, and you can't bear to see anybody praised for doing good ones. -There--no thanks, as the widow says"--and Tom cuffed Sid's ears and -helped him to the door with several kicks. "Now go and tell auntie if -you dare--and to-morrow you'll catch it!" - -Some minutes later the widow's guests were at the supper-table, and a -dozen children were propped up at little side-tables in the same room, -after the fashion of that country and that day. At the proper time Mr. -Jones made his little speech, in which he thanked the widow for the -honor she was doing himself and his sons, but said that there was -another person whose modesty-- - -And so forth and so on. He sprung his secret about Huck's share in the -adventure in the finest dramatic manner he was master of, but the -surprise it occasioned was largely counterfeit and not as clamorous and -effusive as it might have been under happier circumstances. However, -the widow made a pretty fair show of astonishment, and heaped so many -compliments and so much gratitude upon Huck that he almost forgot the -nearly intolerable discomfort of his new clothes in the entirely -intolerable discomfort of being set up as a target for everybody's gaze -and everybody's laudations. - -The widow said she meant to give Huck a home under her roof and have -him educated; and that when she could spare the money she would start -him in business in a modest way. Tom's chance was come. He said: - -"Huck don't need it. Huck's rich." 
- -Nothing but a heavy strain upon the good manners of the company kept -back the due and proper complimentary laugh at this pleasant joke. But -the silence was a little awkward. Tom broke it: - -"Huck's got money. Maybe you don't believe it, but he's got lots of -it. Oh, you needn't smile--I reckon I can show you. You just wait a -minute." - -Tom ran out of doors. The company looked at each other with a -perplexed interest--and inquiringly at Huck, who was tongue-tied. - -"Sid, what ails Tom?" said Aunt Polly. "He--well, there ain't ever any -making of that boy out. I never--" - -Tom entered, struggling with the weight of his sacks, and Aunt Polly -did not finish her sentence. Tom poured the mass of yellow coin upon -the table and said: - -"There--what did I tell you? Half of it's Huck's and half of it's mine!" - -The spectacle took the general breath away. All gazed, nobody spoke -for a moment. Then there was a unanimous call for an explanation. Tom -said he could furnish it, and he did. The tale was long, but brimful of -interest. There was scarcely an interruption from any one to break the -charm of its flow. When he had finished, Mr. Jones said: - -"I thought I had fixed up a little surprise for this occasion, but it -don't amount to anything now. This one makes it sing mighty small, I'm -willing to allow." - -The money was counted. The sum amounted to a little over twelve -thousand dollars. It was more than any one present had ever seen at one -time before, though several persons were there who were worth -considerably more than that in property. - - - -CHAPTER XXXV - -THE reader may rest satisfied that Tom's and Huck's windfall made a -mighty stir in the poor little village of St. Petersburg. So vast a -sum, all in actual cash, seemed next to incredible. It was talked -about, gloated over, glorified, until the reason of many of the -citizens tottered under the strain of the unhealthy excitement. Every -"haunted" house in St. 
Petersburg and the neighboring villages was -dissected, plank by plank, and its foundations dug up and ransacked for -hidden treasure--and not by boys, but men--pretty grave, unromantic -men, too, some of them. Wherever Tom and Huck appeared they were -courted, admired, stared at. The boys were not able to remember that -their remarks had possessed weight before; but now their sayings were -treasured and repeated; everything they did seemed somehow to be -regarded as remarkable; they had evidently lost the power of doing and -saying commonplace things; moreover, their past history was raked up -and discovered to bear marks of conspicuous originality. The village -paper published biographical sketches of the boys. - -The Widow Douglas put Huck's money out at six per cent., and Judge -Thatcher did the same with Tom's at Aunt Polly's request. Each lad had -an income, now, that was simply prodigious--a dollar for every week-day -in the year and half of the Sundays. It was just what the minister got ---no, it was what he was promised--he generally couldn't collect it. A -dollar and a quarter a week would board, lodge, and school a boy in -those old simple days--and clothe him and wash him, too, for that -matter. - -Judge Thatcher had conceived a great opinion of Tom. He said that no -commonplace boy would ever have got his daughter out of the cave. When -Becky told her father, in strict confidence, how Tom had taken her -whipping at school, the Judge was visibly moved; and when she pleaded -grace for the mighty lie which Tom had told in order to shift that -whipping from her shoulders to his own, the Judge said with a fine -outburst that it was a noble, a generous, a magnanimous lie--a lie that -was worthy to hold up its head and march down through history breast to -breast with George Washington's lauded Truth about the hatchet! Becky -thought her father had never looked so tall and so superb as when he -walked the floor and stamped his foot and said that. 
She went straight -off and told Tom about it. - -Judge Thatcher hoped to see Tom a great lawyer or a great soldier some -day. He said he meant to look to it that Tom should be admitted to the -National Military Academy and afterward trained in the best law school -in the country, in order that he might be ready for either career or -both. - -Huck Finn's wealth and the fact that he was now under the Widow -Douglas' protection introduced him into society--no, dragged him into -it, hurled him into it--and his sufferings were almost more than he -could bear. The widow's servants kept him clean and neat, combed and -brushed, and they bedded him nightly in unsympathetic sheets that had -not one little spot or stain which he could press to his heart and know -for a friend. He had to eat with a knife and fork; he had to use -napkin, cup, and plate; he had to learn his book, he had to go to -church; he had to talk so properly that speech was become insipid in -his mouth; whithersoever he turned, the bars and shackles of -civilization shut him in and bound him hand and foot. - -He bravely bore his miseries three weeks, and then one day turned up -missing. For forty-eight hours the widow hunted for him everywhere in -great distress. The public were profoundly concerned; they searched -high and low, they dragged the river for his body. Early the third -morning Tom Sawyer wisely went poking among some old empty hogsheads -down behind the abandoned slaughter-house, and in one of them he found -the refugee. Huck had slept there; he had just breakfasted upon some -stolen odds and ends of food, and was lying off, now, in comfort, with -his pipe. He was unkempt, uncombed, and clad in the same old ruin of -rags that had made him picturesque in the days when he was free and -happy. Tom routed him out, told him the trouble he had been causing, -and urged him to go home. Huck's face lost its tranquil content, and -took a melancholy cast. He said: - -"Don't talk about it, Tom. 
I've tried it, and it don't work; it don't -work, Tom. It ain't for me; I ain't used to it. The widder's good to -me, and friendly; but I can't stand them ways. She makes me get up just -at the same time every morning; she makes me wash, they comb me all to -thunder; she won't let me sleep in the woodshed; I got to wear them -blamed clothes that just smothers me, Tom; they don't seem to any air -git through 'em, somehow; and they're so rotten nice that I can't set -down, nor lay down, nor roll around anywher's; I hain't slid on a -cellar-door for--well, it 'pears to be years; I got to go to church and -sweat and sweat--I hate them ornery sermons! I can't ketch a fly in -there, I can't chaw. I got to wear shoes all Sunday. The widder eats by -a bell; she goes to bed by a bell; she gits up by a bell--everything's -so awful reg'lar a body can't stand it." - -"Well, everybody does that way, Huck." - -"Tom, it don't make no difference. I ain't everybody, and I can't -STAND it. It's awful to be tied up so. And grub comes too easy--I don't -take no interest in vittles, that way. I got to ask to go a-fishing; I -got to ask to go in a-swimming--dern'd if I hain't got to ask to do -everything. Well, I'd got to talk so nice it wasn't no comfort--I'd got -to go up in the attic and rip out awhile, every day, to git a taste in -my mouth, or I'd a died, Tom. The widder wouldn't let me smoke; she -wouldn't let me yell, she wouldn't let me gape, nor stretch, nor -scratch, before folks--" [Then with a spasm of special irritation and -injury]--"And dad fetch it, she prayed all the time! I never see such a -woman! I HAD to shove, Tom--I just had to. And besides, that school's -going to open, and I'd a had to go to it--well, I wouldn't stand THAT, -Tom. Looky here, Tom, being rich ain't what it's cracked up to be. It's -just worry and worry, and sweat and sweat, and a-wishing you was dead -all the time. 
Now these clothes suits me, and this bar'l suits me, and -I ain't ever going to shake 'em any more. Tom, I wouldn't ever got into -all this trouble if it hadn't 'a' ben for that money; now you just take -my sheer of it along with your'n, and gimme a ten-center sometimes--not -many times, becuz I don't give a dern for a thing 'thout it's tollable -hard to git--and you go and beg off for me with the widder." - -"Oh, Huck, you know I can't do that. 'Tain't fair; and besides if -you'll try this thing just a while longer you'll come to like it." - -"Like it! Yes--the way I'd like a hot stove if I was to set on it long -enough. No, Tom, I won't be rich, and I won't live in them cussed -smothery houses. I like the woods, and the river, and hogsheads, and -I'll stick to 'em, too. Blame it all! just as we'd got guns, and a -cave, and all just fixed to rob, here this dern foolishness has got to -come up and spile it all!" - -Tom saw his opportunity-- - -"Lookyhere, Huck, being rich ain't going to keep me back from turning -robber." - -"No! Oh, good-licks; are you in real dead-wood earnest, Tom?" - -"Just as dead earnest as I'm sitting here. But Huck, we can't let you -into the gang if you ain't respectable, you know." - -Huck's joy was quenched. - -"Can't let me in, Tom? Didn't you let me go for a pirate?" - -"Yes, but that's different. A robber is more high-toned than what a -pirate is--as a general thing. In most countries they're awful high up -in the nobility--dukes and such." - -"Now, Tom, hain't you always ben friendly to me? You wouldn't shet me -out, would you, Tom? You wouldn't do that, now, WOULD you, Tom?" - -"Huck, I wouldn't want to, and I DON'T want to--but what would people -say? Why, they'd say, 'Mph! Tom Sawyer's Gang! pretty low characters in -it!' They'd mean you, Huck. You wouldn't like that, and I wouldn't." - -Huck was silent for some time, engaged in a mental struggle. 
Finally -he said: - -"Well, I'll go back to the widder for a month and tackle it and see if -I can come to stand it, if you'll let me b'long to the gang, Tom." - -"All right, Huck, it's a whiz! Come along, old chap, and I'll ask the -widow to let up on you a little, Huck." - -"Will you, Tom--now will you? That's good. If she'll let up on some of -the roughest things, I'll smoke private and cuss private, and crowd -through or bust. When you going to start the gang and turn robbers?" - -"Oh, right off. We'll get the boys together and have the initiation -to-night, maybe." - -"Have the which?" - -"Have the initiation." - -"What's that?" - -"It's to swear to stand by one another, and never tell the gang's -secrets, even if you're chopped all to flinders, and kill anybody and -all his family that hurts one of the gang." - -"That's gay--that's mighty gay, Tom, I tell you." - -"Well, I bet it is. And all that swearing's got to be done at -midnight, in the lonesomest, awfulest place you can find--a ha'nted -house is the best, but they're all ripped up now." - -"Well, midnight's good, anyway, Tom." - -"Yes, so it is. And you've got to swear on a coffin, and sign it with -blood." - -"Now, that's something LIKE! Why, it's a million times bullier than -pirating. I'll stick to the widder till I rot, Tom; and if I git to be -a reg'lar ripper of a robber, and everybody talking 'bout it, I reckon -she'll be proud she snaked me in out of the wet." - - - -CONCLUSION - -SO endeth this chronicle. It being strictly a history of a BOY, it -must stop here; the story could not go much further without becoming -the history of a MAN. When one writes a novel about grown people, he -knows exactly where to stop--that is, with a marriage; but when he -writes of juveniles, he must stop where he best can. - -Most of the characters that perform in this book still live, and are -prosperous and happy. 
Some day it may seem worth while to take up the -story of the younger ones again and see what sort of men and women they -turned out to be; therefore it will be wisest not to reveal any of that -part of their lives at present. diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index feb464b2f284e..2c4f25d5ae693 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -129,12 +129,13 @@ func (r *Reader) readContinuedLineSlice() ([]byte, error) { } // Optimistically assume that we have started to buffer the next line - // and it starts with an ASCII letter (the next header key), so we can - // avoid copying that buffered data around in memory and skipping over - // non-existent whitespace. + // and it starts with an ASCII letter (the next header key), or a blank + // line, so we can avoid copying that buffered data around in memory + // and skipping over non-existent whitespace. if r.R.Buffered() > 1 { - peek, err := r.R.Peek(1) - if err == nil && isASCIILetter(peek[0]) { + peek, _ := r.R.Peek(2) + if len(peek) > 0 && (isASCIILetter(peek[0]) || peek[0] == '\n') || + len(peek) == 2 && peek[0] == '\r' && peek[1] == '\n' { return trim(line), nil } } diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go index 7cff7b4579754..f85fbdc36d7d9 100644 --- a/src/net/textproto/reader_test.go +++ b/src/net/textproto/reader_test.go @@ -382,31 +382,25 @@ Non-Interned: test func BenchmarkReadMIMEHeader(b *testing.B) { b.ReportAllocs() - var buf bytes.Buffer - br := bufio.NewReader(&buf) - r := NewReader(br) - for i := 0; i < b.N; i++ { - var want int - var find string - if (i & 1) == 1 { - buf.WriteString(clientHeaders) - want = 10 - find = "Cookie" - } else { - buf.WriteString(serverHeaders) - want = 9 - find = "Via" - } - h, err := r.ReadMIMEHeader() - if err != nil { - b.Fatal(err) - } - if len(h) != want { - b.Fatalf("wrong number of headers: got %d, want %d", len(h), want) - } - if _, ok := h[find]; !ok { - 
b.Fatalf("did not find key %s", find) - } + for _, set := range []struct { + name string + headers string + }{ + {"client_headers", clientHeaders}, + {"server_headers", serverHeaders}, + } { + b.Run(set.name, func(b *testing.B) { + var buf bytes.Buffer + br := bufio.NewReader(&buf) + r := NewReader(br) + + for i := 0; i < b.N; i++ { + buf.WriteString(set.headers) + if _, err := r.ReadMIMEHeader(); err != nil { + b.Fatal(err) + } + } + }) } } diff --git a/src/net/timeout_test.go b/src/net/timeout_test.go index 7c7d0c89938c9..9599fa1d3e886 100644 --- a/src/net/timeout_test.go +++ b/src/net/timeout_test.go @@ -812,6 +812,9 @@ func (b neverEnding) Read(p []byte) (int, error) { } func testVariousDeadlines(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test on plan9; see golang.org/issue/26945") + } type result struct { n int64 err error diff --git a/src/net/udpsock_posix.go b/src/net/udpsock_posix.go index b0adf9585bd32..611fe51df91f9 100644 --- a/src/net/udpsock_posix.go +++ b/src/net/udpsock_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net diff --git a/src/net/udpsock_test.go b/src/net/udpsock_test.go index 494064444ec15..397b6649cda29 100644 --- a/src/net/udpsock_test.go +++ b/src/net/udpsock_test.go @@ -337,6 +337,8 @@ func TestUDPZeroBytePayload(t *testing.T) { switch runtime.GOOS { case "nacl", "plan9": t.Skipf("not supported on %s", runtime.GOOS) + case "darwin": + testenv.SkipFlaky(t, 29225) } c, err := newLocalPacketListener("udp") @@ -353,19 +355,18 @@ func TestUDPZeroBytePayload(t *testing.T) { if n != 0 { t.Errorf("got %d; want 0", n) } - c.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) + c.SetReadDeadline(time.Now().Add(30 * time.Second)) var b [1]byte + var name string if genericRead { _, err = c.(Conn).Read(b[:]) + name = "Read" } else { _, _, err = c.ReadFrom(b[:]) + name = "ReadFrom" } - switch err { - case nil: // ReadFrom succeeds - default: // Read may timeout, it depends on the platform - if nerr, ok := err.(Error); !ok || !nerr.Timeout() { - t.Fatal(err) - } + if err != nil { + t.Errorf("%s of zero byte packet failed: %v", name, err) } } } diff --git a/src/net/unixsock.go b/src/net/unixsock.go index 3ae62f6a8b84a..ae912a46ddb8c 100644 --- a/src/net/unixsock.go +++ b/src/net/unixsock.go @@ -12,8 +12,11 @@ import ( "time" ) -// BUG(mikio): On JS, NaCl, Plan 9 and Windows, methods and functions -// related to UnixConn and UnixListener are not implemented. +// BUG(mikio): On JS, NaCl and Plan 9, methods and functions related +// to UnixConn and UnixListener are not implemented. + +// BUG(mikio): On Windows, methods and functions related to UnixConn +// and UnixListener don't work for "unixgram" and "unixpacket". // UnixAddr represents the address of a Unix domain socket end point. 
type UnixAddr struct { diff --git a/src/net/unixsock_posix.go b/src/net/unixsock_posix.go index b31ad49f25f58..74f5cc231cf05 100644 --- a/src/net/unixsock_posix.go +++ b/src/net/unixsock_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package net diff --git a/src/net/unixsock_windows_test.go b/src/net/unixsock_windows_test.go new file mode 100644 index 0000000000000..5dccc1465334c --- /dev/null +++ b/src/net/unixsock_windows_test.go @@ -0,0 +1,100 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package net + +import ( + "internal/syscall/windows/registry" + "os" + "reflect" + "runtime" + "strconv" + "testing" +) + +func isBuild17063() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.READ) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("CurrentBuild") + if err != nil { + return false + } + ver, err := strconv.Atoi(s) + if err != nil { + return false + } + return ver >= 17063 +} + +func TestUnixConnLocalWindows(t *testing.T) { + switch runtime.GOARCH { + case "386": + t.Skip("not supported on windows/386, see golang.org/issue/27943") + case "arm": + t.Skip("not supported on windows/arm, see golang.org/issue/28061") + } + if !isBuild17063() { + t.Skip("unix test") + } + + handler := func(ls *localServer, ln Listener) {} + for _, laddr := range []string{"", testUnixAddr()} { + laddr := laddr + taddr := testUnixAddr() + ta, err := ResolveUnixAddr("unix", taddr) + if err != nil { + t.Fatal(err) + } + ln, err := ListenUnix("unix", ta) + if err != nil { + t.Fatal(err) + } + 
ls, err := (&streamListener{Listener: ln}).newLocalServer() + if err != nil { + t.Fatal(err) + } + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + la, err := ResolveUnixAddr("unix", laddr) + if err != nil { + t.Fatal(err) + } + c, err := DialUnix("unix", la, ta) + if err != nil { + t.Fatal(err) + } + defer func() { + c.Close() + if la != nil { + defer os.Remove(laddr) + } + }() + if _, err := c.Write([]byte("UNIXCONN LOCAL AND REMOTE NAME TEST")); err != nil { + t.Fatal(err) + } + + if laddr == "" { + laddr = "@" + } + var connAddrs = [3]struct{ got, want Addr }{ + {ln.Addr(), ta}, + {c.LocalAddr(), &UnixAddr{Name: laddr, Net: "unix"}}, + {c.RemoteAddr(), ta}, + } + for _, ca := range connAddrs { + if !reflect.DeepEqual(ca.got, ca.want) { + t.Fatalf("got %#v, expected %#v", ca.got, ca.want) + } + } + } +} diff --git a/src/net/url/example_test.go b/src/net/url/example_test.go index d8eb6dcd20c7b..ad67f5328a868 100644 --- a/src/net/url/example_test.go +++ b/src/net/url/example_test.go @@ -219,5 +219,5 @@ func toJSON(m interface{}) string { if err != nil { log.Fatal(err) } - return strings.Replace(string(js), ",", ", ", -1) + return strings.ReplaceAll(string(js), ",", ", ") } diff --git a/src/net/url/url.go b/src/net/url/url.go index 80eb7a86c8de2..64274a0a364d0 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -304,7 +304,26 @@ func escape(s string, mode encoding) string { return s } - t := make([]byte, len(s)+2*hexCount) + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + if hexCount == 0 { + copy(t, s) + for i := 0; i < len(s); i++ { + if s[i] == ' ' { + t[i] = '+' + } + } + return string(t) + } + j := 0 for i := 0; i < len(s); i++ { switch c := s[i]; { @@ -494,6 +513,10 @@ func parse(rawurl string, viaRequest bool) (*URL, error) { var rest string var err error + if stringContainsCTLByte(rawurl) { + 
return nil, errors.New("net/url: invalid control character in URL") + } + if rawurl == "" && viaRequest { return nil, errors.New("empty url") } @@ -736,6 +759,7 @@ func validOptionalPort(port string) bool { // // If u.Opaque is non-empty, String uses the first form; // otherwise it uses the second form. +// Any non-ASCII characters in host are escaped. // To obtain the path, String uses u.EscapedPath(). // // In the second form, the following rules apply: @@ -1114,3 +1138,14 @@ func validUserinfo(s string) bool { } return true } + +// stringContainsCTLByte reports whether s contains any ASCII control character. +func stringContainsCTLByte(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b < ' ' || b == 0x7f { + return true + } + } + return false +} diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go index 9043a844e88f8..c5fc90d5156fd 100644 --- a/src/net/url/url_test.go +++ b/src/net/url/url_test.go @@ -848,18 +848,18 @@ func TestUnescape(t *testing.T) { in := tt.in out := tt.out if strings.Contains(tt.in, "+") { - in = strings.Replace(tt.in, "+", "%20", -1) + in = strings.ReplaceAll(tt.in, "+", "%20") actual, err := PathUnescape(in) if actual != tt.out || (err != nil) != (tt.err != nil) { t.Errorf("PathUnescape(%q) = %q, %s; want %q, %s", in, actual, err, tt.out, tt.err) } if tt.err == nil { - s, err := QueryUnescape(strings.Replace(tt.in, "+", "XXX", -1)) + s, err := QueryUnescape(strings.ReplaceAll(tt.in, "+", "XXX")) if err != nil { continue } in = tt.in - out = strings.Replace(s, "XXX", "+", -1) + out = strings.ReplaceAll(s, "XXX", "+") } } @@ -1738,8 +1738,132 @@ func TestNilUser(t *testing.T) { } func TestInvalidUserPassword(t *testing.T) { - _, err := Parse("http://us\ner:pass\nword@foo.com/") + _, err := Parse("http://user^:passwo^rd@foo.com/") if got, wantsub := fmt.Sprint(err), "net/url: invalid userinfo"; !strings.Contains(got, wantsub) { t.Errorf("error = %q; want substring %q", got, wantsub) } } + +func 
TestRejectControlCharacters(t *testing.T) { + tests := []string{ + "http://foo.com/?foo\nbar", + "http\r://foo.com/", + "http://foo\x7f.com/", + } + for _, s := range tests { + _, err := Parse(s) + const wantSub = "net/url: invalid control character in URL" + if got := fmt.Sprint(err); !strings.Contains(got, wantSub) { + t.Errorf("Parse(%q) error = %q; want substring %q", s, got, wantSub) + } + } + + // But don't reject non-ASCII CTLs, at least for now: + if _, err := Parse("http://foo.com/ctl\x80"); err != nil { + t.Errorf("error parsing URL with non-ASCII control byte: %v", err) + } + +} + +var escapeBenchmarks = []struct { + unescaped string + query string + path string +}{ + { + unescaped: "one two", + query: "one+two", + path: "one%20two", + }, + { + unescaped: "Фотки собак", + query: "%D0%A4%D0%BE%D1%82%D0%BA%D0%B8+%D1%81%D0%BE%D0%B1%D0%B0%D0%BA", + path: "%D0%A4%D0%BE%D1%82%D0%BA%D0%B8%20%D1%81%D0%BE%D0%B1%D0%B0%D0%BA", + }, + + { + unescaped: "shortrun(break)shortrun", + query: "shortrun%28break%29shortrun", + path: "shortrun%28break%29shortrun", + }, + + { + unescaped: "longerrunofcharacters(break)anotherlongerrunofcharacters", + query: "longerrunofcharacters%28break%29anotherlongerrunofcharacters", + path: "longerrunofcharacters%28break%29anotherlongerrunofcharacters", + }, + + { + unescaped: strings.Repeat("padded/with+various%characters?that=need$some@escaping+paddedsowebreak/256bytes", 4), + query: strings.Repeat("padded%2Fwith%2Bvarious%25characters%3Fthat%3Dneed%24some%40escaping%2Bpaddedsowebreak%2F256bytes", 4), + path: strings.Repeat("padded%2Fwith+various%25characters%3Fthat=need$some@escaping+paddedsowebreak%2F256bytes", 4), + }, +} + +func BenchmarkQueryEscape(b *testing.B) { + for _, tc := range escapeBenchmarks { + b.Run("", func(b *testing.B) { + b.ReportAllocs() + var g string + for i := 0; i < b.N; i++ { + g = QueryEscape(tc.unescaped) + } + b.StopTimer() + if g != tc.query { + b.Errorf("QueryEscape(%q) == %q, want %q", tc.unescaped, g, 
tc.query) + } + + }) + } +} + +func BenchmarkPathEscape(b *testing.B) { + for _, tc := range escapeBenchmarks { + b.Run("", func(b *testing.B) { + b.ReportAllocs() + var g string + for i := 0; i < b.N; i++ { + g = PathEscape(tc.unescaped) + } + b.StopTimer() + if g != tc.path { + b.Errorf("PathEscape(%q) == %q, want %q", tc.unescaped, g, tc.path) + } + + }) + } +} + +func BenchmarkQueryUnescape(b *testing.B) { + for _, tc := range escapeBenchmarks { + b.Run("", func(b *testing.B) { + b.ReportAllocs() + var g string + for i := 0; i < b.N; i++ { + g, _ = QueryUnescape(tc.query) + } + b.StopTimer() + if g != tc.unescaped { + b.Errorf("QueryUnescape(%q) == %q, want %q", tc.query, g, tc.unescaped) + } + + }) + } +} + +func BenchmarkPathUnescape(b *testing.B) { + for _, tc := range escapeBenchmarks { + b.Run("", func(b *testing.B) { + b.ReportAllocs() + var g string + for i := 0; i < b.N; i++ { + g, _ = PathUnescape(tc.path) + } + b.StopTimer() + if g != tc.unescaped { + b.Errorf("PathUnescape(%q) == %q, want %q", tc.path, g, tc.unescaped) + } + + }) + } +} diff --git a/src/os/dir_ios.go b/src/os/dir_ios.go new file mode 100644 index 0000000000000..8c14d89508a5f --- /dev/null +++ b/src/os/dir_ios.go @@ -0,0 +1,87 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin +// +build arm arm64 + +package os + +import ( + "io" + "runtime" + "syscall" + "unsafe" +) + +// Auxiliary information if the File describes a directory +type dirInfo struct { + dir uintptr // Pointer to DIR structure from dirent.h +} + +func (d *dirInfo) close() { + if d.dir == 0 { + return + } + closedir(d.dir) + d.dir = 0 +} + +func (f *File) readdirnames(n int) (names []string, err error) { + if f.dirinfo == nil { + dir, call, errno := f.pfd.OpenDir() + if errno != nil { + return nil, wrapSyscallError(call, errno) + } + f.dirinfo = &dirInfo{ + dir: dir, + } + } + d := f.dirinfo + + size := n + if size <= 0 { + size = 100 + n = -1 + } + + names = make([]string, 0, size) + var dirent syscall.Dirent + var entptr uintptr + for len(names) < size { + if res := readdir_r(d.dir, uintptr(unsafe.Pointer(&dirent)), uintptr(unsafe.Pointer(&entptr))); res != 0 { + return names, wrapSyscallError("readdir", syscall.Errno(res)) + } + if entptr == 0 { // EOF + break + } + if dirent.Ino == 0 { + continue + } + name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] + for i, c := range name { + if c == 0 { + name = name[:i] + break + } + } + // Check for useless names before allocating a string. + if string(name) == "." || string(name) == ".." { + continue + } + names = append(names, string(name)) + runtime.KeepAlive(f) + } + if n >= 0 && len(names) == 0 { + return names, io.EOF + } + return names, nil +} + +// Implemented in syscall/syscall_darwin.go. + +//go:linkname closedir syscall.closedir +func closedir(dir uintptr) (err error) + +//go:linkname readdir_r syscall.readdir_r +func readdir_r(dir, entry, result uintptr) (res int) diff --git a/src/os/dir_unix.go b/src/os/dir_unix.go index 79d61c783f18c..bd99ef4813841 100644 --- a/src/os/dir_unix.go +++ b/src/os/dir_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin,!arm,!arm64 dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os @@ -12,37 +12,19 @@ import ( "syscall" ) +// Auxiliary information if the File describes a directory +type dirInfo struct { + buf []byte // buffer for directory I/O + nbuf int // length of buf; return value from Getdirentries + bufp int // location of next record in buf. +} + const ( // More than 5760 to work around https://golang.org/issue/24015. blockSize = 8192 ) -func (f *File) readdir(n int) (fi []FileInfo, err error) { - dirname := f.name - if dirname == "" { - dirname = "." - } - names, err := f.Readdirnames(n) - fi = make([]FileInfo, 0, len(names)) - for _, filename := range names { - fip, lerr := lstat(dirname + "/" + filename) - if IsNotExist(lerr) { - // File disappeared between readdir + stat. - // Just treat it as if it didn't exist. - continue - } - if lerr != nil { - return fi, lerr - } - fi = append(fi, fip) - } - if len(fi) == 0 && err == nil && n > 0 { - // Per File.Readdir, the slice must be non-empty or err - // must be non-nil if n > 0. - err = io.EOF - } - return fi, err -} +func (d *dirInfo) close() {} func (f *File) readdirnames(n int) (names []string, err error) { // If this file has no dirinfo, create one. diff --git a/src/os/env_unix_test.go b/src/os/env_unix_test.go index f7b67ebbb80d6..89430b3e20409 100644 --- a/src/os/env_unix_test.go +++ b/src/os/env_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package os_test diff --git a/src/os/error_posix.go b/src/os/error_posix.go index 3c81b41706fad..0478ba676aa7b 100644 --- a/src/os/error_posix.go +++ b/src/os/error_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package os diff --git a/src/os/error_unix.go b/src/os/error_unix.go index a9d798b391533..bb6bbcc1e6c15 100644 --- a/src/os/error_unix.go +++ b/src/os/error_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os diff --git a/src/os/error_unix_test.go b/src/os/error_unix_test.go index 8db98676d106b..c47af56332048 100644 --- a/src/os/error_unix_test.go +++ b/src/os/error_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os_test diff --git a/src/os/example_test.go b/src/os/example_test.go index e21415a3fda9e..8b6566e1496ed 100644 --- a/src/os/example_test.go +++ b/src/os/example_test.go @@ -55,6 +55,7 @@ func ExampleFileMode() { log.Fatal(err) } + fmt.Printf("permissions: %#o\n", fi.Mode().Perm()) // 0400, 0777, etc. 
switch mode := fi.Mode(); { case mode.IsRegular(): fmt.Println("regular file") @@ -70,38 +71,35 @@ func ExampleFileMode() { func ExampleIsNotExist() { filename := "a-nonexistent-file" if _, err := os.Stat(filename); os.IsNotExist(err) { - fmt.Printf("file does not exist") + fmt.Println("file does not exist") } // Output: // file does not exist } -func init() { - os.Setenv("USER", "gopher") - os.Setenv("HOME", "/usr/gopher") - os.Unsetenv("GOPATH") -} - func ExampleExpand() { mapper := func(placeholderName string) string { switch placeholderName { case "DAY_PART": return "morning" - case "USER": + case "NAME": return "Gopher" } return "" } - fmt.Println(os.Expand("Good ${DAY_PART}, $USER!", mapper)) + fmt.Println(os.Expand("Good ${DAY_PART}, $NAME!", mapper)) // Output: // Good morning, Gopher! } func ExampleExpandEnv() { - fmt.Println(os.ExpandEnv("$USER lives in ${HOME}.")) + os.Setenv("NAME", "gopher") + os.Setenv("BURROW", "/usr/gopher") + + fmt.Println(os.ExpandEnv("$NAME lives in ${BURROW}.")) // Output: // gopher lives in /usr/gopher. @@ -117,16 +115,24 @@ func ExampleLookupEnv() { } } - show("USER") - show("GOPATH") + os.Setenv("SOME_KEY", "value") + os.Setenv("EMPTY_KEY", "") + + show("SOME_KEY") + show("EMPTY_KEY") + show("MISSING_KEY") // Output: - // USER=gopher - // GOPATH not set + // SOME_KEY=value + // EMPTY_KEY= + // MISSING_KEY not set } func ExampleGetenv() { - fmt.Printf("%s lives in %s.\n", os.Getenv("USER"), os.Getenv("HOME")) + os.Setenv("NAME", "gopher") + os.Setenv("BURROW", "/usr/gopher") + + fmt.Printf("%s lives in %s.\n", os.Getenv("NAME"), os.Getenv("BURROW")) // Output: // gopher lives in /usr/gopher. diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go index 88b0a916992ed..1aa3ab93dcc12 100644 --- a/src/os/exec/exec.go +++ b/src/os/exec/exec.go @@ -152,6 +152,15 @@ type Cmd struct { // followed by the elements of arg, so arg should not include the // command name itself. For example, Command("echo", "hello"). 
// Args[0] is always name, not the possibly resolved Path. +// +// On Windows, processes receive the whole command line as a single string +// and do their own parsing. Command combines and quotes Args into a command +// line string with an algorithm compatible with applications using +// CommandLineToArgvW (which is the most common way). Notable exceptions are +// msiexec.exe and cmd.exe (and thus, all batch files), which have a different +// unquoting algorithm. In these or other similar cases, you can do the +// quoting yourself and provide the full command line in SysProcAttr.CmdLine, +// leaving Args empty. func Command(name string, arg ...string) *Cmd { cmd := &Cmd{ Path: name, diff --git a/src/os/exec/exec_posix_test.go b/src/os/exec/exec_posix_test.go index 865b6c3ced289..46799cdbdb67a 100644 --- a/src/os/exec/exec_posix_test.go +++ b/src/os/exec/exec_posix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package exec_test diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index 7bb230806f95a..3e6b7bb95e158 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -168,6 +168,58 @@ func TestExitStatus(t *testing.T) { } } +func TestExitCode(t *testing.T) { + // Test that exit code are returned correctly + cmd := helperCommand(t, "exit", "42") + cmd.Run() + want := 42 + if runtime.GOOS == "plan9" { + want = 1 + } + got := cmd.ProcessState.ExitCode() + if want != got { + t.Errorf("ExitCode got %d, want %d", got, want) + } + + cmd = helperCommand(t, "/no-exist-executable") + cmd.Run() + want = 2 + if runtime.GOOS == "plan9" { + want = 1 + } + got = cmd.ProcessState.ExitCode() + if want != got { + t.Errorf("ExitCode got %d, want %d", got, want) + } + + cmd = helperCommand(t, "exit", "255") + cmd.Run() + want = 255 + if runtime.GOOS == "plan9" { + want = 1 + } + got = cmd.ProcessState.ExitCode() + if want != got { + t.Errorf("ExitCode got %d, want %d", got, want) + } + + cmd = helperCommand(t, "cat") + cmd.Run() + want = 0 + got = cmd.ProcessState.ExitCode() + if want != got { + t.Errorf("ExitCode got %d, want %d", got, want) + } + + // Test when command does not call Run(). + cmd = helperCommand(t, "cat") + want = -1 + got = cmd.ProcessState.ExitCode() + if want != got { + t.Errorf("ExitCode got %d, want %d", got, want) + } +} + func TestPipes(t *testing.T) { check := func(what string, err error) { if err != nil { @@ -407,7 +459,7 @@ func basefds() uintptr { // The poll (epoll/kqueue) descriptor can be numerically // either between stderr and the testlog-fd, or after // testlog-fd. 
- if poll.PollDescriptor() == n { + if poll.IsPollDescriptor(n) { n++ } for _, arg := range os.Args { @@ -420,7 +472,7 @@ func basefds() uintptr { func closeUnexpectedFds(t *testing.T, m string) { for fd := basefds(); fd <= 101; fd++ { - if fd == poll.PollDescriptor() { + if poll.IsPollDescriptor(fd) { continue } err := os.NewFile(fd, "").Close() @@ -682,6 +734,8 @@ func TestHelperProcess(*testing.T) { ofcmd = "fstat" case "plan9": ofcmd = "/bin/cat" + case "aix": + ofcmd = "procfiles" } args := os.Args @@ -785,7 +839,7 @@ func TestHelperProcess(*testing.T) { // Now verify that there are no other open fds. var files []*os.File for wantfd := basefds() + 1; wantfd <= 100; wantfd++ { - if wantfd == poll.PollDescriptor() { + if poll.IsPollDescriptor(wantfd) { continue } f, err := os.Open(os.Args[0]) @@ -799,6 +853,8 @@ func TestHelperProcess(*testing.T) { switch runtime.GOOS { case "plan9": args = []string{fmt.Sprintf("/proc/%d/fd", os.Getpid())} + case "aix": + args = []string{fmt.Sprint(os.Getpid())} default: args = []string{"-p", fmt.Sprint(os.Getpid())} } diff --git a/src/os/exec/lp_unix.go b/src/os/exec/lp_unix.go index e098ff8e1d5b7..799e0b4eeb922 100644 --- a/src/os/exec/lp_unix.go +++ b/src/os/exec/lp_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package exec diff --git a/src/os/exec/lp_unix_test.go b/src/os/exec/lp_unix_test.go index d467acf5dba2c..e4656cafb8b4a 100644 --- a/src/os/exec/lp_unix_test.go +++ b/src/os/exec/lp_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package exec diff --git a/src/os/exec_plan9.go b/src/os/exec_plan9.go index 6b4d28c93da5f..bab16ccad34cc 100644 --- a/src/os/exec_plan9.go +++ b/src/os/exec_plan9.go @@ -136,3 +136,13 @@ func (p *ProcessState) String() string { } return "exit status: " + p.status.Msg } + +// ExitCode returns the exit code of the exited process, or -1 +// if the process hasn't exited or was terminated by a signal. +func (p *ProcessState) ExitCode() int { + // return -1 if the process hasn't started. + if p == nil { + return -1 + } + return p.status.ExitStatus() +} diff --git a/src/os/exec_posix.go b/src/os/exec_posix.go index ec5cf33236068..4c8261295c009 100644 --- a/src/os/exec_posix.go +++ b/src/os/exec_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package os @@ -10,10 +10,11 @@ import ( "syscall" ) -// The only signal values guaranteed to be present in the os package -// on all systems are Interrupt (send the process an interrupt) and -// Kill (force the process to exit). Interrupt is not implemented on -// Windows; using it with os.Process.Signal will return an error. +// The only signal values guaranteed to be present in the os package on all +// systems are os.Interrupt (send the process an interrupt) and os.Kill (force +// the process to exit). On Windows, sending os.Interrupt to a process with +// os.Process.Signal is not implemented; it will return an error instead of +// sending a signal. 
var ( Interrupt Signal = syscall.SIGINT Kill Signal = syscall.SIGKILL @@ -106,3 +107,13 @@ func (p *ProcessState) String() string { } return res } + +// ExitCode returns the exit code of the exited process, or -1 +// if the process hasn't exited or was terminated by a signal. +func (p *ProcessState) ExitCode() int { + // return -1 if the process hasn't started. + if p == nil { + return -1 + } + return p.status.ExitStatus() +} diff --git a/src/os/exec_unix.go b/src/os/exec_unix.go index b07543e550a08..abae5a2feb339 100644 --- a/src/os/exec_unix.go +++ b/src/os/exec_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os diff --git a/src/os/exec_windows.go b/src/os/exec_windows.go index d5d553a2f6b33..38293a0d285f4 100644 --- a/src/os/exec_windows.go +++ b/src/os/exec_windows.go @@ -38,7 +38,8 @@ func (p *Process) wait() (ps *ProcessState, err error) { // NOTE(brainman): It seems that sometimes process is not dead // when WaitForSingleObject returns. But we do not know any // other way to wait for it. Sleeping for a while seems to do - // the trick sometimes. So we will sleep and smell the roses. + // the trick sometimes. + // See https://golang.org/issue/25965 for details. defer time.Sleep(5 * time.Millisecond) defer p.Release() return &ProcessState{p.Pid, syscall.WaitStatus{ExitCode: ec}, &u}, nil diff --git a/src/os/executable_path.go b/src/os/executable_path.go index 057e6a72f4e61..7b8b83652c895 100644 --- a/src/os/executable_path.go +++ b/src/os/executable_path.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build openbsd +// +build aix openbsd package os diff --git a/src/os/executable_test.go b/src/os/executable_test.go index 4a9a8837be415..d513c8760ee6d 100644 --- a/src/os/executable_test.go +++ b/src/os/executable_test.go @@ -36,8 +36,8 @@ func TestExecutable(t *testing.T) { // forge argv[0] for child, so that we can verify we could correctly // get real path of the executable without influenced by argv[0]. cmd.Args = []string{"-", "-test.run=XXXX"} - if runtime.GOOS == "openbsd" { - // OpenBSD relies on argv[0] + if runtime.GOOS == "openbsd" || runtime.GOOS == "aix" { + // OpenBSD and AIX rely on argv[0] cmd.Args[0] = fn } cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", executable_EnvVar)) diff --git a/src/os/file.go b/src/os/file.go index cba70d78fbfd3..fdead63bfc4cc 100644 --- a/src/os/file.go +++ b/src/os/file.go @@ -73,7 +73,7 @@ const ( O_CREATE int = syscall.O_CREAT // create a new file if none exists. O_EXCL int = syscall.O_EXCL // used with O_CREATE, file must not exist. O_SYNC int = syscall.O_SYNC // open for synchronous I/O. - O_TRUNC int = syscall.O_TRUNC // if possible, truncate file when opened. + O_TRUNC int = syscall.O_TRUNC // truncate regular writable file when opened. ) // Seek whence values. @@ -381,6 +381,31 @@ func UserCacheDir() (string, error) { return dir, nil } +// UserHomeDir returns the current user's home directory. +// +// On Unix, including macOS, it returns the $HOME environment variable. +// On Windows, it returns %USERPROFILE%. +// On Plan 9, it returns the $home environment variable. 
+func UserHomeDir() (string, error) { + env, enverr := "HOME", "$HOME" + switch runtime.GOOS { + case "windows": + env, enverr = "USERPROFILE", "%userprofile%" + case "plan9": + env, enverr = "home", "$home" + case "nacl", "android": + return "/", nil + case "darwin": + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + return "/", nil + } + } + if v := Getenv(env); v != "" { + return v, nil + } + return "", errors.New(enverr + " is not defined") +} + // Chmod changes the mode of the named file to mode. // If the file is a symbolic link, it changes the mode of the link's target. // If there is an error, it will be of type *PathError. @@ -448,3 +473,12 @@ func (f *File) SetReadDeadline(t time.Time) error { func (f *File) SetWriteDeadline(t time.Time) error { return f.setWriteDeadline(t) } + +// SyscallConn returns a raw file. +// This implements the syscall.Conn interface. +func (f *File) SyscallConn() (syscall.RawConn, error) { + if err := f.checkValid("SyscallConn"); err != nil { + return nil, err + } + return newRawConn(f) +} diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 2c74403434317..3fa12e681667c 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -534,3 +534,21 @@ func (f *File) checkValid(op string) error { } return nil } + +type rawConn struct{} + +func (c *rawConn) Control(f func(uintptr)) error { + return syscall.EPLAN9 +} + +func (c *rawConn) Read(f func(uintptr) bool) error { + return syscall.EPLAN9 +} + +func (c *rawConn) Write(f func(uintptr) bool) error { + return syscall.EPLAN9 +} + +func newRawConn(file *File) (*rawConn, error) { + return nil, syscall.EPLAN9 +} diff --git a/src/os/file_posix.go b/src/os/file_posix.go index 7cfafc8fde395..1c0de5c3a163e 100644 --- a/src/os/file_posix.go +++ b/src/os/file_posix.go @@ -2,11 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package os import ( + "runtime" "syscall" "time" ) @@ -19,6 +20,10 @@ func Readlink(name string) (string, error) { for len := 128; ; len *= 2 { b := make([]byte, len) n, e := fixCount(syscall.Readlink(fixLongPath(name), b)) + // buffer too small + if runtime.GOOS == "aix" && e == syscall.ERANGE { + continue + } if e != nil { return "", &PathError{"readlink", name, e} } diff --git a/src/os/file_unix.go b/src/os/file_unix.go index cb90b7073580e..2615df9d5b7cc 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os import ( "internal/poll" "internal/syscall/unix" + "io" "runtime" "syscall" ) @@ -116,23 +117,39 @@ func newFile(fd uintptr, name string, kind newFileKind) *File { pollable := kind == kindOpenFile || kind == kindPipe || kind == kindNonBlock - // Don't try to use kqueue with regular files on FreeBSD. - // It crashes the system unpredictably while running all.bash. - // Issue 19093. // If the caller passed a non-blocking filedes (kindNonBlock), // we assume they know what they are doing so we allow it to be // used with kqueue. - if runtime.GOOS == "freebsd" && kind == kindOpenFile { - pollable = false - } - - // On Darwin, kqueue does not work properly with fifos: - // closing the last writer does not cause a kqueue event - // for any readers. See issue #24164. 
- if runtime.GOOS == "darwin" && kind == kindOpenFile { + if kind == kindOpenFile { var st syscall.Stat_t - if err := syscall.Fstat(fdi, &st); err == nil && st.Mode&syscall.S_IFMT == syscall.S_IFIFO { + switch runtime.GOOS { + case "freebsd": + // On FreeBSD before 10.4 it used to crash the + // system unpredictably while running all.bash. + // When we stop supporting FreeBSD 10 we can merge + // this into the dragonfly/netbsd/openbsd case. + // Issue 27619. pollable = false + + case "dragonfly", "netbsd", "openbsd": + // Don't try to use kqueue with regular files on *BSDs. + // On FreeBSD a regular file is always + // reported as ready for writing. + // On Dragonfly, NetBSD and OpenBSD the fd is signaled + // only once as ready (both read and write). + // Issue 19093. + if err := syscall.Fstat(fdi, &st); err == nil && st.Mode&syscall.S_IFMT == syscall.S_IFREG { + pollable = false + } + + case "darwin": + // In addition to the behavior described above for regular files, + // on Darwin, kqueue does not work properly with fifos: + // closing the last writer does not cause a kqueue event + // for any readers. See issue #24164. + if err := syscall.Fstat(fdi, &st); err == nil && (st.Mode&syscall.S_IFMT == syscall.S_IFIFO || st.Mode&syscall.S_IFMT == syscall.S_IFREG) { + pollable = false + } } } @@ -155,13 +172,6 @@ func newFile(fd uintptr, name string, kind newFileKind) *File { return f } -// Auxiliary information if the File describes a directory -type dirInfo struct { - buf []byte // buffer for directory I/O - nbuf int // length of buf; return value from Getdirentries - bufp int // location of next record in buf. -} - // epipecheck raises SIGPIPE if we get an EPIPE error on standard // output or standard error. See the SIGPIPE docs in os/signal, and // issue 11845. @@ -176,6 +186,7 @@ func epipecheck(file *File, e error) { const DevNull = "/dev/null" // openFileNolog is the Unix implementation of OpenFile. +// Changes here should be reflected in openFdAt, if relevant. 
func openFileNolog(name string, flag int, perm FileMode) (*File, error) { setSticky := false if !supportsCreateWithStickyBit && flag&O_CREATE != 0 && perm&ModeSticky != 0 { @@ -230,6 +241,9 @@ func (file *file) close() error { if file == nil { return syscall.EINVAL } + if file.dirinfo != nil { + file.dirinfo.close() + } var err error if e := file.pfd.Close(); e != nil { if e == poll.ErrFileClosing { @@ -358,3 +372,30 @@ func Symlink(oldname, newname string) error { } return nil } + +func (f *File) readdir(n int) (fi []FileInfo, err error) { + dirname := f.name + if dirname == "" { + dirname = "." + } + names, err := f.Readdirnames(n) + fi = make([]FileInfo, 0, len(names)) + for _, filename := range names { + fip, lerr := lstat(dirname + "/" + filename) + if IsNotExist(lerr) { + // File disappeared between readdir + stat. + // Just treat it as if it didn't exist. + continue + } + if lerr != nil { + return fi, lerr + } + fi = append(fi, fip) + } + if len(fi) == 0 && err == nil && n > 0 { + // Per File.Readdir, the slice must be non-empty or err + // must be non-nil if n > 0. + err = io.EOF + } + return fi, err +} diff --git a/src/os/file_windows.go b/src/os/file_windows.go index 8901eadd25667..85f248774c77e 100644 --- a/src/os/file_windows.go +++ b/src/os/file_windows.go @@ -325,7 +325,10 @@ func tempDir() string { if n > uint32(len(b)) { continue } - if n > 0 && b[n-1] == '\\' { + if n == 3 && b[1] == ':' && b[2] == '\\' { + // Do nothing for path, like C:\. + } else if n > 0 && b[n-1] == '\\' { + // Otherwise remove terminating \. 
n-- } return string(utf16.Decode(b[:n])) @@ -356,13 +359,13 @@ func Symlink(oldname, newname string) error { // '/' does not work in link's content oldname = fromSlash(oldname) - // need the exact location of the oldname when its relative to determine if its a directory + // need the exact location of the oldname when it's relative to determine if it's a directory destpath := oldname if !isAbs(oldname) { destpath = dirname(newname) + `\` + oldname } - fi, err := Lstat(destpath) + fi, err := Stat(destpath) isdir := err == nil && fi.IsDir() n, err := syscall.UTF16PtrFromString(fixLongPath(newname)) diff --git a/src/os/os_test.go b/src/os/os_test.go index 894105a886011..9c4d5dada9a10 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -178,7 +178,6 @@ func TestStatError(t *testing.T) { defer chtmpdir(t)() path := "no-such-file" - Remove(path) // Just in case fi, err := Stat(path) if err == nil { @@ -194,12 +193,10 @@ func TestStatError(t *testing.T) { testenv.MustHaveSymlink(t) link := "symlink" - Remove(link) // Just in case err = Symlink(path, link) if err != nil { t.Fatal(err) } - defer Remove(link) fi, err = Stat(link) if err == nil { @@ -269,7 +266,7 @@ func TestRead0(t *testing.T) { } } -// Reading a closed file should should return ErrClosed error +// Reading a closed file should return ErrClosed error func TestReadClosed(t *testing.T) { path := sfdir + "/" + sfname file, err := Open(path) @@ -688,12 +685,10 @@ func TestHardLink(t *testing.T) { defer chtmpdir(t)() from, to := "hardlinktestfrom", "hardlinktestto" - Remove(from) // Just in case. 
file, err := Create(to) if err != nil { t.Fatalf("open %q failed: %v", to, err) } - defer Remove(to) if err = file.Close(); err != nil { t.Errorf("close %q failed: %v", to, err) } @@ -709,7 +704,6 @@ func TestHardLink(t *testing.T) { t.Errorf("link %q, %q failed to return a valid error", none, none) } - defer Remove(from) tostat, err := Stat(to) if err != nil { t.Fatalf("stat %q failed: %v", to, err) @@ -745,11 +739,8 @@ func TestHardLink(t *testing.T) { } // chtmpdir changes the working directory to a new temporary directory and -// provides a cleanup function. Used when PWD is read-only. +// provides a cleanup function. func chtmpdir(t *testing.T) func() { - if runtime.GOOS != "darwin" || (runtime.GOARCH != "arm" && runtime.GOARCH != "arm64") { - return func() {} // only needed on darwin/arm{,64} - } oldwd, err := Getwd() if err != nil { t.Fatalf("chtmpdir: %v", err) @@ -774,12 +765,10 @@ func TestSymlink(t *testing.T) { defer chtmpdir(t)() from, to := "symlinktestfrom", "symlinktestto" - Remove(from) // Just in case. file, err := Create(to) if err != nil { t.Fatalf("Create(%q) failed: %v", to, err) } - defer Remove(to) if err = file.Close(); err != nil { t.Errorf("Close(%q) failed: %v", to, err) } @@ -787,7 +776,6 @@ func TestSymlink(t *testing.T) { if err != nil { t.Fatalf("Symlink(%q, %q) failed: %v", to, from, err) } - defer Remove(from) tostat, err := Lstat(to) if err != nil { t.Fatalf("Lstat(%q) failed: %v", to, err) @@ -841,12 +829,10 @@ func TestLongSymlink(t *testing.T) { // Long, but not too long: a common limit is 255. s = s + s + s + s + s + s + s + s + s + s + s + s + s + s + s from := "longsymlinktestfrom" - Remove(from) // Just in case. 
err := Symlink(s, from) if err != nil { t.Fatalf("symlink %q, %q failed: %v", s, from, err) } - defer Remove(from) r, err := Readlink(from) if err != nil { t.Fatalf("readlink %q failed: %v", from, err) @@ -859,9 +845,6 @@ func TestLongSymlink(t *testing.T) { func TestRename(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Ensure we are not testing the overwrite case here. - Remove(from) - Remove(to) file, err := Create(from) if err != nil { @@ -874,7 +857,6 @@ func TestRename(t *testing.T) { if err != nil { t.Fatalf("rename %q, %q failed: %v", to, from, err) } - defer Remove(to) _, err = Stat(to) if err != nil { t.Errorf("stat %q failed: %v", to, err) @@ -884,9 +866,6 @@ func TestRename(t *testing.T) { func TestRenameOverwriteDest(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Just in case. - Remove(from) - Remove(to) toData := []byte("to") fromData := []byte("from") @@ -904,7 +883,6 @@ func TestRenameOverwriteDest(t *testing.T) { if err != nil { t.Fatalf("rename %q, %q failed: %v", to, from, err) } - defer Remove(to) _, err = Stat(from) if err == nil { @@ -925,9 +903,6 @@ func TestRenameOverwriteDest(t *testing.T) { func TestRenameFailed(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - // Ensure we are not testing the overwrite case here. 
- Remove(from) - Remove(to) err := Rename(from, to) switch err := err.(type) { @@ -943,9 +918,6 @@ func TestRenameFailed(t *testing.T) { } case nil: t.Errorf("rename %q, %q: expected error, got nil", from, to) - - // cleanup whatever was placed in "renameto" - Remove(to) default: t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) } @@ -956,7 +928,6 @@ func TestRenameNotExisting(t *testing.T) { from, to := "doesnt-exist", "dest" Mkdir(to, 0777) - defer Remove(to) if err := Rename(from, to); !IsNotExist(err) { t.Errorf("Rename(%q, %q) = %v; want an IsNotExist error", from, to, err) @@ -967,12 +938,8 @@ func TestRenameToDirFailed(t *testing.T) { defer chtmpdir(t)() from, to := "renamefrom", "renameto" - Remove(from) - Remove(to) Mkdir(from, 0777) Mkdir(to, 0777) - defer Remove(from) - defer Remove(to) err := Rename(from, to) switch err := err.(type) { @@ -988,9 +955,6 @@ func TestRenameToDirFailed(t *testing.T) { } case nil: t.Errorf("rename %q, %q: expected error, got nil", from, to) - - // cleanup whatever was placed in "renameto" - Remove(to) default: t.Errorf("rename %q, %q: expected %T, got %T %v", from, to, new(LinkError), err, err) } @@ -1493,7 +1457,11 @@ func runBinHostname(t *testing.T) string { } defer r.Close() const path = "/bin/hostname" - p, err := StartProcess(path, []string{"hostname"}, &ProcAttr{Files: []*File{nil, w, Stderr}}) + argv := []string{"hostname"} + if runtime.GOOS == "aix" { + argv = []string{"hostname", "-s"} + } + p, err := StartProcess(path, argv, &ProcAttr{Files: []*File{nil, w, Stderr}}) if err != nil { if _, err := Stat(path); IsNotExist(err) { t.Skipf("skipping test; test requires %s but it does not exist", path) @@ -1698,7 +1666,6 @@ func writeFile(t *testing.T, fname string, flag int, text string) string { func TestAppend(t *testing.T) { defer chtmpdir(t)() const f = "append.txt" - defer Remove(f) s := writeFile(t, f, O_CREATE|O_TRUNC|O_RDWR, "new") if s != "new" { t.Fatalf("writeFile: have %q 
want %q", s, "new") @@ -1765,13 +1732,11 @@ func TestSameFile(t *testing.T) { if err != nil { t.Fatalf("Create(a): %v", err) } - defer Remove(fa.Name()) fa.Close() fb, err := Create("b") if err != nil { t.Fatalf("Create(b): %v", err) } - defer Remove(fb.Name()) fb.Close() ia1, err := Stat("a") @@ -2333,3 +2298,20 @@ func TestDoubleCloseError(t *testing.T) { t.Logf("second close returned expected error %q", err) } } + +func TestUserHomeDir(t *testing.T) { + dir, err := UserHomeDir() + if dir == "" && err == nil { + t.Fatal("UserHomeDir returned an empty string but no error") + } + if err != nil { + t.Skipf("UserHomeDir failed: %v", err) + } + fi, err := Stat(dir) + if err != nil { + t.Fatal(err) + } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } +} diff --git a/src/os/os_unix_test.go b/src/os/os_unix_test.go index 54f121ef4c2d5..2aa930ea80dd0 100644 --- a/src/os/os_unix_test.go +++ b/src/os/os_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package os_test @@ -22,6 +22,9 @@ func init() { isReadonlyError = func(err error) bool { return err == syscall.EROFS } } +// For TestRawConnReadWrite. +type syscallDescriptor = int + func checkUidGid(t *testing.T, path string, uid, gid int) { dir, err := Lstat(path) if err != nil { @@ -234,7 +237,7 @@ func newFileTest(t *testing.T, blocking bool) { } defer syscall.Close(p[1]) - // Set the the read-side to non-blocking. + // Set the read-side to non-blocking. 
if !blocking { if err := syscall.SetNonblock(p[0], true); err != nil { syscall.Close(p[0]) diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go index 8984dd2c6623d..285e1eb35e9d1 100644 --- a/src/os/os_windows_test.go +++ b/src/os/os_windows_test.go @@ -5,6 +5,7 @@ package os_test import ( + "errors" "fmt" "internal/poll" "internal/syscall/windows" @@ -25,6 +26,9 @@ import ( "unsafe" ) +// For TestRawConnReadWrite. +type syscallDescriptor = syscall.Handle + func TestSameWindowsFile(t *testing.T) { temp, err := ioutil.TempDir("", "TestSameWindowsFile") if err != nil { @@ -895,16 +899,6 @@ func main() { } } -func testIsDir(t *testing.T, path string, fi os.FileInfo) { - t.Helper() - if !fi.IsDir() { - t.Errorf("%q should be a directory", path) - } - if fi.Mode()&os.ModeSymlink != 0 { - t.Errorf("%q should not be a symlink", path) - } -} - func findOneDriveDir() (string, error) { // as per https://stackoverflow.com/questions/42519624/how-to-determine-location-of-onedrive-on-windows-7-and-8-in-c const onedrivekey = `SOFTWARE\Microsoft\OneDrive` @@ -927,57 +921,7 @@ func TestOneDrive(t *testing.T) { if err != nil { t.Skipf("Skipping, because we did not find OneDrive directory: %v", err) } - - // test os.Stat - fi, err := os.Stat(dir) - if err != nil { - t.Fatal(err) - } - testIsDir(t, dir, fi) - - // test os.Lstat - fi, err = os.Lstat(dir) - if err != nil { - t.Fatal(err) - } - testIsDir(t, dir, fi) - - // test os.File.Stat - f, err := os.Open(dir) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - fi, err = f.Stat() - if err != nil { - t.Fatal(err) - } - testIsDir(t, dir, fi) - - // test os.FileInfo returned by os.Readdir - parent, err := os.Open(filepath.Dir(dir)) - if err != nil { - t.Fatal(err) - } - defer parent.Close() - - fis, err := parent.Readdir(-1) - if err != nil { - t.Fatal(err) - } - fi = nil - base := filepath.Base(dir) - for _, fi2 := range fis { - if fi2.Name() == base { - fi = fi2 - break - } - } - if fi == nil { - t.Errorf("failed 
to find %q in its parent", dir) - } - testIsDir(t, dir, fi) + testDirStats(t, dir) } func TestWindowsDevNullFile(t *testing.T) { @@ -1063,3 +1007,46 @@ func TestStatOfInvalidName(t *testing.T) { t.Fatal(`os.Stat("*.go") unexpectedly succeeded`) } } + +// findUnusedDriveLetter searches mounted drive list on the system +// (starting from Z: and ending at D:) for unused drive letter. +// It returns path to the found drive root directory (like Z:\) or error. +func findUnusedDriveLetter() (string, error) { + // Do not use A: and B:, because they are reserved for floppy drive. + // Do not use C:, becasue it is normally used for main drive. + for l := 'Z'; l >= 'D'; l-- { + p := string(l) + `:\` + _, err := os.Stat(p) + if os.IsNotExist(err) { + return p, nil + } + } + return "", errors.New("Could not find unused drive letter.") +} + +func TestRootDirAsTemp(t *testing.T) { + testenv.MustHaveExec(t) + + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + fmt.Print(os.TempDir()) + os.Exit(0) + } + + newtmp, err := findUnusedDriveLetter() + if err != nil { + t.Fatal(err) + } + + cmd := osexec.Command(os.Args[0], "-test.run=TestRootDirAsTemp") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + cmd.Env = append(cmd.Env, "TMP="+newtmp) + cmd.Env = append(cmd.Env, "TEMP="+newtmp) + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Failed to spawn child process: %v %q", err, string(output)) + } + if want, have := newtmp, string(output); have != want { + t.Fatalf("unexpected child process output %q, want %q", have, want) + } +} diff --git a/src/os/path.go b/src/os/path.go index cdfbc189219a8..104b7ceaf7dad 100644 --- a/src/os/path.go +++ b/src/os/path.go @@ -5,7 +5,6 @@ package os import ( - "io" "syscall" ) @@ -64,95 +63,16 @@ func MkdirAll(path string, perm FileMode) error { // it encounters. If the path does not exist, RemoveAll // returns nil (no error). 
func RemoveAll(path string) error { - // Simple case: if Remove works, we're done. - err := Remove(path) - if err == nil || IsNotExist(err) { - return nil - } - - // Otherwise, is this a directory we need to recurse into? - dir, serr := Lstat(path) - if serr != nil { - if serr, ok := serr.(*PathError); ok && (IsNotExist(serr.Err) || serr.Err == syscall.ENOTDIR) { - return nil - } - return serr - } - if !dir.IsDir() { - // Not a directory; return the error from Remove. - return err - } - - // Remove contents & return first error. - err = nil - for { - fd, err := Open(path) - if err != nil { - if IsNotExist(err) { - // Already deleted by someone else. - return nil - } - return err - } - - const request = 1024 - names, err1 := fd.Readdirnames(request) - - // Removing files from the directory may have caused - // the OS to reshuffle it. Simply calling Readdirnames - // again may skip some entries. The only reliable way - // to avoid this is to close and re-open the - // directory. See issue 20841. - fd.Close() - - for _, name := range names { - err1 := RemoveAll(path + string(PathSeparator) + name) - if err == nil { - err = err1 - } - } - - if err1 == io.EOF { - break - } - // If Readdirnames returned an error, use it. - if err == nil { - err = err1 - } - if len(names) == 0 { - break - } - - // We don't want to re-open unnecessarily, so if we - // got fewer than request names from Readdirnames, try - // simply removing the directory now. If that - // succeeds, we are done. - if len(names) < request { - err1 := Remove(path) - if err1 == nil || IsNotExist(err1) { - return nil - } - - if err != nil { - // We got some error removing the - // directory contents, and since we - // read fewer names than we requested - // there probably aren't more files to - // remove. Don't loop around to read - // the directory again. We'll probably - // just get the same error. - return err - } - } - } + return removeAll(path) +} - // Remove directory. 
- err1 := Remove(path) - if err1 == nil || IsNotExist(err1) { - return nil +// endsWithDot reports whether the final component of path is ".". +func endsWithDot(path string) bool { + if path == "." { + return true } - if err == nil { - err = err1 + if len(path) >= 2 && path[len(path)-1] == '.' && IsPathSeparator(path[len(path)-2]) { + return true } - return err + return false } diff --git a/src/os/path_test.go b/src/os/path_test.go index f58c7e746d995..6cb25bcaa716d 100644 --- a/src/os/path_test.go +++ b/src/os/path_test.go @@ -5,7 +5,6 @@ package os_test import ( - "fmt" "internal/testenv" "io/ioutil" . "os" @@ -76,130 +75,6 @@ func TestMkdirAll(t *testing.T) { } } -func TestRemoveAll(t *testing.T) { - tmpDir := TempDir() - // Work directory. - path := tmpDir + "/_TestRemoveAll_" - fpath := path + "/file" - dpath := path + "/dir" - - // Make directory with 1 file and remove. - if err := MkdirAll(path, 0777); err != nil { - t.Fatalf("MkdirAll %q: %s", path, err) - } - fd, err := Create(fpath) - if err != nil { - t.Fatalf("create %q: %s", fpath, err) - } - fd.Close() - if err = RemoveAll(path); err != nil { - t.Fatalf("RemoveAll %q (first): %s", path, err) - } - if _, err = Lstat(path); err == nil { - t.Fatalf("Lstat %q succeeded after RemoveAll (first)", path) - } - - // Make directory with file and subdirectory and remove. - if err = MkdirAll(dpath, 0777); err != nil { - t.Fatalf("MkdirAll %q: %s", dpath, err) - } - fd, err = Create(fpath) - if err != nil { - t.Fatalf("create %q: %s", fpath, err) - } - fd.Close() - fd, err = Create(dpath + "/file") - if err != nil { - t.Fatalf("create %q: %s", fpath, err) - } - fd.Close() - if err = RemoveAll(path); err != nil { - t.Fatalf("RemoveAll %q (second): %s", path, err) - } - if _, err := Lstat(path); err == nil { - t.Fatalf("Lstat %q succeeded after RemoveAll (second)", path) - } - - // Determine if we should run the following test. 
- testit := true - if runtime.GOOS == "windows" { - // Chmod is not supported under windows. - testit = false - } else { - // Test fails as root. - testit = Getuid() != 0 - } - if testit { - // Make directory with file and subdirectory and trigger error. - if err = MkdirAll(dpath, 0777); err != nil { - t.Fatalf("MkdirAll %q: %s", dpath, err) - } - - for _, s := range []string{fpath, dpath + "/file1", path + "/zzz"} { - fd, err = Create(s) - if err != nil { - t.Fatalf("create %q: %s", s, err) - } - fd.Close() - } - if err = Chmod(dpath, 0); err != nil { - t.Fatalf("Chmod %q 0: %s", dpath, err) - } - - // No error checking here: either RemoveAll - // will or won't be able to remove dpath; - // either way we want to see if it removes fpath - // and path/zzz. Reasons why RemoveAll might - // succeed in removing dpath as well include: - // * running as root - // * running on a file system without permissions (FAT) - RemoveAll(path) - Chmod(dpath, 0777) - - for _, s := range []string{fpath, path + "/zzz"} { - if _, err = Lstat(s); err == nil { - t.Fatalf("Lstat %q succeeded after partial RemoveAll", s) - } - } - } - if err = RemoveAll(path); err != nil { - t.Fatalf("RemoveAll %q after partial RemoveAll: %s", path, err) - } - if _, err = Lstat(path); err == nil { - t.Fatalf("Lstat %q succeeded after RemoveAll (final)", path) - } -} - -// Test RemoveAll on a large directory. -func TestRemoveAllLarge(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - - tmpDir := TempDir() - // Work directory. - path := tmpDir + "/_TestRemoveAllLarge_" - - // Make directory with 1000 files and remove. 
- if err := MkdirAll(path, 0777); err != nil { - t.Fatalf("MkdirAll %q: %s", path, err) - } - for i := 0; i < 1000; i++ { - fpath := fmt.Sprintf("%s/file%d", path, i) - fd, err := Create(fpath) - if err != nil { - t.Fatalf("create %q: %s", fpath, err) - } - fd.Close() - } - if err := RemoveAll(path); err != nil { - t.Fatalf("RemoveAll %q: %s", path, err) - } - if _, err := Lstat(path); err == nil { - t.Fatalf("Lstat %q succeeded after RemoveAll", path) - } -} - func TestMkdirAllWithSymlink(t *testing.T) { testenv.MustHaveSymlink(t) diff --git a/src/os/path_unix.go b/src/os/path_unix.go index b2e0bca0df1eb..be373a50a9193 100644 --- a/src/os/path_unix.go +++ b/src/os/path_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os @@ -16,7 +16,7 @@ func IsPathSeparator(c uint8) bool { return PathSeparator == c } -// basename removes trailing slashes and the leading directory name from path name +// basename removes trailing slashes and the leading directory name from path name. func basename(name string) string { i := len(name) - 1 // Remove trailing slashes @@ -34,6 +34,32 @@ func basename(name string) string { return name } +// splitPath returns the base name and parent directory. +func splitPath(path string) (string, string) { + // if no better parent is found, the path is relative from "here" + dirname := "." 
+ // if no slashes in path, base is path + basename := path + + i := len(path) - 1 + + // Remove trailing slashes + for ; i > 0 && path[i] == '/'; i-- { + path = path[:i] + } + + // Remove leading directory path + for i--; i >= 0; i-- { + if path[i] == '/' { + dirname = path[:i+1] + basename = path[i+1:] + break + } + } + + return dirname, basename +} + func fixRootDirectory(p string) string { return p } diff --git a/src/os/path_windows_test.go b/src/os/path_windows_test.go index 00a3e63bf3cec..f1745ad132e76 100644 --- a/src/os/path_windows_test.go +++ b/src/os/path_windows_test.go @@ -38,10 +38,10 @@ func TestFixLongPath(t *testing.T) { {`\\?\c:\long\foo.txt`, `\\?\c:\long\foo.txt`}, {`\\?\c:\long/foo.txt`, `\\?\c:\long/foo.txt`}, } { - in := strings.Replace(test.in, "long", veryLong, -1) - want := strings.Replace(test.want, "long", veryLong, -1) + in := strings.ReplaceAll(test.in, "long", veryLong) + want := strings.ReplaceAll(test.want, "long", veryLong) if got := os.FixLongPath(in); got != want { - got = strings.Replace(got, veryLong, "long", -1) + got = strings.ReplaceAll(got, veryLong, "long") t.Errorf("fixLongPath(%q) = %q; want %q", test.in, got, test.want) } } diff --git a/src/os/pipe_bsd.go b/src/os/pipe_bsd.go index 9735988f324d3..dc4c951a28638 100644 --- a/src/os/pipe_bsd.go +++ b/src/os/pipe_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly js,wasm nacl solaris +// +build aix darwin dragonfly js,wasm nacl solaris package os diff --git a/src/os/pipe_test.go b/src/os/pipe_test.go index 59d31e5837c9d..779b2bdf85b3b 100644 --- a/src/os/pipe_test.go +++ b/src/os/pipe_test.go @@ -131,7 +131,7 @@ func testClosedPipeRace(t *testing.T, read bool) { if !read { // Get the amount we have to write to overload a pipe // with no reader. 
- limit = 65537 + limit = 131073 if b, err := ioutil.ReadFile("/proc/sys/fs/pipe-max-size"); err == nil { if i, err := strconv.Atoi(strings.TrimSpace(string(b))); err == nil { limit = i + 1 diff --git a/src/os/rawconn.go b/src/os/rawconn.go new file mode 100644 index 0000000000000..9e11cda8c9aa9 --- /dev/null +++ b/src/os/rawconn.go @@ -0,0 +1,47 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +package os + +import ( + "runtime" +) + +// rawConn implements syscall.RawConn. +type rawConn struct { + file *File +} + +func (c *rawConn) Control(f func(uintptr)) error { + if err := c.file.checkValid("SyscallConn.Control"); err != nil { + return err + } + err := c.file.pfd.RawControl(f) + runtime.KeepAlive(c.file) + return err +} + +func (c *rawConn) Read(f func(uintptr) bool) error { + if err := c.file.checkValid("SyscallConn.Read"); err != nil { + return err + } + err := c.file.pfd.RawRead(f) + runtime.KeepAlive(c.file) + return err +} + +func (c *rawConn) Write(f func(uintptr) bool) error { + if err := c.file.checkValid("SyscallConn.Write"); err != nil { + return err + } + err := c.file.pfd.RawWrite(f) + runtime.KeepAlive(c.file) + return err +} + +func newRawConn(file *File) (*rawConn, error) { + return &rawConn{file: file}, nil +} diff --git a/src/os/rawconn_test.go b/src/os/rawconn_test.go new file mode 100644 index 0000000000000..820150d959b09 --- /dev/null +++ b/src/os/rawconn_test.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test use of raw connections. 
+// +build !plan9,!nacl,!js + +package os_test + +import ( + "os" + "syscall" + "testing" +) + +func TestRawConnReadWrite(t *testing.T) { + t.Parallel() + + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer r.Close() + defer w.Close() + + rconn, err := r.SyscallConn() + if err != nil { + t.Fatal(err) + } + wconn, err := w.SyscallConn() + if err != nil { + t.Fatal(err) + } + + var operr error + err = wconn.Write(func(s uintptr) bool { + _, operr = syscall.Write(syscallDescriptor(s), []byte{'b'}) + return operr != syscall.EAGAIN + }) + if err != nil { + t.Fatal(err) + } + if operr != nil { + t.Fatal(err) + } + + var n int + buf := make([]byte, 1) + err = rconn.Read(func(s uintptr) bool { + n, operr = syscall.Read(syscallDescriptor(s), buf) + return operr != syscall.EAGAIN + }) + if err != nil { + t.Fatal(err) + } + if operr != nil { + t.Fatal(operr) + } + if n != 1 { + t.Errorf("read %d bytes, expected 1", n) + } + if buf[0] != 'b' { + t.Errorf("read %q, expected %q", buf, "b") + } +} diff --git a/src/os/removeall_at.go b/src/os/removeall_at.go new file mode 100644 index 0000000000000..7f2d5922ae03c --- /dev/null +++ b/src/os/removeall_at.go @@ -0,0 +1,164 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package os + +import ( + "internal/syscall/unix" + "io" + "runtime" + "syscall" +) + +func removeAll(path string) error { + if path == "" { + // fail silently to retain compatibility with previous behavior + // of RemoveAll. See issue 28830. + return nil + } + + // The rmdir system call does not permit removing ".", + // so we don't permit it either. + if endsWithDot(path) { + return &PathError{"RemoveAll", path, syscall.EINVAL} + } + + // Simple case: if Remove works, we're done. 
+ err := Remove(path) + if err == nil || IsNotExist(err) { + return nil + } + + // RemoveAll recurses by deleting the path base from + // its parent directory + parentDir, base := splitPath(path) + + parent, err := Open(parentDir) + if IsNotExist(err) { + // If parent does not exist, base cannot exist. Fail silently + return nil + } + if err != nil { + return err + } + defer parent.Close() + + return removeAllFrom(parent, base) +} + +func removeAllFrom(parent *File, path string) error { + parentFd := int(parent.Fd()) + // Simple case: if Unlink (aka remove) works, we're done. + err := unix.Unlinkat(parentFd, path, 0) + if err == nil || IsNotExist(err) { + return nil + } + + // EISDIR means that we have a directory, and we need to + // remove its contents. + // EPERM or EACCES means that we don't have write permission on + // the parent directory, but this entry might still be a directory + // whose contents need to be removed. + // Otherwise just return the error. + if err != syscall.EISDIR && err != syscall.EPERM && err != syscall.EACCES { + return err + } + + // Is this a directory we need to recurse into? + var statInfo syscall.Stat_t + statErr := unix.Fstatat(parentFd, path, &statInfo, unix.AT_SYMLINK_NOFOLLOW) + if statErr != nil { + return statErr + } + if statInfo.Mode&syscall.S_IFMT != syscall.S_IFDIR { + // Not a directory; return the error from the Remove. + return err + } + + // Remove the directory's entries. + var recurseErr error + for { + const request = 1024 + + // Open the directory to recurse into + file, err := openFdAt(parentFd, path) + if err != nil { + if IsNotExist(err) { + return nil + } + return err + } + + names, readErr := file.Readdirnames(request) + // Errors other than EOF should stop us from continuing. 
+ if readErr != nil && readErr != io.EOF { + file.Close() + if IsNotExist(readErr) { + return nil + } + return readErr + } + + for _, name := range names { + err := removeAllFrom(file, name) + if err != nil { + recurseErr = err + } + } + + // Removing files from the directory may have caused + // the OS to reshuffle it. Simply calling Readdirnames + // again may skip some entries. The only reliable way + // to avoid this is to close and re-open the + // directory. See issue 20841. + file.Close() + + // Finish when the end of the directory is reached + if len(names) < request { + break + } + } + + // Remove the directory itself. + unlinkError := unix.Unlinkat(parentFd, path, unix.AT_REMOVEDIR) + if unlinkError == nil || IsNotExist(unlinkError) { + return nil + } + + if recurseErr != nil { + return recurseErr + } + return unlinkError +} + +// openFdAt opens path relative to the directory in fd. +// Other than that this should act like openFileNolog. +// This acts like openFileNolog rather than OpenFile because +// we are going to (try to) remove the file. +// The contents of this file are not relevant for test caching. +func openFdAt(dirfd int, name string) (*File, error) { + var r int + for { + var e error + r, e = unix.Openat(dirfd, name, O_RDONLY, 0) + if e == nil { + break + } + + // See comment in openFileNolog. + if runtime.GOOS == "darwin" && e == syscall.EINTR { + continue + } + + return nil, &PathError{"openat", name, e} + } + + if !supportsCloseOnExec { + syscall.CloseOnExec(r) + } + + return newFile(uintptr(r), name, kindOpenFile), nil +} diff --git a/src/os/removeall_noat.go b/src/os/removeall_noat.go new file mode 100644 index 0000000000000..5a7dc263f08f2 --- /dev/null +++ b/src/os/removeall_noat.go @@ -0,0 +1,119 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package os + +import ( + "io" + "syscall" +) + +func removeAll(path string) error { + if path == "" { + // fail silently to retain compatibility with previous behavior + // of RemoveAll. See issue 28830. + return nil + } + + // The rmdir system call permits removing "." on Plan 9, + // so we don't permit it to remain consistent with the + // "at" implementation of RemoveAll. + if endsWithDot(path) { + return &PathError{"RemoveAll", path, syscall.EINVAL} + } + + // Simple case: if Remove works, we're done. + err := Remove(path) + if err == nil || IsNotExist(err) { + return nil + } + + // Otherwise, is this a directory we need to recurse into? + dir, serr := Lstat(path) + if serr != nil { + if serr, ok := serr.(*PathError); ok && (IsNotExist(serr.Err) || serr.Err == syscall.ENOTDIR) { + return nil + } + return serr + } + if !dir.IsDir() { + // Not a directory; return the error from Remove. + return err + } + + // Remove contents & return first error. + err = nil + for { + fd, err := Open(path) + if err != nil { + if IsNotExist(err) { + // Already deleted by someone else. + return nil + } + return err + } + + const request = 1024 + names, err1 := fd.Readdirnames(request) + + // Removing files from the directory may have caused + // the OS to reshuffle it. Simply calling Readdirnames + // again may skip some entries. The only reliable way + // to avoid this is to close and re-open the + // directory. See issue 20841. + fd.Close() + + for _, name := range names { + err1 := RemoveAll(path + string(PathSeparator) + name) + if err == nil { + err = err1 + } + } + + if err1 == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + + // We don't want to re-open unnecessarily, so if we + // got fewer than request names from Readdirnames, try + // simply removing the directory now. 
If that + // succeeds, we are done. + if len(names) < request { + err1 := Remove(path) + if err1 == nil || IsNotExist(err1) { + return nil + } + + if err != nil { + // We got some error removing the + // directory contents, and since we + // read fewer names than we requested + // there probably aren't more files to + // remove. Don't loop around to read + // the directory again. We'll probably + // just get the same error. + return err + } + } + } + + // Remove directory. + err1 := Remove(path) + if err1 == nil || IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} diff --git a/src/os/removeall_test.go b/src/os/removeall_test.go new file mode 100644 index 0000000000000..9dab0d4bb1079 --- /dev/null +++ b/src/os/removeall_test.go @@ -0,0 +1,374 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "fmt" + "io/ioutil" + . "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +func TestRemoveAll(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestRemoveAll-") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tmpDir) + + if err := RemoveAll(""); err != nil { + t.Errorf("RemoveAll(\"\"): %v; want nil", err) + } + + file := filepath.Join(tmpDir, "file") + path := filepath.Join(tmpDir, "_TestRemoveAll_") + fpath := filepath.Join(path, "file") + dpath := filepath.Join(path, "dir") + + // Make a regular file and remove + fd, err := Create(file) + if err != nil { + t.Fatalf("create %q: %s", file, err) + } + fd.Close() + if err = RemoveAll(file); err != nil { + t.Fatalf("RemoveAll %q (first): %s", file, err) + } + if _, err = Lstat(file); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (first)", file) + } + + // Make directory with 1 file and remove. 
+ if err := MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + fd, err = Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q (second): %s", path, err) + } + if _, err = Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (second)", path) + } + + // Make directory with file and subdirectory and remove. + if err = MkdirAll(dpath, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", dpath, err) + } + fd, err = Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + fd, err = Create(dpath + "/file") + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q (third): %s", path, err) + } + if _, err := Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (third)", path) + } + + // Determine if we should run the following test. + testit := true + if runtime.GOOS == "windows" { + // Chmod is not supported under windows. + testit = false + } else { + // Test fails as root. + testit = Getuid() != 0 + } + if testit { + // Make directory with file and subdirectory and trigger error. + if err = MkdirAll(dpath, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", dpath, err) + } + + for _, s := range []string{fpath, dpath + "/file1", path + "/zzz"} { + fd, err = Create(s) + if err != nil { + t.Fatalf("create %q: %s", s, err) + } + fd.Close() + } + if err = Chmod(dpath, 0); err != nil { + t.Fatalf("Chmod %q 0: %s", dpath, err) + } + + // No error checking here: either RemoveAll + // will or won't be able to remove dpath; + // either way we want to see if it removes fpath + // and path/zzz. 
Reasons why RemoveAll might + // succeed in removing dpath as well include: + // * running as root + // * running on a file system without permissions (FAT) + RemoveAll(path) + Chmod(dpath, 0777) + + for _, s := range []string{fpath, path + "/zzz"} { + if _, err = Lstat(s); err == nil { + t.Fatalf("Lstat %q succeeded after partial RemoveAll", s) + } + } + } + if err = RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q after partial RemoveAll: %s", path, err) + } + if _, err = Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll (final)", path) + } +} + +// Test RemoveAll on a large directory. +func TestRemoveAllLarge(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + tmpDir, err := ioutil.TempDir("", "TestRemoveAll-") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "_TestRemoveAllLarge_") + + // Make directory with 1000 files and remove. + if err := MkdirAll(path, 0777); err != nil { + t.Fatalf("MkdirAll %q: %s", path, err) + } + for i := 0; i < 1000; i++ { + fpath := fmt.Sprintf("%s/file%d", path, i) + fd, err := Create(fpath) + if err != nil { + t.Fatalf("create %q: %s", fpath, err) + } + fd.Close() + } + if err := RemoveAll(path); err != nil { + t.Fatalf("RemoveAll %q: %s", path, err) + } + if _, err := Lstat(path); err == nil { + t.Fatalf("Lstat %q succeeded after RemoveAll", path) + } +} + +func TestRemoveAllLongPath(t *testing.T) { + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + break + default: + t.Skip("skipping for not implemented platforms") + } + + prevDir, err := Getwd() + if err != nil { + t.Fatalf("Could not get wd: %s", err) + } + + startPath, err := ioutil.TempDir("", "TestRemoveAllLongPath-") + if err != nil { + t.Fatalf("Could not create TempDir: %s", err) + } + defer RemoveAll(startPath) + + err = Chdir(startPath) + if err != nil { + t.Fatalf("Could not chdir %s: %s", startPath, 
err) + } + + // Removing paths with over 4096 chars commonly fails + for i := 0; i < 41; i++ { + name := strings.Repeat("a", 100) + + err = Mkdir(name, 0755) + if err != nil { + t.Fatalf("Could not mkdir %s: %s", name, err) + } + + err = Chdir(name) + if err != nil { + t.Fatalf("Could not chdir %s: %s", name, err) + } + } + + err = Chdir(prevDir) + if err != nil { + t.Fatalf("Could not chdir %s: %s", prevDir, err) + } + + err = RemoveAll(startPath) + if err != nil { + t.Errorf("RemoveAll could not remove long file path %s: %s", startPath, err) + } +} + +func TestRemoveAllDot(t *testing.T) { + prevDir, err := Getwd() + if err != nil { + t.Fatalf("Could not get wd: %s", err) + } + tempDir, err := ioutil.TempDir("", "TestRemoveAllDot-") + if err != nil { + t.Fatalf("Could not create TempDir: %s", err) + } + defer RemoveAll(tempDir) + + err = Chdir(tempDir) + if err != nil { + t.Fatalf("Could not chdir to tempdir: %s", err) + } + + err = RemoveAll(".") + if err == nil { + t.Errorf("RemoveAll succeed to remove .") + } + + err = Chdir(prevDir) + if err != nil { + t.Fatalf("Could not chdir %s: %s", prevDir, err) + } +} + +func TestRemoveAllDotDot(t *testing.T) { + t.Parallel() + + tempDir, err := ioutil.TempDir("", "TestRemoveAllDotDot-") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tempDir) + + subdir := filepath.Join(tempDir, "x") + subsubdir := filepath.Join(subdir, "y") + if err := MkdirAll(subsubdir, 0777); err != nil { + t.Fatal(err) + } + if err := RemoveAll(filepath.Join(subsubdir, "..")); err != nil { + t.Error(err) + } + for _, dir := range []string{subsubdir, subdir} { + if _, err := Stat(dir); err == nil { + t.Errorf("%s: exists after RemoveAll", dir) + } + } +} + +// Issue #29178. 
+func TestRemoveReadOnlyDir(t *testing.T) { + t.Parallel() + + tempDir, err := ioutil.TempDir("", "TestRemoveReadOnlyDir-") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tempDir) + + subdir := filepath.Join(tempDir, "x") + if err := Mkdir(subdir, 0); err != nil { + t.Fatal(err) + } + + // If an error occurs make it more likely that removing the + // temporary directory will succeed. + defer Chmod(subdir, 0777) + + if err := RemoveAll(subdir); err != nil { + t.Fatal(err) + } + + if _, err := Stat(subdir); err == nil { + t.Error("subdirectory was not removed") + } +} + +// Issue #29983. +func TestRemoveAllButReadOnly(t *testing.T) { + switch runtime.GOOS { + case "nacl", "js", "windows": + t.Skipf("skipping test on %s", runtime.GOOS) + } + + if Getuid() == 0 { + t.Skip("skipping test when running as root") + } + + t.Parallel() + + tempDir, err := ioutil.TempDir("", "TestRemoveAllButReadOnly-") + if err != nil { + t.Fatal(err) + } + defer RemoveAll(tempDir) + + dirs := []string{ + "a", + "a/x", + "a/x/1", + "b", + "b/y", + "b/y/2", + "c", + "c/z", + "c/z/3", + } + readonly := []string{ + "b", + } + inReadonly := func(d string) bool { + for _, ro := range readonly { + if d == ro { + return true + } + dd, _ := filepath.Split(d) + if filepath.Clean(dd) == ro { + return true + } + } + return false + } + + for _, dir := range dirs { + if err := Mkdir(filepath.Join(tempDir, dir), 0777); err != nil { + t.Fatal(err) + } + } + for _, dir := range readonly { + d := filepath.Join(tempDir, dir) + if err := Chmod(d, 0555); err != nil { + t.Fatal(err) + } + + // Defer changing the mode back so that the deferred + // RemoveAll(tempDir) can succeed. 
+ defer Chmod(d, 0777) + } + + if err := RemoveAll(tempDir); err == nil { + t.Fatal("RemoveAll succeeded unexpectedly") + } + + for _, dir := range dirs { + _, err := Stat(filepath.Join(tempDir, dir)) + if inReadonly(dir) { + if err != nil { + t.Errorf("file %q was deleted but should still exist", dir) + } + } else { + if err == nil { + t.Errorf("file %q still exists but should have been deleted", dir) + } + } + } +} diff --git a/src/os/signal/internal/pty/pty.go b/src/os/signal/internal/pty/pty.go index c4c1567fce0b3..c1c7fcffc54b5 100644 --- a/src/os/signal/internal/pty/pty.go +++ b/src/os/signal/internal/pty/pty.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux,!android netbsd openbsd +// +build aix darwin dragonfly freebsd linux,!android netbsd openbsd // +build cgo // Package pty is a simple pseudo-terminal package for Unix systems, diff --git a/src/os/signal/signal_cgo_test.go b/src/os/signal/signal_cgo_test.go index 16aeea8221cdc..3c23090489f95 100644 --- a/src/os/signal/signal_cgo_test.go +++ b/src/os/signal/signal_cgo_test.go @@ -22,6 +22,7 @@ import ( "os/signal/internal/pty" "strconv" "strings" + "sync" "syscall" "testing" "time" @@ -113,7 +114,11 @@ func TestTerminalSignal(t *testing.T) { const prompt = "prompt> " // Read data from master in the background. + var wg sync.WaitGroup + wg.Add(1) + defer wg.Wait() go func() { + defer wg.Done() input := bufio.NewReader(master) var line, handled []byte for { diff --git a/src/os/signal/signal_test.go b/src/os/signal/signal_test.go index 3d79c7a8619a8..6ea59f4697f62 100644 --- a/src/os/signal/signal_test.go +++ b/src/os/signal/signal_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package signal @@ -432,12 +432,12 @@ func atomicStopTestProgram() { // At this point we should either die from SIGINT or // get a notification on cs. If neither happens, we - // dropped the signal. Give it a second to deliver, - // which is far far longer than it should require. + // dropped the signal. It is given 2 seconds to + // deliver, as needed for gccgo on some loaded test systems. select { case <-cs: - case <-time.After(1 * time.Second): + case <-time.After(2 * time.Second): if !printed { fmt.Print("lost signal on tries:") printed = true diff --git a/src/os/signal/signal_unix.go b/src/os/signal/signal_unix.go index 28fbb5499597d..7fa634f15ad83 100644 --- a/src/os/signal/signal_unix.go +++ b/src/os/signal/signal_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package signal diff --git a/src/os/stat_aix.go b/src/os/stat_aix.go new file mode 100644 index 0000000000000..a37c9fdae41ae --- /dev/null +++ b/src/os/stat_aix.go @@ -0,0 +1,51 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package os + +import ( + "syscall" + "time" +) + +func fillFileStatFromSys(fs *fileStat, name string) { + fs.name = basename(name) + fs.size = int64(fs.sys.Size) + fs.modTime = stTimespecToTime(fs.sys.Mtim) + fs.mode = FileMode(fs.sys.Mode & 0777) + switch fs.sys.Mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fs.mode |= ModeDevice + case syscall.S_IFCHR: + fs.mode |= ModeDevice | ModeCharDevice + case syscall.S_IFDIR: + fs.mode |= ModeDir + case syscall.S_IFIFO: + fs.mode |= ModeNamedPipe + case syscall.S_IFLNK: + fs.mode |= ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fs.mode |= ModeSocket + } + if fs.sys.Mode&syscall.S_ISGID != 0 { + fs.mode |= ModeSetgid + } + if fs.sys.Mode&syscall.S_ISUID != 0 { + fs.mode |= ModeSetuid + } + if fs.sys.Mode&syscall.S_ISVTX != 0 { + fs.mode |= ModeSticky + } +} + +func stTimespecToTime(ts syscall.StTimespec_t) time.Time { + return time.Unix(int64(ts.Sec), int64(ts.Nsec)) +} + +// For testing. +func atime(fi FileInfo) time.Time { + return stTimespecToTime(fi.Sys().(*syscall.Stat_t).Atim) +} diff --git a/src/os/stat_test.go b/src/os/stat_test.go new file mode 100644 index 0000000000000..60f3b4c587d2b --- /dev/null +++ b/src/os/stat_test.go @@ -0,0 +1,292 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "internal/testenv" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" +) + +// testStatAndLstat verifies that all os.Stat, os.Lstat os.File.Stat and os.Readdir work. 
+func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCheck func(*testing.T, string, os.FileInfo)) { + // test os.Stat + sfi, err := os.Stat(path) + if err != nil { + t.Error(err) + return + } + statCheck(t, path, sfi) + + // test os.Lstat + lsfi, err := os.Lstat(path) + if err != nil { + t.Error(err) + return + } + lstatCheck(t, path, lsfi) + + if isLink { + if os.SameFile(sfi, lsfi) { + t.Errorf("stat and lstat of %q should not be the same", path) + } + } else { + if !os.SameFile(sfi, lsfi) { + t.Errorf("stat and lstat of %q should be the same", path) + } + } + + // test os.File.Stat + f, err := os.Open(path) + if err != nil { + t.Error(err) + return + } + defer f.Close() + + sfi2, err := f.Stat() + if err != nil { + t.Error(err) + return + } + statCheck(t, path, sfi2) + + if !os.SameFile(sfi, sfi2) { + t.Errorf("stat of open %q file and stat of %q should be the same", path, path) + } + + if isLink { + if os.SameFile(sfi2, lsfi) { + t.Errorf("stat of opened %q file and lstat of %q should not be the same", path, path) + } + } else { + if !os.SameFile(sfi2, lsfi) { + t.Errorf("stat of opened %q file and lstat of %q should be the same", path, path) + } + } + + // test os.FileInfo returned by os.Readdir + if len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) { + // skip os.Readdir test of directories with slash at the end + return + } + parentdir := filepath.Dir(path) + parent, err := os.Open(parentdir) + if err != nil { + t.Error(err) + return + } + defer parent.Close() + + fis, err := parent.Readdir(-1) + if err != nil { + t.Error(err) + return + } + var lsfi2 os.FileInfo + base := filepath.Base(path) + for _, fi2 := range fis { + if fi2.Name() == base { + lsfi2 = fi2 + break + } + } + if lsfi2 == nil { + t.Errorf("failed to find %q in its parent", path) + return + } + lstatCheck(t, path, lsfi2) + + if !os.SameFile(lsfi, lsfi2) { + t.Errorf("lstat of %q file in %q directory and %q should be the same", lsfi2.Name(), parentdir, path) + } 
+} + +// testIsDir verifies that fi refers to directory. +func testIsDir(t *testing.T, path string, fi os.FileInfo) { + t.Helper() + if !fi.IsDir() { + t.Errorf("%q should be a directory", path) + } + if fi.Mode()&os.ModeSymlink != 0 { + t.Errorf("%q should not be a symlink", path) + } +} + +// testIsSymlink verifies that fi refers to symlink. +func testIsSymlink(t *testing.T, path string, fi os.FileInfo) { + t.Helper() + if fi.IsDir() { + t.Errorf("%q should not be a directory", path) + } + if fi.Mode()&os.ModeSymlink == 0 { + t.Errorf("%q should be a symlink", path) + } +} + +// testIsFile verifies that fi refers to file. +func testIsFile(t *testing.T, path string, fi os.FileInfo) { + t.Helper() + if fi.IsDir() { + t.Errorf("%q should not be a directory", path) + } + if fi.Mode()&os.ModeSymlink != 0 { + t.Errorf("%q should not be a symlink", path) + } +} + +func testDirStats(t *testing.T, path string) { + testStatAndLstat(t, path, false, testIsDir, testIsDir) +} + +func testFileStats(t *testing.T, path string) { + testStatAndLstat(t, path, false, testIsFile, testIsFile) +} + +func testSymlinkStats(t *testing.T, path string, isdir bool) { + if isdir { + testStatAndLstat(t, path, true, testIsDir, testIsSymlink) + } else { + testStatAndLstat(t, path, true, testIsFile, testIsSymlink) + } +} + +func testSymlinkSameFile(t *testing.T, path, link string) { + pathfi, err := os.Stat(path) + if err != nil { + t.Error(err) + return + } + + linkfi, err := os.Stat(link) + if err != nil { + t.Error(err) + return + } + if !os.SameFile(pathfi, linkfi) { + t.Errorf("os.Stat(%q) and os.Stat(%q) are not the same file", path, link) + } + + linkfi, err = os.Lstat(link) + if err != nil { + t.Error(err) + return + } + if os.SameFile(pathfi, linkfi) { + t.Errorf("os.Stat(%q) and os.Lstat(%q) are the same file", path, link) + } +} + +func TestDirAndSymlinkStats(t *testing.T) { + testenv.MustHaveSymlink(t) + + tmpdir, err := ioutil.TempDir("", "TestDirAndSymlinkStats") + if err != nil { + 
t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + dir := filepath.Join(tmpdir, "dir") + err = os.Mkdir(dir, 0777) + if err != nil { + t.Fatal(err) + } + testDirStats(t, dir) + + dirlink := filepath.Join(tmpdir, "link") + err = os.Symlink(dir, dirlink) + if err != nil { + t.Fatal(err) + } + testSymlinkStats(t, dirlink, true) + testSymlinkSameFile(t, dir, dirlink) + + linklink := filepath.Join(tmpdir, "linklink") + err = os.Symlink(dirlink, linklink) + if err != nil { + t.Fatal(err) + } + testSymlinkStats(t, linklink, true) + testSymlinkSameFile(t, dir, linklink) +} + +func TestFileAndSymlinkStats(t *testing.T) { + testenv.MustHaveSymlink(t) + + tmpdir, err := ioutil.TempDir("", "TestFileAndSymlinkStats") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + file := filepath.Join(tmpdir, "file") + err = ioutil.WriteFile(file, []byte(""), 0644) + if err != nil { + t.Fatal(err) + } + testFileStats(t, file) + + filelink := filepath.Join(tmpdir, "link") + err = os.Symlink(file, filelink) + if err != nil { + t.Fatal(err) + } + testSymlinkStats(t, filelink, false) + testSymlinkSameFile(t, file, filelink) + + linklink := filepath.Join(tmpdir, "linklink") + err = os.Symlink(filelink, linklink) + if err != nil { + t.Fatal(err) + } + testSymlinkStats(t, linklink, false) + testSymlinkSameFile(t, file, linklink) +} + +// see issue 27225 for details +func TestSymlinkWithTrailingSlash(t *testing.T) { + testenv.MustHaveSymlink(t) + + tmpdir, err := ioutil.TempDir("", "TestSymlinkWithTrailingSlash") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + dir := filepath.Join(tmpdir, "dir") + err = os.Mkdir(dir, 0777) + if err != nil { + t.Fatal(err) + } + dirlink := filepath.Join(tmpdir, "link") + err = os.Symlink(dir, dirlink) + if err != nil { + t.Fatal(err) + } + dirlinkWithSlash := dirlink + string(os.PathSeparator) + + if runtime.GOOS == "windows" { + testSymlinkStats(t, dirlinkWithSlash, true) + } else { + testDirStats(t, dirlinkWithSlash) + } + + 
fi1, err := os.Stat(dir) + if err != nil { + t.Error(err) + return + } + fi2, err := os.Stat(dirlinkWithSlash) + if err != nil { + t.Error(err) + return + } + if !os.SameFile(fi1, fi2) { + t.Errorf("os.Stat(%q) and os.Stat(%q) are not the same file", dir, dirlinkWithSlash) + } +} diff --git a/src/os/stat_unix.go b/src/os/stat_unix.go index 856b49929f322..4f85dcea077c6 100644 --- a/src/os/stat_unix.go +++ b/src/os/stat_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package os diff --git a/src/os/stat_windows.go b/src/os/stat_windows.go index 19cc0cf6b773c..271ff5f843427 100644 --- a/src/os/stat_windows.go +++ b/src/os/stat_windows.go @@ -5,10 +5,12 @@ package os import ( + "internal/syscall/windows" "syscall" + "unsafe" ) -// isNulName returns true if name is NUL file name. +// isNulName reports whether name is NUL file name. // For example, it returns true for both "NUL" and "nul". func isNulName(name string) bool { if len(name) != 3 { @@ -58,33 +60,59 @@ func (file *File) Stat() (FileInfo, error) { return fs, err } -// statNolog implements Stat for Windows. -func statNolog(name string) (FileInfo, error) { +// stat implements both Stat and Lstat of a file. 
+func stat(funcname, name string, createFileAttrs uint32) (FileInfo, error) { if len(name) == 0 { - return nil, &PathError{"Stat", name, syscall.Errno(syscall.ERROR_PATH_NOT_FOUND)} + return nil, &PathError{funcname, name, syscall.Errno(syscall.ERROR_PATH_NOT_FOUND)} } if isNulName(name) { return &devNullStat, nil } namep, err := syscall.UTF16PtrFromString(fixLongPath(name)) if err != nil { - return nil, &PathError{"Stat", name, err} + return nil, &PathError{funcname, name, err} } - fs, err := newFileStatFromGetFileAttributesExOrFindFirstFile(name, namep) - if err != nil { - return nil, err + + // Try GetFileAttributesEx first, because it is faster than CreateFile. + // See https://golang.org/issues/19922#issuecomment-300031421 for details. + var fa syscall.Win32FileAttributeData + err = syscall.GetFileAttributesEx(namep, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa))) + if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + // Not a symlink. + fs := &fileStat{ + path: name, + FileAttributes: fa.FileAttributes, + CreationTime: fa.CreationTime, + LastAccessTime: fa.LastAccessTime, + LastWriteTime: fa.LastWriteTime, + FileSizeHigh: fa.FileSizeHigh, + FileSizeLow: fa.FileSizeLow, + } + // Gather full path to be used by os.SameFile later. + if !isAbs(fs.path) { + fs.path, err = syscall.FullPath(fs.path) + if err != nil { + return nil, &PathError{"FullPath", name, err} + } + } + fs.name = basename(name) + return fs, nil } - if !fs.isSymlink() { - err = fs.updatePathAndName(name) + // GetFileAttributesEx fails with ERROR_SHARING_VIOLATION error for + // files, like c:\pagefile.sys. Use FindFirstFile for such files. 
+ if err == windows.ERROR_SHARING_VIOLATION { + var fd syscall.Win32finddata + sh, err := syscall.FindFirstFile(namep, &fd) if err != nil { - return nil, err + return nil, &PathError{"FindFirstFile", name, err} } - return fs, nil + syscall.FindClose(sh) + return newFileStatFromWin32finddata(&fd), nil } - // Use Windows I/O manager to dereference the symbolic link, as per - // https://blogs.msdn.microsoft.com/oldnewthing/20100212-00/?p=14963/ + + // Finally use CreateFile. h, err := syscall.CreateFile(namep, 0, 0, nil, - syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + syscall.OPEN_EXISTING, createFileAttrs, 0) if err != nil { return nil, &PathError{"CreateFile", name, err} } @@ -93,25 +121,16 @@ func statNolog(name string) (FileInfo, error) { return newFileStatFromGetFileInformationByHandle(name, h) } +// statNolog implements Stat for Windows. +func statNolog(name string) (FileInfo, error) { + return stat("Stat", name, syscall.FILE_FLAG_BACKUP_SEMANTICS) +} + // lstatNolog implements Lstat for Windows. func lstatNolog(name string) (FileInfo, error) { - if len(name) == 0 { - return nil, &PathError{"Lstat", name, syscall.Errno(syscall.ERROR_PATH_NOT_FOUND)} - } - if isNulName(name) { - return &devNullStat, nil - } - namep, err := syscall.UTF16PtrFromString(fixLongPath(name)) - if err != nil { - return nil, &PathError{"Lstat", name, err} - } - fs, err := newFileStatFromGetFileAttributesExOrFindFirstFile(name, namep) - if err != nil { - return nil, err - } - err = fs.updatePathAndName(name) - if err != nil { - return nil, err - } - return fs, nil + attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + // Use FILE_FLAG_OPEN_REPARSE_POINT, otherwise CreateFile will follow symlink. 
+ // See https://docs.microsoft.com/en-us/windows/desktop/FileIO/symbolic-link-effects-on-file-systems-functions#createfile-and-createfiletransacted + attrs |= syscall.FILE_FLAG_OPEN_REPARSE_POINT + return stat("Lstat", name, attrs) } diff --git a/src/os/sticky_bsd.go b/src/os/sticky_bsd.go index 6b54c758c70a1..ae2744f81756b 100644 --- a/src/os/sticky_bsd.go +++ b/src/os/sticky_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd solaris +// +build aix darwin dragonfly freebsd netbsd openbsd solaris package os diff --git a/src/os/sticky_notbsd.go b/src/os/sticky_notbsd.go index 834e79b0b5901..edb5f69bf0580 100644 --- a/src/os/sticky_notbsd.go +++ b/src/os/sticky_notbsd.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !aix // +build !darwin // +build !dragonfly // +build !freebsd diff --git a/src/os/sys_aix.go b/src/os/sys_aix.go new file mode 100644 index 0000000000000..53a40f26772fc --- /dev/null +++ b/src/os/sys_aix.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os + +import "syscall" + +// gethostname syscall cannot be used because it also returns the domain. +// Therefore, hostname is retrieved with the uname syscall and the Nodename field. 
+ +func hostname() (name string, err error) { + var u syscall.Utsname + if errno := syscall.Uname(&u); errno != nil { + return "", NewSyscallError("uname", errno) + } + b := make([]byte, len(u.Nodename)) + i := 0 + for ; i < len(u.Nodename); i++ { + if u.Nodename[i] == 0 { + break + } + b[i] = byte(u.Nodename[i]) + } + return string(b[:i]), nil +} diff --git a/src/os/sys_unix.go b/src/os/sys_unix.go index 3c63c104097c4..8491bad2421ae 100644 --- a/src/os/sys_unix.go +++ b/src/os/sys_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package os diff --git a/src/os/timeout_test.go b/src/os/timeout_test.go index 1886accb55f7c..4720738d24787 100644 --- a/src/os/timeout_test.go +++ b/src/os/timeout_test.go @@ -16,8 +16,10 @@ import ( "io/ioutil" "math/rand" "os" + "os/signal" "runtime" "sync" + "syscall" "testing" "time" ) @@ -591,6 +593,10 @@ func TestRacyWrite(t *testing.T) { // Closing a TTY while reading from it should not hang. Issue 23943. func TestTTYClose(t *testing.T) { + // Ignore SIGTTIN in case we are running in the background. + signal.Ignore(syscall.SIGTTIN) + defer signal.Reset(syscall.SIGTTIN) + f, err := os.Open("/dev/tty") if err != nil { t.Skipf("skipping because opening /dev/tty failed: %v", err) diff --git a/src/os/types.go b/src/os/types.go index b0b7d8d94d620..4b6c084838b6b 100644 --- a/src/os/types.go +++ b/src/os/types.go @@ -57,7 +57,7 @@ const ( ModeIrregular // ?: non-regular file; nothing else is known about this file // Mask for the type bits. For regular files, none will be set. 
- ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeIrregular + ModeType = ModeDir | ModeSymlink | ModeNamedPipe | ModeSocket | ModeDevice | ModeCharDevice | ModeIrregular ModePerm FileMode = 0777 // Unix permission bits ) diff --git a/src/os/types_windows.go b/src/os/types_windows.go index f3297c033856a..5e33292bec681 100644 --- a/src/os/types_windows.go +++ b/src/os/types_windows.go @@ -47,6 +47,21 @@ func newFileStatFromGetFileInformationByHandle(path string, h syscall.Handle) (f if err != nil { return nil, &PathError{"GetFileInformationByHandle", path, err} } + + var ti windows.FILE_ATTRIBUTE_TAG_INFO + err = windows.GetFileInformationByHandleEx(h, windows.FileAttributeTagInfo, (*byte)(unsafe.Pointer(&ti)), uint32(unsafe.Sizeof(ti))) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == windows.ERROR_INVALID_PARAMETER { + // It appears calling GetFileInformationByHandleEx with + // FILE_ATTRIBUTE_TAG_INFO fails on FAT file system with + // ERROR_INVALID_PARAMETER. Clear ti.ReparseTag in that + // instance to indicate no symlinks are possible. + ti.ReparseTag = 0 + } else { + return nil, &PathError{"GetFileInformationByHandleEx", path, err} + } + } + return &fileStat{ name: basename(path), FileAttributes: d.FileAttributes, @@ -58,6 +73,7 @@ func newFileStatFromGetFileInformationByHandle(path string, h syscall.Handle) (f vol: d.VolumeSerialNumber, idxhi: d.FileIndexHigh, idxlo: d.FileIndexLow, + Reserved0: ti.ReparseTag, // fileStat.path is used by os.SameFile to decide if it needs // to fetch vol, idxhi and idxlo. But these are already set, // so set fileStat.path to "" to prevent os.SameFile doing it again. @@ -78,67 +94,6 @@ func newFileStatFromWin32finddata(d *syscall.Win32finddata) *fileStat { } } -// newFileStatFromGetFileAttributesExOrFindFirstFile calls GetFileAttributesEx -// and FindFirstFile to gather all required information about the provided file path pathp. 
-func newFileStatFromGetFileAttributesExOrFindFirstFile(path string, pathp *uint16) (*fileStat, error) { - // As suggested by Microsoft, use GetFileAttributes() to acquire the file information, - // and if it's a reparse point use FindFirstFile() to get the tag: - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363940(v=vs.85).aspx - // Notice that always calling FindFirstFile can create performance problems - // (https://golang.org/issues/19922#issuecomment-300031421) - var fa syscall.Win32FileAttributeData - err := syscall.GetFileAttributesEx(pathp, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa))) - if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - // Not a symlink. - return &fileStat{ - FileAttributes: fa.FileAttributes, - CreationTime: fa.CreationTime, - LastAccessTime: fa.LastAccessTime, - LastWriteTime: fa.LastWriteTime, - FileSizeHigh: fa.FileSizeHigh, - FileSizeLow: fa.FileSizeLow, - }, nil - } - // GetFileAttributesEx returns ERROR_INVALID_NAME if called - // for invalid file name like "*.txt". Do not attempt to call - // FindFirstFile with "*.txt", because FindFirstFile will - // succeed. So just return ERROR_INVALID_NAME instead. - // see https://golang.org/issue/24999 for details. - if errno, _ := err.(syscall.Errno); errno == windows.ERROR_INVALID_NAME { - return nil, &PathError{"GetFileAttributesEx", path, err} - } - // We might have symlink here. But some directories also have - // FileAttributes FILE_ATTRIBUTE_REPARSE_POINT bit set. - // For example, OneDrive directory is like that - // (see golang.org/issue/22579 for details). - // So use FindFirstFile instead to distinguish directories like - // OneDrive from real symlinks (see instructions described at - // https://blogs.msdn.microsoft.com/oldnewthing/20100212-00/?p=14963/ - // and in particular bits about using both FileAttributes and - // Reserved0 fields). 
- var fd syscall.Win32finddata - sh, err := syscall.FindFirstFile(pathp, &fd) - if err != nil { - return nil, &PathError{"FindFirstFile", path, err} - } - syscall.FindClose(sh) - - return newFileStatFromWin32finddata(&fd), nil -} - -func (fs *fileStat) updatePathAndName(name string) error { - fs.path = name - if !isAbs(fs.path) { - var err error - fs.path, err = syscall.FullPath(fs.path) - if err != nil { - return &PathError{"FullPath", name, err} - } - } - fs.name = basename(name) - return nil -} - func (fs *fileStat) isSymlink() bool { // Use instructions described at // https://blogs.msdn.microsoft.com/oldnewthing/20100212-00/?p=14963/ @@ -211,7 +166,13 @@ func (fs *fileStat) loadFileId() error { if err != nil { return err } - h, err := syscall.CreateFile(pathp, 0, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) + attrs := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) + if fs.isSymlink() { + // Use FILE_FLAG_OPEN_REPARSE_POINT, otherwise CreateFile will follow symlink. + // See https://docs.microsoft.com/en-us/windows/desktop/FileIO/symbolic-link-effects-on-file-systems-functions#createfile-and-createfiletransacted + attrs |= syscall.FILE_FLAG_OPEN_REPARSE_POINT + } + h, err := syscall.CreateFile(pathp, 0, 0, nil, syscall.OPEN_EXISTING, attrs, 0) if err != nil { return err } diff --git a/src/os/user/cgo_lookup_unix.go b/src/os/user/cgo_lookup_unix.go index 210bd6e0b35c1..856ed28de8604 100644 --- a/src/os/user/cgo_lookup_unix.go +++ b/src/os/user/cgo_lookup_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris // +build cgo,!osusergo package user @@ -94,9 +94,8 @@ func lookupUnixUid(uid int) (*User, error) { defer buf.free() err := retryWithBuffer(buf, func() syscall.Errno { - // mygetpwuid_r is a wrapper around getpwuid_r to - // to avoid using uid_t because C.uid_t(uid) for - // unknown reasons doesn't work on linux. + // mygetpwuid_r is a wrapper around getpwuid_r to avoid using uid_t + // because C.uid_t(uid) for unknown reasons doesn't work on linux. return syscall.Errno(C.mygetpwuid_r(C.int(uid), &pwd, (*C.char)(buf.ptr), @@ -175,9 +174,8 @@ func lookupUnixGid(gid int) (*Group, error) { defer buf.free() err := retryWithBuffer(buf, func() syscall.Errno { - // mygetgrgid_r is a wrapper around getgrgid_r to - // to avoid using gid_t because C.gid_t(gid) for - // unknown reasons doesn't work on linux. + // mygetgrgid_r is a wrapper around getgrgid_r to avoid using gid_t + // because C.gid_t(gid) for unknown reasons doesn't work on linux. return syscall.Errno(C.mygetgrgid_r(C.int(gid), &grp, (*C.char)(buf.ptr), diff --git a/src/os/user/lookup.go b/src/os/user/lookup.go index 2243a25788aed..b36b7c01c0b1c 100644 --- a/src/os/user/lookup.go +++ b/src/os/user/lookup.go @@ -7,6 +7,10 @@ package user import "sync" // Current returns the current user. +// +// The first call will cache the current user information. +// Subsequent calls will return the cached value and will not reflect +// changes to the current user. 
func Current() (*User, error) { cache.Do(func() { cache.u, cache.err = current() }) if cache.err != nil { diff --git a/src/os/user/lookup_stubs.go b/src/os/user/lookup_stubs.go index f7d138ff46801..61bf1dc7a6f77 100644 --- a/src/os/user/lookup_stubs.go +++ b/src/os/user/lookup_stubs.go @@ -19,8 +19,15 @@ func init() { } func current() (*User, error) { - u := &User{ - Uid: currentUID(), + uid := currentUID() + // $USER and /etc/passwd may disagree; prefer the latter if we can get it. + // See issue 27524 for more information. + u, err := lookupUserId(uid) + if err == nil { + return u, nil + } + u = &User{ + Uid: uid, Gid: currentGID(), Username: os.Getenv("USER"), Name: "", // ignored @@ -58,8 +65,8 @@ func current() (*User, error) { } func listGroups(*User) ([]string, error) { - if runtime.GOOS == "android" { - return nil, errors.New("user: GroupIds not implemented on Android") + if runtime.GOOS == "android" || runtime.GOOS == "aix" { + return nil, errors.New(fmt.Sprintf("user: GroupIds not implemented on %s", runtime.GOOS)) } return nil, errors.New("user: GroupIds requires cgo") } diff --git a/src/os/user/lookup_unix.go b/src/os/user/lookup_unix.go index c4e9ba1e81889..be62f4d0c3ef3 100644 --- a/src/os/user/lookup_unix.go +++ b/src/os/user/lookup_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm !android,linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm !android,linux nacl netbsd openbsd solaris // +build !cgo osusergo package user diff --git a/src/os/user/lookup_unix_test.go b/src/os/user/lookup_unix_test.go index 02c88ab87574c..65fe0656de66a 100644 --- a/src/os/user/lookup_unix_test.go +++ b/src/os/user/lookup_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd !android,linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd !android,linux nacl netbsd openbsd solaris // +build !cgo package user diff --git a/src/os/user/user.go b/src/os/user/user.go index 1f733b80235a5..c1b8101c8629c 100644 --- a/src/os/user/user.go +++ b/src/os/user/user.go @@ -11,7 +11,7 @@ parses /etc/passwd and /etc/group. The other is cgo-based and relies on the standard C library (libc) routines such as getpwuid_r and getgrnam_r. When cgo is available, cgo-based (libc-backed) code is used by default. -This can be overriden by using osusergo build tag, which enforces +This can be overridden by using osusergo build tag, which enforces the pure Go implementation. */ package user diff --git a/src/os/user/user_test.go b/src/os/user/user_test.go index 8fd760e64981b..2563077eb2e97 100644 --- a/src/os/user/user_test.go +++ b/src/os/user/user_test.go @@ -5,33 +5,18 @@ package user import ( - "internal/testenv" - "os" "runtime" "testing" ) func checkUser(t *testing.T) { + t.Helper() if !userImplemented { t.Skip("user: not implemented; skipping tests") } } func TestCurrent(t *testing.T) { - // The Go builders (in particular the ones using containers) - // often have minimal environments without $HOME or $USER set, - // which breaks Current which relies on those working as a - // fallback. - // TODO: we should fix that (Issue 24884) and remove these - // workarounds. 
- if testenv.Builder() != "" && runtime.GOOS != "windows" && runtime.GOOS != "plan9" { - if os.Getenv("HOME") == "" { - os.Setenv("HOME", "/tmp") - } - if os.Getenv("USER") == "" { - os.Setenv("USER", "gobuilder") - } - } u, err := Current() if err != nil { t.Fatalf("Current: %v (got %#v)", err, u) @@ -108,6 +93,7 @@ func TestLookupId(t *testing.T) { } func checkGroup(t *testing.T) { + t.Helper() if !groupImplemented { t.Skip("user: group not implemented; skipping test") } diff --git a/src/os/wait_unimp.go b/src/os/wait_unimp.go index 3d8210f5bdcee..469abf764016b 100644 --- a/src/os/wait_unimp.go +++ b/src/os/wait_unimp.go @@ -2,12 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly js,wasm nacl netbsd openbsd solaris +// +build aix darwin dragonfly js,wasm nacl netbsd openbsd solaris package os // blockUntilWaitable attempts to block until a call to p.Wait will -// succeed immediately, and returns whether it has done so. +// succeed immediately, and reports whether it has done so. // It does not actually call p.Wait. // This version is used on systems that do not implement waitid, // or where we have not implemented it yet. diff --git a/src/os/wait_wait6.go b/src/os/wait_wait6.go index 891f242dacd14..45bf649015fd7 100644 --- a/src/os/wait_wait6.go +++ b/src/os/wait_wait6.go @@ -14,7 +14,7 @@ import ( const _P_PID = 0 // blockUntilWaitable attempts to block until a call to p.Wait will -// succeed immediately, and returns whether it has done so. +// succeed immediately, and reports whether it has done so. // It does not actually call p.Wait. 
func (p *Process) blockUntilWaitable() (bool, error) { var errno syscall.Errno diff --git a/src/os/wait_waitid.go b/src/os/wait_waitid.go index 5a62b27f191f2..946c085a50042 100644 --- a/src/os/wait_waitid.go +++ b/src/os/wait_waitid.go @@ -18,7 +18,7 @@ import ( const _P_PID = 1 // blockUntilWaitable attempts to block until a call to p.Wait will -// succeed immediately, and returns whether it has done so. +// succeed immediately, and reports whether it has done so. // It does not actually call p.Wait. func (p *Process) blockUntilWaitable() (bool, error) { // The waitid system call expects a pointer to a siginfo_t, diff --git a/src/path/filepath/example_unix_test.go b/src/path/filepath/example_unix_test.go index cd8233ceb6a60..20ec8927b4692 100644 --- a/src/path/filepath/example_unix_test.go +++ b/src/path/filepath/example_unix_test.go @@ -79,3 +79,18 @@ func ExampleJoin() { // a/b/c // a/b/c } + +func ExampleMatch() { + fmt.Println("On Unix:") + fmt.Println(filepath.Match("/home/catch/*", "/home/catch/foo")) + fmt.Println(filepath.Match("/home/catch/*", "/home/catch/foo/bar")) + fmt.Println(filepath.Match("/home/?opher", "/home/gopher")) + fmt.Println(filepath.Match("/home/\\*", "/home/*")) + + // Output: + // On Unix: + // true + // false + // true + // true +} diff --git a/src/path/filepath/match_test.go b/src/path/filepath/match_test.go index 1d91c274c7596..b8657626bc7ea 100644 --- a/src/path/filepath/match_test.go +++ b/src/path/filepath/match_test.go @@ -106,7 +106,7 @@ func TestMatch(t *testing.T) { } } -// contains returns true if vector contains the string s. +// contains reports whether vector contains the string s. func contains(vector []string, s string) bool { for _, elem := range vector { if elem == s { diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index 1508137a33d1d..bbb90306a7f38 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -96,14 +96,19 @@ func Clean(path string) string { } return originalPath + "." 
} + + n := len(path) + if volLen > 2 && n == 1 && os.IsPathSeparator(path[0]) { + // UNC volume name with trailing slash. + return FromSlash(originalPath[:volLen]) + } rooted := os.IsPathSeparator(path[0]) // Invariants: // reading from path; r is index of next byte to process. - // writing to buf; w is index of next byte to write. - // dotdot is index in buf where .. must stop, either because + // writing to out; w is index of next byte to write. + // dotdot is index in out where .. must stop, either because // it is the leading slash or it is a leading ../../.. prefix. - n := len(path) out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen} r, dotdot := 0, 0 if rooted { @@ -166,7 +171,7 @@ func ToSlash(path string) string { if Separator == '/' { return path } - return strings.Replace(path, string(Separator), "/", -1) + return strings.ReplaceAll(path, string(Separator), "/") } // FromSlash returns the result of replacing each slash ('/') character @@ -176,7 +181,7 @@ func FromSlash(path string) string { if Separator == '/' { return path } - return strings.Replace(path, "/", string(Separator), -1) + return strings.ReplaceAll(path, "/", string(Separator)) } // SplitList splits a list of paths joined by the OS-specific ListSeparator, diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index dde087253dda3..9c4c7ebedc3b4 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -15,6 +15,7 @@ import ( "runtime" "sort" "strings" + "syscall" "testing" ) @@ -92,6 +93,9 @@ var wincleantests = []PathTest{ {`//host/share/foo/../baz`, `\\host\share\baz`}, {`\\a\b\..\c`, `\\a\b\c`}, {`\\a\b`, `\\a\b`}, + {`\\a\b\`, `\\a\b`}, + {`\\folder\share\foo`, `\\folder\share\foo`}, + {`\\folder\share\foo\`, `\\folder\share\foo`}, } func TestClean(t *testing.T) { @@ -271,6 +275,10 @@ var winjointests = []JoinTest{ {[]string{`C:`, `a`}, `C:a`}, {[]string{`C:`, `a\b`}, `C:a\b`}, {[]string{`C:`, `a`, `b`}, `C:a\b`}, + 
{[]string{`C:`, ``, `b`}, `C:b`}, + {[]string{`C:`, ``, ``, `b`}, `C:b`}, + {[]string{`C:`, ``}, `C:.`}, + {[]string{`C:`, ``, ``}, `C:.`}, {[]string{`C:.`, `a`}, `C:a`}, {[]string{`C:a`, `b`}, `C:a\b`}, {[]string{`C:a`, `b`, `d`}, `C:a\b\d`}, @@ -744,6 +752,11 @@ func TestIsAbs(t *testing.T) { for _, test := range isabstests { tests = append(tests, IsAbsTest{"c:" + test.path, test.isAbs}) } + // Test reserved names. + tests = append(tests, IsAbsTest{os.DevNull, true}) + tests = append(tests, IsAbsTest{"NUL", true}) + tests = append(tests, IsAbsTest{"nul", true}) + tests = append(tests, IsAbsTest{"CON", true}) } else { tests = isabstests } @@ -767,6 +780,18 @@ var EvalSymlinksTestDirs = []EvalSymlinksTest{ {"test/link1", "../test"}, {"test/link2", "dir"}, {"test/linkabs", "/"}, + {"test/link4", "../test2"}, + {"test2", "test/dir"}, + // Issue 23444. + {"src", ""}, + {"src/pool", ""}, + {"src/pool/test", ""}, + {"src/versions", ""}, + {"src/versions/current", "../../version"}, + {"src/versions/v1", ""}, + {"src/versions/v1/modules", ""}, + {"src/versions/v1/modules/test", "../../../pool/test"}, + {"version", "src/versions/v1"}, } var EvalSymlinksTests = []EvalSymlinksTest{ @@ -780,6 +805,8 @@ var EvalSymlinksTests = []EvalSymlinksTest{ {"test/dir/link3", "."}, {"test/link2/link3/test", "test"}, {"test/linkabs", "/"}, + {"test/link4/..", "test"}, + {"src/versions/current/modules/test", "src/pool/test"}, } // simpleJoin builds a file name from the directory and path. 
@@ -1044,7 +1071,7 @@ func TestAbs(t *testing.T) { } for _, path := range absTests { - path = strings.Replace(path, "$", root, -1) + path = strings.ReplaceAll(path, "$", root) info, err := os.Stat(path) if err != nil { t.Errorf("%s: %s", path, err) @@ -1345,3 +1372,44 @@ func TestWalkSymlink(t *testing.T) { testenv.MustHaveSymlink(t) testWalkSymlink(t, os.Symlink) } + +func TestIssue29372(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestIssue29372") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + if runtime.GOOS == "windows" { + // This test is broken on windows, if temporary directory + // is a symlink. See issue 29746. + // TODO(brainman): Remove this hack once issue #29746 is fixed. + tmpDir, err = filepath.EvalSymlinks(tmpDir) + if err != nil { + t.Fatal(err) + } + } + + path := filepath.Join(tmpDir, "file.txt") + err = ioutil.WriteFile(path, nil, 0644) + if err != nil { + t.Fatal(err) + } + + pathSeparator := string(filepath.Separator) + tests := []string{ + path + strings.Repeat(pathSeparator, 1), + path + strings.Repeat(pathSeparator, 2), + path + strings.Repeat(pathSeparator, 1) + ".", + path + strings.Repeat(pathSeparator, 2) + ".", + path + strings.Repeat(pathSeparator, 1) + "..", + path + strings.Repeat(pathSeparator, 2) + "..", + } + + for i, test := range tests { + _, err = filepath.EvalSymlinks(test) + if err != syscall.ENOTDIR { + t.Fatalf("test#%d: want %q, got %q", i, syscall.ENOTDIR, err) + } + } +} diff --git a/src/path/filepath/path_unix.go b/src/path/filepath/path_unix.go index 349dea7b53952..c10b3288a1ec4 100644 --- a/src/path/filepath/path_unix.go +++ b/src/path/filepath/path_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package filepath diff --git a/src/path/filepath/path_windows.go b/src/path/filepath/path_windows.go index 409e8d6466a95..445c868e41460 100644 --- a/src/path/filepath/path_windows.go +++ b/src/path/filepath/path_windows.go @@ -13,8 +13,34 @@ func isSlash(c uint8) bool { return c == '\\' || c == '/' } +// reservedNames lists reserved Windows names. Search for PRN in +// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file +// for details. +var reservedNames = []string{ + "CON", "PRN", "AUX", "NUL", + "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", + "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9", +} + +// isReservedName returns true, if path is Windows reserved name. +// See reservedNames for the full list. +func isReservedName(path string) bool { + if len(path) == 0 { + return false + } + for _, reserved := range reservedNames { + if strings.EqualFold(path, reserved) { + return true + } + } + return false +} + // IsAbs reports whether the path is absolute. func IsAbs(path string) (b bool) { + if isReservedName(path) { + return true + } l := volumeNameLen(path) if l == 0 { return false @@ -100,7 +126,7 @@ func splitList(path string) []string { // Remove quotes. for i, s := range list { - list[i] = strings.Replace(s, `"`, ``, -1) + list[i] = strings.ReplaceAll(s, `"`, ``) } return list @@ -134,7 +160,14 @@ func joinNonEmpty(elem []string) string { if len(elem[0]) == 2 && elem[0][1] == ':' { // First element is drive letter without terminating slash. // Keep path relative to current directory on that drive. - return Clean(elem[0] + strings.Join(elem[1:], string(Separator))) + // Skip empty elements. 
+ i := 1 + for ; i < len(elem); i++ { + if elem[i] != "" { + break + } + } + return Clean(elem[0] + strings.Join(elem[i:], string(Separator))) } // The following logic prevents Join from inadvertently creating a // UNC path on Windows. Unless the first element is a UNC path, Join diff --git a/src/path/filepath/path_windows_test.go b/src/path/filepath/path_windows_test.go index e36a3c9b64636..d1735d39bd8cc 100644 --- a/src/path/filepath/path_windows_test.go +++ b/src/path/filepath/path_windows_test.go @@ -431,7 +431,7 @@ func TestToNorm(t *testing.T) { t.Fatal(err) } - err = os.MkdirAll(strings.Replace(testPath, "{{tmp}}", ctmp, -1), 0777) + err = os.MkdirAll(strings.ReplaceAll(testPath, "{{tmp}}", ctmp), 0777) if err != nil { t.Fatal(err) } @@ -536,17 +536,42 @@ func TestNTNamespaceSymlink(t *testing.T) { } target := strings.Trim(string(output), " \n\r") - link := filepath.Join(tmpdir, "link") - output, err = exec.Command("cmd", "/c", "mklink", "/J", link, target).CombinedOutput() + dirlink := filepath.Join(tmpdir, "dirlink") + output, err = exec.Command("cmd", "/c", "mklink", "/J", dirlink, target).CombinedOutput() if err != nil { - t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) + t.Fatalf("failed to run mklink %v %v: %v %q", dirlink, target, err, output) } - got, err := filepath.EvalSymlinks(link) + got, err := filepath.EvalSymlinks(dirlink) if err != nil { t.Fatal(err) } if want := vol + `\`; got != want { - t.Errorf(`EvalSymlinks(%q): got %q, want %q`, link, got, want) + t.Errorf(`EvalSymlinks(%q): got %q, want %q`, dirlink, got, want) + } + + // Make sure we have sufficient privilege to run mklink command. 
+ testenv.MustHaveSymlink(t) + + file := filepath.Join(tmpdir, "file") + err = ioutil.WriteFile(file, []byte(""), 0666) + if err != nil { + t.Fatal(err) + } + + target += file[len(filepath.VolumeName(file)):] + + filelink := filepath.Join(tmpdir, "filelink") + output, err = exec.Command("cmd", "/c", "mklink", filelink, target).CombinedOutput() + if err != nil { + t.Fatalf("failed to run mklink %v %v: %v %q", filelink, target, err, output) + } + + got, err = filepath.EvalSymlinks(filelink) + if err != nil { + t.Fatal(err) + } + if want := file; got != want { + t.Errorf(`EvalSymlinks(%q): got %q, want %q`, filelink, got, want) } } diff --git a/src/path/filepath/symlink.go b/src/path/filepath/symlink.go index 824aee4e49056..4b41039e25f75 100644 --- a/src/path/filepath/symlink.go +++ b/src/path/filepath/symlink.go @@ -10,109 +10,128 @@ import ( "runtime" ) -// isRoot returns true if path is root of file system -// (`/` on unix and `/`, `\`, `c:\` or `c:/` on windows). -func isRoot(path string) bool { - if runtime.GOOS != "windows" { - return path == "/" - } - switch len(path) { - case 1: - return os.IsPathSeparator(path[0]) - case 3: - return path[1] == ':' && os.IsPathSeparator(path[2]) - } - return false -} +func walkSymlinks(path string) (string, error) { + volLen := volumeNameLen(path) + pathSeparator := string(os.PathSeparator) -// isDriveLetter returns true if path is Windows drive letter (like "c:"). 
-func isDriveLetter(path string) bool { - if runtime.GOOS != "windows" { - return false + if volLen < len(path) && os.IsPathSeparator(path[volLen]) { + volLen++ } - return len(path) == 2 && path[1] == ':' -} + vol := path[:volLen] + dest := vol + linksWalked := 0 + for start, end := volLen, volLen; start < len(path); start = end { + for start < len(path) && os.IsPathSeparator(path[start]) { + start++ + } + end = start + for end < len(path) && !os.IsPathSeparator(path[end]) { + end++ + } -func walkLink(path string, linksWalked *int) (newpath string, islink bool, err error) { - if *linksWalked > 255 { - return "", false, errors.New("EvalSymlinks: too many links") - } - fi, err := os.Lstat(path) - if err != nil { - return "", false, err - } - if fi.Mode()&os.ModeSymlink == 0 { - return path, false, nil - } - newpath, err = os.Readlink(path) - if err != nil { - return "", false, err - } - *linksWalked++ - return newpath, true, nil -} + // On Windows, "." can be a symlink. + // We look it up, and use the value if it is absolute. + // If not, we just return ".". + isWindowsDot := runtime.GOOS == "windows" && path[volumeNameLen(path):] == "." -func walkLinks(path string, linksWalked *int) (string, error) { - switch dir, file := Split(path); { - case dir == "": - newpath, _, err := walkLink(file, linksWalked) - return newpath, err - case file == "": - if isDriveLetter(dir) { - return dir, nil - } - if os.IsPathSeparator(dir[len(dir)-1]) { - if isRoot(dir) { - return dir, nil + // The next path component is in path[start:end]. + if end == start { + // No more path components. + break + } else if path[start:end] == "." && !isWindowsDot { + // Ignore path component ".". + continue + } else if path[start:end] == ".." { + // Back up to previous component if possible. + // Note that volLen includes any leading slash. 
+ var r int + for r = len(dest) - 1; r >= volLen; r-- { + if os.IsPathSeparator(dest[r]) { + break + } + } + if r < volLen { + if len(dest) > volLen { + dest += pathSeparator + } + dest += ".." + } else { + dest = dest[:r] } - return walkLinks(dir[:len(dir)-1], linksWalked) + continue } - newpath, _, err := walkLink(dir, linksWalked) - return newpath, err - default: - newdir, err := walkLinks(dir, linksWalked) - if err != nil { - return "", err + + // Ordinary path component. Add it to result. + + if len(dest) > volumeNameLen(dest) && !os.IsPathSeparator(dest[len(dest)-1]) { + dest += pathSeparator } - newpath, islink, err := walkLink(Join(newdir, file), linksWalked) + + dest += path[start:end] + + // Resolve symlink. + + fi, err := os.Lstat(dest) if err != nil { return "", err } - if !islink { - return newpath, nil + + if fi.Mode()&os.ModeSymlink == 0 { + if !fi.Mode().IsDir() && end < len(path) { + return "", slashAfterFilePathError + } + continue } - if IsAbs(newpath) || os.IsPathSeparator(newpath[0]) { - return newpath, nil + + // Found symlink. + + linksWalked++ + if linksWalked > 255 { + return "", errors.New("EvalSymlinks: too many links") } - return Join(newdir, newpath), nil - } -} -func walkSymlinks(path string) (string, error) { - if path == "" { - return path, nil - } - var linksWalked int // to protect against cycles - for { - i := linksWalked - newpath, err := walkLinks(path, &linksWalked) + link, err := os.Readlink(dest) if err != nil { return "", err } - if runtime.GOOS == "windows" { - // walkLinks(".", ...) always returns "." on unix. - // But on windows it returns symlink target, if current - // directory is a symlink. Stop the walk, if symlink - // target is not absolute path, and return "." - // to the caller (just like unix does). - // Same for "C:.". - if path[volumeNameLen(path):] == "." && !IsAbs(newpath) { - return path, nil - } + + if isWindowsDot && !IsAbs(link) { + // On Windows, if "." is a relative symlink, + // just return ".". 
+ break } - if i == linksWalked { - return Clean(newpath), nil + + path = link + path[end:] + + v := volumeNameLen(link) + if v > 0 { + // Symlink to drive name is an absolute path. + if v < len(link) && os.IsPathSeparator(link[v]) { + v++ + } + vol = link[:v] + dest = vol + end = len(vol) + } else if len(link) > 0 && os.IsPathSeparator(link[0]) { + // Symlink to absolute path. + dest = link[:1] + end = 1 + } else { + // Symlink to relative path; replace last + // path component in dest. + var r int + for r = len(dest) - 1; r >= volLen; r-- { + if os.IsPathSeparator(dest[r]) { + break + } + } + if r < volLen { + dest = vol + } else { + dest = dest[:r] + } + end = 0 } - path = newpath } + return Clean(dest), nil } diff --git a/src/path/filepath/symlink_unix.go b/src/path/filepath/symlink_unix.go index d20e63a987e9a..b57e7f2277e3e 100644 --- a/src/path/filepath/symlink_unix.go +++ b/src/path/filepath/symlink_unix.go @@ -2,6 +2,15 @@ package filepath +import ( + "syscall" +) + +// walkSymlinks returns slashAfterFilePathError error for paths like +// //path/to/existing_file/ and /path/to/existing_file/. and /path/to/existing_file/.. + +var slashAfterFilePathError = syscall.ENOTDIR + func evalSymlinks(path string) (string, error) { return walkSymlinks(path) } diff --git a/src/path/filepath/symlink_windows.go b/src/path/filepath/symlink_windows.go index 78cde4aa09074..531dc26fc0e4f 100644 --- a/src/path/filepath/symlink_windows.go +++ b/src/path/filepath/symlink_windows.go @@ -43,7 +43,7 @@ func normBase(path string) (string, error) { return syscall.UTF16ToString(data.FileName[:]), nil } -// baseIsDotDot returns whether the last element of path is "..". +// baseIsDotDot reports whether the last element of path is "..". // The given path should be 'Clean'-ed in advance. 
func baseIsDotDot(path string) bool { i := strings.LastIndexByte(path, Separator) @@ -171,8 +171,16 @@ func samefile(path1, path2 string) bool { return os.SameFile(fi1, fi2) } +// walkSymlinks returns slashAfterFilePathError error for paths like +// //path/to/existing_file/ and /path/to/existing_file/. and /path/to/existing_file/.. + +var slashAfterFilePathError = errors.New("attempting to walk past file path.") + func evalSymlinks(path string) (string, error) { newpath, err := walkSymlinks(path) + if err == slashAfterFilePathError { + return "", syscall.ENOTDIR + } if err != nil { newpath2, err2 := evalSymlinksUsingGetFinalPathNameByHandle(path) if err2 == nil { diff --git a/src/plugin/plugin_dlopen.go b/src/plugin/plugin_dlopen.go index 47f2b29a80b8d..f24093989fd6f 100644 --- a/src/plugin/plugin_dlopen.go +++ b/src/plugin/plugin_dlopen.go @@ -39,16 +39,6 @@ import ( "unsafe" ) -// avoid a dependency on strings -func lastIndexByte(s string, c byte) int { - for i := len(s) - 1; i >= 0; i-- { - if s[i] == c { - return i - } - } - return -1 -} - func open(name string) (*Plugin, error) { cPath := make([]byte, C.PATH_MAX+1) cRelName := make([]byte, len(name)+1) diff --git a/src/plugin/plugin_test.go b/src/plugin/plugin_test.go new file mode 100644 index 0000000000000..b334c5cf0ec02 --- /dev/null +++ b/src/plugin/plugin_test.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl +// +build !linux linux,!arm64 + +package plugin_test + +import ( + _ "plugin" + "testing" +) + +func TestPlugin(t *testing.T) { + // This test makes sure that executable that imports plugin + // package can actually run. See issue #28789 for details. 
+} diff --git a/src/race.bash b/src/race.bash index d673f503a9a73..e83c175df3b4c 100755 --- a/src/race.bash +++ b/src/race.bash @@ -9,7 +9,7 @@ set -e function usage { - echo 'race detector is only supported on linux/amd64, linux/ppc64le, freebsd/amd64, netbsd/amd64 and darwin/amd64' 1>&2 + echo 'race detector is only supported on linux/amd64, linux/ppc64le, linux/arm64, freebsd/amd64, netbsd/amd64 and darwin/amd64' 1>&2 exit 1 } @@ -21,7 +21,7 @@ case $(uname) in fi ;; "Linux") - if [ $(uname -m) != "x86_64" ] && [ $(uname -m) != "ppc64le" ]; then + if [ $(uname -m) != "x86_64" ] && [ $(uname -m) != "ppc64le" ] && [ $(uname -m) != "aarch64" ]; then usage fi ;; diff --git a/src/race.bat b/src/race.bat index e8df480811c88..e1c3fbf5d9c64 100644 --- a/src/race.bat +++ b/src/race.bat @@ -18,7 +18,7 @@ goto end set GOROOT=%CD%\.. call make.bat --dist-tool >NUL if errorlevel 1 goto fail -.\cmd\dist\dist env -w -p >env.bat +.\cmd\dist\dist.exe env -w -p >env.bat if errorlevel 1 goto fail call env.bat del env.bat diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index cf7fe3cf7ae72..10b52456f34ee 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -1009,6 +1009,7 @@ func TestIsNil(t *testing.T) { struct{ x func() bool }{}, struct{ x chan int }{}, struct{ x []string }{}, + struct{ x unsafe.Pointer }{}, } for _, ts := range doNil { ty := TypeOf(ts).Field(0).Type @@ -1693,9 +1694,9 @@ func TestCallReturnsEmpty(t *testing.T) { // nonzero-sized frame and zero-sized return value. 
runtime.GC() var finalized uint32 - f := func() (emptyStruct, *int) { - i := new(int) - runtime.SetFinalizer(i, func(*int) { atomic.StoreUint32(&finalized, 1) }) + f := func() (emptyStruct, *[2]int64) { + i := new([2]int64) // big enough to not be tinyalloc'd, so finalizer always runs when i dies + runtime.SetFinalizer(i, func(*[2]int64) { atomic.StoreUint32(&finalized, 1) }) return emptyStruct{}, i } v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run. @@ -5019,6 +5020,17 @@ func TestStructOfWithInterface(t *testing.T) { }) } +func TestStructOfTooManyFields(t *testing.T) { + // Bug Fix: #25402 - this should not panic + tt := StructOf([]StructField{ + {Name: "Time", Type: TypeOf(time.Time{}), Anonymous: true}, + }) + + if _, present := tt.MethodByName("After"); !present { + t.Errorf("Expected method `After` to be found") + } +} + func TestChanOf(t *testing.T) { // check construction and use of type not in binary type T string @@ -5844,7 +5856,7 @@ func clobber() { type funcLayoutTest struct { rcvr, t Type size, argsize, retOffset uintptr - stack []byte // pointer bitmap: 1 is pointer, 0 is scalar (or uninitialized) + stack []byte // pointer bitmap: 1 is pointer, 0 is scalar gc []byte } @@ -5866,7 +5878,7 @@ func init() { 6 * PtrSize, 4 * PtrSize, 4 * PtrSize, - []byte{1, 0, 1}, + []byte{1, 0, 1, 0, 1}, []byte{1, 0, 1, 0, 1}, }) @@ -5988,7 +6000,8 @@ func TestFuncLayout(t *testing.T) { func verifyGCBits(t *testing.T, typ Type, bits []byte) { heapBits := GCBits(New(typ).Interface()) if !bytes.Equal(heapBits, bits) { - t.Errorf("heapBits incorrect for %v\nhave %v\nwant %v", typ, heapBits, bits) + _, _, line, _ := runtime.Caller(1) + t.Errorf("line %d: heapBits incorrect for %v\nhave %v\nwant %v", line, typ, heapBits, bits) } } @@ -6576,3 +6589,124 @@ func TestIssue22073(t *testing.T) { // Shouldn't panic. 
m.Call(nil) } + +func TestMapIterNonEmptyMap(t *testing.T) { + m := map[string]int{"one": 1, "two": 2, "three": 3} + iter := ValueOf(m).MapRange() + if got, want := iterateToString(iter), `[one: 1, three: 3, two: 2]`; got != want { + t.Errorf("iterator returned %s (after sorting), want %s", got, want) + } +} + +func TestMapIterNilMap(t *testing.T) { + var m map[string]int + iter := ValueOf(m).MapRange() + if got, want := iterateToString(iter), `[]`; got != want { + t.Errorf("non-empty result iteratoring nil map: %s", got) + } +} + +func TestMapIterSafety(t *testing.T) { + // Using a zero MapIter causes a panic, but not a crash. + func() { + defer func() { recover() }() + new(MapIter).Key() + t.Fatal("Key did not panic") + }() + func() { + defer func() { recover() }() + new(MapIter).Value() + t.Fatal("Value did not panic") + }() + func() { + defer func() { recover() }() + new(MapIter).Next() + t.Fatal("Next did not panic") + }() + + // Calling Key/Value on a MapIter before Next + // causes a panic, but not a crash. + var m map[string]int + iter := ValueOf(m).MapRange() + + func() { + defer func() { recover() }() + iter.Key() + t.Fatal("Key did not panic") + }() + func() { + defer func() { recover() }() + iter.Value() + t.Fatal("Value did not panic") + }() + + // Calling Next, Key, or Value on an exhausted iterator + // causes a panic, but not a crash. + iter.Next() // -> false + func() { + defer func() { recover() }() + iter.Key() + t.Fatal("Key did not panic") + }() + func() { + defer func() { recover() }() + iter.Value() + t.Fatal("Value did not panic") + }() + func() { + defer func() { recover() }() + iter.Next() + t.Fatal("Next did not panic") + }() +} + +func TestMapIterNext(t *testing.T) { + // The first call to Next should reflect any + // insertions to the map since the iterator was created. 
+ m := map[string]int{} + iter := ValueOf(m).MapRange() + m["one"] = 1 + if got, want := iterateToString(iter), `[one: 1]`; got != want { + t.Errorf("iterator returned deleted elements: got %s, want %s", got, want) + } +} + +func TestMapIterDelete0(t *testing.T) { + // Delete all elements before first iteration. + m := map[string]int{"one": 1, "two": 2, "three": 3} + iter := ValueOf(m).MapRange() + delete(m, "one") + delete(m, "two") + delete(m, "three") + if got, want := iterateToString(iter), `[]`; got != want { + t.Errorf("iterator returned deleted elements: got %s, want %s", got, want) + } +} + +func TestMapIterDelete1(t *testing.T) { + // Delete all elements after first iteration. + m := map[string]int{"one": 1, "two": 2, "three": 3} + iter := ValueOf(m).MapRange() + var got []string + for iter.Next() { + got = append(got, fmt.Sprint(iter.Key(), iter.Value())) + delete(m, "one") + delete(m, "two") + delete(m, "three") + } + if len(got) != 1 { + t.Errorf("iterator returned wrong number of elements: got %d, want 1", len(got)) + } +} + +// iterateToString returns the set of elements +// returned by an iterator in readable form. +func iterateToString(it *MapIter) string { + var got []string + for it.Next() { + line := fmt.Sprintf("%v: %v", it.Key(), it.Value()) + got = append(got, line) + } + sort.Strings(got) + return "[" + strings.Join(got, ", ") + "]" +} diff --git a/src/reflect/asm_386.s b/src/reflect/asm_386.s index d827360006c97..e79beb6dc99f0 100644 --- a/src/reflect/asm_386.s +++ b/src/reflect/asm_386.s @@ -9,11 +9,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. 
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVL DX, 0(SP) LEAL argframe+0(FP), CX MOVL CX, 4(SP) + MOVB $0, 12(SP) + LEAL 12(SP), AX + MOVL AX, 8(SP) CALL ·callReflect(SB) RET @@ -21,10 +24,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVL DX, 0(SP) LEAL argframe+0(FP), CX MOVL CX, 4(SP) + MOVB $0, 12(SP) + LEAL 12(SP), AX + MOVL AX, 8(SP) CALL ·callMethod(SB) RET diff --git a/src/reflect/asm_amd64.s b/src/reflect/asm_amd64.s index 1272c489de829..fb28ab87f113e 100644 --- a/src/reflect/asm_amd64.s +++ b/src/reflect/asm_amd64.s @@ -9,11 +9,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVQ DX, 0(SP) LEAQ argframe+0(FP), CX MOVQ CX, 8(SP) + MOVB $0, 24(SP) + LEAQ 24(SP), AX + MOVQ AX, 16(SP) CALL ·callReflect(SB) RET @@ -21,10 +24,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. 
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVQ DX, 0(SP) LEAQ argframe+0(FP), CX MOVQ CX, 8(SP) + MOVB $0, 24(SP) + LEAQ 24(SP), AX + MOVQ AX, 16(SP) CALL ·callMethod(SB) RET diff --git a/src/reflect/asm_amd64p32.s b/src/reflect/asm_amd64p32.s index d827360006c97..e79beb6dc99f0 100644 --- a/src/reflect/asm_amd64p32.s +++ b/src/reflect/asm_amd64p32.s @@ -9,11 +9,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVL DX, 0(SP) LEAL argframe+0(FP), CX MOVL CX, 4(SP) + MOVB $0, 12(SP) + LEAL 12(SP), AX + MOVL AX, 8(SP) CALL ·callReflect(SB) RET @@ -21,10 +24,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVL DX, 0(SP) LEAL argframe+0(FP), CX MOVL CX, 4(SP) + MOVB $0, 12(SP) + LEAL 12(SP), AX + MOVL AX, 8(SP) CALL ·callMethod(SB) RET diff --git a/src/reflect/asm_arm.s b/src/reflect/asm_arm.s index b721ed28c66f9..cd50d33918fc6 100644 --- a/src/reflect/asm_arm.s +++ b/src/reflect/asm_arm.s @@ -9,11 +9,15 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. 
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVW R7, 4(R13) MOVW $argframe+0(FP), R1 MOVW R1, 8(R13) + MOVW $0, R1 + MOVB R1, 16(R13) + ADD $16, R13, R1 + MOVW R1, 12(R13) BL ·callReflect(SB) RET @@ -21,10 +25,14 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No argsize here, gc generates argsize info at call site. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVW R7, 4(R13) MOVW $argframe+0(FP), R1 MOVW R1, 8(R13) + MOVW $0, R1 + MOVB R1, 16(R13) + ADD $16, R13, R1 + MOVW R1, 12(R13) BL ·callMethod(SB) RET diff --git a/src/reflect/asm_arm64.s b/src/reflect/asm_arm64.s index d1563709f2f5a..28bb86c2a47eb 100644 --- a/src/reflect/asm_arm64.s +++ b/src/reflect/asm_arm64.s @@ -9,11 +9,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$24 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 NO_LOCAL_POINTERS MOVD R26, 8(RSP) MOVD $argframe+0(FP), R3 MOVD R3, 16(RSP) + MOVB $0, 32(RSP) + ADD $32, RSP, R3 + MOVD R3, 24(RSP) BL ·callReflect(SB) RET @@ -21,10 +24,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$24 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. 
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$24 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 NO_LOCAL_POINTERS MOVD R26, 8(RSP) MOVD $argframe+0(FP), R3 MOVD R3, 16(RSP) + MOVB $0, 32(RSP) + ADD $32, RSP, R3 + MOVD R3, 24(RSP) BL ·callMethod(SB) RET diff --git a/src/reflect/asm_mips64x.s b/src/reflect/asm_mips64x.s index 98afb52f6a11b..6f76685567a7a 100644 --- a/src/reflect/asm_mips64x.s +++ b/src/reflect/asm_mips64x.s @@ -13,11 +13,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVV REGCTXT, 8(R29) MOVV $argframe+0(FP), R1 MOVV R1, 16(R29) + MOVB R0, 32(R29) + ADDV $32, R29, R1 + MOVV R1, 24(R29) JAL ·callReflect(SB) RET @@ -25,10 +28,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVV REGCTXT, 8(R29) MOVV $argframe+0(FP), R1 MOVV R1, 16(R29) + MOVB R0, 32(R29) + ADDV $32, R29, R1 + MOVV R1, 24(R29) JAL ·callMethod(SB) RET diff --git a/src/reflect/asm_mipsx.s b/src/reflect/asm_mipsx.s index b6df4e636e81f..5a5c53ef9f9f1 100644 --- a/src/reflect/asm_mipsx.s +++ b/src/reflect/asm_mipsx.s @@ -13,11 +13,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. 
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVW REGCTXT, 4(R29) MOVW $argframe+0(FP), R1 MOVW R1, 8(R29) + MOVB R0, 16(R29) + ADD $16, R29, R1 + MOVW R1, 12(R29) JAL ·callReflect(SB) RET @@ -25,10 +28,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$8 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$8 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 NO_LOCAL_POINTERS MOVW REGCTXT, 4(R29) MOVW $argframe+0(FP), R1 MOVW R1, 8(R29) + MOVB R0, 16(R29) + ADD $16, R29, R1 + MOVW R1, 12(R29) JAL ·callMethod(SB) RET diff --git a/src/reflect/asm_ppc64x.s b/src/reflect/asm_ppc64x.s index 42f57743e6f21..4609f6bb75223 100644 --- a/src/reflect/asm_ppc64x.s +++ b/src/reflect/asm_ppc64x.s @@ -12,11 +12,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD R11, FIXED_FRAME+0(R1) MOVD $argframe+0(FP), R3 MOVD R3, FIXED_FRAME+8(R1) + MOVB R0, FIXED_FRAME+24(R1) + ADD $FIXED_FRAME+24, R1, R3 + MOVD R3, FIXED_FRAME+16(R1) BL ·callReflect(SB) RET @@ -24,10 +27,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. 
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD R11, FIXED_FRAME+0(R1) MOVD $argframe+0(FP), R3 MOVD R3, FIXED_FRAME+8(R1) + MOVB R0, FIXED_FRAME+24(R1) + ADD $FIXED_FRAME+24, R1, R3 + MOVD R3, FIXED_FRAME+16(R1) BL ·callMethod(SB) RET diff --git a/src/reflect/asm_s390x.s b/src/reflect/asm_s390x.s index e6b86cfaa9d11..cb7954c900cb9 100644 --- a/src/reflect/asm_s390x.s +++ b/src/reflect/asm_s390x.s @@ -9,11 +9,14 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. -TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD R12, 8(R15) MOVD $argframe+0(FP), R3 MOVD R3, 16(R15) + MOVB $0, 32(R15) + ADD $32, R15, R3 + MOVD R3, 24(R15) BL ·callReflect(SB) RET @@ -21,10 +24,13 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD R12, 8(R15) MOVD $argframe+0(FP), R3 MOVD R3, 16(R15) + MOVB $0, 32(R15) + ADD $32, R15, R3 + MOVD R3, 24(R15) BL ·callMethod(SB) RET diff --git a/src/reflect/asm_wasm.s b/src/reflect/asm_wasm.s index 0f9b5aa130fe0..627e295769cb3 100644 --- a/src/reflect/asm_wasm.s +++ b/src/reflect/asm_wasm.s @@ -9,7 +9,7 @@ // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. 
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD CTXT, 0(SP) @@ -21,6 +21,9 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 I64Add I64Store $8 + MOVB $0, 24(SP) + MOVD $24(SP), 16(SP) + CALL ·callReflect(SB) RET @@ -28,7 +31,7 @@ TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16 // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. -TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$32 NO_LOCAL_POINTERS MOVD CTXT, 0(SP) @@ -40,5 +43,8 @@ TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16 I64Add I64Store $8 + MOVB $0, 24(SP) + MOVD $24(SP), 16(SP) + CALL ·callMethod(SB) RET diff --git a/src/reflect/example_test.go b/src/reflect/example_test.go index f959b95846383..23c08e4950049 100644 --- a/src/reflect/example_test.go +++ b/src/reflect/example_test.go @@ -13,6 +13,24 @@ import ( "reflect" ) +func ExampleKind() { + for _, v := range []interface{}{"hi", 42, func() {}} { + switch v := reflect.ValueOf(v); v.Kind() { + case reflect.String: + fmt.Println(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fmt.Println(v.Int()) + default: + fmt.Printf("unhandled kind %s", v.Kind()) + } + } + + // Output: + // hi + // 42 + // unhandled kind func +} + func ExampleMakeFunc() { // swap is the implementation passed to MakeFunc. 
// It must work in terms of reflect.Values so that it is possible diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index 14a6981fdee1b..3c47d6712f172 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -25,9 +25,9 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, var ft *rtype var s *bitVector if rcvr != nil { - ft, argSize, retOffset, s, _ = funcLayout(t.(*rtype), rcvr.(*rtype)) + ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), rcvr.(*rtype)) } else { - ft, argSize, retOffset, s, _ = funcLayout(t.(*rtype), nil) + ft, argSize, retOffset, s, _ = funcLayout((*funcType)(unsafe.Pointer(t.(*rtype))), nil) } frametype = ft for i := uint32(0); i < s.n; i++ { diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go index 885966db6fe92..67dc4859b9736 100644 --- a/src/reflect/makefunc.go +++ b/src/reflect/makefunc.go @@ -12,14 +12,15 @@ import ( // makeFuncImpl is the closure value implementing the function // returned by MakeFunc. -// The first two words of this type must be kept in sync with +// The first three words of this type must be kept in sync with // methodValue and runtime.reflectMethodValue. // Any changes should be reflected in all three. 
type makeFuncImpl struct { - code uintptr - stack *bitVector - typ *funcType - fn func([]Value) []Value + code uintptr + stack *bitVector // ptrmap for both args and results + argLen uintptr // just args + ftyp *funcType + fn func([]Value) []Value } // MakeFunc returns a new function of the given Type @@ -59,9 +60,9 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value { code := **(**uintptr)(unsafe.Pointer(&dummy)) // makeFuncImpl contains a stack map for use by the runtime - _, _, _, stack, _ := funcLayout(t, nil) + _, argLen, _, stack, _ := funcLayout(ftyp, nil) - impl := &makeFuncImpl{code: code, stack: stack, typ: ftyp, fn: fn} + impl := &makeFuncImpl{code: code, stack: stack, argLen: argLen, ftyp: ftyp, fn: fn} return Value{t, unsafe.Pointer(impl), flag(Func)} } @@ -73,12 +74,13 @@ func MakeFunc(typ Type, fn func(args []Value) (results []Value)) Value { // word in the passed-in argument frame. func makeFuncStub() -// The first two words of this type must be kept in sync with +// The first 3 words of this type must be kept in sync with // makeFuncImpl and runtime.reflectMethodValue. // Any changes should be reflected in all three. type methodValue struct { fn uintptr - stack *bitVector + stack *bitVector // ptrmap for both args and results + argLen uintptr // just args method int rcvr Value } @@ -101,7 +103,7 @@ func makeMethodValue(op string, v Value) Value { rcvr := Value{v.typ, v.ptr, fl} // v.Type returns the actual type of the method value. - funcType := v.Type().(*rtype) + ftyp := (*funcType)(unsafe.Pointer(v.Type().(*rtype))) // Indirect Go func value (dummy) to obtain // actual code address. 
(A Go func value is a pointer @@ -110,11 +112,12 @@ func makeMethodValue(op string, v Value) Value { code := **(**uintptr)(unsafe.Pointer(&dummy)) // methodValue contains a stack map for use by the runtime - _, _, _, stack, _ := funcLayout(funcType, nil) + _, argLen, _, stack, _ := funcLayout(ftyp, nil) fv := &methodValue{ fn: code, stack: stack, + argLen: argLen, method: int(v.flag) >> flagMethodShift, rcvr: rcvr, } @@ -124,7 +127,7 @@ func makeMethodValue(op string, v Value) Value { // but we want Interface() and other operations to fail early. methodReceiver(op, fv.rcvr, fv.method) - return Value{funcType, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)} + return Value{&ftyp.rtype, unsafe.Pointer(fv), v.flag&flagRO | flag(Func)} } // methodValueCall is an assembly function that is the code half of diff --git a/src/reflect/type.go b/src/reflect/type.go index 58cfc0e884035..5ce80c61dcfdf 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -394,16 +394,13 @@ type interfaceType struct { // mapType represents a map type. type mapType struct { rtype - key *rtype // map key type - elem *rtype // map element (value) type - bucket *rtype // internal bucket structure - keysize uint8 // size of key slot - indirectkey uint8 // store ptr to key instead of key itself - valuesize uint8 // size of value slot - indirectvalue uint8 // store ptr to value instead of value itself - bucketsize uint16 // size of bucket - reflexivekey bool // true if k==k for all keys - needkeyupdate bool // true if we need to update key on an overwrite + key *rtype // map key type + elem *rtype // map element (value) type + bucket *rtype // internal bucket structure + keysize uint8 // size of key slot + valuesize uint8 // size of value slot + bucketsize uint16 // size of bucket + flags uint32 } // ptrType represents a pointer type. @@ -593,6 +590,7 @@ const ( kindMask = (1 << 5) - 1 ) +// String returns the name of k. 
func (k Kind) String() string { if int(k) < len(kindNames) { return kindNames[k] @@ -1858,6 +1856,8 @@ func MapOf(key, elem Type) Type { } // Make a map type. + // Note: flag values must match those used in the TMAP case + // in ../cmd/compile/internal/gc/reflect.go:dtypesym. var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) mt := **(**mapType)(unsafe.Pointer(&imap)) mt.str = resolveReflectName(newName(s, "", false)) @@ -1866,29 +1866,37 @@ func MapOf(key, elem Type) Type { mt.key = ktyp mt.elem = etyp mt.bucket = bucketOf(ktyp, etyp) + mt.flags = 0 if ktyp.size > maxKeySize { mt.keysize = uint8(ptrSize) - mt.indirectkey = 1 + mt.flags |= 1 // indirect key } else { mt.keysize = uint8(ktyp.size) - mt.indirectkey = 0 } if etyp.size > maxValSize { mt.valuesize = uint8(ptrSize) - mt.indirectvalue = 1 + mt.flags |= 2 // indirect value } else { mt.valuesize = uint8(etyp.size) - mt.indirectvalue = 0 } mt.bucketsize = uint16(mt.bucket.size) - mt.reflexivekey = isReflexive(ktyp) - mt.needkeyupdate = needKeyUpdate(ktyp) + if isReflexive(ktyp) { + mt.flags |= 4 + } + if needKeyUpdate(ktyp) { + mt.flags |= 8 + } + if hashMightPanic(ktyp) { + mt.flags |= 16 + } mt.ptrToThis = 0 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype) return ti.(Type) } +// TODO(crawshaw): as these funcTypeFixedN structs have no methods, +// they could be defined at runtime using the StructOf function. type funcTypeFixed4 struct { funcType args [4]*rtype @@ -2119,6 +2127,27 @@ func needKeyUpdate(t *rtype) bool { } } +// hashMightPanic reports whether the hash of a map key of type t might panic. 
+func hashMightPanic(t *rtype) bool { + switch t.Kind() { + case Interface: + return true + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + return hashMightPanic(tt.elem) + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for _, f := range tt.fields { + if hashMightPanic(f.typ) { + return true + } + } + return false + default: + return false + } +} + // Make sure these routines stay in sync with ../../runtime/map.go! // These types exist only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in string @@ -2278,43 +2307,7 @@ type structTypeUncommon struct { u uncommonType } -// A *rtype representing a struct is followed directly in memory by an -// array of method objects representing the methods attached to the -// struct. To get the same layout for a run time generated type, we -// need an array directly following the uncommonType memory. The types -// structTypeFixed4, ...structTypeFixedN are used to do this. -// -// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. - -// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs -// have no methods, they could be defined at runtime using the StructOf -// function. - -type structTypeFixed4 struct { - structType - u uncommonType - m [4]method -} - -type structTypeFixed8 struct { - structType - u uncommonType - m [8]method -} - -type structTypeFixed16 struct { - structType - u uncommonType - m [16]method -} - -type structTypeFixed32 struct { - structType - u uncommonType - m [32]method -} - -// isLetter returns true if a given 'rune' is classified as a Letter. +// isLetter reports whether a given 'rune' is classified as a Letter. 
func isLetter(ch rune) bool { return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) } @@ -2571,33 +2564,26 @@ func StructOf(fields []StructField) Type { var typ *structType var ut *uncommonType - switch { - case len(methods) == 0: + if len(methods) == 0 { t := new(structTypeUncommon) typ = &t.structType ut = &t.u - case len(methods) <= 4: - t := new(structTypeFixed4) - typ = &t.structType - ut = &t.u - copy(t.m[:], methods) - case len(methods) <= 8: - t := new(structTypeFixed8) - typ = &t.structType - ut = &t.u - copy(t.m[:], methods) - case len(methods) <= 16: - t := new(structTypeFixed16) - typ = &t.structType - ut = &t.u - copy(t.m[:], methods) - case len(methods) <= 32: - t := new(structTypeFixed32) - typ = &t.structType - ut = &t.u - copy(t.m[:], methods) - default: - panic("reflect.StructOf: too many methods") + } else { + // A *rtype representing a struct is followed directly in memory by an + // array of method objects representing the methods attached to the + // struct. To get the same layout for a run time generated type, we + // need an array directly following the uncommonType memory. + // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. + tt := New(StructOf([]StructField{ + {Name: "S", Type: TypeOf(structType{})}, + {Name: "U", Type: TypeOf(uncommonType{})}, + {Name: "M", Type: ArrayOf(len(methods), TypeOf(methods[0]))}, + })) + + typ = (*structType)(unsafe.Pointer(tt.Elem().Field(0).UnsafeAddr())) + ut = (*uncommonType)(unsafe.Pointer(tt.Elem().Field(1).UnsafeAddr())) + + copy(tt.Elem().Field(2).Slice(0, len(methods)).Interface().([]method), methods) } // TODO(sbinet): Once we allow embedding multiple types, // methods will need to be sorted like the compiler does. 
@@ -3022,8 +3008,8 @@ func toType(t *rtype) Type { } type layoutKey struct { - t *rtype // function signature - rcvr *rtype // receiver type, or nil if none + ftyp *funcType // function signature + rcvr *rtype // receiver type, or nil if none } type layoutType struct { @@ -3042,7 +3028,7 @@ var layoutCache sync.Map // map[layoutKey]layoutType // The returned type exists only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in // the name for possible debugging use. -func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { +func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) { if t.Kind() != Func { panic("reflect: funcLayout of non-func type") } @@ -3055,8 +3041,6 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool } - tt := (*funcType)(unsafe.Pointer(t)) - // compute gc program & stack bitmap for arguments ptrmap := new(bitVector) var offset uintptr @@ -3066,22 +3050,23 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin // space no matter how big they actually are. 
if ifaceIndir(rcvr) || rcvr.pointers() { ptrmap.append(1) + } else { + ptrmap.append(0) } offset += ptrSize } - for _, arg := range tt.in() { + for _, arg := range t.in() { offset += -offset & uintptr(arg.align-1) addTypeBits(ptrmap, offset, arg) offset += arg.size } - argN := ptrmap.n argSize = offset if runtime.GOARCH == "amd64p32" { offset += -offset & (8 - 1) } offset += -offset & (ptrSize - 1) retOffset = offset - for _, res := range tt.out() { + for _, res := range t.out() { offset += -offset & uintptr(res.align-1) addTypeBits(ptrmap, offset, res) offset += res.size @@ -3102,7 +3087,6 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin } else { x.kind |= kindNoPointers } - ptrmap.n = argN var s string if rcvr != nil { diff --git a/src/reflect/value.go b/src/reflect/value.go index 4e7b1d74db3dc..372b7a6dc8008 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -325,7 +325,7 @@ var callGC bool // for testing; see TestCallMethodJump func (v Value) call(op string, in []Value) []Value { // Get function pointer, type. - t := v.typ + t := (*funcType)(unsafe.Pointer(v.typ)) var ( fn unsafe.Pointer rcvr Value @@ -453,15 +453,14 @@ func (v Value) call(op string, in []Value) []Value { var ret []Value if nout == 0 { - // This is untyped because the frame is really a - // stack, even though it's a heap object. - memclrNoHeapPointers(args, frametype.size) + typedmemclr(frametype, args) framePool.Put(args) } else { // Zero the now unused input area of args, // because the Values returned by this function contain pointers to the args object, // and will thus keep the args object alive indefinitely. - memclrNoHeapPointers(args, retOffset) + typedmemclrpartial(frametype, args, 0, retOffset) + // Wrap Values around return values in args. 
ret = make([]Value, nout) off = retOffset @@ -472,6 +471,10 @@ func (v Value) call(op string, in []Value) []Value { if tv.Size() != 0 { fl := flagIndir | flag(tv.Kind()) ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl} + // Note: this does introduce false sharing between results - + // if any result is live, they are all live. + // (And the space for the args is live as well, but as we've + // cleared that space it isn't as big a deal.) } else { // For zero-sized return value, args+off may point to the next object. // In this case, return the zero value instead. @@ -496,8 +499,13 @@ func (v Value) call(op string, in []Value) []Value { // NOTE: This function must be marked as a "wrapper" in the generated code, // so that the linker can make it work correctly for panic and recover. // The gc compilers know to do that for the name "reflect.callReflect". -func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) { - ftyp := ctxt.typ +// +// ctxt is the "closure" generated by MakeFunc. +// frame is a pointer to the arguments to that closure on the stack. +// retValid points to a boolean which should be set when the results +// section of frame is set. +func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool) { + ftyp := ctxt.ftyp f := ctxt.fn // Copy argument frame into Values. @@ -553,15 +561,26 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) { continue } addr := add(ptr, off, "typ.size > 0") + // We are writing to stack. No write barrier. if v.flag&flagIndir != 0 { - typedmemmove(typ, addr, v.ptr) + memmove(addr, v.ptr, typ.size) } else { - *(*unsafe.Pointer)(addr) = v.ptr + *(*uintptr)(addr) = uintptr(v.ptr) } off += typ.size } } + // Announce that the return values are valid. + // After this point the runtime can depend on the return values being valid. + *retValid = true + + // We have to make sure that the out slice lives at least until + // the runtime knows the return values are valid. 
Otherwise, the + // return values might not be scanned by anyone during a GC. + // (out would be dead, and the return slots not yet alive.) + runtime.KeepAlive(out) + // runtime.getArgInfo expects to be able to find ctxt on the // stack when it finds our caller, makeFuncStub. Make sure it // doesn't get garbage collected. @@ -575,7 +594,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) { // The return value rcvrtype gives the method's actual receiver type. // The return value t gives the method type signature (without the receiver). // The return value fn is a pointer to the method code. -func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn unsafe.Pointer) { +func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *rtype, t *funcType, fn unsafe.Pointer) { i := methodIndex if v.typ.Kind() == Interface { tt := (*interfaceType)(unsafe.Pointer(v.typ)) @@ -592,7 +611,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn } rcvrtype = iface.itab.typ fn = unsafe.Pointer(&iface.itab.fun[i]) - t = tt.typeOff(m.typ) + t = (*funcType)(unsafe.Pointer(tt.typeOff(m.typ))) } else { rcvrtype = v.typ ms := v.typ.exportedMethods() @@ -605,7 +624,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn } ifn := v.typ.textOff(m.ifn) fn = unsafe.Pointer(&ifn) - t = v.typ.typeOff(m.mtyp) + t = (*funcType)(unsafe.Pointer(v.typ.typeOff(m.mtyp))) } return } @@ -644,23 +663,31 @@ func align(x, n uintptr) uintptr { // NOTE: This function must be marked as a "wrapper" in the generated code, // so that the linker can make it work correctly for panic and recover. // The gc compilers know to do that for the name "reflect.callMethod". -func callMethod(ctxt *methodValue, frame unsafe.Pointer) { +// +// ctxt is the "closure" generated by makeVethodValue. +// frame is a pointer to the arguments to that closure on the stack. 
+// retValid points to a boolean which should be set when the results +// section of frame is set. +func callMethod(ctxt *methodValue, frame unsafe.Pointer, retValid *bool) { rcvr := ctxt.rcvr rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method) frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype) // Make a new frame that is one word bigger so we can store the receiver. - args := framePool.Get().(unsafe.Pointer) + // This space is used for both arguments and return values. + scratch := framePool.Get().(unsafe.Pointer) // Copy in receiver and rest of args. // Avoid constructing out-of-bounds pointers if there are no args. - storeRcvr(rcvr, args) + storeRcvr(rcvr, scratch) if argSize-ptrSize > 0 { - typedmemmovepartial(frametype, add(args, ptrSize, "argSize > ptrSize"), frame, ptrSize, argSize-ptrSize) + typedmemmovepartial(frametype, add(scratch, ptrSize, "argSize > ptrSize"), frame, ptrSize, argSize-ptrSize) } // Call. - call(frametype, fn, args, uint32(frametype.size), uint32(retOffset)) + // Call copies the arguments from scratch to the stack, calls fn, + // and then copies the results back into scratch. + call(frametype, fn, scratch, uint32(frametype.size), uint32(retOffset)) // Copy return values. On amd64p32, the beginning of return values // is 64-bit aligned, so the caller's frame layout (which doesn't have @@ -673,17 +700,21 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) { if runtime.GOARCH == "amd64p32" { callerRetOffset = align(argSize-ptrSize, 8) } - typedmemmovepartial(frametype, - add(frame, callerRetOffset, "frametype.size > retOffset"), - add(args, retOffset, "frametype.size > retOffset"), - retOffset, + // This copies to the stack. Write barriers are not needed. + memmove(add(frame, callerRetOffset, "frametype.size > retOffset"), + add(scratch, retOffset, "frametype.size > retOffset"), frametype.size-retOffset) } - // This is untyped because the frame is really a stack, even - // though it's a heap object. 
- memclrNoHeapPointers(args, frametype.size) - framePool.Put(args) + // Tell the runtime it can now depend on the return values + // being properly initialized. + *retValid = true + + // Clear the scratch space and put it back in the pool. + // This must happen after the statement above, so that the return + // values will always be scanned by someone. + typedmemclr(frametype, scratch) + framePool.Put(scratch) // See the comment in callReflect. runtime.KeepAlive(ctxt) @@ -1001,7 +1032,7 @@ func (v Value) InterfaceData() [2]uintptr { func (v Value) IsNil() bool { k := v.kind() switch k { - case Chan, Func, Map, Ptr: + case Chan, Func, Map, Ptr, UnsafePointer: if v.flag&flagMethod != 0 { return false } @@ -1085,14 +1116,7 @@ func (v Value) MapIndex(key Value) Value { typ := tt.elem fl := (v.flag | key.flag).ro() fl |= flag(typ.Kind()) - if !ifaceIndir(typ) { - return Value{typ, *(*unsafe.Pointer)(e), fl} - } - // Copy result so future changes to the map - // won't change the underlying value. - c := unsafe_New(typ) - typedmemmove(typ, c, e) - return Value{typ, c, fl | flagIndir} + return copyVal(typ, fl, e) } // MapKeys returns a slice containing all the keys present in the map, @@ -1122,20 +1146,96 @@ func (v Value) MapKeys() []Value { // we can do about it. break } - if ifaceIndir(keyType) { - // Copy result so future changes to the map - // won't change the underlying value. - c := unsafe_New(keyType) - typedmemmove(keyType, c, key) - a[i] = Value{keyType, c, fl | flagIndir} - } else { - a[i] = Value{keyType, *(*unsafe.Pointer)(key), fl} - } + a[i] = copyVal(keyType, fl, key) mapiternext(it) } return a[:i] } +// A MapIter is an iterator for ranging over a map. +// See Value.MapRange. +type MapIter struct { + m Value + it unsafe.Pointer +} + +// Key returns the key of the iterator's current map entry. 
+func (it *MapIter) Key() Value { + if it.it == nil { + panic("MapIter.Key called before Next") + } + if mapiterkey(it.it) == nil { + panic("MapIter.Key called on exhausted iterator") + } + + t := (*mapType)(unsafe.Pointer(it.m.typ)) + ktype := t.key + return copyVal(ktype, it.m.flag.ro()|flag(ktype.Kind()), mapiterkey(it.it)) +} + +// Value returns the value of the iterator's current map entry. +func (it *MapIter) Value() Value { + if it.it == nil { + panic("MapIter.Value called before Next") + } + if mapiterkey(it.it) == nil { + panic("MapIter.Value called on exhausted iterator") + } + + t := (*mapType)(unsafe.Pointer(it.m.typ)) + vtype := t.elem + return copyVal(vtype, it.m.flag.ro()|flag(vtype.Kind()), mapitervalue(it.it)) +} + +// Next advances the map iterator and reports whether there is another +// entry. It returns false when the iterator is exhausted; subsequent +// calls to Key, Value, or Next will panic. +func (it *MapIter) Next() bool { + if it.it == nil { + it.it = mapiterinit(it.m.typ, it.m.pointer()) + } else { + if mapiterkey(it.it) == nil { + panic("MapIter.Next called on exhausted iterator") + } + mapiternext(it.it) + } + return mapiterkey(it.it) != nil +} + +// MapRange returns a range iterator for a map. +// It panics if v's Kind is not Map. +// +// Call Next to advance the iterator, and Key/Value to access each entry. +// Next returns false when the iterator is exhausted. +// MapRange follows the same iteration semantics as a range statement. +// +// Example: +// +// iter := reflect.ValueOf(m).MapRange() +// for iter.Next() { +// k := iter.Key() +// v := iter.Value() +// ... +// } +// +func (v Value) MapRange() *MapIter { + v.mustBe(Map) + return &MapIter{m: v} +} + +// copyVal returns a Value containing the map key or value at ptr, +// allocating a new variable as needed. +func copyVal(typ *rtype, fl flag, ptr unsafe.Pointer) Value { + if ifaceIndir(typ) { + // Copy result so future changes to the map + // won't change the underlying value. 
+ c := unsafe_New(typ) + typedmemmove(typ, c, ptr) + return Value{typ, c, fl | flagIndir} + } + return Value{typ, *(*unsafe.Pointer)(ptr), fl} +} + // Method returns a function value corresponding to v's i'th method. // The arguments to a Call on the returned function should not include // a receiver; the returned function will always use v as the receiver. @@ -2554,6 +2654,9 @@ func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer //go:noescape func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer) +//go:noescape +func mapitervalue(it unsafe.Pointer) (value unsafe.Pointer) + //go:noescape func mapiternext(it unsafe.Pointer) @@ -2565,10 +2668,16 @@ func maplen(m unsafe.Pointer) int // back into arg+retoffset before returning. If copying result bytes back, // the caller must pass the argument frame type as argtype, so that // call can execute appropriate write barriers during the copy. +// +//go:linkname call runtime.reflectcall func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32) func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer) +// memmove copies size bytes to dst from src. No write barriers are used. +//go:noescape +func memmove(dst, src unsafe.Pointer, size uintptr) + // typedmemmove copies a value of type t to dst from src. //go:noescape func typedmemmove(t *rtype, dst, src unsafe.Pointer) @@ -2578,14 +2687,20 @@ func typedmemmove(t *rtype, dst, src unsafe.Pointer) //go:noescape func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size uintptr) +// typedmemclr zeros the value at ptr of type t. +//go:noescape +func typedmemclr(t *rtype, ptr unsafe.Pointer) + +// typedmemclrpartial is like typedmemclr but assumes that +// dst points off bytes into the value and only clears size bytes. +//go:noescape +func typedmemclrpartial(t *rtype, ptr unsafe.Pointer, off, size uintptr) + // typedslicecopy copies a slice of elemType values from src to dst, // returning the number of elements copied. 
//go:noescape func typedslicecopy(elemType *rtype, dst, src sliceHeader) int -//go:noescape -func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) - // Dummy annotation marking that the value x escapes, // for use in cases where the reflect code is so clever that // the compiler cannot follow. diff --git a/src/regexp/all_test.go b/src/regexp/all_test.go index 0fabeae59fcbe..623f82df72d3a 100644 --- a/src/regexp/all_test.go +++ b/src/regexp/all_test.go @@ -550,8 +550,8 @@ func TestOnePassCutoff(t *testing.T) { if err != nil { t.Fatalf("compile: %v", err) } - if compileOnePass(p) != notOnePass { - t.Fatalf("makeOnePass succeeded; wanted notOnePass") + if compileOnePass(p) != nil { + t.Fatalf("makeOnePass succeeded; wanted nil") } } @@ -859,3 +859,26 @@ func BenchmarkQuoteMetaNone(b *testing.B) { sink = QuoteMeta(s) } } + +func TestDeepEqual(t *testing.T) { + re1 := MustCompile("a.*b.*c.*d") + re2 := MustCompile("a.*b.*c.*d") + if !reflect.DeepEqual(re1, re2) { // has always been true, since Go 1. + t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re1.MatchString("abcdefghijklmn") + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re2.MatchString("abcdefghijklmn") + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } + + re2.MatchString(strings.Repeat("abcdefghijklmn", 100)) + if !reflect.DeepEqual(re1, re2) { + t.Errorf("DeepEqual(re1, re2) = false, want true") + } +} diff --git a/src/regexp/backtrack.go b/src/regexp/backtrack.go index 440bf7ffc5988..9fb7d1e4937a5 100644 --- a/src/regexp/backtrack.go +++ b/src/regexp/backtrack.go @@ -14,7 +14,10 @@ package regexp -import "regexp/syntax" +import ( + "regexp/syntax" + "sync" +) // A job is an entry on the backtracker's job stack. It holds // the instruction pc and the position in the input. @@ -32,15 +35,29 @@ const ( // bitState holds state for the backtracker. 
type bitState struct { - prog *syntax.Prog + end int + cap []int + matchcap []int + jobs []job + visited []uint32 + + inputs inputs +} + +var bitStatePool sync.Pool - end int - cap []int - jobs []job - visited []uint32 +func newBitState() *bitState { + b, ok := bitStatePool.Get().(*bitState) + if !ok { + b = new(bitState) + } + return b } -var notBacktrack *bitState = nil +func freeBitState(b *bitState) { + b.inputs.clear() + bitStatePool.Put(b) +} // maxBitStateLen returns the maximum length of a string to search with // the backtracker using prog. @@ -51,18 +68,6 @@ func maxBitStateLen(prog *syntax.Prog) int { return maxBacktrackVector / len(prog.Inst) } -// newBitState returns a new bitState for the given prog, -// or notBacktrack if the size of the prog exceeds the maximum size that -// the backtracker will be run for. -func newBitState(prog *syntax.Prog) *bitState { - if !shouldBacktrack(prog) { - return notBacktrack - } - return &bitState{ - prog: prog, - } -} - // shouldBacktrack reports whether the program is too // long for the backtracker to run. func shouldBacktrack(prog *syntax.Prog) bool { @@ -72,7 +77,7 @@ func shouldBacktrack(prog *syntax.Prog) bool { // reset resets the state of the backtracker. // end is the end position in the input. // ncap is the number of captures. 
-func (b *bitState) reset(end int, ncap int) { +func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { b.end = end if cap(b.jobs) == 0 { @@ -81,7 +86,7 @@ func (b *bitState) reset(end int, ncap int) { b.jobs = b.jobs[:0] } - visitedSize := (len(b.prog.Inst)*(end+1) + visitedBits - 1) / visitedBits + visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits if cap(b.visited) < visitedSize { b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) } else { @@ -99,6 +104,15 @@ func (b *bitState) reset(end int, ncap int) { for i := range b.cap { b.cap[i] = -1 } + + if cap(b.matchcap) < ncap { + b.matchcap = make([]int, ncap) + } else { + b.matchcap = b.matchcap[:ncap] + } + for i := range b.matchcap { + b.matchcap[i] = -1 + } } // shouldVisit reports whether the combination of (pc, pos) has not @@ -114,20 +128,19 @@ func (b *bitState) shouldVisit(pc uint32, pos int) bool { // push pushes (pc, pos, arg) onto the job stack if it should be // visited. -func (b *bitState) push(pc uint32, pos int, arg bool) { +func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) { // Only check shouldVisit when arg is false. // When arg is true, we are continuing a previous visit. - if b.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) { + if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) { b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos}) } } // tryBacktrack runs a backtracking search starting at pos. -func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { - longest := m.re.longest - m.matched = false +func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { + longest := re.longest - b.push(pc, pos, false) + b.push(re, pc, pos, false) for len(b.jobs) > 0 { l := len(b.jobs) - 1 // Pop job off the stack. 
@@ -150,7 +163,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { } Skip: - inst := b.prog.Inst[pc] + inst := re.prog.Inst[pc] switch inst.Op { default: @@ -172,23 +185,23 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { pc = inst.Arg goto CheckAndLoop } else { - b.push(pc, pos, true) + b.push(re, pc, pos, true) pc = inst.Out goto CheckAndLoop } case syntax.InstAltMatch: // One opcode consumes runes; the other leads to match. - switch b.prog.Inst[inst.Out].Op { + switch re.prog.Inst[inst.Out].Op { case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: // inst.Arg is the match. - b.push(inst.Arg, pos, false) + b.push(re, inst.Arg, pos, false) pc = inst.Arg pos = b.end goto CheckAndLoop } // inst.Out is the match - non-greedy - b.push(inst.Out, b.end, false) + b.push(re, inst.Out, b.end, false) pc = inst.Out goto CheckAndLoop @@ -236,7 +249,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { } else { if 0 <= inst.Arg && inst.Arg < uint32(len(b.cap)) { // Capture pos to register, but save old value. - b.push(pc, b.cap[inst.Arg], true) // come back when we're done. + b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done. b.cap[inst.Arg] = pos } pc = inst.Out @@ -244,7 +257,8 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { } case syntax.InstEmptyWidth: - if syntax.EmptyOp(inst.Arg)&^i.context(pos) != 0 { + flag := i.context(pos) + if !flag.match(syntax.EmptyOp(inst.Arg)) { continue } pc = inst.Out @@ -258,8 +272,7 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { // We found a match. If the caller doesn't care // where the match is, no point going further. if len(b.cap) == 0 { - m.matched = true - return m.matched + return true } // Record best match so far. 
@@ -268,19 +281,18 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { if len(b.cap) > 1 { b.cap[1] = pos } - if !m.matched || (longest && pos > 0 && pos > m.matchcap[1]) { - copy(m.matchcap, b.cap) + if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) { + copy(b.matchcap, b.cap) } - m.matched = true // If going for first match, we're done. if !longest { - return m.matched + return true } // If we used the entire text, no longer match is possible. if pos == b.end { - return m.matched + return true } // Otherwise, continue on in hope of a longer match. @@ -288,65 +300,68 @@ func (m *machine) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { } } - return m.matched + return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0 } // backtrack runs a backtracking search of prog on the input starting at pos. -func (m *machine) backtrack(i input, pos int, end int, ncap int) bool { - if !i.canCheckPrefix() { - panic("backtrack called for a RuneReader") - } - - startCond := m.re.cond +func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int { + startCond := re.cond if startCond == ^syntax.EmptyOp(0) { // impossible - return false + return nil } if startCond&syntax.EmptyBeginText != 0 && pos != 0 { // Anchored match, past beginning of text. - return false + return nil } - b := m.b - b.reset(end, ncap) - - m.matchcap = m.matchcap[:ncap] - for i := range m.matchcap { - m.matchcap[i] = -1 - } + b := newBitState() + i, end := b.inputs.init(nil, ib, is) + b.reset(re.prog, end, ncap) // Anchored search must start at the beginning of the input if startCond&syntax.EmptyBeginText != 0 { if len(b.cap) > 0 { b.cap[0] = pos } - return m.tryBacktrack(b, i, uint32(m.p.Start), pos) - } + if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + freeBitState(b) + return nil + } + } else { - // Unanchored search, starting from each possible text position. 
- // Notice that we have to try the empty string at the end of - // the text, so the loop condition is pos <= end, not pos < end. - // This looks like it's quadratic in the size of the text, - // but we are not clearing visited between calls to TrySearch, - // so no work is duplicated and it ends up still being linear. - width := -1 - for ; pos <= end && width != 0; pos += width { - if len(m.re.prefix) > 0 { - // Match requires literal prefix; fast search for it. - advance := i.index(m.re, pos) - if advance < 0 { - return false + // Unanchored search, starting from each possible text position. + // Notice that we have to try the empty string at the end of + // the text, so the loop condition is pos <= end, not pos < end. + // This looks like it's quadratic in the size of the text, + // but we are not clearing visited between calls to TrySearch, + // so no work is duplicated and it ends up still being linear. + width := -1 + for ; pos <= end && width != 0; pos += width { + if len(re.prefix) > 0 { + // Match requires literal prefix; fast search for it. + advance := i.index(re, pos) + if advance < 0 { + freeBitState(b) + return nil + } + pos += advance } - pos += advance - } - if len(b.cap) > 0 { - b.cap[0] = pos - } - if m.tryBacktrack(b, i, uint32(m.p.Start), pos) { - // Match must be leftmost; done. - return true + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) } - _, width = i.step(pos) + freeBitState(b) + return nil } - return false + +Match: + dstCap = append(dstCap, b.matchcap...) 
+ freeBitState(b) + return dstCap } diff --git a/src/regexp/example_test.go b/src/regexp/example_test.go index d65464665f779..3008c56b6bea9 100644 --- a/src/regexp/example_test.go +++ b/src/regexp/example_test.go @@ -25,12 +25,26 @@ func Example() { // false } +func ExampleMatch() { + matched, err := regexp.Match(`foo.*`, []byte(`seafood`)) + fmt.Println(matched, err) + matched, err = regexp.Match(`bar.*`, []byte(`seafood`)) + fmt.Println(matched, err) + matched, err = regexp.Match(`a(b`, []byte(`seafood`)) + fmt.Println(matched, err) + + // Output: + // true + // false + // false error parsing regexp: missing closing ): `a(b` +} + func ExampleMatchString() { - matched, err := regexp.MatchString("foo.*", "seafood") + matched, err := regexp.MatchString(`foo.*`, "seafood") fmt.Println(matched, err) - matched, err = regexp.MatchString("bar.*", "seafood") + matched, err = regexp.MatchString(`bar.*`, "seafood") fmt.Println(matched, err) - matched, err = regexp.MatchString("a(b", "seafood") + matched, err = regexp.MatchString(`a(b`, "seafood") fmt.Println(matched, err) // Output: // true @@ -39,13 +53,53 @@ func ExampleMatchString() { } func ExampleQuoteMeta() { - fmt.Println(regexp.QuoteMeta("Escaping symbols like: .+*?()|[]{}^$")) + fmt.Println(regexp.QuoteMeta(`Escaping symbols like: .+*?()|[]{}^$`)) // Output: // Escaping symbols like: \.\+\*\?\(\)\|\[\]\{\}\^\$ } +func ExampleRegexp_Find() { + re := regexp.MustCompile(`foo.?`) + fmt.Printf("%q\n", re.Find([]byte(`seafood fool`))) + + // Output: + // "food" +} + +func ExampleRegexp_FindAll() { + re := regexp.MustCompile(`foo.?`) + fmt.Printf("%q\n", re.FindAll([]byte(`seafood fool`), -1)) + + // Output: + // ["food" "fool"] +} + +func ExampleRegexp_FindAllSubmatch() { + re := regexp.MustCompile(`foo(.?)`) + fmt.Printf("%q\n", re.FindAllSubmatch([]byte(`seafood fool`), -1)) + + // Output: + // [["food" "d"] ["fool" "l"]] +} + +func ExampleRegexp_FindSubmatch() { + re := regexp.MustCompile(`foo(.?)`) + 
fmt.Printf("%q\n", re.FindSubmatch([]byte(`seafood fool`))) + + // Output: + // ["food" "d"] +} + +func ExampleRegexp_Match() { + re := regexp.MustCompile(`foo.?`) + fmt.Println(re.Match([]byte(`seafood fool`))) + + // Output: + // true +} + func ExampleRegexp_FindString() { - re := regexp.MustCompile("foo.?") + re := regexp.MustCompile(`foo.?`) fmt.Printf("%q\n", re.FindString("seafood fool")) fmt.Printf("%q\n", re.FindString("meat")) // Output: @@ -54,7 +108,7 @@ func ExampleRegexp_FindString() { } func ExampleRegexp_FindStringIndex() { - re := regexp.MustCompile("ab?") + re := regexp.MustCompile(`ab?`) fmt.Println(re.FindStringIndex("tablett")) fmt.Println(re.FindStringIndex("foo") == nil) // Output: @@ -63,7 +117,7 @@ func ExampleRegexp_FindStringIndex() { } func ExampleRegexp_FindStringSubmatch() { - re := regexp.MustCompile("a(x*)b(y|z)c") + re := regexp.MustCompile(`a(x*)b(y|z)c`) fmt.Printf("%q\n", re.FindStringSubmatch("-axxxbyc-")) fmt.Printf("%q\n", re.FindStringSubmatch("-abzc-")) // Output: @@ -72,7 +126,7 @@ func ExampleRegexp_FindStringSubmatch() { } func ExampleRegexp_FindAllString() { - re := regexp.MustCompile("a.") + re := regexp.MustCompile(`a.`) fmt.Println(re.FindAllString("paranormal", -1)) fmt.Println(re.FindAllString("paranormal", 2)) fmt.Println(re.FindAllString("graal", -1)) @@ -85,7 +139,7 @@ func ExampleRegexp_FindAllString() { } func ExampleRegexp_FindAllStringSubmatch() { - re := regexp.MustCompile("a(x*)b") + re := regexp.MustCompile(`a(x*)b`) fmt.Printf("%q\n", re.FindAllStringSubmatch("-ab-", -1)) fmt.Printf("%q\n", re.FindAllStringSubmatch("-axxb-", -1)) fmt.Printf("%q\n", re.FindAllStringSubmatch("-ab-axb-", -1)) @@ -98,7 +152,7 @@ func ExampleRegexp_FindAllStringSubmatch() { } func ExampleRegexp_FindAllStringSubmatchIndex() { - re := regexp.MustCompile("a(x*)b") + re := regexp.MustCompile(`a(x*)b`) // Indices: // 01234567 012345678 // -ab-axb- -axxb-ab- @@ -116,7 +170,7 @@ func ExampleRegexp_FindAllStringSubmatchIndex() { } func 
ExampleRegexp_MatchString() { - re := regexp.MustCompile("(gopher){2}") + re := regexp.MustCompile(`(gopher){2}`) fmt.Println(re.MatchString("gopher")) fmt.Println(re.MatchString("gophergopher")) fmt.Println(re.MatchString("gophergophergopher")) @@ -127,7 +181,7 @@ func ExampleRegexp_MatchString() { } func ExampleRegexp_ReplaceAllLiteralString() { - re := regexp.MustCompile("a(x*)b") + re := regexp.MustCompile(`a(x*)b`) fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "T")) fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "$1")) fmt.Println(re.ReplaceAllLiteralString("-ab-axxb-", "${1}")) @@ -138,7 +192,7 @@ func ExampleRegexp_ReplaceAllLiteralString() { } func ExampleRegexp_ReplaceAllString() { - re := regexp.MustCompile("a(x*)b") + re := regexp.MustCompile(`a(x*)b`) fmt.Println(re.ReplaceAllString("-ab-axxb-", "T")) fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1")) fmt.Println(re.ReplaceAllString("-ab-axxb-", "$1W")) @@ -151,7 +205,7 @@ func ExampleRegexp_ReplaceAllString() { } func ExampleRegexp_SubexpNames() { - re := regexp.MustCompile("(?P[a-zA-Z]+) (?P[a-zA-Z]+)") + re := regexp.MustCompile(`(?P[a-zA-Z]+) (?P[a-zA-Z]+)`) fmt.Println(re.MatchString("Alan Turing")) fmt.Printf("%q\n", re.SubexpNames()) reversed := fmt.Sprintf("${%s} ${%s}", re.SubexpNames()[2], re.SubexpNames()[1]) @@ -165,12 +219,12 @@ func ExampleRegexp_SubexpNames() { } func ExampleRegexp_Split() { - a := regexp.MustCompile("a") + a := regexp.MustCompile(`a`) fmt.Println(a.Split("banana", -1)) fmt.Println(a.Split("banana", 0)) fmt.Println(a.Split("banana", 1)) fmt.Println(a.Split("banana", 2)) - zp := regexp.MustCompile("z+") + zp := regexp.MustCompile(`z+`) fmt.Println(zp.Split("pizza", -1)) fmt.Println(zp.Split("pizza", 0)) fmt.Println(zp.Split("pizza", 1)) diff --git a/src/regexp/exec.go b/src/regexp/exec.go index 1c7b02d1cd8be..efe764e2dcad9 100644 --- a/src/regexp/exec.go +++ b/src/regexp/exec.go @@ -7,6 +7,7 @@ package regexp import ( "io" "regexp/syntax" + "sync" ) // A queue 
is a 'sparse array' holding pending threads of execution. @@ -35,54 +36,60 @@ type thread struct { // A machine holds all the state during an NFA simulation for p. type machine struct { - re *Regexp // corresponding Regexp - p *syntax.Prog // compiled program - op *onePassProg // compiled onepass program, or notOnePass - maxBitStateLen int // max length of string to search with bitstate - b *bitState // state for backtracker, allocated lazily - q0, q1 queue // two queues for runq, nextq - pool []*thread // pool of available threads - matched bool // whether a match was found - matchcap []int // capture information for the match + re *Regexp // corresponding Regexp + p *syntax.Prog // compiled program + q0, q1 queue // two queues for runq, nextq + pool []*thread // pool of available threads + matched bool // whether a match was found + matchcap []int // capture information for the match + inputs inputs +} + +type inputs struct { // cached inputs, to avoid allocation - inputBytes inputBytes - inputString inputString - inputReader inputReader + bytes inputBytes + string inputString + reader inputReader } -func (m *machine) newInputBytes(b []byte) input { - m.inputBytes.str = b - return &m.inputBytes +func (i *inputs) newBytes(b []byte) input { + i.bytes.str = b + return &i.bytes } -func (m *machine) newInputString(s string) input { - m.inputString.str = s - return &m.inputString +func (i *inputs) newString(s string) input { + i.string.str = s + return &i.string } -func (m *machine) newInputReader(r io.RuneReader) input { - m.inputReader.r = r - m.inputReader.atEOT = false - m.inputReader.pos = 0 - return &m.inputReader +func (i *inputs) newReader(r io.RuneReader) input { + i.reader.r = r + i.reader.atEOT = false + i.reader.pos = 0 + return &i.reader +} + +func (i *inputs) clear() { + // We need to clear 1 of these. + // Avoid the expense of clearing the others (pointer write barrier). 
+ if i.bytes.str != nil { + i.bytes.str = nil + } else if i.reader.r != nil { + i.reader.r = nil + } else { + i.string.str = "" + } } -// progMachine returns a new machine running the prog p. -func progMachine(p *syntax.Prog, op *onePassProg) *machine { - m := &machine{p: p, op: op} - n := len(m.p.Inst) - m.q0 = queue{make([]uint32, n), make([]entry, 0, n)} - m.q1 = queue{make([]uint32, n), make([]entry, 0, n)} - ncap := p.NumCap - if ncap < 2 { - ncap = 2 +func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) { + if r != nil { + return i.newReader(r), 0 } - if op == notOnePass { - m.maxBitStateLen = maxBitStateLen(p) + if b != nil { + return i.newBytes(b), len(b) } - m.matchcap = make([]int, ncap) - return m + return i.newString(s), len(s) } func (m *machine) init(ncap int) { @@ -107,6 +114,61 @@ func (m *machine) alloc(i *syntax.Inst) *thread { return t } +// A lazyFlag is a lazily-evaluated syntax.EmptyOp, +// for checking zero-width flags like ^ $ \A \z \B \b. +// It records the pair of relevant runes and does not +// determine the implied flags until absolutely necessary +// (most of the time, that means never). 
+type lazyFlag uint64 + +func newLazyFlag(r1, r2 rune) lazyFlag { + return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2))) +} + +func (f lazyFlag) match(op syntax.EmptyOp) bool { + if op == 0 { + return true + } + r1 := rune(f >> 32) + if op&syntax.EmptyBeginLine != 0 { + if r1 != '\n' && r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginLine + } + if op&syntax.EmptyBeginText != 0 { + if r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginText + } + if op == 0 { + return true + } + r2 := rune(f) + if op&syntax.EmptyEndLine != 0 { + if r2 != '\n' && r2 >= 0 { + return false + } + op &^= syntax.EmptyEndLine + } + if op&syntax.EmptyEndText != 0 { + if r2 >= 0 { + return false + } + op &^= syntax.EmptyEndText + } + if op == 0 { + return true + } + if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) { + op &^= syntax.EmptyWordBoundary + } else { + op &^= syntax.EmptyNoWordBoundary + } + return op == 0 +} + // match runs the machine over the input starting at pos. // It reports whether a match was found. // If so, m.matchcap holds the submatch information. @@ -126,9 +188,9 @@ func (m *machine) match(i input, pos int) bool { if r != endOfText { r1, width1 = i.step(pos + width) } - var flag syntax.EmptyOp + var flag lazyFlag if pos == 0 { - flag = syntax.EmptyOpContext(-1, r) + flag = newLazyFlag(-1, r) } else { flag = i.context(pos) } @@ -157,10 +219,10 @@ func (m *machine) match(i input, pos int) bool { if len(m.matchcap) > 0 { m.matchcap[0] = pos } - m.add(runq, uint32(m.p.Start), pos, m.matchcap, flag, nil) + m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil) } - flag = syntax.EmptyOpContext(r, r1) - m.step(runq, nextq, pos, pos+width, r, flag) + flag = newLazyFlag(r, r1) + m.step(runq, nextq, pos, pos+width, r, &flag) if width == 0 { break } @@ -195,7 +257,7 @@ func (m *machine) clear(q *queue) { // The step processes the rune c (which may be endOfText), // which starts at position pos and ends at nextPos. 
// nextCond gives the setting for the empty-width flags after c. -func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond syntax.EmptyOp) { +func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) { longest := m.re.longest for j := 0; j < len(runq.dense); j++ { d := &runq.dense[j] @@ -252,7 +314,8 @@ func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond sy // It also recursively adds an entry for all instructions reachable from pc by following // empty-width conditions satisfied by cond. pos gives the current position // in the input. -func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.EmptyOp, t *thread) *thread { +func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread { +Again: if pc == 0 { return t } @@ -275,13 +338,16 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty // nothing case syntax.InstAlt, syntax.InstAltMatch: t = m.add(q, i.Out, pos, cap, cond, t) - t = m.add(q, i.Arg, pos, cap, cond, t) + pc = i.Arg + goto Again case syntax.InstEmptyWidth: - if syntax.EmptyOp(i.Arg)&^cond == 0 { - t = m.add(q, i.Out, pos, cap, cond, t) + if cond.match(syntax.EmptyOp(i.Arg)) { + pc = i.Out + goto Again } case syntax.InstNop: - t = m.add(q, i.Out, pos, cap, cond, t) + pc = i.Out + goto Again case syntax.InstCapture: if int(i.Arg) < len(cap) { opos := cap[i.Arg] @@ -289,7 +355,8 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty m.add(q, i.Out, pos, cap, cond, nil) cap[i.Arg] = opos } else { - t = m.add(q, i.Out, pos, cap, cond, t) + pc = i.Out + goto Again } case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: if t == nil { @@ -306,85 +373,112 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty return t } -// onepass runs the machine over the input starting at pos. 
-// It reports whether a match was found. -// If so, m.matchcap holds the submatch information. -// ncap is the number of captures. -func (m *machine) onepass(i input, pos, ncap int) bool { - startCond := m.re.cond +type onePassMachine struct { + inputs inputs + matchcap []int +} + +var onePassPool sync.Pool + +func newOnePassMachine() *onePassMachine { + m, ok := onePassPool.Get().(*onePassMachine) + if !ok { + m = new(onePassMachine) + } + return m +} + +func freeOnePassMachine(m *onePassMachine) { + m.inputs.clear() + onePassPool.Put(m) +} + +// doOnePass implements r.doExecute using the one-pass execution engine. +func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int { + startCond := re.cond if startCond == ^syntax.EmptyOp(0) { // impossible - return false + return nil } - m.matched = false - m.matchcap = m.matchcap[:ncap] + + m := newOnePassMachine() + if cap(m.matchcap) < ncap { + m.matchcap = make([]int, ncap) + } else { + m.matchcap = m.matchcap[:ncap] + } + + matched := false for i := range m.matchcap { m.matchcap[i] = -1 } + + i, _ := m.inputs.init(ir, ib, is) + r, r1 := endOfText, endOfText width, width1 := 0, 0 r, width = i.step(pos) if r != endOfText { r1, width1 = i.step(pos + width) } - var flag syntax.EmptyOp + var flag lazyFlag if pos == 0 { - flag = syntax.EmptyOpContext(-1, r) + flag = newLazyFlag(-1, r) } else { flag = i.context(pos) } - pc := m.op.Start - inst := m.op.Inst[pc] + pc := re.onepass.Start + inst := re.onepass.Inst[pc] // If there is a simple literal prefix, skip over it. - if pos == 0 && syntax.EmptyOp(inst.Arg)&^flag == 0 && - len(m.re.prefix) > 0 && i.canCheckPrefix() { + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { // Match requires literal prefix; fast search for it. 
- if !i.hasPrefix(m.re) { - return m.matched + if !i.hasPrefix(re) { + goto Return } - pos += len(m.re.prefix) + pos += len(re.prefix) r, width = i.step(pos) r1, width1 = i.step(pos + width) flag = i.context(pos) - pc = int(m.re.prefixEnd) + pc = int(re.prefixEnd) } for { - inst = m.op.Inst[pc] + inst = re.onepass.Inst[pc] pc = int(inst.Out) switch inst.Op { default: panic("bad inst") case syntax.InstMatch: - m.matched = true + matched = true if len(m.matchcap) > 0 { m.matchcap[0] = 0 m.matchcap[1] = pos } - return m.matched + goto Return case syntax.InstRune: if !inst.MatchRune(r) { - return m.matched + goto Return } case syntax.InstRune1: if r != inst.Rune[0] { - return m.matched + goto Return } case syntax.InstRuneAny: // Nothing case syntax.InstRuneAnyNotNL: if r == '\n' { - return m.matched + goto Return } // peek at the input rune to see which branch of the Alt to take case syntax.InstAlt, syntax.InstAltMatch: pc = int(onePassNext(&inst, r)) continue case syntax.InstFail: - return m.matched + goto Return case syntax.InstNop: continue case syntax.InstEmptyWidth: - if syntax.EmptyOp(inst.Arg)&^flag != 0 { - return m.matched + if !flag.match(syntax.EmptyOp(inst.Arg)) { + goto Return } continue case syntax.InstCapture: @@ -396,14 +490,23 @@ func (m *machine) onepass(i input, pos, ncap int) bool { if width == 0 { break } - flag = syntax.EmptyOpContext(r, r1) + flag = newLazyFlag(r, r1) pos += width r, width = r1, width1 if r != endOfText { r1, width1 = i.step(pos + width) } } - return m.matched + +Return: + if !matched { + freeOnePassMachine(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + freeOnePassMachine(m) + return dstCap } // doMatch reports whether either r, b or s match the regexp. @@ -416,43 +519,28 @@ func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool { // // nil is returned if no matches are found and non-nil if matches are found. 
func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int { - m := re.get() - var i input - var size int - if r != nil { - i = m.newInputReader(r) - } else if b != nil { - i = m.newInputBytes(b) - size = len(b) - } else { - i = m.newInputString(s) - size = len(s) + if dstCap == nil { + // Make sure 'return dstCap' is non-nil. + dstCap = arrayNoInts[:0:0] } - if m.op != notOnePass { - if !m.onepass(i, pos, ncap) { - re.put(m) - return nil - } - } else if size < m.maxBitStateLen && r == nil { - if m.b == nil { - m.b = newBitState(m.p) - } - if !m.backtrack(i, pos, size, ncap) { - re.put(m) - return nil - } - } else { - m.init(ncap) - if !m.match(i, pos) { - re.put(m) - return nil - } + + if re.onepass != nil { + return re.doOnePass(r, b, s, pos, ncap, dstCap) } - dstCap = append(dstCap, m.matchcap...) - if dstCap == nil { - // Keep the promise of returning non-nil value on match. - dstCap = arrayNoInts[:0] + if r == nil && len(b)+len(s) < re.maxBitStateLen { + return re.backtrack(b, s, pos, ncap, dstCap) } + + m := re.get() + i, _ := m.inputs.init(r, b, s) + + m.init(ncap) + if !m.match(i, pos) { + re.put(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) 
re.put(m) return dstCap } diff --git a/src/regexp/exec_test.go b/src/regexp/exec_test.go index 5f8e747b17bc1..148921932899b 100644 --- a/src/regexp/exec_test.go +++ b/src/regexp/exec_test.go @@ -684,7 +684,7 @@ func BenchmarkMatch(b *testing.B) { func BenchmarkMatch_onepass_regex(b *testing.B) { isRaceBuilder := strings.HasSuffix(testenv.Builder(), "-race") r := MustCompile(`(?s)\A.*\z`) - if r.get().op == notOnePass { + if r.onepass == nil { b.Fatalf("want onepass regex, but %q is not onepass", r) } for _, size := range benchSizes { @@ -692,18 +692,12 @@ func BenchmarkMatch_onepass_regex(b *testing.B) { continue } t := makeText(size.n) - bs := make([][]byte, len(t)) - for i, s := range t { - bs[i] = []byte{s} - } b.Run(size.name, func(b *testing.B) { b.SetBytes(int64(size.n)) b.ReportAllocs() for i := 0; i < b.N; i++ { - for _, byts := range bs { - if !r.Match(byts) { - b.Fatal("not match!") - } + if !r.Match(t) { + b.Fatal("not match!") } } }) diff --git a/src/regexp/onepass.go b/src/regexp/onepass.go index 125be59a7d84b..2f3ce6f9f6cb8 100644 --- a/src/regexp/onepass.go +++ b/src/regexp/onepass.go @@ -294,12 +294,12 @@ var anyRune = []rune{0, unicode.MaxRune} // makeOnePass creates a onepass Prog, if possible. It is possible if at any alt, // the match engine can always tell which branch to take. The routine may modify // p if it is turned into a onepass Prog. If it isn't possible for this to be a -// onepass Prog, the Prog notOnePass is returned. makeOnePass is recursive +// onepass Prog, the Prog nil is returned. makeOnePass is recursive // to the size of the Prog. func makeOnePass(p *onePassProg) *onePassProg { // If the machine is very long, it's not worth the time to check if we can use one pass. 
if len(p.Inst) >= 1000 { - return notOnePass + return nil } var ( @@ -446,11 +446,11 @@ func makeOnePass(p *onePassProg) *onePassProg { visitQueue.clear() pc := instQueue.next() if !check(pc, m) { - p = notOnePass + p = nil break } } - if p != notOnePass { + if p != nil { for i := range p.Inst { p.Inst[i].Rune = onePassRunes[i] } @@ -458,20 +458,18 @@ func makeOnePass(p *onePassProg) *onePassProg { return p } -var notOnePass *onePassProg = nil - // compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog -// can be recharacterized as a one-pass regexp program, or syntax.notOnePass if the +// can be recharacterized as a one-pass regexp program, or nil if the // Prog cannot be converted. For a one pass prog, the fundamental condition that must // be true is: at any InstAlt, there must be no ambiguity about what branch to take. func compileOnePass(prog *syntax.Prog) (p *onePassProg) { if prog.Start == 0 { - return notOnePass + return nil } // onepass regexp is anchored if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth || syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { - return notOnePass + return nil } // every instruction leading to InstMatch must be EmptyEndText for _, inst := range prog.Inst { @@ -479,18 +477,18 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) { switch inst.Op { default: if opOut == syntax.InstMatch { - return notOnePass + return nil } case syntax.InstAlt, syntax.InstAltMatch: if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch { - return notOnePass + return nil } case syntax.InstEmptyWidth: if opOut == syntax.InstMatch { if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText { continue } - return notOnePass + return nil } } } @@ -501,7 +499,7 @@ func compileOnePass(prog *syntax.Prog) (p *onePassProg) { // checkAmbiguity on InstAlts, build onepass Prog if possible p = makeOnePass(p) - if p 
!= nil { cleanupOnePass(p, prog) } return p diff --git a/src/regexp/onepass_test.go b/src/regexp/onepass_test.go index b1caa44515035..a0f2e39048938 100644 --- a/src/regexp/onepass_test.go +++ b/src/regexp/onepass_test.go @@ -134,47 +134,45 @@ func TestMergeRuneSet(t *testing.T) { } } -var onePass = &onePassProg{} - var onePassTests = []struct { - re string - onePass *onePassProg + re string + isOnePass bool }{ - {`^(?:a|(?:a*))$`, notOnePass}, - {`^(?:(a)|(?:a*))$`, notOnePass}, - {`^(?:(?:(?:.(?:$))?))$`, onePass}, - {`^abcd$`, onePass}, - {`^(?:(?:a{0,})*?)$`, onePass}, - {`^(?:(?:a+)*)$`, onePass}, - {`^(?:(?:a|(?:aa)))$`, onePass}, - {`^(?:[^\s\S])$`, onePass}, - {`^(?:(?:a{3,4}){0,})$`, notOnePass}, - {`^(?:(?:(?:a*)+))$`, onePass}, - {`^[a-c]+$`, onePass}, - {`^[a-c]*$`, onePass}, - {`^(?:a*)$`, onePass}, - {`^(?:(?:aa)|a)$`, onePass}, - {`^[a-c]*`, notOnePass}, - {`^...$`, onePass}, - {`^(?:a|(?:aa))$`, onePass}, - {`^a((b))c$`, onePass}, - {`^a.[l-nA-Cg-j]?e$`, onePass}, - {`^a((b))$`, onePass}, - {`^a(?:(b)|(c))c$`, onePass}, - {`^a(?:(b*)|(c))c$`, notOnePass}, - {`^a(?:b|c)$`, onePass}, - {`^a(?:b?|c)$`, onePass}, - {`^a(?:b?|c?)$`, notOnePass}, - {`^a(?:b?|c+)$`, onePass}, - {`^a(?:b+|(bc))d$`, notOnePass}, - {`^a(?:bc)+$`, onePass}, - {`^a(?:[bcd])+$`, onePass}, - {`^a((?:[bcd])+)$`, onePass}, - {`^a(:?b|c)*d$`, onePass}, - {`^.bc(d|e)*$`, onePass}, - {`^(?:(?:aa)|.)$`, notOnePass}, - {`^(?:(?:a{1,2}){1,2})$`, notOnePass}, - {`^l` + strings.Repeat("o", 2<<8) + `ng$`, onePass}, + {`^(?:a|(?:a*))$`, false}, + {`^(?:(a)|(?:a*))$`, false}, + {`^(?:(?:(?:.(?:$))?))$`, true}, + {`^abcd$`, true}, + {`^(?:(?:a{0,})*?)$`, true}, + {`^(?:(?:a+)*)$`, true}, + {`^(?:(?:a|(?:aa)))$`, true}, + {`^(?:[^\s\S])$`, true}, + {`^(?:(?:a{3,4}){0,})$`, false}, + {`^(?:(?:(?:a*)+))$`, true}, + {`^[a-c]+$`, true}, + {`^[a-c]*$`, true}, + {`^(?:a*)$`, true}, + {`^(?:(?:aa)|a)$`, true}, + {`^[a-c]*`, false}, + {`^...$`, true}, + {`^(?:a|(?:aa))$`, true}, + {`^a((b))c$`, true}, + 
{`^a.[l-nA-Cg-j]?e$`, true}, + {`^a((b))$`, true}, + {`^a(?:(b)|(c))c$`, true}, + {`^a(?:(b*)|(c))c$`, false}, + {`^a(?:b|c)$`, true}, + {`^a(?:b?|c)$`, true}, + {`^a(?:b?|c?)$`, false}, + {`^a(?:b?|c+)$`, true}, + {`^a(?:b+|(bc))d$`, false}, + {`^a(?:bc)+$`, true}, + {`^a(?:[bcd])+$`, true}, + {`^a((?:[bcd])+)$`, true}, + {`^a(:?b|c)*d$`, true}, + {`^.bc(d|e)*$`, true}, + {`^(?:(?:aa)|.)$`, false}, + {`^(?:(?:a{1,2}){1,2})$`, false}, + {`^l` + strings.Repeat("o", 2<<8) + `ng$`, true}, } func TestCompileOnePass(t *testing.T) { @@ -194,9 +192,9 @@ func TestCompileOnePass(t *testing.T) { t.Errorf("Compile(%q) got err:%s, want success", test.re, err) continue } - onePass = compileOnePass(p) - if (onePass == notOnePass) != (test.onePass == notOnePass) { - t.Errorf("CompileOnePass(%q) got %v, expected %v", test.re, onePass, test.onePass) + isOnePass := compileOnePass(p) != nil + if isOnePass != test.isOnePass { + t.Errorf("CompileOnePass(%q) got isOnePass=%v, expected %v", test.re, isOnePass, test.isOnePass) } } } @@ -216,8 +214,8 @@ func TestRunOnePass(t *testing.T) { t.Errorf("Compile(%q): got err: %s", test.re, err) continue } - if re.onepass == notOnePass { - t.Errorf("Compile(%q): got notOnePass, want one-pass", test.re) + if re.onepass == nil { + t.Errorf("Compile(%q): got nil, want one-pass", test.re) continue } if !re.MatchString(test.match) { @@ -227,21 +225,11 @@ func TestRunOnePass(t *testing.T) { } func BenchmarkCompileOnepass(b *testing.B) { - for _, test := range onePassTests { - if test.onePass == notOnePass { - continue - } - name := test.re - if len(name) > 20 { - name = name[:20] + "..." 
+ b.ReportAllocs() + const re = `^a.[l-nA-Cg-j]?e$` + for i := 0; i < b.N; i++ { + if _, err := Compile(re); err != nil { + b.Fatal(err) } - b.Run(name, func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - if _, err := Compile(test.re); err != nil { - b.Fatal(err) - } - } - }) } } diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go index 61ed9c5059f35..38b3c86d9f683 100644 --- a/src/regexp/regexp.go +++ b/src/regexp/regexp.go @@ -79,27 +79,24 @@ import ( // A Regexp is safe for concurrent use by multiple goroutines, // except for configuration methods, such as Longest. type Regexp struct { - // read-only after Compile - regexpRO - - // cache of machines for running regexp - mu sync.Mutex - machine []*machine -} - -type regexpRO struct { - expr string // as passed to Compile - prog *syntax.Prog // compiled program - onepass *onePassProg // onepass program or nil + expr string // as passed to Compile + prog *syntax.Prog // compiled program + onepass *onePassProg // onepass program or nil + numSubexp int + maxBitStateLen int + subexpNames []string prefix string // required prefix in unanchored matches prefixBytes []byte // prefix, as a []byte - prefixComplete bool // prefix is the entire regexp prefixRune rune // first rune in prefix prefixEnd uint32 // pc for last rune in prefix + mpool int // pool for machines + matchcap int // size of recorded match lengths + prefixComplete bool // prefix is the entire regexp cond syntax.EmptyOp // empty-width conditions required at start of match - numSubexp int - subexpNames []string - longest bool + + // This field can be modified by the Longest method, + // but it is otherwise read-only. + longest bool // whether regexp prefers leftmost-longest match } // String returns the source text used to compile the regular expression. @@ -108,15 +105,16 @@ func (re *Regexp) String() string { } // Copy returns a new Regexp object copied from re. +// Calling Longest on one copy does not affect another. 
// -// When using a Regexp in multiple goroutines, giving each goroutine -// its own copy helps to avoid lock contention. +// Deprecated: In earlier releases, when using a Regexp in multiple goroutines, +// giving each goroutine its own copy helped to avoid lock contention. +// As of Go 1.12, using Copy is no longer necessary to avoid lock contention. +// Copy may still be appropriate if the reason for its use is to make +// two copies with different Longest settings. func (re *Regexp) Copy() *Regexp { - // It is not safe to copy Regexp by value - // since it contains a sync.Mutex. - return &Regexp{ - regexpRO: re.regexpRO, - } + re2 := *re + return &re2 } // Compile parses a regular expression and returns, if successful, @@ -179,19 +177,23 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { if err != nil { return nil, err } + matchcap := prog.NumCap + if matchcap < 2 { + matchcap = 2 + } regexp := &Regexp{ - regexpRO: regexpRO{ - expr: expr, - prog: prog, - onepass: compileOnePass(prog), - numSubexp: maxCap, - subexpNames: capNames, - cond: prog.StartCond(), - longest: longest, - }, - } - if regexp.onepass == notOnePass { + expr: expr, + prog: prog, + onepass: compileOnePass(prog), + numSubexp: maxCap, + subexpNames: capNames, + cond: prog.StartCond(), + longest: longest, + matchcap: matchcap, + } + if regexp.onepass == nil { regexp.prefix, regexp.prefixComplete = prog.Prefix() + regexp.maxBitStateLen = maxBitStateLen(prog) } else { regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) } @@ -201,39 +203,64 @@ func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { regexp.prefixBytes = []byte(regexp.prefix) regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix) } + + n := len(prog.Inst) + i := 0 + for matchSize[i] != 0 && matchSize[i] < n { + i++ + } + regexp.mpool = i + return regexp, nil } +// Pools of *machine for use during (*Regexp).doExecute, +// split up by the size of the 
execution queues. +// matchPool[i] machines have queue size matchSize[i]. +// On a 64-bit system each queue entry is 16 bytes, +// so matchPool[0] has 16*2*128 = 4kB queues, etc. +// The final matchPool is a catch-all for very large queues. +var ( + matchSize = [...]int{128, 512, 2048, 16384, 0} + matchPool [len(matchSize)]sync.Pool +) + // get returns a machine to use for matching re. // It uses the re's machine cache if possible, to avoid // unnecessary allocation. func (re *Regexp) get() *machine { - re.mu.Lock() - if n := len(re.machine); n > 0 { - z := re.machine[n-1] - re.machine = re.machine[:n-1] - re.mu.Unlock() - return z - } - re.mu.Unlock() - z := progMachine(re.prog, re.onepass) - z.re = re - return z -} - -// put returns a machine to the re's machine cache. -// There is no attempt to limit the size of the cache, so it will -// grow to the maximum number of simultaneous matches -// run using re. (The cache empties when re gets garbage collected.) -func (re *Regexp) put(z *machine) { - // Remove references to input data that we no longer need. - z.inputBytes.str = nil - z.inputString.str = "" - z.inputReader.r = nil - - re.mu.Lock() - re.machine = append(re.machine, z) - re.mu.Unlock() + m, ok := matchPool[re.mpool].Get().(*machine) + if !ok { + m = new(machine) + } + m.re = re + m.p = re.prog + if cap(m.matchcap) < re.matchcap { + m.matchcap = make([]int, re.matchcap) + for _, t := range m.pool { + t.cap = make([]int, re.matchcap) + } + } + + // Allocate queues if needed. + // Or reallocate, for "large" match pool. + n := matchSize[re.mpool] + if n == 0 { // large pool + n = len(re.prog.Inst) + } + if len(m.q0.sparse) < n { + m.q0 = queue{make([]uint32, n), make([]entry, 0, n)} + m.q1 = queue{make([]uint32, n), make([]entry, 0, n)} + } + return m +} + +// put returns a machine to the correct machine pool. 
+func (re *Regexp) put(m *machine) { + m.re = nil + m.p = nil + m.inputs.clear() + matchPool[re.mpool].Put(m) } // MustCompile is like Compile but panics if the expression cannot be parsed. @@ -288,7 +315,7 @@ type input interface { canCheckPrefix() bool // can we look ahead without losing info? hasPrefix(re *Regexp) bool index(re *Regexp, pos int) int - context(pos int) syntax.EmptyOp + context(pos int) lazyFlag } // inputString scans a string. @@ -319,7 +346,7 @@ func (i *inputString) index(re *Regexp, pos int) int { return strings.Index(i.str[pos:], re.prefix) } -func (i *inputString) context(pos int) syntax.EmptyOp { +func (i *inputString) context(pos int) lazyFlag { r1, r2 := endOfText, endOfText // 0 < pos && pos <= len(i.str) if uint(pos-1) < uint(len(i.str)) { @@ -335,7 +362,7 @@ func (i *inputString) context(pos int) syntax.EmptyOp { r2, _ = utf8.DecodeRuneInString(i.str[pos:]) } } - return syntax.EmptyOpContext(r1, r2) + return newLazyFlag(r1, r2) } // inputBytes scans a byte slice. @@ -366,7 +393,7 @@ func (i *inputBytes) index(re *Regexp, pos int) int { return bytes.Index(i.str[pos:], re.prefixBytes) } -func (i *inputBytes) context(pos int) syntax.EmptyOp { +func (i *inputBytes) context(pos int) lazyFlag { r1, r2 := endOfText, endOfText // 0 < pos && pos <= len(i.str) if uint(pos-1) < uint(len(i.str)) { @@ -382,7 +409,7 @@ func (i *inputBytes) context(pos int) syntax.EmptyOp { r2, _ = utf8.DecodeRune(i.str[pos:]) } } - return syntax.EmptyOpContext(r1, r2) + return newLazyFlag(r1, r2) } // inputReader scans a RuneReader. 
@@ -418,8 +445,8 @@ func (i *inputReader) index(re *Regexp, pos int) int { return -1 } -func (i *inputReader) context(pos int) syntax.EmptyOp { - return 0 +func (i *inputReader) context(pos int) lazyFlag { + return 0 // not used } // LiteralPrefix returns a literal string that must begin any match @@ -469,7 +496,7 @@ func MatchString(pattern string, s string) (matched bool, err error) { return re.MatchString(s), nil } -// MatchString reports whether the byte slice b +// Match reports whether the byte slice b // contains any match of the regular expression pattern. // More complicated queries need to use Compile and the full Regexp interface. func Match(pattern string, b []byte) (matched bool, err error) { diff --git a/src/regexp/syntax/prog.go b/src/regexp/syntax/prog.go index 49a06bbfad4b1..ae7a9a2fe0118 100644 --- a/src/regexp/syntax/prog.go +++ b/src/regexp/syntax/prog.go @@ -201,8 +201,12 @@ func (i *Inst) MatchRune(r rune) bool { func (i *Inst) MatchRunePos(r rune) int { rune := i.Rune - // Special case: single-rune slice is from literal string, not char class. - if len(rune) == 1 { + switch len(rune) { + case 0: + return noMatch + + case 1: + // Special case: single-rune slice is from literal string, not char class. r0 := rune[0] if r == r0 { return 0 @@ -215,17 +219,25 @@ func (i *Inst) MatchRunePos(r rune) int { } } return noMatch - } - // Peek at the first few pairs. - // Should handle ASCII well. - for j := 0; j < len(rune) && j <= 8; j += 2 { - if r < rune[j] { - return noMatch + case 2: + if r >= rune[0] && r <= rune[1] { + return 0 } - if r <= rune[j+1] { - return j / 2 + return noMatch + + case 4, 6, 8: + // Linear search for a few pairs. + // Should handle ASCII well. + for j := 0; j < len(rune); j += 2 { + if r < rune[j] { + return noMatch + } + if r <= rune[j+1] { + return j / 2 + } } + return noMatch } // Otherwise binary search. 
diff --git a/src/regexp/syntax/regexp.go b/src/regexp/syntax/regexp.go index a3f56f8c902e4..ae5fa053f985f 100644 --- a/src/regexp/syntax/regexp.go +++ b/src/regexp/syntax/regexp.go @@ -59,7 +59,7 @@ const ( const opPseudo Op = 128 // where pseudo-ops start -// Equal returns true if x and y have identical structure. +// Equal reports whether x and y have identical structure. func (x *Regexp) Equal(y *Regexp) bool { if x == nil || y == nil { return x == y diff --git a/src/run.bash b/src/run.bash index c14f4a206d3f3..1c6c4244349f5 100755 --- a/src/run.bash +++ b/src/run.bash @@ -21,6 +21,7 @@ export GOPATH unset CDPATH # in case user has it set unset GOBIN # Issue 14340 unset GOFLAGS +unset GO111MODULE export GOHOSTOS export CC diff --git a/src/run.bat b/src/run.bat index 0e0c413617c76..123edcc35dd7d 100644 --- a/src/run.bat +++ b/src/run.bat @@ -18,6 +18,7 @@ set GOPATH= :: Issue 14340: ignore GOBIN during all.bat. set GOBIN= set GOFLAGS= +set GO111MODULE= rem TODO avoid rebuild if possible diff --git a/src/run.rc b/src/run.rc index 49d6fd9a4df1e..c346f5cf5c6a2 100755 --- a/src/run.rc +++ b/src/run.rc @@ -11,5 +11,6 @@ GOPATH = () # we disallow local import for non-local packages, if $GOROOT happen # to be under $GOPATH, then some tests below will fail GOBIN = () # Issue 14340 GOFLAGS = () +GO111MODULE = () exec go tool dist test -rebuild $* diff --git a/src/runtime/HACKING.md b/src/runtime/HACKING.md index 72ba61970bd67..993edc67d886b 100644 --- a/src/runtime/HACKING.md +++ b/src/runtime/HACKING.md @@ -205,8 +205,10 @@ marked `//go:notinheap` (see below). Objects that are allocated in unmanaged memory **must not** contain heap pointers unless the following rules are also obeyed: -1. Any pointers from unmanaged memory to the heap must be added as - explicit garbage collection roots in `runtime.markroot`. +1. Any pointers from unmanaged memory to the heap must be garbage + collection roots. 
More specifically, any pointer must either be + accessible through a global variable or be added as an explicit + garbage collection root in `runtime.markroot`. 2. If the memory is reused, the heap pointers must be zero-initialized before they become visible as GC roots. Otherwise, the GC may diff --git a/src/runtime/alg.go b/src/runtime/alg.go index 8e931fd7658c6..887dbebdeb074 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -301,6 +301,10 @@ func alginit() { } func initAlgAES() { + if GOOS == "aix" { + // runtime.algarray is immutable on AIX: see cmd/link/internal/ld/xcoff.go + return + } useAeshash = true algarray[alg_MEM32].hash = aeshash32 algarray[alg_MEM64].hash = aeshash64 diff --git a/src/runtime/asm.s b/src/runtime/asm.s index 6b209b2d1f463..314f99d69be4f 100644 --- a/src/runtime/asm.s +++ b/src/runtime/asm.s @@ -38,3 +38,11 @@ GLOBL runtime·memstats(SB), NOPTR, $0 // This function must be sizeofSkipFunction bytes. TEXT runtime·skipPleaseUseCallersFrames(SB),NOSPLIT,$0-0 SKIP64; SKIP64; SKIP64; SKIP64 + +// abi0Syms is a dummy symbol that creates ABI0 wrappers for Go +// functions called from assembly in other packages. +TEXT abi0Syms<>(SB),NOSPLIT,$0-0 + // obj assumes it can call morestack* using ABI0, but + // morestackc is actually defined in Go. + CALL ·morestackc(SB) + // References from syscall are automatically collected by cmd/go. diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index a6a81c3f63d0e..48a959aad1081 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -107,7 +107,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 MOVL BX, g_stackguard1(BP) MOVL BX, (g_stack+stack_lo)(BP) MOVL SP, (g_stack+stack_hi)(BP) - + // find out information about the processor we're on #ifdef GOOS_nacl // NaCl doesn't like PUSHFL/POPFL JMP has_cpuid @@ -493,9 +493,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. 
-TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $0-20 MOVL argsize+12(FP), CX DISPATCH(runtime·call16, 16) @@ -827,7 +824,7 @@ havem: MOVL (g_sched+gobuf_sp)(SI), SP MOVL 0(SP), AX MOVL AX, (g_sched+gobuf_sp)(SI) - + // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. Since the call is over, return it with dropm. CMPL DX, $0 @@ -881,7 +878,7 @@ TEXT runtime·stackcheck(SB), NOSPLIT, $0-0 // func cputicks() int64 TEXT runtime·cputicks(SB),NOSPLIT,$0-8 - CMPB runtime·support_sse2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 JNE done CMPB runtime·lfenceBeforeRdtsc(SB), $1 JNE mfence @@ -942,7 +939,7 @@ TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0 CMPL BX, $64 JBE aes33to64 JMP aes65plus - + aes0to15: TESTL BX, BX JE aes0 @@ -957,7 +954,7 @@ aes0to15: ADDL BX, BX PAND masks<>(SB)(BX*8), X1 -final1: +final1: AESENC X0, X1 // scramble input, xor in seed AESENC X1, X1 // scramble combo 2 times AESENC X1, X1 @@ -987,7 +984,7 @@ aes17to32: // make second starting seed PXOR runtime·aeskeysched+16(SB), X1 AESENC X1, X1 - + // load data to be hashed MOVOU (AX), X2 MOVOU -16(AX)(BX*1), X3 @@ -1015,22 +1012,22 @@ aes33to64: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + MOVOU (AX), X4 MOVOU 16(AX), X5 MOVOU -32(AX)(BX*1), X6 MOVOU -16(AX)(BX*1), X7 - + AESENC X0, X4 AESENC X1, X5 AESENC X2, X6 AESENC X3, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1052,7 +1049,7 @@ aes65plus: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + // start with last (possibly overlapping) block MOVOU -64(AX)(BX*1), X4 MOVOU -48(AX)(BX*1), X5 @@ -1068,7 +1065,7 @@ aes65plus: // compute number of remaining 64-byte blocks DECL BX SHRL $6, BX - + aesloop: // scramble state, xor in a block MOVOU (AX), X0 @@ -1095,7 +1092,7 @@ aesloop: AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1132,77 
+1129,77 @@ DATA masks<>+0x00(SB)/4, $0x00000000 DATA masks<>+0x04(SB)/4, $0x00000000 DATA masks<>+0x08(SB)/4, $0x00000000 DATA masks<>+0x0c(SB)/4, $0x00000000 - + DATA masks<>+0x10(SB)/4, $0x000000ff DATA masks<>+0x14(SB)/4, $0x00000000 DATA masks<>+0x18(SB)/4, $0x00000000 DATA masks<>+0x1c(SB)/4, $0x00000000 - + DATA masks<>+0x20(SB)/4, $0x0000ffff DATA masks<>+0x24(SB)/4, $0x00000000 DATA masks<>+0x28(SB)/4, $0x00000000 DATA masks<>+0x2c(SB)/4, $0x00000000 - + DATA masks<>+0x30(SB)/4, $0x00ffffff DATA masks<>+0x34(SB)/4, $0x00000000 DATA masks<>+0x38(SB)/4, $0x00000000 DATA masks<>+0x3c(SB)/4, $0x00000000 - + DATA masks<>+0x40(SB)/4, $0xffffffff DATA masks<>+0x44(SB)/4, $0x00000000 DATA masks<>+0x48(SB)/4, $0x00000000 DATA masks<>+0x4c(SB)/4, $0x00000000 - + DATA masks<>+0x50(SB)/4, $0xffffffff DATA masks<>+0x54(SB)/4, $0x000000ff DATA masks<>+0x58(SB)/4, $0x00000000 DATA masks<>+0x5c(SB)/4, $0x00000000 - + DATA masks<>+0x60(SB)/4, $0xffffffff DATA masks<>+0x64(SB)/4, $0x0000ffff DATA masks<>+0x68(SB)/4, $0x00000000 DATA masks<>+0x6c(SB)/4, $0x00000000 - + DATA masks<>+0x70(SB)/4, $0xffffffff DATA masks<>+0x74(SB)/4, $0x00ffffff DATA masks<>+0x78(SB)/4, $0x00000000 DATA masks<>+0x7c(SB)/4, $0x00000000 - + DATA masks<>+0x80(SB)/4, $0xffffffff DATA masks<>+0x84(SB)/4, $0xffffffff DATA masks<>+0x88(SB)/4, $0x00000000 DATA masks<>+0x8c(SB)/4, $0x00000000 - + DATA masks<>+0x90(SB)/4, $0xffffffff DATA masks<>+0x94(SB)/4, $0xffffffff DATA masks<>+0x98(SB)/4, $0x000000ff DATA masks<>+0x9c(SB)/4, $0x00000000 - + DATA masks<>+0xa0(SB)/4, $0xffffffff DATA masks<>+0xa4(SB)/4, $0xffffffff DATA masks<>+0xa8(SB)/4, $0x0000ffff DATA masks<>+0xac(SB)/4, $0x00000000 - + DATA masks<>+0xb0(SB)/4, $0xffffffff DATA masks<>+0xb4(SB)/4, $0xffffffff DATA masks<>+0xb8(SB)/4, $0x00ffffff DATA masks<>+0xbc(SB)/4, $0x00000000 - + DATA masks<>+0xc0(SB)/4, $0xffffffff DATA masks<>+0xc4(SB)/4, $0xffffffff DATA masks<>+0xc8(SB)/4, $0xffffffff DATA masks<>+0xcc(SB)/4, $0x00000000 - + DATA 
masks<>+0xd0(SB)/4, $0xffffffff DATA masks<>+0xd4(SB)/4, $0xffffffff DATA masks<>+0xd8(SB)/4, $0xffffffff DATA masks<>+0xdc(SB)/4, $0x000000ff - + DATA masks<>+0xe0(SB)/4, $0xffffffff DATA masks<>+0xe4(SB)/4, $0xffffffff DATA masks<>+0xe8(SB)/4, $0xffffffff DATA masks<>+0xec(SB)/4, $0x0000ffff - + DATA masks<>+0xf0(SB)/4, $0xffffffff DATA masks<>+0xf4(SB)/4, $0xffffffff DATA masks<>+0xf8(SB)/4, $0xffffffff @@ -1217,77 +1214,77 @@ DATA shifts<>+0x00(SB)/4, $0x00000000 DATA shifts<>+0x04(SB)/4, $0x00000000 DATA shifts<>+0x08(SB)/4, $0x00000000 DATA shifts<>+0x0c(SB)/4, $0x00000000 - + DATA shifts<>+0x10(SB)/4, $0xffffff0f DATA shifts<>+0x14(SB)/4, $0xffffffff DATA shifts<>+0x18(SB)/4, $0xffffffff DATA shifts<>+0x1c(SB)/4, $0xffffffff - + DATA shifts<>+0x20(SB)/4, $0xffff0f0e DATA shifts<>+0x24(SB)/4, $0xffffffff DATA shifts<>+0x28(SB)/4, $0xffffffff DATA shifts<>+0x2c(SB)/4, $0xffffffff - + DATA shifts<>+0x30(SB)/4, $0xff0f0e0d DATA shifts<>+0x34(SB)/4, $0xffffffff DATA shifts<>+0x38(SB)/4, $0xffffffff DATA shifts<>+0x3c(SB)/4, $0xffffffff - + DATA shifts<>+0x40(SB)/4, $0x0f0e0d0c DATA shifts<>+0x44(SB)/4, $0xffffffff DATA shifts<>+0x48(SB)/4, $0xffffffff DATA shifts<>+0x4c(SB)/4, $0xffffffff - + DATA shifts<>+0x50(SB)/4, $0x0e0d0c0b DATA shifts<>+0x54(SB)/4, $0xffffff0f DATA shifts<>+0x58(SB)/4, $0xffffffff DATA shifts<>+0x5c(SB)/4, $0xffffffff - + DATA shifts<>+0x60(SB)/4, $0x0d0c0b0a DATA shifts<>+0x64(SB)/4, $0xffff0f0e DATA shifts<>+0x68(SB)/4, $0xffffffff DATA shifts<>+0x6c(SB)/4, $0xffffffff - + DATA shifts<>+0x70(SB)/4, $0x0c0b0a09 DATA shifts<>+0x74(SB)/4, $0xff0f0e0d DATA shifts<>+0x78(SB)/4, $0xffffffff DATA shifts<>+0x7c(SB)/4, $0xffffffff - + DATA shifts<>+0x80(SB)/4, $0x0b0a0908 DATA shifts<>+0x84(SB)/4, $0x0f0e0d0c DATA shifts<>+0x88(SB)/4, $0xffffffff DATA shifts<>+0x8c(SB)/4, $0xffffffff - + DATA shifts<>+0x90(SB)/4, $0x0a090807 DATA shifts<>+0x94(SB)/4, $0x0e0d0c0b DATA shifts<>+0x98(SB)/4, $0xffffff0f DATA shifts<>+0x9c(SB)/4, $0xffffffff - + DATA 
shifts<>+0xa0(SB)/4, $0x09080706 DATA shifts<>+0xa4(SB)/4, $0x0d0c0b0a DATA shifts<>+0xa8(SB)/4, $0xffff0f0e DATA shifts<>+0xac(SB)/4, $0xffffffff - + DATA shifts<>+0xb0(SB)/4, $0x08070605 DATA shifts<>+0xb4(SB)/4, $0x0c0b0a09 DATA shifts<>+0xb8(SB)/4, $0xff0f0e0d DATA shifts<>+0xbc(SB)/4, $0xffffffff - + DATA shifts<>+0xc0(SB)/4, $0x07060504 DATA shifts<>+0xc4(SB)/4, $0x0b0a0908 DATA shifts<>+0xc8(SB)/4, $0x0f0e0d0c DATA shifts<>+0xcc(SB)/4, $0xffffffff - + DATA shifts<>+0xd0(SB)/4, $0x06050403 DATA shifts<>+0xd4(SB)/4, $0x0a090807 DATA shifts<>+0xd8(SB)/4, $0x0e0d0c0b DATA shifts<>+0xdc(SB)/4, $0xffffff0f - + DATA shifts<>+0xe0(SB)/4, $0x05040302 DATA shifts<>+0xe4(SB)/4, $0x09080706 DATA shifts<>+0xe8(SB)/4, $0x0d0c0b0a DATA shifts<>+0xec(SB)/4, $0xffff0f0e - + DATA shifts<>+0xf0(SB)/4, $0x04030201 DATA shifts<>+0xf4(SB)/4, $0x08070605 DATA shifts<>+0xf8(SB)/4, $0x0c0b0a09 diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 6902ce2c22c02..6339dc000022c 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -92,7 +92,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ANDQ $~15, SP MOVQ AX, 16(SP) MOVQ BX, 24(SP) - + // create istack out of the given (operating system) stack. // _cgo_init may update stackguard. MOVQ $runtime·g0(SB), DI @@ -228,7 +228,7 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 * go-routine */ -// void gosave(Gobuf*) +// func gosave(buf *gobuf) // save state in Gobuf; setjmp TEXT runtime·gosave(SB), NOSPLIT, $0-8 MOVQ buf+0(FP), AX // gobuf @@ -248,7 +248,7 @@ TEXT runtime·gosave(SB), NOSPLIT, $0-8 MOVQ BX, gobuf_g(AX) RET -// void gogo(Gobuf*) +// func gogo(buf *gobuf) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB), NOSPLIT, $16-8 MOVQ buf+0(FP), BX // gobuf @@ -273,7 +273,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $16-8 // to keep running g. 
TEXT runtime·mcall(SB), NOSPLIT, $0-8 MOVQ fn+0(FP), DI - + get_tls(CX) MOVQ g(CX), AX // save state in g->sched MOVQ 0(SP), BX // caller's PC @@ -448,9 +448,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $0-32 MOVLQZX argsize+24(FP), CX DISPATCH(runtime·call32, 32) @@ -560,7 +557,8 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0-0 // compile barrier. RET -// void jmpdefer(fn, sp); +// func jmpdefer(fv *funcval, argp uintptr) +// argp is a caller SP. // called from deferreturn. // 1. pop the caller // 2. sub 5 bytes from the callers return @@ -616,7 +614,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 MOVQ m_gsignal(R8), SI CMPQ SI, DI JEQ nosave - + // Switch to system stack. MOVQ m_g0(R8), SI CALL gosave<>(SB) @@ -670,7 +668,7 @@ nosave: MOVL AX, ret+16(FP) RET -// cgocallback(void (*fn)(void*), void *frame, uintptr framesize, uintptr ctxt) +// func cgocallback(fn, frame unsafe.Pointer, framesize, ctxt uintptr) // Turn the fn into a Go func (by taking its address) and call // cgocallback_gofunc. TEXT runtime·cgocallback(SB),NOSPLIT,$32-32 @@ -686,7 +684,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$32-32 CALL AX RET -// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize, uintptr ctxt) +// func cgocallback_gofunc(fn, frame, framesize, ctxt uintptr) // See cgocall.go for more details. TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-32 NO_LOCAL_POINTERS @@ -716,7 +714,7 @@ needm: get_tls(CX) MOVQ g(CX), BX MOVQ g_m(BX), BX - + // Set m->sched.sp = SP, so that if a panic happens // during the function we are about to execute, it will // have a valid SP to run on the g0 stack. @@ -800,7 +798,7 @@ havem: MOVQ (g_sched+gobuf_sp)(SI), SP MOVQ 0(SP), AX MOVQ AX, (g_sched+gobuf_sp)(SI) - + // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. 
Since the call is over, return it with dropm. CMPQ R8, $0 @@ -811,7 +809,8 @@ havem: // Done! RET -// void setg(G*); set g. for use by needm. +// func setg(gg *g) +// set g. for use by needm. TEXT runtime·setg(SB), NOSPLIT, $0-8 MOVQ gg+0(FP), BX #ifdef GOOS_windows @@ -866,6 +865,7 @@ done: MOVQ AX, ret+0(FP) RET +// func aeshash(p unsafe.Pointer, h, s uintptr) uintptr // hash function using AES hardware instructions TEXT runtime·aeshash(SB),NOSPLIT,$0-32 MOVQ p+0(FP), AX // ptr to data @@ -873,6 +873,7 @@ TEXT runtime·aeshash(SB),NOSPLIT,$0-32 LEAQ ret+24(FP), DX JMP runtime·aeshashbody(SB) +// func aeshashstr(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24 MOVQ p+0(FP), AX // ptr to string struct MOVQ 8(AX), CX // length of string @@ -949,7 +950,7 @@ aes17to32: // make second starting seed PXOR runtime·aeskeysched+16(SB), X1 AESENC X1, X1 - + // load data to be hashed MOVOU (AX), X2 MOVOU -16(AX)(CX*1), X3 @@ -981,7 +982,7 @@ aes33to64: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + MOVOU (AX), X4 MOVOU 16(AX), X5 MOVOU -32(AX)(CX*1), X6 @@ -991,17 +992,17 @@ aes33to64: PXOR X1, X5 PXOR X2, X6 PXOR X3, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1117,7 +1118,7 @@ aes129plus: AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + // start with last (possibly overlapping) block MOVOU -128(AX)(CX*1), X8 MOVOU -112(AX)(CX*1), X9 @@ -1137,11 +1138,11 @@ aes129plus: PXOR X5, X13 PXOR X6, X14 PXOR X7, X15 - + // compute number of remaining 128-byte blocks DECQ CX SHRQ $7, CX - + aesloop: // scramble state AESENC X8, X8 @@ -1210,7 +1211,8 @@ aesloop: PXOR X9, X8 MOVQ X8, (DX) RET - + +// func aeshash32(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·aeshash32(SB),NOSPLIT,$0-24 MOVQ p+0(FP), AX // ptr to data MOVQ h+8(FP), X0 // seed @@ -1221,6 +1223,7 @@ TEXT runtime·aeshash32(SB),NOSPLIT,$0-24 MOVQ X0, ret+16(FP) RET 
+// func aeshash64(p unsafe.Pointer, h uintptr) uintptr TEXT runtime·aeshash64(SB),NOSPLIT,$0-24 MOVQ p+0(FP), AX // ptr to data MOVQ h+8(FP), X0 // seed @@ -1266,6 +1269,7 @@ DATA masks<>+0xf0(SB)/8, $0xffffffffffffffff DATA masks<>+0xf8(SB)/8, $0x00ffffffffffffff GLOBL masks<>(SB),RODATA,$256 +// func checkASM() bool TEXT ·checkASM(SB),NOSPLIT,$0-1 // check that masks<>(SB) and shifts<>(SB) are aligned to 16-byte MOVQ $masks<>(SB), AX @@ -1465,7 +1469,7 @@ GLOBL debugCallFrameTooLarge<>(SB), RODATA, $0x14 // Size duplicated below // This function communicates back to the debugger by setting RAX and // invoking INT3 to raise a breakpoint signal. See the comments in the // implementation for the protocol the debugger is expected to -// follow. InjectDebugCall in the runtime tests demonstates this protocol. +// follow. InjectDebugCall in the runtime tests demonstrates this protocol. // // The debugger must ensure that any pointers passed to the function // obey escape analysis requirements. Specifically, it must not pass @@ -1616,6 +1620,7 @@ DEBUG_CALL_FN(debugCall16384<>, 16384) DEBUG_CALL_FN(debugCall32768<>, 32768) DEBUG_CALL_FN(debugCall65536<>, 65536) +// func debugCallPanicked(val interface{}) TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16 // Copy the panic value to the top of stack. MOVQ val_type+0(FP), AX diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s index 49958d0c886b1..7b57fc78d6b8e 100644 --- a/src/runtime/asm_amd64p32.s +++ b/src/runtime/asm_amd64p32.s @@ -18,7 +18,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 MOVL AX, 16(SP) MOVL BX, 24(SP) - + // create istack out of the given (operating system) stack. MOVL $runtime·g0(SB), DI LEAL (-64*1024+104)(SP), BX @@ -150,7 +150,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $8-4 // to keep running g. 
TEXT runtime·mcall(SB), NOSPLIT, $0-4 MOVL fn+0(FP), DI - + get_tls(CX) MOVL g(CX), AX // save state in g->sched MOVL 0(SP), BX // caller's PC @@ -318,9 +318,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0 JMP AX // Note: can't just "JMP NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $0-20 MOVLQZX argsize+12(FP), CX DISPATCH(runtime·call16, 16) diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index 6722ba760fb54..745aceaaff6c3 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -439,9 +439,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 MOVW $NAME(SB), R1; \ B (R1) -TEXT reflect·call(SB), NOSPLIT, $0-0 - B ·reflectcall(SB) - TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 MOVW argsize+12(FP), R0 DISPATCH(runtime·call16, 16) @@ -514,7 +511,7 @@ TEXT callRet<>(SB), NOSPLIT, $16-0 MOVW R1, 12(R13) MOVW R2, 16(R13) BL runtime·reflectcallmove(SB) - RET + RET CALLFN(·call16, 16) CALLFN(·call32, 32) @@ -673,7 +670,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$16-16 // See cgocall.go for more details. TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-16 NO_LOCAL_POINTERS - + // Load m and g from thread-local storage. MOVB runtime·iscgo(SB), R0 CMP $0, R0 @@ -784,6 +781,9 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0 MOVW R0, g // Save g to thread-local storage. +#ifdef GOOS_windows + B runtime·save_g(SB) +#else MOVB runtime·iscgo(SB), R0 CMP $0, R0 B.EQ 2(PC) @@ -791,6 +791,7 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0 MOVW g, R0 RET +#endif TEXT runtime·emptyfunc(SB),0,$0-0 RET @@ -851,12 +852,12 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$8 // callee-save in the gcc calling convention, so save them here. 
MOVW R11, saveR11-4(SP) MOVW g, saveG-8(SP) - + BL runtime·load_g(SB) MOVW g_m(g), R0 MOVW m_curg(R0), R0 MOVW (g_stack+stack_hi)(R0), R0 - + MOVW saveG-8(SP), g MOVW saveR11-4(SP), R11 RET diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index af389be9fee33..bbeb3df0c85f5 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -18,7 +18,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 // create istack out of the given (operating system) stack. // _cgo_init may update stackguard. MOVD $runtime·g0(SB), g - MOVD RSP, R7 + MOVD RSP, R7 MOVD $(-64*1024)(R7), R0 MOVD R0, g_stackguard0(g) MOVD R0, g_stackguard1(g) @@ -39,12 +39,12 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 #endif MOVD $setg_gcc<>(SB), R1 // arg 1: setg MOVD g, R0 // arg 0: G + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R12) - MOVD _cgo_init(SB), R12 - CMP $0, R12 - BEQ nocgo + ADD $16, RSP nocgo: + BL runtime·save_g(SB) // update stackguard after _cgo_init MOVD (g_stack+stack_lo)(g), R0 ADD $const__StackGuard, R0 @@ -107,6 +107,7 @@ TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8 MOVD buf+0(FP), R3 MOVD RSP, R0 MOVD R0, gobuf_sp(R3) + MOVD R29, gobuf_bp(R3) MOVD LR, gobuf_pc(R3) MOVD g, gobuf_g(R3) MOVD ZR, gobuf_lr(R3) @@ -128,10 +129,12 @@ TEXT runtime·gogo(SB), NOSPLIT, $24-8 MOVD 0(g), R4 // make sure g is not nil MOVD gobuf_sp(R5), R0 MOVD R0, RSP + MOVD gobuf_bp(R5), R29 MOVD gobuf_lr(R5), LR MOVD gobuf_ret(R5), R0 MOVD gobuf_ctxt(R5), R26 MOVD $0, gobuf_sp(R5) + MOVD $0, gobuf_bp(R5) MOVD $0, gobuf_ret(R5) MOVD $0, gobuf_lr(R5) MOVD $0, gobuf_ctxt(R5) @@ -147,6 +150,7 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 // Save caller state in g->sched MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) MOVD LR, (g_sched+gobuf_pc)(g) MOVD $0, (g_sched+gobuf_lr)(g) MOVD g, (g_sched+gobuf_g)(g) @@ -163,6 +167,7 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 MOVD 0(R26), R4 // code pointer MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP // sp 
= m->g0->sched.sp + MOVD (g_sched+gobuf_bp)(g), R29 MOVD R3, -8(RSP) MOVD $0, -16(RSP) SUB $16, RSP @@ -211,6 +216,7 @@ switch: MOVD R6, (g_sched+gobuf_pc)(g) MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) MOVD $0, (g_sched+gobuf_lr)(g) MOVD g, (g_sched+gobuf_g)(g) @@ -224,6 +230,7 @@ switch: MOVD $runtime·mstart(SB), R4 MOVD R4, 0(R3) MOVD R3, RSP + MOVD (g_sched+gobuf_bp)(g), R29 // call target function MOVD 0(R26), R3 // code pointer @@ -235,7 +242,9 @@ switch: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP + MOVD (g_sched+gobuf_bp)(g), R29 MOVD $0, (g_sched+gobuf_sp)(g) + MOVD $0, (g_sched+gobuf_bp)(g) RET noswitch: @@ -244,6 +253,7 @@ noswitch: // at an intermediate systemstack. MOVD 0(R26), R3 // code pointer MOVD.P 16(RSP), R30 // restore LR + SUB $8, RSP, R29 // restore FP B (R3) /* @@ -278,6 +288,7 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 // Set g->sched to context in f MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) MOVD LR, (g_sched+gobuf_pc)(g) MOVD R3, (g_sched+gobuf_lr)(g) MOVD R26, (g_sched+gobuf_ctxt)(g) @@ -294,6 +305,7 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP + MOVD (g_sched+gobuf_bp)(g), R29 MOVD.W $0, -16(RSP) // create a call frame on g0 (saved LR; keep 16-aligned) BL runtime·newstack(SB) @@ -319,9 +331,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 B (R27) // Note: can't just "B NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - B ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWU argsize+24(FP), R16 DISPATCH(runtime·call32, 32) @@ -843,8 +852,9 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 // Save state of caller into g->sched. Smashes R0. 
TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0 MOVD LR, (g_sched+gobuf_pc)(g) - MOVD RSP, R0 + MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(g) + MOVD R29, (g_sched+gobuf_bp)(g) MOVD $0, (g_sched+gobuf_lr)(g) MOVD $0, (g_sched+gobuf_ret)(g) // Assert ctxt is zero. See func save. @@ -885,6 +895,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R0 MOVD R0, RSP + MOVD (g_sched+gobuf_bp)(g), R29 MOVD R9, R0 // Now on a scheduling stack (a pthread-created stack). @@ -996,6 +1007,7 @@ needm: MOVD m_g0(R8), R3 MOVD RSP, R0 MOVD R0, (g_sched+gobuf_sp)(R3) + MOVD R29, (g_sched+gobuf_bp)(R3) havem: // Now there's a valid m, and we're running on its m->g0. @@ -1003,7 +1015,7 @@ havem: // Save current sp in m->g0->sched.sp in preparation for // switch back to m->curg stack. // NOTE: unwindm knows that the saved g->sched.sp is at 16(RSP) aka savedsp-16(SP). - // Beware that the frame size is actually 32. + // Beware that the frame size is actually 32+16. MOVD m_g0(R8), R3 MOVD (g_sched+gobuf_sp)(R3), R4 MOVD R4, savedsp-16(SP) @@ -1030,10 +1042,12 @@ havem: BL runtime·save_g(SB) MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 MOVD (g_sched+gobuf_pc)(g), R5 - MOVD R5, -(24+8)(R4) + MOVD R5, -48(R4) + MOVD (g_sched+gobuf_bp)(g), R5 + MOVD R5, -56(R4) MOVD ctxt+24(FP), R0 - MOVD R0, -(16+8)(R4) - MOVD $-(24+8)(R4), R0 // maintain 16-byte SP alignment + MOVD R0, -40(R4) + MOVD $-48(R4), R0 // maintain 16-byte SP alignment MOVD R0, RSP BL runtime·cgocallbackg(SB) @@ -1041,7 +1055,7 @@ havem: MOVD 0(RSP), R5 MOVD R5, (g_sched+gobuf_pc)(g) MOVD RSP, R4 - ADD $(24+8), R4, R4 + ADD $48, R4, R4 MOVD R4, (g_sched+gobuf_sp)(g) // Switch back to m->g0's stack and restore m->g0->sched.sp. 
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index 4b842ff0f221e..ef45ab137880e 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -292,9 +292,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 JMP (R4) // Note: can't just "BR NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWU argsize+24(FP), R1 DISPATCH(runtime·call32, 32) diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s index 654eb6572c5bf..6ef4507ee1168 100644 --- a/src/runtime/asm_mipsx.s +++ b/src/runtime/asm_mipsx.s @@ -291,9 +291,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0 MOVW $NAME(SB), R4; \ JMP (R4) -TEXT reflect·call(SB),NOSPLIT,$0-20 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20 MOVW argsize+12(FP), R1 diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 0886de9f2ba7c..0a89b57cd81ba 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -36,6 +36,12 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 MOVD _cgo_init(SB), R12 CMP R0, R12 BEQ nocgo +#ifdef GOARCH_ppc64 + // ppc64 use elf ABI v1. we must get the real entry address from + // first slot of the function descriptor before call. 
+ MOVD 8(R12), R2 + MOVD (R12), R12 +#endif MOVD R12, CTR // r12 = "global function entry point" MOVD R13, R5 // arg 2: TLS base pointer MOVD $setg_gcc<>(SB), R4 // arg 1: setg @@ -140,7 +146,9 @@ TEXT runtime·gogo(SB), NOSPLIT, $16-8 MOVD 0(g), R4 MOVD gobuf_sp(R5), R1 MOVD gobuf_lr(R5), R31 +#ifndef GOOS_aix MOVD 24(R1), R2 // restore R2 +#endif MOVD R31, LR MOVD gobuf_ret(R5), R3 MOVD gobuf_ctxt(R5), R11 @@ -257,7 +265,9 @@ switch: MOVD g_m(g), R3 MOVD m_curg(R3), g MOVD (g_sched+gobuf_sp)(g), R3 +#ifndef GOOS_aix MOVD 24(R3), R2 +#endif // switch back to g MOVD g_m(g), R3 MOVD m_curg(R3), g @@ -274,7 +284,9 @@ noswitch: MOVD 0(R11), R12 // code pointer MOVD R12, CTR BL (CTR) +#ifndef GOOS_aix MOVD 24(R1), R2 +#endif RET /* @@ -349,9 +361,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR (CTR) // Note: can't just "BR NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - BR ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32 MOVWZ argsize+24(FP), R3 DISPATCH(runtime·call32, 32) @@ -390,22 +399,52 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ /* copy arguments to stack */ \ MOVD arg+16(FP), R3; \ MOVWZ argsize+24(FP), R4; \ - MOVD R1, R5; \ - ADD $(FIXED_FRAME-1), R5; \ - SUB $1, R3; \ - ADD R5, R4; \ - CMP R5, R4; \ - BEQ 4(PC); \ - MOVBZU 1(R3), R6; \ - MOVBZU R6, 1(R5); \ - BR -4(PC); \ + MOVD R1, R5; \ + CMP R4, $8; \ + BLT tailsetup; \ + /* copy 8 at a time if possible */ \ + ADD $(FIXED_FRAME-8), R5; \ + SUB $8, R3; \ +top: \ + MOVDU 8(R3), R7; \ + MOVDU R7, 8(R5); \ + SUB $8, R4; \ + CMP R4, $8; \ + BGE top; \ + /* handle remaining bytes */ \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $7, R3; \ + ADD $7, R5; \ + BR tail; \ +tailsetup: \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $(FIXED_FRAME-1), R5; \ + SUB $1, R3; \ +tail: \ + MOVBU 1(R3), R6; \ + MOVBU R6, 1(R5); \ + SUB $1, R4; \ + CMP $0, R4; \ + BGT tail; \ +callfn: \ /* call function */ \ MOVD f+8(FP), R11; \ +#ifdef GOOS_aix \ + /* AIX won't trigger a SIGSEGV if R11 = 
nil */ \ + /* So it manually triggers it */ \ + CMP R0, R11 \ + BNE 2(PC) \ + MOVD R0, 0(R0) \ +#endif \ MOVD (R11), R12; \ MOVD R12, CTR; \ PCDATA $PCDATA_StackMapIndex, $0; \ BL (CTR); \ +#ifndef GOOS_aix \ MOVD 24(R1), R2; \ +#endif \ /* copy return values back */ \ MOVD argtype+0(FP), R7; \ MOVD arg+16(FP), R3; \ @@ -489,6 +528,13 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16 MOVD fv+0(FP), R11 MOVD argp+8(FP), R1 SUB $FIXED_FRAME, R1 +#ifdef GOOS_aix + // AIX won't trigger a SIGSEGV if R11 = nil + // So it manually triggers it + CMP R0, R11 + BNE 2(PC) + MOVD R0, 0(R0) +#endif MOVD 0(R11), R12 MOVD R12, CTR BR (CTR) @@ -521,8 +567,13 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 // Figure out if we need to switch to m->g0 stack. // We get called to create new OS threads too, and those // come in on the m->g0 stack already. - MOVD g_m(g), R6 - MOVD m_g0(R6), R6 + // Moreover, if it's called inside the signal handler, it must not switch + // to g0 as it can be in use by another syscall. + MOVD g_m(g), R8 + MOVD m_gsignal(R8), R6 + CMP R6, g + BEQ g0 + MOVD m_g0(R8), R6 CMP R6, g BEQ g0 BL gosave<>(SB) @@ -534,15 +585,34 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 g0: // Save room for two of our pointers, plus 32 bytes of callee // save area that lives on the caller stack. +#ifdef GOOS_aix + // Create a fake LR to improve backtrace. + MOVD $runtime·asmcgocall(SB), R6 + MOVD R6, 16(R1) +#endif SUB $48, R1 RLDCR $0, R1, $~15, R1 // 16-byte alignment for gcc ABI MOVD R5, 40(R1) // save old g on stack MOVD (g_stack+stack_hi)(R5), R5 SUB R7, R5 MOVD R5, 32(R1) // save depth in old g stack (can't just save SP, as stack might be copied during a callback) +#ifdef GOOS_aix + MOVD R7, 0(R1) // Save frame pointer to allow manual backtrace with gdb +#else MOVD R0, 0(R1) // clear back chain pointer (TODO can we give it real back trace information?) +#endif // This is a "global call", so put the global entry point in r12 MOVD R3, R12 + +#ifdef GOARCH_ppc64 + // ppc64 use elf ABI v1. 
we must get the real entry address from + // first slot of the function descriptor before call. +#ifndef GOOS_aix + // aix just passes the function pointer for the moment, see golang.org/cl/146898 for details. + MOVD 8(R12), R2 + MOVD (R12), R12 +#endif +#endif MOVD R12, CTR MOVD R4, R3 // arg in r3 BL (CTR) @@ -553,15 +623,14 @@ g0: // Restore g, stack pointer, toc pointer. // R3 is errno, so don't touch it MOVD 40(R1), g - MOVD (g_stack+stack_hi)(g), R5 - MOVD 32(R1), R6 - SUB R6, R5 - MOVD 24(R5), R2 - BL runtime·save_g(SB) MOVD (g_stack+stack_hi)(g), R5 MOVD 32(R1), R6 SUB R6, R5 +#ifndef GOOS_aix + MOVD 24(R5), R2 +#endif MOVD R5, R1 + BL runtime·save_g(SB) MOVW R3, ret+16(FP) RET @@ -589,7 +658,7 @@ TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-32 NO_LOCAL_POINTERS // Load m and g from thread-local storage. - MOVB runtime·iscgo(SB), R3 + MOVBZ runtime·iscgo(SB), R3 CMP R3, $0 BEQ nocgo BL runtime·load_g(SB) @@ -701,9 +770,20 @@ TEXT runtime·setg(SB), NOSPLIT, $0-8 BL runtime·save_g(SB) RET +#ifdef GOARCH_ppc64 +TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 + DWORD $_setg_gcc<>(SB) + DWORD $0 + DWORD $0 +#endif + // void setg_gcc(G*); set g in C TLS. // Must obey the gcc calling convention. +#ifdef GOARCH_ppc64le TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 +#else +TEXT _setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0 +#endif // The standard prologue clobbers R31, which is callee-save in // the C ABI, so we have to use $-8-0 and save LR ourselves. 
MOVD LR, R4 @@ -723,18 +803,11 @@ TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 MOVW (R0), R0 UNDEF -#define TBRL 268 -#define TBRU 269 /* Time base Upper/Lower */ +#define TBR 268 // int64 runtime·cputicks(void) TEXT runtime·cputicks(SB),NOSPLIT,$0-8 - MOVW SPR(TBRU), R4 - MOVW SPR(TBRL), R3 - MOVW SPR(TBRU), R5 - CMPW R4, R5 - BNE -4(PC) - SLD $32, R5 - OR R5, R3 + MOVD SPR(TBR), R3 MOVD R3, ret+0(FP) RET diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index 9ef1b8a4c8dce..566c3e923645d 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -381,9 +381,6 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 BR (R5) // Note: can't just "BR NAME(SB)" - bad inlining results. -TEXT reflect·call(SB), NOSPLIT, $0-0 - BR ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $-8-32 MOVWZ argsize+24(FP), R3 DISPATCH(runtime·call32, 32) diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index baf840d0cf5fb..374b9f73dbf3e 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -293,9 +293,6 @@ TEXT ·cgocallback_gofunc(SB), NOSPLIT, $16-32 JMP NAME(SB); \ End -TEXT reflect·call(SB), NOSPLIT, $0-0 - JMP ·reflectcall(SB) - TEXT ·reflectcall(SB), NOSPLIT, $0-32 I64Load fn+8(FP) I64Eqz diff --git a/src/runtime/atomic_pointer.go b/src/runtime/atomic_pointer.go index 09cfbda9b1cf2..b8f0c22c63966 100644 --- a/src/runtime/atomic_pointer.go +++ b/src/runtime/atomic_pointer.go @@ -13,8 +13,6 @@ import ( // because while ptr does not escape, new does. // If new is marked as not escaping, the compiler will make incorrect // escape analysis decisions about the pointer value being stored. -// Instead, these are wrappers around the actual atomics (casp1 and so on) -// that use noescape to convey which arguments do not escape. // atomicwb performs a write barrier before an atomic pointer write. // The caller should guard the call with "if writeBarrier.enabled". 
@@ -37,17 +35,6 @@ func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) { atomic.StorepNoWB(noescape(ptr), new) } -//go:nosplit -func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { - // The write barrier is only necessary if the CAS succeeds, - // but since it needs to happen before the write becomes - // public, we have to do it conservatively all the time. - if writeBarrier.enabled { - atomicwb(ptr, new) - } - return atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) -} - // Like above, but implement in terms of sync/atomic's uintptr operations. // We cannot just call the runtime routines, because the race detector expects // to be able to intercept the sync/atomic forms but not the runtime forms. diff --git a/src/runtime/cgo/asm_amd64.s b/src/runtime/cgo/asm_amd64.s index 0e33fc47961a7..06c538b9bc020 100644 --- a/src/runtime/cgo/asm_amd64.s +++ b/src/runtime/cgo/asm_amd64.s @@ -36,9 +36,9 @@ TEXT crosscall2(SB),NOSPLIT,$0x110-0 /* also need to save xmm6 - xmm15 */ MOVQ DX, 0x0(SP) /* arg */ MOVQ R8, 0x8(SP) /* argsize (includes padding) */ MOVQ R9, 0x10(SP) /* ctxt */ - + CALL CX /* fn */ - + MOVQ 0x48(SP), DI MOVQ 0x50(SP), SI MOVUPS 0x60(SP), X6 @@ -64,5 +64,5 @@ TEXT crosscall2(SB),NOSPLIT,$0x110-0 /* also need to save xmm6 - xmm15 */ MOVQ 0x30(SP), R13 MOVQ 0x38(SP), R14 MOVQ 0x40(SP), R15 - + RET diff --git a/src/runtime/cgo/asm_arm.s b/src/runtime/cgo/asm_arm.s index 36dab286aecb3..60132c14a88c5 100644 --- a/src/runtime/cgo/asm_arm.s +++ b/src/runtime/cgo/asm_arm.s @@ -8,7 +8,7 @@ // func crosscall2(fn func(a unsafe.Pointer, n int32, ctxt uintptr), a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls fn with three arguments. TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 - /* + /* * We still need to save all callee save register as before, and then * push 3 args for fn (R1, R2, R3). 
* Also note that at procedure entry in gc world, 4(R13) will be the diff --git a/src/runtime/cgo/callbacks.go b/src/runtime/cgo/callbacks.go index 8590aa3659a56..14a218ec92cdc 100644 --- a/src/runtime/cgo/callbacks.go +++ b/src/runtime/cgo/callbacks.go @@ -35,7 +35,7 @@ func _runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr, uintptr) // /* The function call will not return. */ //go:linkname _runtime_cgo_panic_internal runtime._cgo_panic_internal -var _runtime_cgo_panic_internal byte +func _runtime_cgo_panic_internal(p *byte) //go:linkname _cgo_panic _cgo_panic //go:cgo_export_static _cgo_panic @@ -43,7 +43,12 @@ var _runtime_cgo_panic_internal byte //go:nosplit //go:norace func _cgo_panic(a unsafe.Pointer, n int32) { - _runtime_cgocallback(unsafe.Pointer(&_runtime_cgo_panic_internal), a, uintptr(n), 0) + f := _runtime_cgo_panic_internal + type funcval struct { + pc unsafe.Pointer + } + fv := *(**funcval)(unsafe.Pointer(&f)) + _runtime_cgocallback(fv.pc, a, uintptr(n), 0) } //go:cgo_import_static x_cgo_init diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c index 3dc5bde4ccb99..3dafd10b7bc29 100644 --- a/src/runtime/cgo/gcc_libinit.c +++ b/src/runtime/cgo/gcc_libinit.c @@ -63,7 +63,7 @@ _cgo_wait_runtime_init_done() { } void -x_cgo_notify_runtime_init_done(void* dummy) { +x_cgo_notify_runtime_init_done(void* dummy __attribute__ ((unused))) { pthread_mutex_lock(&runtime_init_mu); runtime_init_done = 1; pthread_cond_broadcast(&runtime_init_cond); diff --git a/src/runtime/cgo/gcc_libinit_windows.c b/src/runtime/cgo/gcc_libinit_windows.c index b6f51b3e4dd66..248d59fd69574 100644 --- a/src/runtime/cgo/gcc_libinit_windows.c +++ b/src/runtime/cgo/gcc_libinit_windows.c @@ -9,6 +9,7 @@ #include #include +#include #include "libcgo.h" diff --git a/src/runtime/cgo/gcc_openbsd_arm.c b/src/runtime/cgo/gcc_openbsd_arm.c new file mode 100644 index 0000000000000..9a5757f0ad369 --- /dev/null +++ b/src/runtime/cgo/gcc_openbsd_arm.c @@ -0,0 +1,67 @@ 
+// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include +#include +#include "libcgo.h" +#include "libcgo_unix.h" + +static void* threadentry(void*); +static void (*setg_gcc)(void*); + +void +x_cgo_init(G *g, void (*setg)(void*)) +{ + pthread_attr_t attr; + size_t size; + + setg_gcc = setg; + pthread_attr_init(&attr); + pthread_attr_getstacksize(&attr, &size); + g->stacklo = (uintptr)&attr - size + 4096; + pthread_attr_destroy(&attr); +} + +void +_cgo_sys_thread_start(ThreadStart *ts) +{ + pthread_attr_t attr; + sigset_t ign, oset; + pthread_t p; + size_t size; + int err; + + sigfillset(&ign); + pthread_sigmask(SIG_SETMASK, &ign, &oset); + + pthread_attr_init(&attr); + pthread_attr_getstacksize(&attr, &size); + + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts->g->stackhi = size; + err = _cgo_try_pthread_create(&p, &attr, threadentry, ts); + + pthread_sigmask(SIG_SETMASK, &oset, nil); + + if (err != 0) { + fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err)); + abort(); + } +} + +extern void crosscall_arm1(void (*fn)(void), void (*setg_gcc)(void*), void *g); + +static void* +threadentry(void *v) +{ + ThreadStart ts; + + ts = *(ThreadStart*)v; + free(v); + + crosscall_arm1(ts.fn, setg_gcc, (void*)ts.g); + return nil; +} diff --git a/src/runtime/cgo/gcc_windows_386.c b/src/runtime/cgo/gcc_windows_386.c index f2ff710f60cdf..9184b91393ff5 100644 --- a/src/runtime/cgo/gcc_windows_386.c +++ b/src/runtime/cgo/gcc_windows_386.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "libcgo.h" static void threadentry(void*); diff --git a/src/runtime/cgo/gcc_windows_amd64.c b/src/runtime/cgo/gcc_windows_amd64.c index 511ab44fa9822..7192a24631543 100644 --- a/src/runtime/cgo/gcc_windows_amd64.c +++ b/src/runtime/cgo/gcc_windows_amd64.c @@ -7,6 +7,7 @@ #include #include #include +#include 
#include "libcgo.h" static void threadentry(void*); diff --git a/src/runtime/cgo/signal_darwin_arm64.s b/src/runtime/cgo/signal_darwin_arm64.s index 60443b64c8d20..1ae00d13f307a 100644 --- a/src/runtime/cgo/signal_darwin_arm64.s +++ b/src/runtime/cgo/signal_darwin_arm64.s @@ -37,7 +37,7 @@ ongothread: // Build a 32-byte stack frame for us for this call. // Saved LR (none available) is at the bottom, - // then the PC argument for setsigsegv, + // then the PC argument for setsigsegv, // then a copy of the LR for us to restore. MOVD.W $0, -32(RSP) MOVD R1, 8(RSP) diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index c85033f4bccff..85b6c8289ab4e 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -130,12 +130,19 @@ func cgocall(fn, arg unsafe.Pointer) int32 { mp.incgo = true errno := asmcgocall(fn, arg) - // Call endcgo before exitsyscall because exitsyscall may + // Update accounting before exitsyscall because exitsyscall may // reschedule us on to a different M. - endcgo(mp) + mp.incgo = false + mp.ncgo-- exitsyscall() + // Note that raceacquire must be called only after exitsyscall has + // wired this M to a P. + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + // From the garbage collector's perspective, time can move // backwards in the sequence above. If there's a callback into // Go code, GC will see this function at the call to @@ -153,16 +160,6 @@ func cgocall(fn, arg unsafe.Pointer) int32 { return errno } -//go:nosplit -func endcgo(mp *m) { - mp.incgo = false - mp.ncgo-- - - if raceenabled { - raceacquire(unsafe.Pointer(&racecgosync)) - } -} - // Call from C back to Go. //go:nosplit func cgocallbackg(ctxt uintptr) { @@ -268,7 +265,8 @@ func cgocallbackg1(ctxt uintptr) { case "arm64": // On arm64, stack frame is four words and there's a saved LR between // SP and the stack frame and between the stack frame and the arguments. 
- cb = (*args)(unsafe.Pointer(sp + 5*sys.PtrSize)) + // Additional two words (16-byte alignment) are for saving FP. + cb = (*args)(unsafe.Pointer(sp + 7*sys.PtrSize)) case "amd64": // On amd64, stack frame is two words, plus caller PC. if framepointer_enabled { @@ -346,13 +344,14 @@ func unwindm(restore *bool) { sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16)) } - // Call endcgo to do the accounting that cgocall will not have a - // chance to do during an unwind. + // Do the accounting that cgocall will not have a chance to do + // during an unwind. // // In the case where a Go call originates from C, ncgo is 0 // and there is no matching cgocall to end. if mp.ncgo > 0 { - endcgo(mp) + mp.incgo = false + mp.ncgo-- } releasem(mp) @@ -607,7 +606,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { return } -// cgoIsGoPointer returns whether the pointer is a Go pointer--a +// cgoIsGoPointer reports whether the pointer is a Go pointer--a // pointer to Go memory. We only care about Go memory that might // contain pointers. //go:nosplit @@ -630,7 +629,7 @@ func cgoIsGoPointer(p unsafe.Pointer) bool { return false } -// cgoInRange returns whether p is between start and end. +// cgoInRange reports whether p is between start and end. //go:nosplit //go:nowritebarrierrec func cgoInRange(p unsafe.Pointer, start, end uintptr) bool { diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 73cb6ecae2b49..7f3c4aa803079 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -43,6 +43,13 @@ func cgoCheckWriteBarrier(dst *uintptr, src uintptr) { return } + // It's OK if writing to memory allocated by persistentalloc. + // Do this check last because it is more expensive and rarely true. + // If it is false the expense doesn't matter since we are crashing. 
+ if inPersistentAlloc(uintptr(unsafe.Pointer(dst))) { + return + } + systemstack(func() { println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst)))) throw(cgoWriteBarrierFail) @@ -126,7 +133,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { } s := spanOfUnchecked(uintptr(src)) - if s.state == _MSpanManual { + if s.state == mSpanManual { // There are no heap bits for value stored on the stack. // For a channel receive src might be on the stack of some // other goroutine, so we can't unwind the stack even if diff --git a/src/runtime/chan.go b/src/runtime/chan.go index ce71cee4c5b40..8662f00e13289 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -19,6 +19,7 @@ package runtime import ( "runtime/internal/atomic" + "runtime/internal/math" "unsafe" ) @@ -78,7 +79,8 @@ func makechan(t *chantype, size int) *hchan { throw("makechan: bad alignment") } - if size < 0 || uintptr(size) > maxSliceCap(elem.size) || uintptr(size)*elem.size > maxAlloc-hchanSize { + mem, overflow := math.MulUintptr(elem.size, uintptr(size)) + if overflow || mem > maxAlloc-hchanSize || size < 0 { panic(plainError("makechan: size out of range")) } @@ -88,20 +90,20 @@ func makechan(t *chantype, size int) *hchan { // TODO(dvyukov,rlh): Rethink when collector can move allocated objects. var c *hchan switch { - case size == 0 || elem.size == 0: + case mem == 0: // Queue or element size is zero. c = (*hchan)(mallocgc(hchanSize, nil, true)) // Race detector uses this location for synchronization. - c.buf = unsafe.Pointer(c) + c.buf = c.raceaddr() case elem.kind&kindNoPointers != 0: // Elements do not contain pointers. // Allocate hchan and buf in one call. - c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true)) + c = (*hchan)(mallocgc(hchanSize+mem, nil, true)) c.buf = add(unsafe.Pointer(c), hchanSize) default: // Elements contain pointers. 
c = new(hchan) - c.buf = mallocgc(uintptr(size)*elem.size, elem, true) + c.buf = mallocgc(mem, elem, true) } c.elemsize = uint16(elem.size) @@ -151,7 +153,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { } if raceenabled { - racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend)) + racereadpc(c.raceaddr(), callerpc, funcPC(chansend)) } // Fast path: check for failed non-blocking operation without acquiring the lock. @@ -232,6 +234,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { gp.param = nil c.sendq.enqueue(mysg) goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3) + // Ensure the value being sent is kept alive until the + // receiver copies it out. The sudog has a pointer to the + // stack object, but sudogs aren't considered as roots of the + // stack tracer. + KeepAlive(ep) // someone woke us up. if mysg != gp.waiting { @@ -337,13 +344,13 @@ func closechan(c *hchan) { if raceenabled { callerpc := getcallerpc() - racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan)) - racerelease(unsafe.Pointer(c)) + racewritepc(c.raceaddr(), callerpc, funcPC(closechan)) + racerelease(c.raceaddr()) } c.closed = 1 - var glist *g + var glist gList // release all readers for { @@ -361,10 +368,9 @@ func closechan(c *hchan) { gp := sg.g gp.param = nil if raceenabled { - raceacquireg(gp, unsafe.Pointer(c)) + raceacquireg(gp, c.raceaddr()) } - gp.schedlink.set(glist) - glist = gp + glist.push(gp) } // release all writers (they will panic) @@ -380,17 +386,15 @@ func closechan(c *hchan) { gp := sg.g gp.param = nil if raceenabled { - raceacquireg(gp, unsafe.Pointer(c)) + raceacquireg(gp, c.raceaddr()) } - gp.schedlink.set(glist) - glist = gp + glist.push(gp) } unlock(&c.lock) // Ready all Gs now that we've dropped the channel lock. 
- for glist != nil { - gp := glist - glist = glist.schedlink.ptr() + for !glist.empty() { + gp := glist.pop() gp.schedlink = 0 goready(gp, 3) } @@ -457,7 +461,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) if c.closed != 0 && c.qcount == 0 { if raceenabled { - raceacquire(unsafe.Pointer(c)) + raceacquire(c.raceaddr()) } unlock(&c.lock) if ep != nil { @@ -735,6 +739,15 @@ func (q *waitq) dequeue() *sudog { } } +func (c *hchan) raceaddr() unsafe.Pointer { + // Treat read-like and write-like operations on the channel to + // happen at this address. Avoid using the address of qcount + // or dataqsiz, because the len() and cap() builtins read + // those addresses, and we don't want them racing with + // operations like close(). + return unsafe.Pointer(&c.buf) +} + func racesync(c *hchan, sg *sudog) { racerelease(chanbuf(c, 0)) raceacquireg(sg.g, chanbuf(c, 0)) diff --git a/src/runtime/cpuflags.go b/src/runtime/cpuflags.go new file mode 100644 index 0000000000000..1565afb93a557 --- /dev/null +++ b/src/runtime/cpuflags.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/cpu" + "unsafe" +) + +// Offsets into internal/cpu records for use in assembly. +const ( + offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) + offsetX86HasERMS = unsafe.Offsetof(cpu.X86.HasERMS) + offsetX86HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2) + + offsetARMHasIDIVA = unsafe.Offsetof(cpu.ARM.HasIDIVA) +) + +var ( + // Set in runtime.cpuinit. + // TODO: deprecate these; use internal/cpu directly. 
+ x86HasPOPCNT bool + x86HasSSE41 bool + + arm64HasATOMICS bool +) diff --git a/src/runtime/cpuflags_amd64.go b/src/runtime/cpuflags_amd64.go index 10ab5f5b00561..8cca4bca8f0b5 100644 --- a/src/runtime/cpuflags_amd64.go +++ b/src/runtime/cpuflags_amd64.go @@ -6,12 +6,6 @@ package runtime import ( "internal/cpu" - "unsafe" -) - -// Offsets into internal/cpu records for use in assembly. -const ( - offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2) ) var useAVXmemmove bool diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go index 6da8341e84198..c1dd757797dea 100644 --- a/src/runtime/crash_cgo_test.go +++ b/src/runtime/crash_cgo_test.go @@ -263,7 +263,7 @@ func TestCgoTracebackContext(t *testing.T) { } } -func testCgoPprof(t *testing.T, buildArg, runArg string) { +func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) { t.Parallel() if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le") { t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH) @@ -287,7 +287,7 @@ func testCgoPprof(t *testing.T, buildArg, runArg string) { defer os.Remove(fn) for try := 0; try < 2; try++ { - cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1")) + cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-traces")) // Check that pprof works both with and without explicit executable on command line. 
if try == 0 { cmd.Args = append(cmd.Args, exe, fn) @@ -307,30 +307,38 @@ func testCgoPprof(t *testing.T, buildArg, runArg string) { cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir()) } - top, err := cmd.CombinedOutput() - t.Logf("%s:\n%s", cmd.Args, top) + out, err := cmd.CombinedOutput() + t.Logf("%s:\n%s", cmd.Args, out) if err != nil { t.Error(err) - } else if !bytes.Contains(top, []byte("cpuHog")) { - t.Error("missing cpuHog in pprof output") + continue + } + + trace := findTrace(string(out), top) + if len(trace) == 0 { + t.Errorf("%s traceback missing.", top) + continue + } + if trace[len(trace)-1] != bottom { + t.Errorf("invalid traceback origin: got=%v; want=[%s ... %s]", trace, top, bottom) } } } func TestCgoPprof(t *testing.T) { - testCgoPprof(t, "", "CgoPprof") + testCgoPprof(t, "", "CgoPprof", "cpuHog", "runtime.main") } func TestCgoPprofPIE(t *testing.T) { - testCgoPprof(t, "-buildmode=pie", "CgoPprof") + testCgoPprof(t, "-buildmode=pie", "CgoPprof", "cpuHog", "runtime.main") } func TestCgoPprofThread(t *testing.T) { - testCgoPprof(t, "", "CgoPprofThread") + testCgoPprof(t, "", "CgoPprofThread", "cpuHogThread", "cpuHogThread2") } func TestCgoPprofThreadNoTraceback(t *testing.T) { - testCgoPprof(t, "", "CgoPprofThreadNoTraceback") + testCgoPprof(t, "", "CgoPprofThreadNoTraceback", "cpuHogThread", "runtime._ExternalCode") } func TestRaceProf(t *testing.T) { @@ -509,3 +517,35 @@ func TestBigStackCallbackCgo(t *testing.T) { t.Errorf("expected %q got %v", want, got) } } + +func nextTrace(lines []string) ([]string, []string) { + var trace []string + for n, line := range lines { + if strings.HasPrefix(line, "---") { + return trace, lines[n+1:] + } + fields := strings.Fields(strings.TrimSpace(line)) + if len(fields) == 0 { + continue + } + // Last field contains the function name. 
+ trace = append(trace, fields[len(fields)-1]) + } + return nil, nil +} + +func findTrace(text, top string) []string { + lines := strings.Split(text, "\n") + _, lines = nextTrace(lines) // Skip the header. + for len(lines) > 0 { + var t []string + t, lines = nextTrace(lines) + if len(t) == 0 { + continue + } + if t[0] == top { + return t + } + } + return nil +} diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 2766b8850af33..6fba4dd91a1da 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -623,6 +623,9 @@ func TestBadTraceback(t *testing.T) { } func TestTimePprof(t *testing.T) { + if runtime.GOOS == "aix" { + t.Skip("pprof not yet available on AIX (see golang.org/issue/28555)") + } fn := runTestProg(t, "testprog", "TimeProf") fn = strings.TrimSpace(fn) defer os.Remove(fn) @@ -686,7 +689,7 @@ func init() { func TestRuntimePanic(t *testing.T) { testenv.MustHaveExec(t) - cmd := exec.Command(os.Args[0], "-test.run=TestRuntimePanic") + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestRuntimePanic")) cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_PANIC=1") out, err := cmd.CombinedOutput() t.Logf("%s", out) diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go index af9e6430da2b8..1384e0021080d 100644 --- a/src/runtime/crash_unix_test.go +++ b/src/runtime/crash_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime_test diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go new file mode 100644 index 0000000000000..2c5aa27b6ed03 --- /dev/null +++ b/src/runtime/debug/mod.go @@ -0,0 +1,105 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package debug + +import ( + "strings" +) + +// set using cmd/go/internal/modload.ModInfoProg +var modinfo string + +// ReadBuildInfo returns the build information embedded +// in the running binary. The information is available only +// in binaries built with module support. +func ReadBuildInfo() (info *BuildInfo, ok bool) { + return readBuildInfo(modinfo) +} + +// BuildInfo represents the build information read from +// the running binary. +type BuildInfo struct { + Path string // The main package path + Main Module // The main module information + Deps []*Module // Module dependencies +} + +// Module represents a module. +type Module struct { + Path string // module path + Version string // module version + Sum string // checksum + Replace *Module // replaced by this module +} + +func readBuildInfo(data string) (*BuildInfo, bool) { + if len(data) < 32 { + return nil, false + } + data = data[16 : len(data)-16] + + const ( + pathLine = "path\t" + modLine = "mod\t" + depLine = "dep\t" + repLine = "=>\t" + ) + + info := &BuildInfo{} + + var line string + // Reverse of cmd/go/internal/modload.PackageBuildInfo + for len(data) > 0 { + i := strings.IndexByte(data, '\n') + if i < 0 { + break + } + line, data = data[:i], data[i+1:] + switch { + case strings.HasPrefix(line, pathLine): + elem := line[len(pathLine):] + info.Path = elem + case strings.HasPrefix(line, modLine): + elem := strings.Split(line[len(modLine):], "\t") + if len(elem) != 3 { + return nil, false + } + info.Main = Module{ + Path: elem[0], + Version: elem[1], + Sum: elem[2], + } + case strings.HasPrefix(line, depLine): + elem := strings.Split(line[len(depLine):], "\t") + if len(elem) != 2 && len(elem) != 3 { + return nil, false + } + sum := "" + if len(elem) == 3 { + sum = elem[2] + } + info.Deps = append(info.Deps, &Module{ + Path: elem[0], + Version: elem[1], + Sum: sum, + }) + case strings.HasPrefix(line, repLine): + elem := strings.Split(line[len(repLine):], "\t") + if len(elem) != 3 { + return nil, 
false + } + last := len(info.Deps) - 1 + if last < 0 { + return nil, false + } + info.Deps[last].Replace = &Module{ + Path: elem[0], + Version: elem[1], + Sum: elem[2], + } + } + } + return info, true +} diff --git a/src/runtime/debug_test.go b/src/runtime/debug_test.go index a34f4c77f7cbe..f77a373d1332f 100644 --- a/src/runtime/debug_test.go +++ b/src/runtime/debug_test.go @@ -17,6 +17,8 @@ package runtime_test import ( "fmt" + "io/ioutil" + "regexp" "runtime" "runtime/debug" "sync/atomic" @@ -25,12 +27,23 @@ import ( ) func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) { + // This can deadlock if run under a debugger because it + // depends on catching SIGTRAP, which is usually swallowed by + // a debugger. + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads or if a GC - // tries to interrupt an atomic loop (see issue #10958). - ogomaxprocs := runtime.GOMAXPROCS(2) + // tries to interrupt an atomic loop (see issue #10958). We + // use 8 Ps so there's room for the debug call worker, + // something that's trying to preempt the call worker, and the + // goroutine that's trying to stop the call worker. + ogomaxprocs := runtime.GOMAXPROCS(8) ogcpercent := debug.SetGCPercent(-1) - ready := make(chan *runtime.G) + // ready is a buffered channel so debugCallWorker won't block + // on sending to it. This makes it less likely we'll catch + // debugCallWorker while it's in the runtime. + ready := make(chan *runtime.G, 1) var stop uint32 done := make(chan error) go debugCallWorker(ready, &stop, done) @@ -60,6 +73,10 @@ func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) { close(done) } +// Don't inline this function, since we want to test adjusting +// pointers in the arguments. 
+// +//go:noinline func debugCallWorker2(stop *uint32, x *int) { for atomic.LoadUint32(stop) == 0 { // Strongly encourage x to live in a register so we @@ -73,6 +90,28 @@ func debugCallTKill(tid int) error { return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP) } +// skipUnderDebugger skips the current test when running under a +// debugger (specifically if this process has a tracer). This is +// Linux-specific. +func skipUnderDebugger(t *testing.T) { + pid := syscall.Getpid() + status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid)) + if err != nil { + t.Logf("couldn't get proc tracer: %s", err) + return + } + re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`) + sub := re.FindSubmatch(status) + if sub == nil { + t.Logf("couldn't find proc tracer PID") + return + } + if string(sub[1]) == "0" { + return + } + t.Skip("test will deadlock under a debugger") +} + func TestDebugCall(t *testing.T) { g, after := startDebugCallWorker(t) defer after() @@ -160,9 +199,11 @@ func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) { } func TestDebugCallUnsafePoint(t *testing.T) { + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads or if a GC // tries to interrupt an atomic loop (see issue #10958). - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8)) defer debug.SetGCPercent(debug.SetGCPercent(-1)) // Test that the runtime refuses call injection at unsafe points. @@ -181,8 +222,10 @@ func TestDebugCallUnsafePoint(t *testing.T) { } func TestDebugCallPanic(t *testing.T) { + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads. 
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8)) ready := make(chan *runtime.G) var stop uint32 diff --git a/src/runtime/defs2_linux.go b/src/runtime/defs2_linux.go index c10dfb8624000..b08c0dafe12f2 100644 --- a/src/runtime/defs2_linux.go +++ b/src/runtime/defs2_linux.go @@ -58,7 +58,10 @@ const ( MAP_PRIVATE = C.MAP_PRIVATE MAP_FIXED = C.MAP_FIXED - MADV_DONTNEED = C.MADV_DONTNEED + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE SA_RESTART = C.SA_RESTART SA_ONSTACK = C.SA_ONSTACK diff --git a/src/runtime/defs_aix.go b/src/runtime/defs_aix.go new file mode 100644 index 0000000000000..812c7fcfa231f --- /dev/null +++ b/src/runtime/defs_aix.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs +GOARCH=ppc64 go tool cgo -godefs defs_aix.go > defs_aix_ppc64_tmp.go + +This is only a helper to create defs_aix_ppc64.go +Go runtime functions require the "linux" name of fields (ss_sp, si_addr, etc) +However, AIX structures don't provide such names and must be modified. + +TODO(aix): create a script to automatise defs_aix creation. 
+ +Modifications made: + - sigset replaced by a [4]uint64 array + - add sigset_all variable + - siginfo.si_addr uintptr instead of *byte + - add (*timeval) set_usec + - stackt.ss_sp uintptr instead of *byte + - stackt.ss_size uintptr instead of uint64 + - sigcontext.sc_jmpbuf context64 instead of jumbuf + - ucontext.__extctx is a uintptr because we don't need extctx struct + - ucontext.uc_mcontext: replace jumbuf structure by context64 structure + - sigaction.sa_handler represents union field as both are uintptr + - tstate.* replace *byte by uintptr + + +*/ + +package runtime + +/* + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +*/ +import "C" + +const ( + _EPERM = C.EPERM + _ENOENT = C.ENOENT + _EINTR = C.EINTR + _EAGAIN = C.EAGAIN + _ENOMEM = C.ENOMEM + _EACCES = C.EACCES + _EFAULT = C.EFAULT + _EINVAL = C.EINVAL + _ETIMEDOUT = C.ETIMEDOUT + + _PROT_NONE = C.PROT_NONE + _PROT_READ = C.PROT_READ + _PROT_WRITE = C.PROT_WRITE + _PROT_EXEC = C.PROT_EXEC + + _MAP_ANONYMOUS = C.MAP_ANONYMOUS + _MAP_PRIVATE = C.MAP_PRIVATE + _MAP_FIXED = C.MAP_FIXED + _MADV_DONTNEED = C.MADV_DONTNEED + + _SIGHUP = C.SIGHUP + _SIGINT = C.SIGINT + _SIGQUIT = C.SIGQUIT + _SIGILL = C.SIGILL + _SIGTRAP = C.SIGTRAP + _SIGABRT = C.SIGABRT + _SIGBUS = C.SIGBUS + _SIGFPE = C.SIGFPE + _SIGKILL = C.SIGKILL + _SIGUSR1 = C.SIGUSR1 + _SIGSEGV = C.SIGSEGV + _SIGUSR2 = C.SIGUSR2 + _SIGPIPE = C.SIGPIPE + _SIGALRM = C.SIGALRM + _SIGCHLD = C.SIGCHLD + _SIGCONT = C.SIGCONT + _SIGSTOP = C.SIGSTOP + _SIGTSTP = C.SIGTSTP + _SIGTTIN = C.SIGTTIN + _SIGTTOU = C.SIGTTOU + _SIGURG = C.SIGURG + _SIGXCPU = C.SIGXCPU + _SIGXFSZ = C.SIGXFSZ + _SIGVTALRM = C.SIGVTALRM + _SIGPROF = C.SIGPROF + _SIGWINCH = C.SIGWINCH + _SIGIO = C.SIGIO + _SIGPWR = C.SIGPWR + _SIGSYS = C.SIGSYS + _SIGTERM = C.SIGTERM + _SIGEMT = C.SIGEMT + _SIGWAITING = C.SIGWAITING + + _FPE_INTDIV = C.FPE_INTDIV + _FPE_INTOVF = C.FPE_INTOVF + _FPE_FLTDIV = C.FPE_FLTDIV + _FPE_FLTOVF = 
C.FPE_FLTOVF + _FPE_FLTUND = C.FPE_FLTUND + _FPE_FLTRES = C.FPE_FLTRES + _FPE_FLTINV = C.FPE_FLTINV + _FPE_FLTSUB = C.FPE_FLTSUB + + _BUS_ADRALN = C.BUS_ADRALN + _BUS_ADRERR = C.BUS_ADRERR + _BUS_OBJERR = C.BUS_OBJERR + + _SEGV_MAPERR = C.SEGV_MAPERR + _SEGV_ACCERR = C.SEGV_ACCERR + + _ITIMER_REAL = C.ITIMER_REAL + _ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + _ITIMER_PROF = C.ITIMER_PROF + + _O_RDONLY = C.O_RDONLY + + _SS_DISABLE = C.SS_DISABLE + _SI_USER = C.SI_USER + _SIG_BLOCK = C.SIG_BLOCK + _SIG_UNBLOCK = C.SIG_UNBLOCK + _SIG_SETMASK = C.SIG_SETMASK + + _SA_SIGINFO = C.SA_SIGINFO + _SA_RESTART = C.SA_RESTART + _SA_ONSTACK = C.SA_ONSTACK + + _PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + __SC_PAGE_SIZE = C._SC_PAGE_SIZE + __SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN + + _F_SETFD = C.F_SETFD + _F_SETFL = C.F_SETFL + _F_GETFD = C.F_GETFD + _F_GETFL = C.F_GETFL + _FD_CLOEXEC = C.FD_CLOEXEC +) + +type sigset C.sigset_t +type siginfo C.siginfo_t +type timespec C.struct_timespec +type timestruc C.struct_timestruc_t +type timeval C.struct_timeval +type itimerval C.struct_itimerval + +type stackt C.stack_t +type sigcontext C.struct_sigcontext +type ucontext C.ucontext_t +type _Ctype_struct___extctx uint64 // ucontext use a pointer to this structure but it shouldn't be used +type jmpbuf C.struct___jmpbuf +type context64 C.struct___context64 +type sigactiont C.struct_sigaction +type tstate C.struct_tstate +type rusage C.struct_rusage + +type pthread C.pthread_t +type pthread_attr C.pthread_attr_t + +type semt C.sem_t diff --git a/src/runtime/defs_aix_ppc64.go b/src/runtime/defs_aix_ppc64.go new file mode 100644 index 0000000000000..e7480d06ba23a --- /dev/null +++ b/src/runtime/defs_aix_ppc64.go @@ -0,0 +1,203 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
 + +// +build aix + +package runtime + +const ( + _EPERM = 0x1 + _ENOENT = 0x2 + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + _EACCES = 0xd + _EFAULT = 0xe + _EINVAL = 0x16 + _ETIMEDOUT = 0x4e + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANONYMOUS = 0x10 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x100 + _MADV_DONTNEED = 0x4 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0xa + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0x1e + _SIGSEGV = 0xb + _SIGUSR2 = 0x1f + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGCHLD = 0x14 + _SIGCONT = 0x13 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x10 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x22 + _SIGPROF = 0x20 + _SIGWINCH = 0x1c + _SIGIO = 0x17 + _SIGPWR = 0x1d + _SIGSYS = 0xc + _SIGTERM = 0xf + _SIGEMT = 0x7 + _SIGWAITING = 0x27 + + _FPE_INTDIV = 0x14 + _FPE_INTOVF = 0x15 + _FPE_FLTDIV = 0x16 + _FPE_FLTOVF = 0x17 + _FPE_FLTUND = 0x18 + _FPE_FLTRES = 0x19 + _FPE_FLTINV = 0x1a + _FPE_FLTSUB = 0x1b + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x32 + _SEGV_ACCERR = 0x33 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _O_RDONLY = 0x0 + + _SS_DISABLE = 0x2 + _SI_USER = 0x0 + _SIG_BLOCK = 0x0 + _SIG_UNBLOCK = 0x1 + _SIG_SETMASK = 0x2 + + _SA_SIGINFO = 0x100 + _SA_RESTART = 0x8 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + __SC_PAGE_SIZE = 0x30 + __SC_NPROCESSORS_ONLN = 0x48 + + _F_SETFD = 0x2 + _F_SETFL = 0x4 + _F_GETFD = 0x1 + _F_GETFL = 0x3 + _FD_CLOEXEC = 0x1 +) + +type sigset [4]uint64 + +var sigset_all = sigset{^uint64(0), ^uint64(0), ^uint64(0), ^uint64(0)} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 + si_pid int32 + si_uid uint32 + si_status int32 + si_addr uintptr + si_band int64 + si_value [2]int32 // [8]byte + __si_flags int32 + __pad [3]int32 +} + +type timespec struct { + 
tv_sec int64 + tv_nsec int64 +} +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + __pad [4]int32 + pas_cgo_0 [4]byte +} + +type sigcontext struct { + sc_onstack int32 + pad_cgo_0 [4]byte + sc_mask sigset + sc_uerror int32 + sc_jmpbuf context64 +} + +type ucontext struct { + __sc_onstack int32 + pad_cgo_0 [4]byte + uc_sigmask sigset + __sc_error int32 + pad_cgo_1 [4]byte + uc_mcontext context64 + uc_link *ucontext + uc_stack stackt + __extctx uintptr // pointer to struct __extctx but we don't use it + __extctx_magic int32 + __pad int32 +} + +type context64 struct { + gpr [32]uint64 + msr uint64 + iar uint64 + lr uint64 + ctr uint64 + cr uint32 + xer uint32 + fpscr uint32 + fpscrx uint32 + except [1]uint64 + fpr [32]float64 + fpeu uint8 + fpinfo uint8 + fpscr24_31 uint8 + pad [1]uint8 + excp_type int32 +} + +type sigactiont struct { + sa_handler uintptr // a union of two pointer + sa_mask sigset + sa_flags int32 + pad_cgo_0 [4]byte +} + +type pthread uint32 +type pthread_attr *byte + +type semt int32 diff --git a/src/runtime/defs_freebsd.go b/src/runtime/defs_freebsd.go index 29a6ec20a56ba..53c1508eb798f 100644 --- a/src/runtime/defs_freebsd.go +++ b/src/runtime/defs_freebsd.go @@ -19,6 +19,7 @@ package runtime #include #include #include +#define _WANT_FREEBSD11_KEVENT 1 #include #include #include @@ -149,7 +150,7 @@ type Itimerval C.struct_itimerval type Umtx_time C.struct__umtx_time -type Kevent C.struct_kevent +type Kevent C.struct_kevent_freebsd11 type bintime C.struct_bintime type vdsoTimehands C.struct_vdso_timehands diff --git a/src/runtime/defs_linux.go b/src/runtime/defs_linux.go index 553366a50ba6c..2d810136d9871 100644 --- a/src/runtime/defs_linux.go +++ b/src/runtime/defs_linux.go @@ -47,7 +47,10 @@ const ( MAP_PRIVATE = 
C.MAP_PRIVATE MAP_FIXED = C.MAP_FIXED - MADV_DONTNEED = C.MADV_DONTNEED + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE SA_RESTART = C.SA_RESTART SA_ONSTACK = C.SA_ONSTACK diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go index a7e435f854fe3..0ebac17aefa9e 100644 --- a/src/runtime/defs_linux_386.go +++ b/src/runtime/defs_linux_386.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go index e8c6a212db770..c0a0ef0dd4ec5 100644 --- a/src/runtime/defs_linux_amd64.go +++ b/src/runtime/defs_linux_amd64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go index 62ec8fab5e9b5..43946bb79ca8e 100644 --- a/src/runtime/defs_linux_arm.go +++ b/src/runtime/defs_linux_arm.go @@ -16,6 +16,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_arm64.go b/src/runtime/defs_linux_arm64.go index c295bc0257520..c2cc281ab4f5c 100644 --- a/src/runtime/defs_linux_arm64.go +++ b/src/runtime/defs_linux_arm64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go index df11cb0965d69..9dacd5d1e9bac 100644 --- a/src/runtime/defs_linux_mips64x.go +++ b/src/runtime/defs_linux_mips64x.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_mipsx.go b/src/runtime/defs_linux_mipsx.go index 702fbb51c861c..9532ac54ee83d 100644 --- 
a/src/runtime/defs_linux_mipsx.go +++ b/src/runtime/defs_linux_mipsx.go @@ -22,6 +22,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_ppc64.go b/src/runtime/defs_linux_ppc64.go index 45363d12854be..5a4326da07a94 100644 --- a/src/runtime/defs_linux_ppc64.go +++ b/src/runtime/defs_linux_ppc64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_ppc64le.go b/src/runtime/defs_linux_ppc64le.go index 45363d12854be..5a4326da07a94 100644 --- a/src/runtime/defs_linux_ppc64le.go +++ b/src/runtime/defs_linux_ppc64le.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_s390x.go b/src/runtime/defs_linux_s390x.go index ab90723f75485..a6cc9c48e91de 100644 --- a/src/runtime/defs_linux_s390x.go +++ b/src/runtime/defs_linux_s390x.go @@ -19,6 +19,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_openbsd_arm.go b/src/runtime/defs_openbsd_arm.go index 1eea9ad45aeac..59f9410e1d63f 100644 --- a/src/runtime/defs_openbsd_arm.go +++ b/src/runtime/defs_openbsd_arm.go @@ -114,13 +114,17 @@ type sigcontext struct { sc_usr_lr uint32 sc_svc_lr uint32 sc_pc uint32 + sc_fpused uint32 + sc_fpscr uint32 + sc_fpreg [32]uint64 } type siginfo struct { - si_signo int32 - si_code int32 - si_errno int32 - _data [116]byte + si_signo int32 + si_code int32 + si_errno int32 + pad_cgo_0 [4]byte + _data [120]byte } type stackt struct { @@ -130,8 +134,9 @@ type stackt struct { } type timespec struct { - tv_sec int64 - tv_nsec int32 + tv_sec int64 + tv_nsec int32 + pad_cgo_0 [4]byte } func (ts *timespec) set_sec(x int64) { @@ -143,8 +148,9 @@ func (ts *timespec) set_nsec(x int32) { } type timeval 
struct { - tv_sec int64 - tv_usec int32 + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte } func (tv *timeval) set_usec(x int32) { @@ -157,10 +163,12 @@ type itimerval struct { } type keventt struct { - ident uint32 - filter int16 - flags uint16 - fflags uint32 - data int64 - udata *byte + ident uint32 + filter int16 + flags uint16 + fflags uint32 + pad_cgo_0 [4]byte + data int64 + udata *byte + pad_cgo_1 [4]byte } diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go index 589a7884cdaec..8c0d6d8b98fdc 100644 --- a/src/runtime/defs_windows_386.go +++ b/src/runtime/defs_windows_386.go @@ -104,8 +104,12 @@ type context struct { func (c *context) ip() uintptr { return uintptr(c.eip) } func (c *context) sp() uintptr { return uintptr(c.esp) } -func (c *context) setip(x uintptr) { c.eip = uint32(x) } -func (c *context) setsp(x uintptr) { c.esp = uint32(x) } +// 386 does not have link register, so this returns 0. +func (c *context) lr() uintptr { return 0 } +func (c *context) set_lr(x uintptr) {} + +func (c *context) set_ip(x uintptr) { c.eip = uint32(x) } +func (c *context) set_sp(x uintptr) { c.esp = uint32(x) } func dumpregs(r *context) { print("eax ", hex(r.eax), "\n") diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go index 1e173e934d67d..42a446d3cdfc7 100644 --- a/src/runtime/defs_windows_amd64.go +++ b/src/runtime/defs_windows_amd64.go @@ -119,8 +119,12 @@ type context struct { func (c *context) ip() uintptr { return uintptr(c.rip) } func (c *context) sp() uintptr { return uintptr(c.rsp) } -func (c *context) setip(x uintptr) { c.rip = uint64(x) } -func (c *context) setsp(x uintptr) { c.rsp = uint64(x) } +// Amd64 does not have link register, so this returns 0. 
+func (c *context) lr() uintptr { return 0 } +func (c *context) set_lr(x uintptr) {} + +func (c *context) set_ip(x uintptr) { c.rip = uint64(x) } +func (c *context) set_sp(x uintptr) { c.rsp = uint64(x) } func dumpregs(r *context) { print("rax ", hex(r.rax), "\n") diff --git a/src/runtime/defs_windows_arm.go b/src/runtime/defs_windows_arm.go new file mode 100644 index 0000000000000..049f5b613a74d --- /dev/null +++ b/src/runtime/defs_windows_arm.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const ( + _PROT_NONE = 0 + _PROT_READ = 1 + _PROT_WRITE = 2 + _PROT_EXEC = 4 + + _MAP_ANON = 1 + _MAP_PRIVATE = 2 + + _DUPLICATE_SAME_ACCESS = 0x2 + _THREAD_PRIORITY_HIGHEST = 0x2 + + _SIGINT = 0x2 + _CTRL_C_EVENT = 0x0 + _CTRL_BREAK_EVENT = 0x1 + + _CONTEXT_CONTROL = 0x10001 + _CONTEXT_FULL = 0x10007 + + _EXCEPTION_ACCESS_VIOLATION = 0xc0000005 + _EXCEPTION_BREAKPOINT = 0x80000003 + _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d + _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e + _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f + _EXCEPTION_FLT_OVERFLOW = 0xc0000091 + _EXCEPTION_FLT_UNDERFLOW = 0xc0000093 + _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094 + _EXCEPTION_INT_OVERFLOW = 0xc0000095 + + _INFINITE = 0xffffffff + _WAIT_TIMEOUT = 0x102 + + _EXCEPTION_CONTINUE_EXECUTION = -0x1 + _EXCEPTION_CONTINUE_SEARCH = 0x0 +) + +type systeminfo struct { + anon0 [4]byte + dwpagesize uint32 + lpminimumapplicationaddress *byte + lpmaximumapplicationaddress *byte + dwactiveprocessormask uint32 + dwnumberofprocessors uint32 + dwprocessortype uint32 + dwallocationgranularity uint32 + wprocessorlevel uint16 + wprocessorrevision uint16 +} + +type exceptionrecord struct { + exceptioncode uint32 + exceptionflags uint32 + exceptionrecord *exceptionrecord + exceptionaddress *byte + numberparameters uint32 + exceptioninformation [15]uint32 +} + +type neon128 
struct { + low uint64 + high int64 +} + +type context struct { + contextflags uint32 + r0 uint32 + r1 uint32 + r2 uint32 + r3 uint32 + r4 uint32 + r5 uint32 + r6 uint32 + r7 uint32 + r8 uint32 + r9 uint32 + r10 uint32 + r11 uint32 + r12 uint32 + + spr uint32 + lrr uint32 + pc uint32 + cpsr uint32 + + fpscr uint32 + padding uint32 + + floatNeon [16]neon128 + + bvr [8]uint32 + bcr [8]uint32 + wvr [1]uint32 + wcr [1]uint32 + padding2 [2]uint32 +} + +func (c *context) ip() uintptr { return uintptr(c.pc) } +func (c *context) sp() uintptr { return uintptr(c.spr) } +func (c *context) lr() uintptr { return uintptr(c.lrr) } + +func (c *context) set_ip(x uintptr) { c.pc = uint32(x) } +func (c *context) set_sp(x uintptr) { c.spr = uint32(x) } +func (c *context) set_lr(x uintptr) { c.lrr = uint32(x) } + +func dumpregs(r *context) { + print("r0 ", hex(r.r0), "\n") + print("r1 ", hex(r.r1), "\n") + print("r2 ", hex(r.r2), "\n") + print("r3 ", hex(r.r3), "\n") + print("r4 ", hex(r.r4), "\n") + print("r5 ", hex(r.r5), "\n") + print("r6 ", hex(r.r6), "\n") + print("r7 ", hex(r.r7), "\n") + print("r8 ", hex(r.r8), "\n") + print("r9 ", hex(r.r9), "\n") + print("r10 ", hex(r.r10), "\n") + print("r11 ", hex(r.r11), "\n") + print("r12 ", hex(r.r12), "\n") + print("sp ", hex(r.spr), "\n") + print("lr ", hex(r.lrr), "\n") + print("pc ", hex(r.pc), "\n") + print("cpsr ", hex(r.cpsr), "\n") +} + +type overlapped struct { + internal uint32 + internalhigh uint32 + anon0 [8]byte + hevent *byte +} + +type memoryBasicInformation struct { + baseAddress uintptr + allocationBase uintptr + allocationProtect uint32 + regionSize uintptr + state uint32 + protect uint32 + type_ uint32 +} + +func stackcheck() { + // TODO: not implemented on ARM +} diff --git a/src/runtime/env_posix.go b/src/runtime/env_posix.go index 032e7122ce360..03208c7c10f23 100644 --- a/src/runtime/env_posix.go +++ b/src/runtime/env_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be 
found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package runtime @@ -14,13 +14,36 @@ func gogetenv(key string) string { throw("getenv before env init") } for _, s := range env { - if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key { + if len(s) > len(key) && s[len(key)] == '=' && envKeyEqual(s[:len(key)], key) { return s[len(key)+1:] } } return "" } +// envKeyEqual reports whether a == b, with ASCII-only case insensitivity +// on Windows. The two strings must have the same length. +func envKeyEqual(a, b string) bool { + if GOOS == "windows" { // case insensitive + for i := 0; i < len(a); i++ { + ca, cb := a[i], b[i] + if ca == cb || lowerASCII(ca) == lowerASCII(cb) { + continue + } + return false + } + return true + } + return a == b +} + +func lowerASCII(c byte) byte { + if 'A' <= c && c <= 'Z' { + return c + ('a' - 'A') + } + return c +} + var _cgo_setenv unsafe.Pointer // pointer to C function var _cgo_unsetenv unsafe.Pointer // pointer to C function diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go index d34c1fd7dc58d..e97dd52f209d9 100644 --- a/src/runtime/export_debug_test.go +++ b/src/runtime/export_debug_test.go @@ -50,19 +50,31 @@ func InjectDebugCall(gp *g, fn, args interface{}, tkill func(tid int) error) (in h.gp = gp h.fv, h.argp, h.argSize = fv, argp, argSize h.handleF = h.handle // Avoid allocating closure during signal - noteclear(&h.done) defer func() { testSigtrap = nil }() - testSigtrap = h.inject - if err := tkill(tid); err != nil { - return nil, err - } - // Wait for completion. - notetsleepg(&h.done, -1) - if len(h.err) != 0 { - return nil, h.err + for i := 0; ; i++ { + testSigtrap = h.inject + noteclear(&h.done) + h.err = "" + + if err := tkill(tid); err != nil { + return nil, err + } + // Wait for completion. 
+ notetsleepg(&h.done, -1) + if h.err != "" { + switch h.err { + case "retry _Grunnable", "executing on Go runtime stack": + // These are transient states. Try to get out of them. + if i < 100 { + Gosched() + continue + } + } + return nil, h.err + } + return h.panic, nil } - return h.panic, nil } type debugCallHandler struct { @@ -99,12 +111,18 @@ func (h *debugCallHandler) inject(info *siginfo, ctxt *sigctxt, gp2 *g) bool { h.savedRegs.fpstate = nil // Set PC to debugCallV1. ctxt.set_rip(uint64(funcPC(debugCallV1))) + // Call injected. Switch to the debugCall protocol. + testSigtrap = h.handleF + case _Grunnable: + // Ask InjectDebugCall to pause for a bit and then try + // again to interrupt this goroutine. + h.err = plainError("retry _Grunnable") + notewakeup(&h.done) default: h.err = plainError("goroutine in unexpected state at call inject") - return true + notewakeup(&h.done) } - // Switch to the debugCall protocol and resume execution. - testSigtrap = h.handleF + // Resume execution. return true } @@ -115,7 +133,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool { return false } f := findfunc(uintptr(ctxt.rip())) - if !(hasprefix(funcname(f), "runtime.debugCall") || hasprefix(funcname(f), "debugCall")) { + if !(hasPrefix(funcname(f), "runtime.debugCall") || hasPrefix(funcname(f), "debugCall")) { println("trap in unknown function", funcname(f)) return false } @@ -149,6 +167,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool { sp := ctxt.rsp() reason := *(*string)(unsafe.Pointer(uintptr(sp))) h.err = plainError(reason) + // Don't wake h.done. We need to transition to status 16 first. case 16: // Restore all registers except RIP and RSP. rip, rsp := ctxt.rip(), ctxt.rsp() @@ -162,6 +181,7 @@ func (h *debugCallHandler) handle(info *siginfo, ctxt *sigctxt, gp2 *g) bool { notewakeup(&h.done) default: h.err = plainError("unexpected debugCallV1 status") + notewakeup(&h.done) } // Resume execution. 
return true diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 89f887b765db6..9eaf92dc7cbb4 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -297,6 +297,7 @@ func ReadMemStatsSlow() (base, slow MemStats) { slow.TotalAlloc = 0 slow.Mallocs = 0 slow.Frees = 0 + slow.HeapReleased = 0 var bySize [_NumSizeClasses]struct { Mallocs, Frees uint64 } @@ -336,6 +337,10 @@ func ReadMemStatsSlow() (base, slow MemStats) { slow.BySize[i].Frees = bySize[i].Frees } + for i := mheap_.scav.start(); i.valid(); i = i.next() { + slow.HeapReleased += uint64(i.span().released()) + } + getg().m.mallocing-- }) @@ -472,3 +477,39 @@ func stackOverflow(x *byte) { var buf [256]byte stackOverflow(&buf[0]) } + +func MapTombstoneCheck(m map[int]int) { + // Make sure emptyOne and emptyRest are distributed correctly. + // We should have a series of filled and emptyOne cells, followed by + // a series of emptyRest cells. + h := *(**hmap)(unsafe.Pointer(&m)) + i := interface{}(m) + t := *(**maptype)(unsafe.Pointer(&i)) + + for x := 0; x < 1<<h.B; x++ { + b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.bucketsize))) + n := 0 + for b := b0; b != nil; b = b.overflow(t) { + for i := 0; i < bucketCnt; i++ { + if b.tophash[i] != emptyRest { + n++ + } + } + } + k := 0 + for b := b0; b != nil; b = b.overflow(t) { + for i := 0; i < bucketCnt; i++ { + if k < n && b.tophash[i] == emptyRest { + panic("early emptyRest") + } + if k >= n && b.tophash[i] != emptyRest { + panic("late non-emptyRest") + } + if k == n-1 && b.tophash[i] == emptyOne { + panic("last non-emptyRest entry is emptyOne") + } + k++ + } + } + } +} diff --git a/src/runtime/export_unix_test.go b/src/runtime/export_unix_test.go index 54d577072ec91..eecdfb7eb2216 100644 --- a/src/runtime/export_unix_test.go +++ b/src/runtime/export_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime diff --git a/src/runtime/extern.go b/src/runtime/extern.go index 7171b139c3239..af858a331f63c 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -50,19 +50,13 @@ It is a comma-separated list of name=val pairs setting these named variables: gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines onto smaller stacks. In this mode, a goroutine's stack can only grow. - gcrescanstacks: setting gcrescanstacks=1 enables stack - re-scanning during the STW mark termination phase. This is - helpful for debugging if objects are being prematurely - garbage collected. - gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection, making every garbage collection a stop-the-world event. Setting gcstoptheworld=2 also disables concurrent sweeping after the garbage collection finishes. gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard error at each collection, summarizing the amount of memory collected and the - length of the pause. Setting gctrace=2 emits the same summary but also - repeats each collection. The format of this line is subject to change. + length of the pause. The format of this line is subject to change. Currently, it is: gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P where the fields are as follows: @@ -95,6 +89,11 @@ It is a comma-separated list of name=val pairs setting these named variables: released: # MB released to the system consumed: # MB allocated from the system + madvdontneed: setting madvdontneed=1 will use MADV_DONTNEED + instead of MADV_FREE on Linux when returning memory to the + kernel. This is less efficient, but causes RSS numbers to drop + more quickly. + memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate. 
When set to 0 memory profiling is disabled. Refer to the description of MemProfileRate for the default value. @@ -172,27 +171,13 @@ import "runtime/internal/sys" // program counter, file name, and line number within the file of the corresponding // call. The boolean ok is false if it was not possible to recover the information. func Caller(skip int) (pc uintptr, file string, line int, ok bool) { - // Make room for three PCs: the one we were asked for, - // what it called, so that CallersFrames can see if it "called" - // sigpanic, and possibly a PC for skipPleaseUseCallersFrames. - var rpc [3]uintptr - if callers(1+skip-1, rpc[:]) < 2 { - return - } - var stackExpander stackExpander - callers := stackExpander.init(rpc[:]) - // We asked for one extra, so skip that one. If this is sigpanic, - // stepping over this frame will set up state in Frames so the - // next frame is correct. - callers, _, ok = stackExpander.next(callers, true) - if !ok { + rpc := make([]uintptr, 1) + n := callers(skip+1, rpc[:]) + if n < 1 { return } - _, frame, _ := stackExpander.next(callers, true) - pc = frame.PC - file = frame.File - line = frame.Line - return + frame, _ := CallersFrames(rpc).Next() + return frame.PC, frame.File, frame.Line, frame.PC != 0 } // Callers fills the slice pc with the return program counters of function invocations @@ -208,6 +193,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) { // directly is discouraged, as is using FuncForPC on any of the // returned PCs, since these cannot account for inlining or return // program counter adjustment. +//go:noinline func Callers(skip int, pc []uintptr) int { // runtime.callers uses pc.array==nil as a signal // to print a stack trace. Pick off 0-length pc here @@ -238,6 +224,7 @@ func Version() string { // GOOS is the running program's operating system target: // one of darwin, freebsd, linux, and so on. +// To view possible combinations of GOOS and GOARCH, run "go tool dist list". 
const GOOS string = sys.GOOS // GOARCH is the running program's architecture target: diff --git a/src/runtime/fastlog2table.go b/src/runtime/fastlog2table.go index c36d5835f64b3..6ba4a7d3f24cc 100644 --- a/src/runtime/fastlog2table.go +++ b/src/runtime/fastlog2table.go @@ -1,4 +1,4 @@ -// AUTO-GENERATED by mkfastlog2table.go +// Code generated by mkfastlog2table.go; DO NOT EDIT. // Run go generate from src/runtime to update. // See mkfastlog2table.go for comments. diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h index e6e0306e65bbd..1ee67c86837ed 100644 --- a/src/runtime/funcdata.h +++ b/src/runtime/funcdata.h @@ -16,6 +16,7 @@ #define FUNCDATA_LocalsPointerMaps 1 #define FUNCDATA_InlTree 2 #define FUNCDATA_RegPointerMaps 3 +#define FUNCDATA_StackObjects 4 // Pseudo-assembly statements. diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 4895a0e2ac90b..51e8ea4d31393 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -21,6 +21,12 @@ func TestGcSys(t *testing.T) { if os.Getenv("GOGC") == "off" { t.Skip("skipping test; GOGC=off in environment") } + if runtime.GOOS == "windows" { + t.Skip("skipping test; GOOS=windows http://golang.org/issue/27156") + } + if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" { + t.Skip("skipping test; GOOS=linux GOARCH=arm64 https://github.com/golang/go/issues/27636") + } got := runTestProg(t, "testprog", "GCSys") want := "OK\n" if got != want { @@ -568,8 +574,8 @@ func BenchmarkWriteBarrier(b *testing.B) { n := &node{mkTree(level - 1), mkTree(level - 1)} if level == 10 { // Seed GC with enough early pointers so it - // doesn't accidentally switch to mark 2 when - // it only has the top of the tree. + // doesn't start termination barriers when it + // only has the top of the tree. 
wbRoots = append(wbRoots, n) } return n diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go index 7dd1a5607cfd5..0741f6361cfdf 100644 --- a/src/runtime/gcinfo_test.go +++ b/src/runtime/gcinfo_test.go @@ -35,14 +35,46 @@ func TestGCInfo(t *testing.T) { verifyGCInfo(t, "data eface", &dataEface, infoEface) verifyGCInfo(t, "data iface", &dataIface, infoIface) - verifyGCInfo(t, "stack Ptr", new(Ptr), infoPtr) - verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr) - verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar) - verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct()) - verifyGCInfo(t, "stack string", new(string), infoString) - verifyGCInfo(t, "stack slice", new([]string), infoSlice) - verifyGCInfo(t, "stack eface", new(interface{}), infoEface) - verifyGCInfo(t, "stack iface", new(Iface), infoIface) + { + var x Ptr + verifyGCInfo(t, "stack Ptr", &x, infoPtr) + runtime.KeepAlive(x) + } + { + var x ScalarPtr + verifyGCInfo(t, "stack ScalarPtr", &x, infoScalarPtr) + runtime.KeepAlive(x) + } + { + var x PtrScalar + verifyGCInfo(t, "stack PtrScalar", &x, infoPtrScalar) + runtime.KeepAlive(x) + } + { + var x BigStruct + verifyGCInfo(t, "stack BigStruct", &x, infoBigStruct()) + runtime.KeepAlive(x) + } + { + var x string + verifyGCInfo(t, "stack string", &x, infoString) + runtime.KeepAlive(x) + } + { + var x []string + verifyGCInfo(t, "stack slice", &x, infoSlice) + runtime.KeepAlive(x) + } + { + var x interface{} + verifyGCInfo(t, "stack eface", &x, infoEface) + runtime.KeepAlive(x) + } + { + var x Iface + verifyGCInfo(t, "stack iface", &x, infoIface) + runtime.KeepAlive(x) + } for i := 0; i < 10; i++ { verifyGCInfo(t, "heap Ptr", escape(new(Ptr)), trimDead(padDead(infoPtr))) diff --git a/src/runtime/hash_test.go b/src/runtime/hash_test.go index 7b8ebc4f3c00f..fe25a7f84be38 100644 --- a/src/runtime/hash_test.go +++ b/src/runtime/hash_test.go @@ -177,13 +177,13 @@ func twoNonZero(h *HashSet, n int) { b := 
make([]byte, n) // all zero - h.addB(b[:]) + h.addB(b) // one non-zero byte for i := 0; i < n; i++ { for x := 1; x < 256; x++ { b[i] = byte(x) - h.addB(b[:]) + h.addB(b) b[i] = 0 } } @@ -195,7 +195,7 @@ func twoNonZero(h *HashSet, n int) { for j := i + 1; j < n; j++ { for y := 1; y < 256; y++ { b[j] = byte(y) - h.addB(b[:]) + h.addB(b) b[j] = 0 } } diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 0fc02a8e8068b..ca56708a04d81 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -346,7 +346,7 @@ func dumpgoroutine(gp *g) { dumpint(uint64(gp.goid)) dumpint(uint64(gp.gopc)) dumpint(uint64(readgstatus(gp))) - dumpbool(isSystemGoroutine(gp)) + dumpbool(isSystemGoroutine(gp, false)) dumpbool(false) // isbackground dumpint(uint64(gp.waitsince)) dumpstr(gp.waitreason.String()) @@ -428,9 +428,9 @@ func dumproots() { dumpmemrange(unsafe.Pointer(firstmoduledata.bss), firstmoduledata.ebss-firstmoduledata.bss) dumpfields(firstmoduledata.gcbssmask) - // MSpan.types + // mspan.types for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { // Finalizers for sp := s.specials; sp != nil; sp = sp.next { if sp.kind != _KindSpecialFinalizer { @@ -453,7 +453,7 @@ var freemark [_PageSize / 8]bool func dumpobjs() { for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } p := s.base() @@ -616,7 +616,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, func dumpmemprof() { iterate_memprof(dumpmemprof_callback) for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } for sp := s.specials; sp != nil; sp = sp.next { @@ -637,7 +637,7 @@ var dumphdr = []byte("go1.7 heap dump\n") func mdump() { // make sure we're done sweeping for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { s.ensureSwept() } } @@ -661,7 +661,7 @@ func writeheapdump_m(fd uintptr) { 
_g_.waitreason = waitReasonDumpingHeap // Update stats so we can dump them. - // As a side effect, flushes all the MCaches so the MSpan.freelist + // As a side effect, flushes all the mcaches so the mspan.freelist // lists contain all the free objects. updatememstats() diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 7ab731151e5a2..8eca2e849d511 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -267,6 +267,34 @@ func panicnildottype(want *_type) { // Just to match other nil conversion errors, we don't for now. } +// The specialized convTx routines need a type descriptor to use when calling mallocgc. +// We don't need the type to be exact, just to have the correct size, alignment, and pointer-ness. +// However, when debugging, it'd be nice to have some indication in mallocgc where the types came from, +// so we use named types here. +// We then construct interface values of these types, +// and then extract the type word to use as needed. +type ( + uint16InterfacePtr uint16 + uint32InterfacePtr uint32 + uint64InterfacePtr uint64 + stringInterfacePtr string + sliceInterfacePtr []byte +) + +var ( + uint16Eface interface{} = uint16InterfacePtr(0) + uint32Eface interface{} = uint32InterfacePtr(0) + uint64Eface interface{} = uint64InterfacePtr(0) + stringEface interface{} = stringInterfacePtr("") + sliceEface interface{} = sliceInterfacePtr(nil) + + uint16Type *_type = (*eface)(unsafe.Pointer(&uint16Eface))._type + uint32Type *_type = (*eface)(unsafe.Pointer(&uint32Eface))._type + uint64Type *_type = (*eface)(unsafe.Pointer(&uint64Eface))._type + stringType *_type = (*eface)(unsafe.Pointer(&stringEface))._type + sliceType *_type = (*eface)(unsafe.Pointer(&sliceEface))._type +) + // The conv and assert functions below do very similar things. // The convXXX functions are guaranteed by the compiler to succeed. 
// The assertXXX functions may fail (either panicking or returning false, @@ -290,80 +318,54 @@ func convT2E(t *_type, elem unsafe.Pointer) (e eface) { return } -func convT2E16(t *_type, val uint16) (e eface) { - var x unsafe.Pointer +func convT16(val uint16) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(2, t, false) + x = mallocgc(2, uint16Type, false) *(*uint16)(x) = val } - e._type = t - e.data = x return } -func convT2E32(t *_type, val uint32) (e eface) { - var x unsafe.Pointer +func convT32(val uint32) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(4, t, false) + x = mallocgc(4, uint32Type, false) *(*uint32)(x) = val } - e._type = t - e.data = x return } -func convT2E64(t *_type, val uint64) (e eface) { - var x unsafe.Pointer +func convT64(val uint64) (x unsafe.Pointer) { if val == 0 { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(8, t, false) + x = mallocgc(8, uint64Type, false) *(*uint64)(x) = val } - e._type = t - e.data = x return } -func convT2Estring(t *_type, elem unsafe.Pointer) (e eface) { - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Estring)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if *(*string)(elem) == "" { +func convTstring(val string) (x unsafe.Pointer) { + if val == "" { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(t.size, t, true) - *(*string)(x) = *(*string)(elem) + x = mallocgc(unsafe.Sizeof(val), stringType, true) + *(*string)(x) = val } - e._type = t - e.data = x return } -func convT2Eslice(t *_type, elem unsafe.Pointer) (e eface) { - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Eslice)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if v := *(*slice)(elem); uintptr(v.array) == 0 { +func convTslice(val []byte) (x unsafe.Pointer) { + // Note: this must work for any element type, not just byte. 
+ if (*slice)(unsafe.Pointer(&val)).array == nil { x = unsafe.Pointer(&zeroVal[0]) } else { - x = mallocgc(t.size, t, true) - *(*slice)(x) = *(*slice)(elem) + x = mallocgc(unsafe.Sizeof(val), sliceType, true) + *(*[]byte)(x) = val } - e._type = t - e.data = x return } @@ -396,88 +398,6 @@ func convT2I(tab *itab, elem unsafe.Pointer) (i iface) { return } -func convT2I16(tab *itab, val uint16) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(2, t, false) - *(*uint16)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2I32(tab *itab, val uint32) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(4, t, false) - *(*uint32)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2I64(tab *itab, val uint64) (i iface) { - t := tab._type - var x unsafe.Pointer - if val == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(8, t, false) - *(*uint64)(x) = val - } - i.tab = tab - i.data = x - return -} - -func convT2Istring(tab *itab, elem unsafe.Pointer) (i iface) { - t := tab._type - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Istring)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if *(*string)(elem) == "" { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(t.size, t, true) - *(*string)(x) = *(*string)(elem) - } - i.tab = tab - i.data = x - return -} - -func convT2Islice(tab *itab, elem unsafe.Pointer) (i iface) { - t := tab._type - if raceenabled { - raceReadObjectPC(t, elem, getcallerpc(), funcPC(convT2Islice)) - } - if msanenabled { - msanread(elem, t.size) - } - var x unsafe.Pointer - if v := *(*slice)(elem); uintptr(v.array) == 0 { - x = unsafe.Pointer(&zeroVal[0]) - } else { - x = mallocgc(t.size, t, true) - *(*slice)(x) = *(*slice)(elem) - } - i.tab = tab - i.data = x - return -} - func convT2Inoptr(tab *itab, 
elem unsafe.Pointer) (i iface) { t := tab._type if raceenabled { diff --git a/src/runtime/internal/atomic/asm_386.s b/src/runtime/internal/atomic/asm_386.s index 86a3ef33b9b3a..13289a88d068b 100644 --- a/src/runtime/internal/atomic/asm_386.s +++ b/src/runtime/internal/atomic/asm_386.s @@ -23,6 +23,9 @@ TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13 TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13 JMP runtime∕internal∕atomic·Cas(SB) +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-13 + JMP runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8 JMP runtime∕internal∕atomic·Load(SB) @@ -180,6 +183,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8 XCHGL AX, 0(BX) RET +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8 + JMP runtime∕internal∕atomic·Store(SB) + // uint64 atomicload64(uint64 volatile* addr); TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12 MOVL ptr+0(FP), AX diff --git a/src/runtime/internal/atomic/asm_amd64.s b/src/runtime/internal/atomic/asm_amd64.s index 6fb5211c9cedd..e18aee7d59e31 100644 --- a/src/runtime/internal/atomic/asm_amd64.s +++ b/src/runtime/internal/atomic/asm_amd64.s @@ -43,6 +43,9 @@ TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25 TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25 JMP runtime∕internal∕atomic·Cas64(SB) +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17 + JMP runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16 JMP runtime∕internal∕atomic·Load64(SB) @@ -130,6 +133,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12 XCHGL AX, 0(BX) RET +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12 + JMP runtime∕internal∕atomic·Store(SB) + TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16 MOVQ ptr+0(FP), BX MOVQ val+8(FP), AX diff --git a/src/runtime/internal/atomic/asm_amd64p32.s b/src/runtime/internal/atomic/asm_amd64p32.s index ff590e601b232..35b5ef205ec2a 100644 --- 
a/src/runtime/internal/atomic/asm_amd64p32.s +++ b/src/runtime/internal/atomic/asm_amd64p32.s @@ -23,6 +23,9 @@ TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17 TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-17 JMP runtime∕internal∕atomic·Cas(SB) +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17 + JMP runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12 JMP runtime∕internal∕atomic·Load(SB) @@ -130,6 +133,9 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8 XCHGL AX, 0(BX) RET +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8 + JMP runtime∕internal∕atomic·Store(SB) + TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16 MOVL ptr+0(FP), BX MOVQ val+8(FP), AX diff --git a/src/runtime/internal/atomic/asm_arm.s b/src/runtime/internal/atomic/asm_arm.s index 09724c1c34a62..d4ef11560e773 100644 --- a/src/runtime/internal/atomic/asm_arm.s +++ b/src/runtime/internal/atomic/asm_arm.s @@ -53,12 +53,18 @@ casfail: TEXT runtime∕internal∕atomic·Loadp(SB),NOSPLIT|NOFRAME,$0-8 B runtime∕internal∕atomic·Load(SB) +TEXT runtime∕internal∕atomic·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8 + B runtime∕internal∕atomic·Load(SB) + TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13 B runtime∕internal∕atomic·Cas(SB) TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0-13 B runtime∕internal∕atomic·Cas(SB) +TEXT runtime∕internal∕atomic·CasRel(SB),NOSPLIT,$0-13 + B runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8 B runtime∕internal∕atomic·Load(SB) @@ -71,6 +77,9 @@ TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8 TEXT runtime∕internal∕atomic·StorepNoWB(SB),NOSPLIT,$0-8 B runtime∕internal∕atomic·Store(SB) +TEXT runtime∕internal∕atomic·StoreRel(SB),NOSPLIT,$0-8 + B runtime∕internal∕atomic·Store(SB) + TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12 B runtime∕internal∕atomic·Xadd(SB) diff --git a/src/runtime/internal/atomic/asm_arm64.s 
b/src/runtime/internal/atomic/asm_arm64.s index 56b89a5a0b038..8336a859ad03d 100644 --- a/src/runtime/internal/atomic/asm_arm64.s +++ b/src/runtime/internal/atomic/asm_arm64.s @@ -29,6 +29,9 @@ ok: TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25 B runtime∕internal∕atomic·Cas64(SB) +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17 + B runtime∕internal∕atomic·Cas(SB) + TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16 B runtime∕internal∕atomic·Load64(SB) diff --git a/src/runtime/internal/atomic/asm_mips64x.s b/src/runtime/internal/atomic/asm_mips64x.s index 19d131e5a61d2..9cb10371b7a48 100644 --- a/src/runtime/internal/atomic/asm_mips64x.s +++ b/src/runtime/internal/atomic/asm_mips64x.s @@ -62,6 +62,9 @@ cas64_fail: TEXT ·Casuintptr(SB), NOSPLIT, $0-25 JMP ·Cas64(SB) +TEXT ·CasRel(SB), NOSPLIT, $0-17 + JMP ·Cas(SB) + TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16 JMP ·Load64(SB) @@ -152,6 +155,9 @@ TEXT ·Xchguintptr(SB), NOSPLIT, $0-24 TEXT ·StorepNoWB(SB), NOSPLIT, $0-16 JMP ·Store64(SB) +TEXT ·StoreRel(SB), NOSPLIT, $0-12 + JMP ·Store(SB) + TEXT ·Store(SB), NOSPLIT, $0-12 MOVV ptr+0(FP), R1 MOVW val+8(FP), R2 diff --git a/src/runtime/internal/atomic/asm_mipsx.s b/src/runtime/internal/atomic/asm_mipsx.s index 30550fd02e8d1..73d7ea3ad4d6f 100644 --- a/src/runtime/internal/atomic/asm_mipsx.s +++ b/src/runtime/internal/atomic/asm_mipsx.s @@ -70,6 +70,9 @@ try_xchg: TEXT ·Casuintptr(SB),NOSPLIT,$0-13 JMP ·Cas(SB) +TEXT ·CasRel(SB),NOSPLIT,$0-13 + JMP ·Cas(SB) + TEXT ·Loaduintptr(SB),NOSPLIT,$0-8 JMP ·Load(SB) @@ -100,6 +103,9 @@ TEXT ·Xchguintptr(SB),NOSPLIT,$0-12 TEXT ·StorepNoWB(SB),NOSPLIT,$0-8 JMP ·Store(SB) +TEXT ·StoreRel(SB),NOSPLIT,$0-8 + JMP ·Store(SB) + // void Or8(byte volatile*, byte); TEXT ·Or8(SB),NOSPLIT,$0-5 MOVW ptr+0(FP), R1 diff --git a/src/runtime/internal/atomic/asm_ppc64x.s b/src/runtime/internal/atomic/asm_ppc64x.s index a2ed4adc91a03..052b031cfbace 100644 --- a/src/runtime/internal/atomic/asm_ppc64x.s +++ 
b/src/runtime/internal/atomic/asm_ppc64x.s @@ -59,6 +59,24 @@ cas64_fail: MOVB R0, ret+24(FP) RET +TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17 + MOVD ptr+0(FP), R3 + MOVWZ old+8(FP), R4 + MOVWZ new+12(FP), R5 + LWSYNC +cas_again: + LWAR (R3), $0, R6 // 0 = Mutex release hint + CMPW R6, R4 + BNE cas_fail + STWCCC R5, (R3) + BNE cas_again + MOVD $1, R3 + MOVB R3, ret+16(FP) + RET +cas_fail: + MOVB R0, ret+16(FP) + RET + TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25 BR runtime∕internal∕atomic·Cas64(SB) @@ -159,6 +177,13 @@ TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16 MOVD R4, 0(R3) RET +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12 + MOVD ptr+0(FP), R3 + MOVW val+8(FP), R4 + LWSYNC + MOVW R4, 0(R3) + RET + // void runtime∕internal∕atomic·Or8(byte volatile*, byte); TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9 MOVD ptr+0(FP), R3 diff --git a/src/runtime/internal/atomic/asm_s390x.s b/src/runtime/internal/atomic/asm_s390x.s index e25703e0772a3..512fde5a12465 100644 --- a/src/runtime/internal/atomic/asm_s390x.s +++ b/src/runtime/internal/atomic/asm_s390x.s @@ -48,6 +48,10 @@ cas64_fail: TEXT ·Casuintptr(SB), NOSPLIT, $0-25 BR ·Cas64(SB) +// func CasRel(ptr *uint32, old, new uint32) bool +TEXT ·CasRel(SB), NOSPLIT, $0-17 + BR ·Cas(SB) + // func Loaduintptr(ptr *uintptr) uintptr TEXT ·Loaduintptr(SB), NOSPLIT, $0-16 BR ·Load64(SB) diff --git a/src/runtime/internal/atomic/atomic_386.go b/src/runtime/internal/atomic/atomic_386.go index 4284d2bd7d240..ad71ebd971a7b 100644 --- a/src/runtime/internal/atomic/atomic_386.go +++ b/src/runtime/internal/atomic/atomic_386.go @@ -20,6 +20,12 @@ func Loadp(ptr unsafe.Pointer) unsafe.Pointer { return *(*unsafe.Pointer)(ptr) } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:noescape func Xadd64(ptr *uint64, delta int64) uint64 @@ -52,11 +58,17 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool 
+//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) //go:noescape func Store64(ptr *uint64, val uint64) +//go:noescape +func StoreRel(ptr *uint32, val uint32) + // NO go:noescape annotation; see atomic_pointer.go. func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/runtime/internal/atomic/atomic_amd64x.go b/src/runtime/internal/atomic/atomic_amd64x.go index 54851d30f4575..d4fe461609988 100644 --- a/src/runtime/internal/atomic/atomic_amd64x.go +++ b/src/runtime/internal/atomic/atomic_amd64x.go @@ -26,6 +26,12 @@ func Load64(ptr *uint64) uint64 { return *ptr } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:noescape func Xadd(ptr *uint32, delta int32) uint32 @@ -55,12 +61,18 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) //go:noescape func Store64(ptr *uint64, val uint64) +//go:noescape +func StoreRel(ptr *uint32, val uint32) + // StorepNoWB performs *ptr = val atomically and without a write // barrier. 
// diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go index 4ed7e991fe294..51b42ba23846b 100644 --- a/src/runtime/internal/atomic/atomic_arm.go +++ b/src/runtime/internal/atomic/atomic_arm.go @@ -7,7 +7,7 @@ package atomic import ( - "runtime/internal/sys" + "internal/cpu" "unsafe" ) @@ -31,7 +31,7 @@ func (l *spinlock) unlock() { var locktab [57]struct { l spinlock - pad [sys.CacheLineSize - unsafe.Sizeof(spinlock{})]byte + pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte } func addrLock(addr *uint64) *spinlock { @@ -74,6 +74,9 @@ func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer) //go:noescape func Store(addr *uint32, v uint32) +//go:noescape +func StoreRel(addr *uint32, v uint32) + //go:nosplit func goCas64(addr *uint64, old, new uint64) bool { if uintptr(unsafe.Pointer(addr))&7 != 0 { @@ -181,9 +184,15 @@ func Load(addr *uint32) uint32 //go:noescape func Loadp(addr unsafe.Pointer) unsafe.Pointer +//go:noescape +func LoadAcq(addr *uint32) uint32 + //go:noescape func Cas64(addr *uint64, old, new uint64) bool +//go:noescape +func CasRel(addr *uint32, old, new uint32) bool + //go:noescape func Xadd64(addr *uint64, delta int64) uint64 diff --git a/src/runtime/internal/atomic/atomic_arm64.go b/src/runtime/internal/atomic/atomic_arm64.go index 3554b7f23674c..a2da27e7ed4eb 100644 --- a/src/runtime/internal/atomic/atomic_arm64.go +++ b/src/runtime/internal/atomic/atomic_arm64.go @@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64 //go:noescape func Loadp(ptr unsafe.Pointer) unsafe.Pointer +//go:noescape +func LoadAcq(addr *uint32) uint32 + //go:noescape func Or8(ptr *uint8, val uint8) @@ -44,6 +47,9 @@ func And8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) @@ -52,3 +58,6 @@ func Store64(ptr *uint64, val uint64) // NO go:noescape annotation; see 
atomic_pointer.go. func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) diff --git a/src/runtime/internal/atomic/atomic_arm64.s b/src/runtime/internal/atomic/atomic_arm64.s index 354fd1e94bb7b..c979f2246f516 100644 --- a/src/runtime/internal/atomic/atomic_arm64.s +++ b/src/runtime/internal/atomic/atomic_arm64.s @@ -25,9 +25,16 @@ TEXT ·Loadp(SB),NOSPLIT,$0-16 MOVD R0, ret+8(FP) RET +// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr) +TEXT ·LoadAcq(SB),NOSPLIT,$0-12 + B ·Load(SB) + TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16 B runtime∕internal∕atomic·Store64(SB) +TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12 + B runtime∕internal∕atomic·Store(SB) + TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12 MOVD ptr+0(FP), R0 MOVW val+8(FP), R1 diff --git a/src/runtime/internal/atomic/atomic_mips64x.go b/src/runtime/internal/atomic/atomic_mips64x.go index d06ea4809a731..98a8fca929186 100644 --- a/src/runtime/internal/atomic/atomic_mips64x.go +++ b/src/runtime/internal/atomic/atomic_mips64x.go @@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64 //go:noescape func Loadp(ptr unsafe.Pointer) unsafe.Pointer +//go:noescape +func LoadAcq(ptr *uint32) uint32 + //go:noescape func And8(ptr *uint8, val uint8) @@ -46,6 +49,9 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) @@ -54,3 +60,6 @@ func Store64(ptr *uint64, val uint64) // NO go:noescape annotation; see atomic_pointer.go. 
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) diff --git a/src/runtime/internal/atomic/atomic_mips64x.s b/src/runtime/internal/atomic/atomic_mips64x.s index 087672f5ccf46..5214afe2d6753 100644 --- a/src/runtime/internal/atomic/atomic_mips64x.s +++ b/src/runtime/internal/atomic/atomic_mips64x.s @@ -34,3 +34,7 @@ TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16 SYNC MOVV R1, ret+8(FP) RET + +// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12 + JMP atomic·Load(SB) diff --git a/src/runtime/internal/atomic/atomic_mipsx.go b/src/runtime/internal/atomic/atomic_mipsx.go index 32be1c779d969..1cd6d9a9ce33b 100644 --- a/src/runtime/internal/atomic/atomic_mipsx.go +++ b/src/runtime/internal/atomic/atomic_mipsx.go @@ -7,14 +7,14 @@ package atomic import ( - "runtime/internal/sys" + "internal/cpu" "unsafe" ) // TODO implement lock striping var lock struct { state uint32 - pad [sys.CacheLineSize - 4]byte + pad [cpu.CacheLinePadSize - 4]byte } //go:noescape @@ -119,6 +119,9 @@ func Load(ptr *uint32) uint32 //go:noescape func Loadp(ptr unsafe.Pointer) unsafe.Pointer +//go:noescape +func LoadAcq(ptr *uint32) uint32 + //go:noescape func And8(ptr *uint8, val uint8) @@ -130,3 +133,9 @@ func Store(ptr *uint32, val uint32) // NO go:noescape annotation; see atomic_pointer.go. 
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) + +//go:noescape +func StoreRel(ptr *uint32, val uint32) + +//go:noescape +func CasRel(addr *uint32, old, new uint32) bool diff --git a/src/runtime/internal/atomic/atomic_ppc64x.go b/src/runtime/internal/atomic/atomic_ppc64x.go index 72c98eb0c5f6c..4f1a95c5bd843 100644 --- a/src/runtime/internal/atomic/atomic_ppc64x.go +++ b/src/runtime/internal/atomic/atomic_ppc64x.go @@ -35,6 +35,9 @@ func Load64(ptr *uint64) uint64 //go:noescape func Loadp(ptr unsafe.Pointer) unsafe.Pointer +//go:noescape +func LoadAcq(ptr *uint32) uint32 + //go:noescape func And8(ptr *uint8, val uint8) @@ -46,11 +49,17 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Cas64(ptr *uint64, old, new uint64) bool +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool + //go:noescape func Store(ptr *uint32, val uint32) //go:noescape func Store64(ptr *uint64, val uint64) +//go:noescape +func StoreRel(ptr *uint32, val uint32) + // NO go:noescape annotation; see atomic_pointer.go. 
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/src/runtime/internal/atomic/atomic_ppc64x.s b/src/runtime/internal/atomic/atomic_ppc64x.s index c9c2d1fc0c6e4..c079ea494f956 100644 --- a/src/runtime/internal/atomic/atomic_ppc64x.s +++ b/src/runtime/internal/atomic/atomic_ppc64x.s @@ -38,3 +38,12 @@ TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$-8-16 ISYNC MOVD R3, ret+8(FP) RET + +// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr) +TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$-8-12 + MOVD ptr+0(FP), R3 + MOVWZ 0(R3), R3 + CMPW R3, R3, CR7 + BC 4, 30, 1(PC) // bne- cr7, 0x4 + MOVW R3, ret+8(FP) + RET diff --git a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go index 9343853485e33..ec294a27ba073 100644 --- a/src/runtime/internal/atomic/atomic_s390x.go +++ b/src/runtime/internal/atomic/atomic_s390x.go @@ -24,6 +24,12 @@ func Load64(ptr *uint64) uint64 { return *ptr } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:noinline //go:nosplit func Store(ptr *uint32, val uint32) { @@ -43,6 +49,12 @@ func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) { *(*uintptr)(ptr) = uintptr(val) } +//go:noinline +//go:nosplit +func StoreRel(ptr *uint32, val uint32) { + *ptr = val +} + //go:noescape func And8(ptr *uint8, val uint8) @@ -71,3 +83,6 @@ func Xchguintptr(ptr *uintptr, new uintptr) uintptr //go:noescape func Cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func CasRel(ptr *uint32, old, new uint32) bool diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go index cbf254fcb5d40..71288e9003dce 100644 --- a/src/runtime/internal/atomic/atomic_wasm.go +++ b/src/runtime/internal/atomic/atomic_wasm.go @@ -21,6 +21,12 @@ func Loadp(ptr unsafe.Pointer) unsafe.Pointer { return *(*unsafe.Pointer)(ptr) } +//go:nosplit +//go:noinline +func LoadAcq(ptr *uint32) uint32 { + return *ptr +} + //go:nosplit //go:noinline func Load64(ptr *uint64) 
uint64 { @@ -105,6 +111,12 @@ func Store(ptr *uint32, val uint32) { *ptr = val } +//go:nosplit +//go:noinline +func StoreRel(ptr *uint32, val uint32) { + *ptr = val +} + //go:nosplit //go:noinline func Store64(ptr *uint64, val uint64) { @@ -147,6 +159,16 @@ func Casuintptr(ptr *uintptr, old, new uintptr) bool { return false } +//go:nosplit +//go:noinline +func CasRel(ptr *uint32, old, new uint32) bool { + if *ptr == old { + *ptr = new + return true + } + return false +} + //go:nosplit //go:noinline func Storeuintptr(ptr *uintptr, new uintptr) { diff --git a/src/runtime/internal/math/math.go b/src/runtime/internal/math/math.go new file mode 100644 index 0000000000000..5385f5dd86894 --- /dev/null +++ b/src/runtime/internal/math/math.go @@ -0,0 +1,19 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math + +import "runtime/internal/sys" + +const MaxUintptr = ^uintptr(0) + +// MulUintptr returns a * b and whether the multiplication overflowed. +// On supported platforms this is an intrinsic lowered by the compiler. +func MulUintptr(a, b uintptr) (uintptr, bool) { + if a|b < 1<<(4*sys.PtrSize) || a == 0 { + return a * b, false + } + overflow := b > MaxUintptr/a + return a * b, overflow +} diff --git a/src/runtime/internal/math/math_test.go b/src/runtime/internal/math/math_test.go new file mode 100644 index 0000000000000..303eb63405a1d --- /dev/null +++ b/src/runtime/internal/math/math_test.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package math_test + +import ( + . 
"runtime/internal/math" + "testing" +) + +const ( + UintptrSize = 32 << (^uintptr(0) >> 63) +) + +type mulUintptrTest struct { + a uintptr + b uintptr + overflow bool +} + +var mulUintptrTests = []mulUintptrTest{ + {0, 0, false}, + {1000, 1000, false}, + {MaxUintptr, 0, false}, + {MaxUintptr, 1, false}, + {MaxUintptr / 2, 2, false}, + {MaxUintptr / 2, 3, true}, + {MaxUintptr, 10, true}, + {MaxUintptr, 100, true}, + {MaxUintptr / 100, 100, false}, + {MaxUintptr / 1000, 1001, true}, + {1<<(UintptrSize/2) - 1, 1<<(UintptrSize/2) - 1, false}, + {1 << (UintptrSize / 2), 1 << (UintptrSize / 2), true}, + {MaxUintptr >> 32, MaxUintptr >> 32, false}, + {MaxUintptr, MaxUintptr, true}, +} + +func TestMulUintptr(t *testing.T) { + for _, test := range mulUintptrTests { + a, b := test.a, test.b + for i := 0; i < 2; i++ { + mul, overflow := MulUintptr(a, b) + if mul != a*b || overflow != test.overflow { + t.Errorf("MulUintptr(%v, %v) = %v, %v want %v, %v", + a, b, mul, overflow, a*b, test.overflow) + } + a, b = b, a + } + } +} + +var SinkUintptr uintptr +var SinkBool bool + +var x, y uintptr + +func BenchmarkMulUintptr(b *testing.B) { + x, y = 1, 2 + b.Run("small", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var overflow bool + SinkUintptr, overflow = MulUintptr(x, y) + if overflow { + SinkUintptr = 0 + } + } + }) + x, y = MaxUintptr, MaxUintptr-1 + b.Run("large", func(b *testing.B) { + for i := 0; i < b.N; i++ { + var overflow bool + SinkUintptr, overflow = MulUintptr(x, y) + if overflow { + SinkUintptr = 0 + } + } + }) +} diff --git a/src/runtime/internal/sys/arch_386.go b/src/runtime/internal/sys/arch_386.go index 5fb1fba02b6ef..537570133709d 100644 --- a/src/runtime/internal/sys/arch_386.go +++ b/src/runtime/internal/sys/arch_386.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = I386 BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = GoosNacl*65536 + (1-GoosNacl)*4096 // 4k normally; 64k on NaCl PCQuantum = 1 Int64Align = 4 diff --git 
a/src/runtime/internal/sys/arch_amd64.go b/src/runtime/internal/sys/arch_amd64.go index 2f32bc469ffdc..86fed4d53102f 100644 --- a/src/runtime/internal/sys/arch_amd64.go +++ b/src/runtime/internal/sys/arch_amd64.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = AMD64 BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 4096 PCQuantum = 1 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_amd64p32.go b/src/runtime/internal/sys/arch_amd64p32.go index c560907c6784e..749d724809f2b 100644 --- a/src/runtime/internal/sys/arch_amd64p32.go +++ b/src/runtime/internal/sys/arch_amd64p32.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = AMD64 BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 65536*GoosNacl + 4096*(1-GoosNacl) PCQuantum = 1 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_arm.go b/src/runtime/internal/sys/arch_arm.go index f383d82027cf4..2af09e0e35491 100644 --- a/src/runtime/internal/sys/arch_arm.go +++ b/src/runtime/internal/sys/arch_arm.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = ARM BigEndian = false - CacheLineSize = 32 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 4 diff --git a/src/runtime/internal/sys/arch_arm64.go b/src/runtime/internal/sys/arch_arm64.go index cb83ecc445724..f13d2de129aa9 100644 --- a/src/runtime/internal/sys/arch_arm64.go +++ b/src/runtime/internal/sys/arch_arm64.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = ARM64 BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_mips.go b/src/runtime/internal/sys/arch_mips.go index e12f32d0eeb82..e9bd69c928e48 100644 --- a/src/runtime/internal/sys/arch_mips.go +++ b/src/runtime/internal/sys/arch_mips.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = MIPS BigEndian = true - CacheLineSize = 32 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 4 diff --git a/src/runtime/internal/sys/arch_mips64.go b/src/runtime/internal/sys/arch_mips64.go index 
973ec10e17f9e..5eb7b2b7b1346 100644 --- a/src/runtime/internal/sys/arch_mips64.go +++ b/src/runtime/internal/sys/arch_mips64.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = MIPS64 BigEndian = true - CacheLineSize = 32 DefaultPhysPageSize = 16384 PCQuantum = 4 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_mips64le.go b/src/runtime/internal/sys/arch_mips64le.go index e96d962f368b9..14c804ed85bb0 100644 --- a/src/runtime/internal/sys/arch_mips64le.go +++ b/src/runtime/internal/sys/arch_mips64le.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = MIPS64 BigEndian = false - CacheLineSize = 32 DefaultPhysPageSize = 16384 PCQuantum = 4 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_mipsle.go b/src/runtime/internal/sys/arch_mipsle.go index 25742ae9d3f18..91badb17d5175 100644 --- a/src/runtime/internal/sys/arch_mipsle.go +++ b/src/runtime/internal/sys/arch_mipsle.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = MIPS BigEndian = false - CacheLineSize = 32 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 4 diff --git a/src/runtime/internal/sys/arch_ppc64.go b/src/runtime/internal/sys/arch_ppc64.go index a538bbdec0b53..8cde4e18d0cd6 100644 --- a/src/runtime/internal/sys/arch_ppc64.go +++ b/src/runtime/internal/sys/arch_ppc64.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = PPC64 BigEndian = true - CacheLineSize = 128 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_ppc64le.go b/src/runtime/internal/sys/arch_ppc64le.go index aa506891817e3..10c0066849a95 100644 --- a/src/runtime/internal/sys/arch_ppc64le.go +++ b/src/runtime/internal/sys/arch_ppc64le.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = PPC64 BigEndian = false - CacheLineSize = 128 DefaultPhysPageSize = 65536 PCQuantum = 4 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_s390x.go b/src/runtime/internal/sys/arch_s390x.go index e42c420a542c1..77fd4bf07d7db 100644 --- a/src/runtime/internal/sys/arch_s390x.go +++ 
b/src/runtime/internal/sys/arch_s390x.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = S390X BigEndian = true - CacheLineSize = 256 DefaultPhysPageSize = 4096 PCQuantum = 2 Int64Align = 8 diff --git a/src/runtime/internal/sys/arch_wasm.go b/src/runtime/internal/sys/arch_wasm.go index 5463f934d607a..203fc2e472b28 100644 --- a/src/runtime/internal/sys/arch_wasm.go +++ b/src/runtime/internal/sys/arch_wasm.go @@ -7,7 +7,6 @@ package sys const ( ArchFamily = WASM BigEndian = false - CacheLineSize = 64 DefaultPhysPageSize = 65536 PCQuantum = 1 Int64Align = 8 diff --git a/src/runtime/internal/sys/stubs.go b/src/runtime/internal/sys/stubs.go index 53280232682b8..10b0173f601d5 100644 --- a/src/runtime/internal/sys/stubs.go +++ b/src/runtime/internal/sys/stubs.go @@ -11,3 +11,6 @@ const RegSize = 4 << (^Uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) const SpAlign = 1*(1-GoarchArm64) + 16*GoarchArm64 // SP alignment: 1 normally, 16 for ARM64 var DefaultGoroot string // set at link time + +// AIX requires a larger stack for syscalls. +const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix diff --git a/src/runtime/internal/sys/zgoos_aix.go b/src/runtime/internal/sys/zgoos_aix.go new file mode 100644 index 0000000000000..909bfc5e934e1 --- /dev/null +++ b/src/runtime/internal/sys/zgoos_aix.go @@ -0,0 +1,23 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +// +build aix + +package sys + +const GOOS = `aix` + +const GoosAix = 1 +const GoosAndroid = 0 +const GoosDarwin = 0 +const GoosDragonfly = 0 +const GoosFreebsd = 0 +const GoosHurd = 0 +const GoosJs = 0 +const GoosLinux = 0 +const GoosNacl = 0 +const GoosNetbsd = 0 +const GoosOpenbsd = 0 +const GoosPlan9 = 0 +const GoosSolaris = 0 +const GoosWindows = 0 +const GoosZos = 0 diff --git a/src/runtime/internal/sys/zgoos_android.go b/src/runtime/internal/sys/zgoos_android.go index bfdc37792e8e0..434ce46712d7b 100644 --- a/src/runtime/internal/sys/zgoos_android.go +++ b/src/runtime/internal/sys/zgoos_android.go @@ -6,10 +6,12 @@ package sys const GOOS = `android` +const GoosAix = 0 const GoosAndroid = 1 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_darwin.go b/src/runtime/internal/sys/zgoos_darwin.go index 1c4667f6debee..b645d1cf5fd51 100644 --- a/src/runtime/internal/sys/zgoos_darwin.go +++ b/src/runtime/internal/sys/zgoos_darwin.go @@ -6,10 +6,12 @@ package sys const GOOS = `darwin` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 1 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_dragonfly.go b/src/runtime/internal/sys/zgoos_dragonfly.go index 728bf6abe85e5..154cec370fe03 100644 --- a/src/runtime/internal/sys/zgoos_dragonfly.go +++ b/src/runtime/internal/sys/zgoos_dragonfly.go @@ -6,10 +6,12 @@ package sys const GOOS = `dragonfly` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 1 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_freebsd.go b/src/runtime/internal/sys/zgoos_freebsd.go index a8d659169b11d..5f41c0344507f 100644 --- a/src/runtime/internal/sys/zgoos_freebsd.go +++ 
b/src/runtime/internal/sys/zgoos_freebsd.go @@ -6,10 +6,12 @@ package sys const GOOS = `freebsd` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 1 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_hurd.go b/src/runtime/internal/sys/zgoos_hurd.go new file mode 100644 index 0000000000000..53f7fc384bf6e --- /dev/null +++ b/src/runtime/internal/sys/zgoos_hurd.go @@ -0,0 +1,23 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. + +// +build hurd + +package sys + +const GOOS = `hurd` + +const GoosAix = 0 +const GoosAndroid = 0 +const GoosDarwin = 0 +const GoosDragonfly = 0 +const GoosFreebsd = 0 +const GoosHurd = 1 +const GoosJs = 0 +const GoosLinux = 0 +const GoosNacl = 0 +const GoosNetbsd = 0 +const GoosOpenbsd = 0 +const GoosPlan9 = 0 +const GoosSolaris = 0 +const GoosWindows = 0 +const GoosZos = 0 diff --git a/src/runtime/internal/sys/zgoos_js.go b/src/runtime/internal/sys/zgoos_js.go index cc8eef080fb80..c6cca49bd9b9d 100644 --- a/src/runtime/internal/sys/zgoos_js.go +++ b/src/runtime/internal/sys/zgoos_js.go @@ -6,10 +6,12 @@ package sys const GOOS = `js` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 1 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_linux.go b/src/runtime/internal/sys/zgoos_linux.go index 289400c6122c5..088dbc105b0a9 100644 --- a/src/runtime/internal/sys/zgoos_linux.go +++ b/src/runtime/internal/sys/zgoos_linux.go @@ -7,10 +7,12 @@ package sys const GOOS = `linux` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 1 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_nacl.go b/src/runtime/internal/sys/zgoos_nacl.go index 3fedb0a2c3b1b..65bec4af9e85a 
100644 --- a/src/runtime/internal/sys/zgoos_nacl.go +++ b/src/runtime/internal/sys/zgoos_nacl.go @@ -6,10 +6,12 @@ package sys const GOOS = `nacl` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 1 diff --git a/src/runtime/internal/sys/zgoos_netbsd.go b/src/runtime/internal/sys/zgoos_netbsd.go index 3346e3711ca35..93d0fa7e11303 100644 --- a/src/runtime/internal/sys/zgoos_netbsd.go +++ b/src/runtime/internal/sys/zgoos_netbsd.go @@ -6,10 +6,12 @@ package sys const GOOS = `netbsd` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_openbsd.go b/src/runtime/internal/sys/zgoos_openbsd.go index 13c0323249d2e..79193593f5cff 100644 --- a/src/runtime/internal/sys/zgoos_openbsd.go +++ b/src/runtime/internal/sys/zgoos_openbsd.go @@ -6,10 +6,12 @@ package sys const GOOS = `openbsd` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_plan9.go b/src/runtime/internal/sys/zgoos_plan9.go index 6b2e977b5ea32..2b95e08080197 100644 --- a/src/runtime/internal/sys/zgoos_plan9.go +++ b/src/runtime/internal/sys/zgoos_plan9.go @@ -6,10 +6,12 @@ package sys const GOOS = `plan9` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_solaris.go b/src/runtime/internal/sys/zgoos_solaris.go index cbf70f079a39c..6e3988aed0c2e 100644 --- a/src/runtime/internal/sys/zgoos_solaris.go +++ b/src/runtime/internal/sys/zgoos_solaris.go @@ -6,10 +6,12 @@ 
package sys const GOOS = `solaris` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_windows.go b/src/runtime/internal/sys/zgoos_windows.go index 70839ca7938ef..a56e12544a4cf 100644 --- a/src/runtime/internal/sys/zgoos_windows.go +++ b/src/runtime/internal/sys/zgoos_windows.go @@ -6,10 +6,12 @@ package sys const GOOS = `windows` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/internal/sys/zgoos_zos.go b/src/runtime/internal/sys/zgoos_zos.go index ecf449f70348c..0f56e46002569 100644 --- a/src/runtime/internal/sys/zgoos_zos.go +++ b/src/runtime/internal/sys/zgoos_zos.go @@ -6,10 +6,12 @@ package sys const GOOS = `zos` +const GoosAix = 0 const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 +const GoosHurd = 0 const GoosJs = 0 const GoosLinux = 0 const GoosNacl = 0 diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go index 4ce7d2a098827..ea3455a8c4c93 100644 --- a/src/runtime/lfstack_64bit.go +++ b/src/runtime/lfstack_64bit.go @@ -28,9 +28,20 @@ const ( // bottom, because node must be pointer-aligned, giving a total of 19 bits // of count. cntBits = 64 - addrBits + 3 + + // On AIX, 64-bit addresses are split into 36-bit segment number and 28-bit + // offset in segment. Segment numbers in the range 0x0A0000000-0x0AFFFFFFF(LSA) + // are available for mmap. + // We assume all lfnode addresses are from memory allocated with mmap. + // We use one bit to distinguish between the two ranges. 
+ aixAddrBits = 57 + aixCntBits = 64 - aixAddrBits + 3 ) func lfstackPack(node *lfnode, cnt uintptr) uint64 { + if GOARCH == "ppc64" && GOOS == "aix" { + return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<> cntBits << 3))) } + if GOARCH == "ppc64" && GOOS == "aix" { + return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56))) + } return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3))) } diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index b590c4b92bde1..d2828b138ab21 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -230,7 +230,7 @@ func notetsleepg(n *note, ns int64) bool { return ok } -func pauseSchedulerUntilCallback() bool { +func beforeIdle() bool { return false } diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go index df321e5196373..f58c915b630a9 100644 --- a/src/runtime/lock_js.go +++ b/src/runtime/lock_js.go @@ -92,7 +92,7 @@ func notetsleepg(n *note, ns int64) bool { delay = 1<<31 - 1 // cap to max int32 } - id := scheduleCallback(delay) + id := scheduleTimeoutEvent(delay) mp := acquirem() notes[n] = gp notesWithTimeout[n] = noteWithTimeout{gp: gp, deadline: deadline} @@ -100,7 +100,7 @@ func notetsleepg(n *note, ns int64) bool { gopark(nil, nil, waitReasonSleep, traceEvNone, 1) - clearScheduledCallback(id) // note might have woken early, clear timeout + clearTimeoutEvent(id) // note might have woken early, clear timeout mp = acquirem() delete(notes, n) delete(notesWithTimeout, n) @@ -127,46 +127,68 @@ func notetsleepg(n *note, ns int64) bool { func checkTimeouts() { now := nanotime() for n, nt := range notesWithTimeout { - if n.key == note_cleared && now > nt.deadline { + if n.key == note_cleared && now >= nt.deadline { n.key = note_timeout goready(nt.gp, 1) } } } -var waitingForCallback *g +var returnedEventHandler *g -// sleepUntilCallback puts the current goroutine to sleep until a callback is triggered. 
-// It is currently only used by the callback routine of the syscall/js package. -//go:linkname sleepUntilCallback syscall/js.sleepUntilCallback -func sleepUntilCallback() { - waitingForCallback = getg() +func init() { + // At the toplevel we need an extra goroutine that handles asynchronous events. + initg := getg() + go func() { + returnedEventHandler = getg() + goready(initg, 1) + + gopark(nil, nil, waitReasonZero, traceEvNone, 1) + returnedEventHandler = nil + + pause(getcallersp() - 16) + }() gopark(nil, nil, waitReasonZero, traceEvNone, 1) - waitingForCallback = nil } -// pauseSchedulerUntilCallback gets called from the scheduler and pauses the execution -// of Go's WebAssembly code until a callback is triggered. Then it checks for note timeouts -// and resumes goroutines that are waiting for a callback. -func pauseSchedulerUntilCallback() bool { - if waitingForCallback == nil && len(notesWithTimeout) == 0 { - return false +// beforeIdle gets called by the scheduler if no goroutine is awake. +// We resume the event handler (if available) which will pause the execution. +func beforeIdle() bool { + if returnedEventHandler != nil { + goready(returnedEventHandler, 1) + return true } + return false +} + +// pause sets SP to newsp and pauses the execution of Go's WebAssembly code until an event is triggered. +func pause(newsp uintptr) + +// scheduleTimeoutEvent tells the WebAssembly environment to trigger an event after ms milliseconds. +// It returns a timer id that can be used with clearTimeoutEvent. +func scheduleTimeoutEvent(ms int64) int32 + +// clearTimeoutEvent clears a timeout event scheduled by scheduleTimeoutEvent. 
+func clearTimeoutEvent(id int32) + +func handleEvent() { + prevReturnedEventHandler := returnedEventHandler + returnedEventHandler = nil - pause() checkTimeouts() - if waitingForCallback != nil { - goready(waitingForCallback, 1) - } - return true -} + eventHandler() + + returnedEventHandler = getg() + gopark(nil, nil, waitReasonZero, traceEvNone, 1) + + returnedEventHandler = prevReturnedEventHandler -// pause pauses the execution of Go's WebAssembly code until a callback is triggered. -func pause() + pause(getcallersp() - 16) +} -// scheduleCallback tells the WebAssembly environment to trigger a callback after ms milliseconds. -// It returns a timer id that can be used with clearScheduledCallback. -func scheduleCallback(ms int64) int32 +var eventHandler func() -// clearScheduledCallback clears a callback scheduled by scheduleCallback. -func clearScheduledCallback(id int32) +//go:linkname setEventHandler syscall/js.setEventHandler +func setEventHandler(fn func()) { + eventHandler = fn +} diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 6e01d70f7578f..08dfd2b664590 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin nacl netbsd openbsd plan9 solaris windows +// +build aix darwin nacl netbsd openbsd plan9 solaris windows package runtime @@ -283,7 +283,7 @@ func notetsleepg(n *note, ns int64) bool { return ok } -func pauseSchedulerUntilCallback() bool { +func beforeIdle() bool { return false } diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 07e0a67240ceb..8c617bb42b486 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -106,6 +106,7 @@ package runtime import ( "runtime/internal/atomic" + "runtime/internal/math" "runtime/internal/sys" "unsafe" ) @@ -124,8 +125,6 @@ const ( // have the most objects per span. 
maxObjsPerSpan = pageSize / 8 - mSpanInUse = _MSpanInUse - concurrentSweep = _ConcurrentSweep _PageSize = 1 << _PageShift @@ -138,8 +137,7 @@ const ( _TinySize = 16 _TinySizeClass = int8(2) - _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc - _MaxMHeapList = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap. + _FixAllocChunk = 16 << 10 // Chunk size for FixAlloc // Per-P, per order stack segment cache size. _StackCacheSize = 32 * 1024 @@ -162,7 +160,7 @@ const ( // amd64, addresses are sign-extended beyond heapAddrBits. On // other arches, they are zero-extended. // - // On 64-bit platforms, we limit this to 48 bits based on a + // On most 64-bit platforms, we limit this to 48 bits based on a // combination of hardware and OS limitations. // // amd64 hardware limits addresses to 48 bits, sign-extended @@ -180,10 +178,9 @@ const ( // bits, in the range [0, 1<<48). // // ppc64, mips64, and s390x support arbitrary 64 bit addresses - // in hardware. However, since Go only supports Linux on - // these, we lean on OS limits. Based on Linux's processor.h, - // the user address space is limited as follows on 64-bit - // architectures: + // in hardware. On Linux, Go leans on stricter OS limits. Based + // on Linux's processor.h, the user address space is limited as + // follows on 64-bit architectures: // // Architecture Name Maximum Value (exclusive) // --------------------------------------------------------------------- @@ -200,13 +197,17 @@ const ( // exceed Go's 48 bit limit, it's extremely unlikely in // practice. // + // On aix/ppc64, the limits is increased to 1<<60 to accept addresses + // returned by mmap syscall. These are in range: + // 0x0a00000000000000 - 0x0afffffffffffff + // // On 32-bit platforms, we accept the full 32-bit address // space because doing so is cheap. // mips32 only has access to the low 2GB of virtual memory, so // we further limit it to 31 bits. // // WebAssembly currently has a limit of 4GB linear memory. 
- heapAddrBits = (_64bit*(1-sys.GoarchWasm))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosAix))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 60*sys.GoosAix // maxAlloc is the maximum size of an allocation. On 64-bit, // it's theoretically possible to allocate 1<= 0; i-- { var p uintptr switch { @@ -427,6 +434,13 @@ func mallocinit() { p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) case GOARCH == "arm64": p = uintptr(i)<<40 | uintptrMask&(0x0040<<32) + case GOOS == "aix": + if i == 0 { + // We don't use addresses directly after 0x0A00000000000000 + // to avoid collisions with others mmaps done by non-go programs. + continue + } + p = uintptr(i)<<40 | uintptrMask&(0xa0<<52) case raceenabled: // The TSAN runtime requires the heap // to be in the range [0x00c000000000, @@ -460,7 +474,7 @@ func mallocinit() { // 3. We try to stake out a reasonably large initial // heap reservation. - const arenaMetaSize = unsafe.Sizeof([1 << arenaBits]heapArena{}) + const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{}) meta := uintptr(sysReserve(nil, arenaMetaSize)) if meta != 0 { mheap_.heapArenaAlloc.init(meta, arenaMetaSize) @@ -643,6 +657,27 @@ mapped: } } + // Add the arena to the arenas list. + if len(h.allArenas) == cap(h.allArenas) { + size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize + if size == 0 { + size = physPageSize + } + newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys)) + if newArray == nil { + throw("out of memory allocating allArenas") + } + oldSlice := h.allArenas + *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)} + copy(h.allArenas, oldSlice) + // Do not free the old backing array because + // there may be concurrent readers. Since we + // double the array each time, this can lead + // to at most 2x waste. 
+ } + h.allArenas = h.allArenas[:len(h.allArenas)+1] + h.allArenas[len(h.allArenas)-1] = ri + // Store atomically just in case an object from the // new heap arena becomes visible before the heap lock // is released (which shouldn't happen, but there's @@ -735,6 +770,9 @@ func nextFreeFast(s *mspan) gclinkptr { // weight allocation. If it is a heavy weight allocation the caller must // determine whether a new GC cycle needs to be started or if the GC is active // whether this goroutine needs to assist the GC. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { s = c.alloc[spc] shouldhelpgc = false @@ -745,9 +783,7 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) throw("s.allocCount != s.nelems && freeIndex == s.nelems") } - systemstack(func() { - c.refill(spc) - }) + c.refill(spc) shouldhelpgc = true s = c.alloc[spc] @@ -976,7 +1012,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } if rate := MemProfileRate; rate > 0 { - if size < uintptr(rate) && int32(size) < c.next_sample { + if rate != 1 && int32(size) < c.next_sample { c.next_sample -= int32(size) } else { mp := acquirem() @@ -993,7 +1029,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if shouldhelpgc { if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { - gcStart(gcBackgroundMode, t) + gcStart(t) } } @@ -1042,10 +1078,11 @@ func newarray(typ *_type, n int) unsafe.Pointer { if n == 1 { return mallocgc(typ.size, typ, true) } - if n < 0 || uintptr(n) > maxSliceCap(typ.size) { + mem, overflow := math.MulUintptr(typ.size, uintptr(n)) + if overflow || mem > maxAlloc || n < 0 { panic(plainError("runtime: allocation size out of range")) } - return mallocgc(typ.size*uintptr(n), typ, true) + return mallocgc(mem, typ, true) } 
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray @@ -1130,6 +1167,15 @@ var globalAlloc struct { persistentAlloc } +// persistentChunkSize is the number of bytes we allocate when we grow +// a persistentAlloc. +const persistentChunkSize = 256 << 10 + +// persistentChunks is a list of all the persistent chunks we have +// allocated. The list is maintained through the first word in the +// persistent chunk. This is updated atomically. +var persistentChunks *notInHeap + // Wrapper around sysAlloc that can allocate small chunks. // There is no associated free operation. // Intended for things like function/type/debug-related persistent data. @@ -1150,7 +1196,6 @@ func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer { //go:systemstack func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { const ( - chunk = 256 << 10 maxBlock = 64 << 10 // VM reservation granularity is 64K on windows ) @@ -1181,15 +1226,24 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { persistent = &globalAlloc.persistentAlloc } persistent.off = round(persistent.off, align) - if persistent.off+size > chunk || persistent.base == nil { - persistent.base = (*notInHeap)(sysAlloc(chunk, &memstats.other_sys)) + if persistent.off+size > persistentChunkSize || persistent.base == nil { + persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys)) if persistent.base == nil { if persistent == &globalAlloc.persistentAlloc { unlock(&globalAlloc.mutex) } throw("runtime: cannot allocate memory") } - persistent.off = 0 + + // Add the new chunk to the persistentChunks list. 
+ for { + chunks := uintptr(unsafe.Pointer(persistentChunks)) + *(*uintptr)(unsafe.Pointer(persistent.base)) = chunks + if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) { + break + } + } + persistent.off = sys.PtrSize } p := persistent.base.add(persistent.off) persistent.off += size @@ -1205,6 +1259,21 @@ func persistentalloc1(size, align uintptr, sysStat *uint64) *notInHeap { return p } +// inPersistentAlloc reports whether p points to memory allocated by +// persistentalloc. This must be nosplit because it is called by the +// cgo checker code, which is called by the write barrier code. +//go:nosplit +func inPersistentAlloc(p uintptr) bool { + chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks))) + for chunk != 0 { + if p >= chunk && p < chunk+persistentChunkSize { + return true + } + chunk = *(*uintptr)(unsafe.Pointer(chunk)) + } + return false +} + // linearAlloc is a simple linear allocator that pre-reserves a region // of memory and then maps that region as needed. The caller is // responsible for locking. diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go index e6afc25ea9b72..a2d5864d3d472 100644 --- a/src/runtime/malloc_test.go +++ b/src/runtime/malloc_test.go @@ -168,6 +168,14 @@ func TestTinyAlloc(t *testing.T) { } } +func TestPhysicalMemoryUtilization(t *testing.T) { + got := runTestProg(t, "testprog", "GCPhys") + want := "OK\n" + if got != want { + t.Fatalf("expected %q, but got %q", want, got) + } +} + type acLink struct { x [1 << 20]byte } @@ -175,6 +183,14 @@ type acLink struct { var arenaCollisionSink []*acLink func TestArenaCollision(t *testing.T) { + if GOOS == "darwin" && race.Enabled { + // Skip this test on Darwin in race mode because Darwin 10.10 has + // issues following arena hints and runs out of them in race mode, so + // MAP_FIXED is used to ensure we keep the heap in the memory region the + // race detector expects. 
+ // TODO(mknyszek): Delete this when Darwin 10.10 is no longer supported. + t.Skip("disabled on Darwin with race mode since MAP_FIXED is used") + } testenv.MustHaveExec(t) // Test that mheap.sysAlloc handles collisions with other diff --git a/src/runtime/map.go b/src/runtime/map.go index 208c92cb0d75f..9c25b63348f5b 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -55,6 +55,7 @@ package runtime import ( "runtime/internal/atomic" + "runtime/internal/math" "runtime/internal/sys" "unsafe" ) @@ -88,11 +89,12 @@ const ( // Each bucket (including its overflow buckets, if any) will have either all or none of its // entries in the evacuated* states (except during the evacuate() method, which only happens // during map writes and thus no one else can observe the map during that time). - empty = 0 // cell is empty - evacuatedEmpty = 1 // cell is empty, bucket is evacuated. + emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows. + emptyOne = 1 // this cell is empty evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table. evacuatedY = 3 // same as above, but evacuated to second half of larger table. - minTopHash = 4 // minimum tophash for a normal filled cell. + evacuatedEmpty = 4 // cell is empty, bucket is evacuated. + minTopHash = 5 // minimum tophash for a normal filled cell. // flags iterator = 1 // there may be an iterator using buckets @@ -104,6 +106,11 @@ const ( noCheck = 1<<(8*sys.PtrSize) - 1 ) +// isEmpty reports whether the given tophash array entry represents an empty bucket entry. +func isEmpty(x uint8) bool { + return x <= emptyOne +} + // A header for a Go map. type hmap struct { // Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go. 
@@ -196,7 +203,7 @@ func tophash(hash uintptr) uint8 { func evacuated(b *bmap) bool { h := b.tophash[0] - return h > empty && h < minTopHash + return h > emptyOne && h < minTopHash } func (b *bmap) overflow(t *maptype) *bmap { @@ -296,7 +303,8 @@ func makemap_small() *hmap { // If h != nil, the map can be created directly in h. // If h.buckets != nil, bucket pointed to can be used as the first bucket. func makemap(t *maptype, hint int, h *hmap) *hmap { - if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) { + mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size) + if overflow || mem > maxAlloc { hint = 0 } @@ -306,7 +314,8 @@ func makemap(t *maptype, hint int, h *hmap) *hmap { } h.hash0 = fastrand() - // find size parameter which will hold the requested # of elements + // Find the size parameter B which will hold the requested # of elements. + // For hint < 0 overLoadFactor returns false since hint < bucketCnt. B := uint8(0) for overLoadFactor(hint, B) { B++ @@ -395,6 +404,9 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { msanread(key, t.key.size) } if h == nil || h.count == 0 { + if t.hashMightPanic() { + t.key.alg.hash(key, 0) // see issue 23734 + } return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { @@ -415,18 +427,22 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { } } top := tophash(hash) +bucketloop: for ; b != nil; b = b.overflow(t) { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey { + if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue { + if t.indirectvalue() { v = *((*unsafe.Pointer)(v)) } return v @@ -447,6 +463,9 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) 
(unsafe.Pointer, bool) msanread(key, t.key.size) } if h == nil || h.count == 0 { + if t.hashMightPanic() { + t.key.alg.hash(key, 0) // see issue 23734 + } return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { @@ -467,18 +486,22 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) } } top := tophash(hash) +bucketloop: for ; b != nil; b = b.overflow(t) { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey { + if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue { + if t.indirectvalue() { v = *((*unsafe.Pointer)(v)) } return v, true @@ -508,18 +531,22 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe } } top := tophash(hash) +bucketloop: for ; b != nil; b = b.overflow(t) { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey { + if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } if alg.equal(key, k) { v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue { + if t.indirectvalue() { v = *((*unsafe.Pointer)(v)) } return k, v @@ -567,7 +594,7 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { // Set hashWriting after calling alg.hash, since alg.hash may panic, // in which case we have not actually done a write. 
- h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -584,25 +611,29 @@ again: var inserti *uint8 var insertk unsafe.Pointer var val unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { - if b.tophash[i] == empty && inserti == nil { + if isEmpty(b.tophash[i]) && inserti == nil { inserti = &b.tophash[i] insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) - if t.indirectkey { + if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } if !alg.equal(key, k) { continue } // already have a mapping for key. Update it. - if t.needkeyupdate { + if t.needkeyupdate() { typedmemmove(t.key, k, key) } val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) @@ -633,12 +664,12 @@ again: } // store new key/value at insert position - if t.indirectkey { + if t.indirectkey() { kmem := newobject(t.key) *(*unsafe.Pointer)(insertk) = kmem insertk = kmem } - if t.indirectvalue { + if t.indirectvalue() { vmem := newobject(t.elem) *(*unsafe.Pointer)(val) = vmem } @@ -651,7 +682,7 @@ done: throw("concurrent map writes") } h.flags &^= hashWriting - if t.indirectvalue { + if t.indirectvalue() { val = *((*unsafe.Pointer)(val)) } return val @@ -668,6 +699,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { msanread(key, t.key.size) } if h == nil || h.count == 0 { + if t.hashMightPanic() { + t.key.alg.hash(key, 0) // see issue 23734 + } return } if h.flags&hashWriting != 0 { @@ -679,43 +713,79 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { // Set hashWriting after calling alg.hash, since alg.hash may panic, // in which case we have not actually done a write (delete). 
- h.flags |= hashWriting + h.flags ^= hashWriting bucket := hash & bucketMask(h.B) if h.growing() { growWork(t, h, bucket) } b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + bOrig := b top := tophash(hash) search: for ; b != nil; b = b.overflow(t) { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { + if b.tophash[i] == emptyRest { + break search + } continue } k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) k2 := k - if t.indirectkey { + if t.indirectkey() { k2 = *((*unsafe.Pointer)(k2)) } if !alg.equal(key, k2) { continue } // Only clear key if there are pointers in it. - if t.indirectkey { + if t.indirectkey() { *(*unsafe.Pointer)(k) = nil } else if t.key.kind&kindNoPointers == 0 { memclrHasPointers(k, t.key.size) } v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) - if t.indirectvalue { + if t.indirectvalue() { *(*unsafe.Pointer)(v) = nil } else if t.elem.kind&kindNoPointers == 0 { memclrHasPointers(v, t.elem.size) } else { memclrNoHeapPointers(v, t.elem.size) } - b.tophash[i] = empty + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + // It would be nice to make this a separate function, but + // for loops are not currently inlineable. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. 
+ c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: h.count-- break search } @@ -830,11 +900,13 @@ next: } for ; i < bucketCnt; i++ { offi := (i + it.offset) & (bucketCnt - 1) - if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty { + if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty { + // TODO: emptyRest is hard to use here, as we start iterating + // in the middle of a bucket. It's feasible, just tricky. continue } k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize)) - if t.indirectkey { + if t.indirectkey() { k = *((*unsafe.Pointer)(k)) } v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize)) @@ -846,7 +918,7 @@ next: // through the oldbucket, skipping any keys that will go // to the other new bucket (each oldbucket expands to two // buckets during a grow). - if t.reflexivekey || alg.equal(k, k) { + if t.reflexivekey() || alg.equal(k, k) { // If the item in the oldbucket is not destined for // the current new bucket in the iteration, skip it. hash := alg.hash(k, uintptr(h.hash0)) @@ -867,13 +939,13 @@ next: } } if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || - !(t.reflexivekey || alg.equal(k, k)) { + !(t.reflexivekey() || alg.equal(k, k)) { // This is the golden data, we can return it. // OR // key!=key, so the entry can't be deleted or updated, so we can just return it. // That's lucky for us because when key!=key we can't look it up successfully. 
it.key = k - if t.indirectvalue { + if t.indirectvalue() { v = *((*unsafe.Pointer)(v)) } it.value = v @@ -921,7 +993,7 @@ func mapclear(t *maptype, h *hmap) { throw("concurrent map writes") } - h.flags |= hashWriting + h.flags ^= hashWriting h.flags &^= sameSizeGrow h.oldbuckets = nil @@ -1089,7 +1161,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { v := add(k, bucketCnt*uintptr(t.keysize)) for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) { top := b.tophash[i] - if top == empty { + if isEmpty(top) { b.tophash[i] = evacuatedEmpty continue } @@ -1097,7 +1169,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { throw("bad map state") } k2 := k - if t.indirectkey { + if t.indirectkey() { k2 = *((*unsafe.Pointer)(k2)) } var useY uint8 @@ -1105,7 +1177,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { // Compute hash to make our evacuation decision (whether we need // to send this key/value to bucket x or bucket y). hash := t.key.alg.hash(k2, uintptr(h.hash0)) - if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) { + if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) { // If key != key (NaNs), then the hash could be (and probably // will be) entirely different from the old hash. Moreover, // it isn't reproducible. 
Reproducibility is required in the @@ -1126,7 +1198,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { } } - if evacuatedX+1 != evacuatedY { + if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY { throw("bad evacuatedN") } @@ -1140,12 +1212,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { dst.v = add(dst.k, bucketCnt*uintptr(t.keysize)) } dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check - if t.indirectkey { + if t.indirectkey() { *(*unsafe.Pointer)(dst.k) = k2 // copy pointer } else { typedmemmove(t.key, dst.k, k) // copy value } - if t.indirectvalue { + if t.indirectvalue() { *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v) } else { typedmemmove(t.elem, dst.v, v) @@ -1211,12 +1283,12 @@ func reflect_makemap(t *maptype, cap int) *hmap { if !ismapkey(t.key) { throw("runtime.reflect_makemap: unsupported map key type") } - if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) || - t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) { + if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) || + t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) { throw("key size wrong") } - if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) || - t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) { + if t.elem.size > maxValueSize && (!t.indirectvalue() || t.valuesize != uint8(sys.PtrSize)) || + t.elem.size <= maxValueSize && (t.indirectvalue() || t.valuesize != uint8(t.elem.size)) { throw("value size wrong") } if t.key.align > bucketCnt { @@ -1282,6 +1354,11 @@ func reflect_mapiterkey(it *hiter) unsafe.Pointer { return it.key } +//go:linkname reflect_mapitervalue reflect.mapitervalue +func reflect_mapitervalue(it *hiter) unsafe.Pointer { + return it.value +} + //go:linkname reflect_maplen reflect.maplen func 
reflect_maplen(h *hmap) int { if h == nil { diff --git a/src/runtime/map_benchmark_test.go b/src/runtime/map_benchmark_test.go index 025c0398d3bda..d37dadcb5694d 100644 --- a/src/runtime/map_benchmark_test.go +++ b/src/runtime/map_benchmark_test.go @@ -5,6 +5,7 @@ package runtime_test import ( "fmt" + "math/rand" "strconv" "strings" "testing" @@ -206,6 +207,67 @@ func BenchmarkIntMap(b *testing.B) { } } +func BenchmarkMapFirst(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[0] + } + }) + } +} +func BenchmarkMapMid(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[n>>1] + } + }) + } +} +func BenchmarkMapLast(b *testing.B) { + for n := 1; n <= 16; n++ { + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m[n-1] + } + }) + } +} + +func BenchmarkMapCycle(b *testing.B) { + // Arrange map entries to be a permuation, so that + // we hit all entries, and one lookup is data dependent + // on the previous lookup. + const N = 3127 + p := rand.New(rand.NewSource(1)).Perm(N) + m := map[int]int{} + for i := 0; i < N; i++ { + m[i] = p[i] + } + b.ResetTimer() + j := 0 + for i := 0; i < b.N; i++ { + j = m[j] + } + sink = uint64(j) +} + // Accessing the same keys in a row. 
func benchmarkRepeatedLookup(b *testing.B, lookupKeySize int) { m := make(map[string]bool) @@ -228,6 +290,23 @@ func benchmarkRepeatedLookup(b *testing.B, lookupKeySize int) { func BenchmarkRepeatedLookupStrMapKey32(b *testing.B) { benchmarkRepeatedLookup(b, 32) } func BenchmarkRepeatedLookupStrMapKey1M(b *testing.B) { benchmarkRepeatedLookup(b, 1<<20) } +func BenchmarkMakeMap(b *testing.B) { + b.Run("[Byte]Byte", func(b *testing.B) { + var m map[byte]byte + for i := 0; i < b.N; i++ { + m = make(map[byte]byte, 10) + } + hugeSink = m + }) + b.Run("[Int]Int", func(b *testing.B) { + var m map[int]int + for i := 0; i < b.N; i++ { + m = make(map[int]int, 10) + } + hugeSink = m + }) +} + func BenchmarkNewEmptyMap(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { @@ -370,3 +449,37 @@ func BenchmarkGoMapClear(b *testing.B) { } }) } + +func BenchmarkMapStringConversion(b *testing.B) { + for _, length := range []int{32, 64} { + b.Run(strconv.Itoa(length), func(b *testing.B) { + bytes := make([]byte, length) + b.Run("simple", func(b *testing.B) { + b.ReportAllocs() + m := make(map[string]int) + m[string(bytes)] = 0 + for i := 0; i < b.N; i++ { + _ = m[string(bytes)] + } + }) + b.Run("struct", func(b *testing.B) { + b.ReportAllocs() + type stringstruct struct{ s string } + m := make(map[stringstruct]int) + m[stringstruct{string(bytes)}] = 0 + for i := 0; i < b.N; i++ { + _ = m[stringstruct{string(bytes)}] + } + }) + b.Run("array", func(b *testing.B) { + b.ReportAllocs() + type stringarray [1]string + m := make(map[stringarray]int) + m[stringarray{string(bytes)}] = 0 + for i := 0; i < b.N; i++ { + _ = m[stringarray{string(bytes)}] + } + }) + }) + } +} diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go index bf0b23604bb0c..20f55e17c6eac 100644 --- a/src/runtime/map_fast32.go +++ b/src/runtime/map_fast32.go @@ -41,7 +41,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { } for ; b != nil; b = b.overflow(t) { for i, k := 
uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && b.tophash[i] != empty { + if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)) } } @@ -81,7 +81,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { } for ; b != nil; b = b.overflow(t) { for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && b.tophash[i] != empty { + if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true } } @@ -103,7 +103,7 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapassign. - h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -120,13 +120,17 @@ again: var inserti uintptr var insertk unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { - if b.tophash[i] == empty { + if isEmpty(b.tophash[i]) { if insertb == nil { inserti = i insertb = b } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) @@ -189,7 +193,7 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapassign. 
- h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -206,13 +210,17 @@ again: var inserti uintptr var insertk unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { - if b.tophash[i] == empty { + if isEmpty(b.tophash[i]) { if insertb == nil { inserti = i insertb = b } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4))) @@ -276,17 +284,18 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapdelete - h.flags |= hashWriting + h.flags ^= hashWriting bucket := hash & bucketMask(h.B) if h.growing() { growWork_fast32(t, h, bucket) } b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + bOrig := b search: for ; b != nil; b = b.overflow(t) { for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { - if key != *(*uint32)(k) || b.tophash[i] == empty { + if key != *(*uint32)(k) || isEmpty(b.tophash[i]) { continue } // Only clear key if there are pointers in it. @@ -299,7 +308,37 @@ search: } else { memclrNoHeapPointers(v, t.elem.size) } - b.tophash[i] = empty + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. 
+ c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: h.count-- break search } @@ -350,7 +389,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { v := add(k, bucketCnt*4) for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) { top := b.tophash[i] - if top == empty { + if isEmpty(top) { b.tophash[i] = evacuatedEmpty continue } diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go index 4bde9e2be0726..e00a7569f944f 100644 --- a/src/runtime/map_fast64.go +++ b/src/runtime/map_fast64.go @@ -41,7 +41,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { } for ; b != nil; b = b.overflow(t) { for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { - if *(*uint64)(k) == key && b.tophash[i] != empty { + if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)) } } @@ -81,7 +81,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { } for ; b != nil; b = b.overflow(t) { for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { - if *(*uint64)(k) == key && b.tophash[i] != empty { + if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true } } @@ -103,7 +103,7 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapassign. 
- h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -120,13 +120,17 @@ again: var inserti uintptr var insertk unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { - if b.tophash[i] == empty { + if isEmpty(b.tophash[i]) { if insertb == nil { insertb = b inserti = i } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) @@ -189,7 +193,7 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapassign. - h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -206,13 +210,17 @@ again: var inserti uintptr var insertk unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { - if b.tophash[i] == empty { + if isEmpty(b.tophash[i]) { if insertb == nil { insertb = b inserti = i } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8))) @@ -276,17 +284,18 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapdelete - h.flags |= hashWriting + h.flags ^= hashWriting bucket := hash & bucketMask(h.B) if h.growing() { growWork_fast64(t, h, bucket) } b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + bOrig := b search: for ; b != nil; b = b.overflow(t) { for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { - if key != *(*uint64)(k) || b.tophash[i] == empty { + if key != *(*uint64)(k) || isEmpty(b.tophash[i]) { continue } // Only clear key if there are pointers in it. 
@@ -299,7 +308,37 @@ search: } else { memclrNoHeapPointers(v, t.elem.size) } - b.tophash[i] = empty + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. + c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: h.count-- break search } @@ -350,7 +389,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { v := add(k, bucketCnt*8) for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) { top := b.tophash[i] - if top == empty { + if isEmpty(top) { b.tophash[i] = evacuatedEmpty continue } diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go index 415bbff143ff5..2eac2b5bb584e 100644 --- a/src/runtime/map_faststr.go +++ b/src/runtime/map_faststr.go @@ -28,7 +28,10 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { // short key, doing lots of comparisons is ok for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) { k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] == empty { + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } continue } if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { @@ -41,7 +44,10 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { keymaybe := uintptr(bucketCnt) for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) { k := (*stringStruct)(kptr) - if k.len != key.len || 
b.tophash[i] == empty { + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } continue } if k.str == key.str { @@ -117,7 +123,10 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { // short key, doing lots of comparisons is ok for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) { k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] == empty { + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } continue } if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { @@ -130,7 +139,10 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { keymaybe := uintptr(bucketCnt) for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) { k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] == empty { + if k.len != key.len || isEmpty(b.tophash[i]) { + if b.tophash[i] == emptyRest { + break + } continue } if k.str == key.str { @@ -202,7 +214,7 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapassign. 
- h.flags |= hashWriting + h.flags ^= hashWriting if h.buckets == nil { h.buckets = newobject(t.bucket) // newarray(t.bucket, 1) @@ -220,13 +232,17 @@ again: var inserti uintptr var insertk unsafe.Pointer +bucketloop: for { for i := uintptr(0); i < bucketCnt; i++ { if b.tophash[i] != top { - if b.tophash[i] == empty && insertb == nil { + if isEmpty(b.tophash[i]) && insertb == nil { insertb = b inserti = i } + if b.tophash[i] == emptyRest { + break bucketloop + } continue } k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize)) @@ -294,13 +310,14 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) { hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) // Set hashWriting after calling alg.hash for consistency with mapdelete - h.flags |= hashWriting + h.flags ^= hashWriting bucket := hash & bucketMask(h.B) if h.growing() { growWork_faststr(t, h, bucket) } b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize))) + bOrig := b top := tophash(hash) search: for ; b != nil; b = b.overflow(t) { @@ -320,7 +337,37 @@ search: } else { memclrNoHeapPointers(v, t.elem.size) } - b.tophash[i] = empty + b.tophash[i] = emptyOne + // If the bucket now ends in a bunch of emptyOne states, + // change those to emptyRest states. + if i == bucketCnt-1 { + if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { + goto notLast + } + } else { + if b.tophash[i+1] != emptyRest { + goto notLast + } + } + for { + b.tophash[i] = emptyRest + if i == 0 { + if b == bOrig { + break // beginning of initial bucket, we're done. + } + // Find previous bucket, continue at its last entry. 
+ c := b + for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { + } + i = bucketCnt - 1 + } else { + i-- + } + if b.tophash[i] != emptyOne { + break + } + } + notLast: h.count-- break search } @@ -371,7 +418,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { v := add(k, bucketCnt*2*sys.PtrSize) for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) { top := b.tophash[i] - if top == empty { + if isEmpty(top) { b.tophash[i] = evacuatedEmpty continue } diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go index 4713ce25ec533..ee9468dd0e699 100644 --- a/src/runtime/map_test.go +++ b/src/runtime/map_test.go @@ -435,11 +435,11 @@ func TestEmptyKeyAndValue(t *testing.T) { // ("quick keys") as well as long keys. func TestSingleBucketMapStringKeys_DupLen(t *testing.T) { testMapLookups(t, map[string]string{ - "x": "x1val", - "xx": "x2val", - "foo": "fooval", - "bar": "barval", // same key length as "foo" - "xxxx": "x4val", + "x": "x1val", + "xx": "x2val", + "foo": "fooval", + "bar": "barval", // same key length as "foo" + "xxxx": "x4val", strings.Repeat("x", 128): "longval1", strings.Repeat("y", 128): "longval2", }) @@ -1131,3 +1131,28 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) { t.Errorf("incremented 0 to %d", n2) } } + +func TestMapTombstones(t *testing.T) { + m := map[int]int{} + const N = 10000 + // Fill a map. + for i := 0; i < N; i++ { + m[i] = i + } + runtime.MapTombstoneCheck(m) + // Delete half of the entries. + for i := 0; i < N; i += 2 { + delete(m, i) + } + runtime.MapTombstoneCheck(m) + // Add new entries to fill in holes. + for i := N; i < 3*N/2; i++ { + m[i] = i + } + runtime.MapTombstoneCheck(m) + // Delete everything. 
+ for i := 0; i < 3*N/2; i++ { + delete(m, i) + } + runtime.MapTombstoneCheck(m) +} diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index b6c5ee0658061..6da8cf2ccb81f 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -226,8 +226,6 @@ func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) { //go:nosplit func typedslicecopy(typ *_type, dst, src slice) int { - // TODO(rsc): If typedslicecopy becomes faster than calling - // typedmemmove repeatedly, consider using during func growslice. n := dst.len if n > src.len { n = src.len @@ -320,6 +318,19 @@ func typedmemclr(typ *_type, ptr unsafe.Pointer) { memclrNoHeapPointers(ptr, typ.size) } +//go:linkname reflect_typedmemclr reflect.typedmemclr +func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) { + typedmemclr(typ, ptr) +} + +//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial +func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) { + if typ.kind&kindNoPointers == 0 { + bulkBarrierPreWrite(uintptr(ptr), 0, size) + } + memclrNoHeapPointers(ptr, size) +} + // memclrHasPointers clears n bytes of typed memory starting at ptr. // The caller must ensure that the type of the object at ptr has // pointers, usually by checking typ.kind&kindNoPointers. However, ptr diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index 75f23a16b413d..2f00add83e43d 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -242,7 +242,7 @@ func (s *mspan) nextFreeIndex() uintptr { return result } -// isFree returns whether the index'th object in s is unallocated. +// isFree reports whether the index'th object in s is unallocated. func (s *mspan) isFree(index uintptr) bool { if index < s.freeindex { return false @@ -283,9 +283,7 @@ func (m markBits) isMarked() bool { return *m.bytep&m.mask != 0 } -// setMarked sets the marked bit in the markbits, atomically. 
Some compilers -// are not able to inline atomic.Or8 function so if it appears as a hot spot consider -// inlining it manually. +// setMarked sets the marked bit in the markbits, atomically. func (m markBits) setMarked() { // Might be racing with other updates, so use atomic update always. // We used to be clever here and use a non-atomic update in certain @@ -365,7 +363,7 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui s = spanOf(p) // If p is a bad pointer, it may not be in s's bounds. if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse { - if s == nil || s.state == _MSpanManual { + if s == nil || s.state == mSpanManual { // If s is nil, the virtual address has never been part of the heap. // This pointer may be to some mmap'd region, so we allow it. // Pointers into stacks are also ok, the runtime manages these explicitly. @@ -519,7 +517,7 @@ func (h heapBits) bits() uint32 { return uint32(*h.bitp) >> (h.shift & 31) } -// morePointers returns true if this word and all remaining words in this object +// morePointers reports whether this word and all remaining words in this object // are scalars. // h must not describe the second word of the object. func (h heapBits) morePointers() bool { @@ -611,7 +609,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) { } } return - } else if s.state != _MSpanInUse || dst < s.base() || s.limit <= dst { + } else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst { // dst was heap memory at some point, but isn't now. // It can't be a global. It must be either our stack, // or in the case of direct channel sends, it could be @@ -647,6 +645,35 @@ func bulkBarrierPreWrite(dst, src, size uintptr) { } } +// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but +// does not execute write barriers for [dst, dst+size). +// +// In addition to the requirements of bulkBarrierPreWrite +// callers need to ensure [dst, dst+size) is zeroed. 
+// +// This is used for special cases where e.g. dst was just +// created and zeroed with malloc. +//go:nosplit +func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) { + if (dst|src|size)&(sys.PtrSize-1) != 0 { + throw("bulkBarrierPreWrite: unaligned arguments") + } + if !writeBarrier.needed { + return + } + buf := &getg().m.p.ptr().wbBuf + h := heapBitsForAddr(dst) + for i := uintptr(0); i < size; i += sys.PtrSize { + if h.isPointer() { + srcx := (*uintptr)(unsafe.Pointer(src + i)) + if !buf.putFast(0, *srcx) { + wbBufFlush(nil, 0) + } + } + h = h.next() + } +} + // bulkBarrierBitmap executes write barriers for copying from [src, // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is // assumed to start maskOffset bytes into the data covered by the @@ -1882,6 +1909,20 @@ Run: return totalBits } +// materializeGCProg allocates space for the (1-bit) pointer bitmask +// for an object of size ptrdata. Then it fills that space with the +// pointer bitmask specified by the program prog. +// The bitmask starts at s.startAddr. +// The result must be deallocated with dematerializeGCProg. +func materializeGCProg(ptrdata uintptr, prog *byte) *mspan { + s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys) + runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1) + return s +} +func dematerializeGCProg(s *mspan) { + mheap_.freeManual(s, &memstats.gc_sys) +} + func dumpGCProg(p *byte) { nptr := 0 for { @@ -1951,7 +1992,9 @@ func reflect_gcbits(x interface{}) []byte { return ret } -// Returns GC type info for object p for testing. +// Returns GC type info for the pointer stored in ep for testing. +// If ep points to the stack, only static live information will be returned +// (i.e. not for objects which are only dynamically live stack objects). 
func getgcmask(ep interface{}) (mask []byte) { e := *efaceOf(&ep) p := e.data @@ -2008,7 +2051,7 @@ func getgcmask(ep interface{}) (mask []byte) { _g_ := getg() gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0) if frame.fn.valid() { - locals, _ := getStackMap(&frame, nil, false) + locals, _, _ := getStackMap(&frame, nil, false) if locals.n == 0 { return } diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index d0b007f915e76..7895e489bccae 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -4,7 +4,10 @@ package runtime -import "unsafe" +import ( + "runtime/internal/atomic" + "unsafe" +) // Per-thread (in Go, per-P) cache for small objects. // No locking needed because it is per-thread (per-P). @@ -42,6 +45,12 @@ type mcache struct { local_largefree uintptr // bytes freed for large objects (>maxsmallsize) local_nlargefree uintptr // number of frees for large objects (>maxsmallsize) local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize) + + // flushGen indicates the sweepgen during which this mcache + // was last flushed. If flushGen != mheap_.sweepgen, the spans + // in this mcache are stale and need to the flushed so they + // can be swept. This is done in acquirep. + flushGen uint32 } // A gclink is a node in a linked list of blocks, like mlink, @@ -70,12 +79,13 @@ type stackfreelist struct { size uintptr // total size of stacks in list } -// dummy MSpan that contains no free objects. +// dummy mspan that contains no free objects. var emptymspan mspan func allocmcache() *mcache { lock(&mheap_.lock) c := (*mcache)(mheap_.cachealloc.alloc()) + c.flushGen = mheap_.sweepgen unlock(&mheap_.lock) for i := range c.alloc { c.alloc[i] = &emptymspan @@ -101,21 +111,24 @@ func freemcache(c *mcache) { }) } -// Gets a span that has a free object in it and assigns it -// to be the cached span for the given sizeclass. 
Returns this span. +// refill acquires a new span of span class spc for c. This span will +// have at least one free object. The current span in c must be full. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. func (c *mcache) refill(spc spanClass) { - _g_ := getg() - - _g_.m.locks++ // Return the current cached span to the central lists. s := c.alloc[spc] if uintptr(s.allocCount) != s.nelems { throw("refill of span with free space remaining") } - if s != &emptymspan { - s.incache = false + // Mark this span as no longer cached. + if s.sweepgen != mheap_.sweepgen+3 { + throw("bad sweepgen in refill") + } + atomic.Store(&s.sweepgen, mheap_.sweepgen) } // Get a new cached span from the central lists. @@ -128,8 +141,11 @@ func (c *mcache) refill(spc spanClass) { throw("span has no free space") } + // Indicate that this span is cached and prevent asynchronous + // sweeping in the next sweep phase. + s.sweepgen = mheap_.sweepgen + 3 + c.alloc[spc] = s - _g_.m.locks-- } func (c *mcache) releaseAll() { @@ -144,3 +160,26 @@ func (c *mcache) releaseAll() { c.tiny = 0 c.tinyoffset = 0 } + +// prepareForSweep flushes c if the system has entered a new sweep phase +// since c was populated. This must happen between the sweep phase +// starting and the first allocation from c. +func (c *mcache) prepareForSweep() { + // Alternatively, instead of making sure we do this on every P + // between starting the world and allocating on that P, we + // could leave allocate-black on, allow allocation to continue + // as usual, use a ragged barrier at the beginning of sweep to + // ensure all cached spans are swept, and then disable + // allocate-black. However, with this approach it's difficult + // to avoid spilling mark bits into the *next* GC cycle. 
+ sg := mheap_.sweepgen + if c.flushGen == sg { + return + } else if c.flushGen != sg-2 { + println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg) + throw("bad flushGen") + } + c.releaseAll() + stackcache_clear(c) + atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart +} diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index c1e0b472bc9cf..a60eb9fd0ca5b 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -6,8 +6,8 @@ // // See malloc.go for an overview. // -// The MCentral doesn't actually contain the list of free objects; the MSpan does. -// Each MCentral is two lists of MSpans: those with free objects (c->nonempty) +// The mcentral doesn't actually contain the list of free objects; the mspan does. +// Each mcentral is two lists of mspans: those with free objects (c->nonempty) // and those that are completely allocated (c->empty). package runtime @@ -36,7 +36,7 @@ func (c *mcentral) init(spc spanClass) { c.empty.init() } -// Allocate a span to use in an MCache. +// Allocate a span to use in an mcache. func (c *mcentral) cacheSpan() *mspan { // Deduct credit for this span allocation and sweep if necessary. spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize @@ -117,8 +117,7 @@ havespan: if trace.enabled && !traceDone { traceGCSweepDone() } - cap := int32((s.npages << _PageShift) / s.elemsize) - n := cap - int32(s.allocCount) + n := int(s.nelems) - int(s.allocCount) if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems { throw("span has no free objects") } @@ -135,7 +134,6 @@ havespan: // heap_live changed. gcController.revise() } - s.incache = true freeByteBase := s.freeindex &^ (64 - 1) whichByte := freeByteBase / 8 // Init alloc bits cache. @@ -148,30 +146,56 @@ havespan: return s } -// Return span from an MCache. +// Return span from an mcache. 
func (c *mcentral) uncacheSpan(s *mspan) { - lock(&c.lock) - - s.incache = false - if s.allocCount == 0 { throw("uncaching span but s.allocCount == 0") } - cap := int32((s.npages << _PageShift) / s.elemsize) - n := cap - int32(s.allocCount) + sg := mheap_.sweepgen + stale := s.sweepgen == sg+1 + if stale { + // Span was cached before sweep began. It's our + // responsibility to sweep it. + // + // Set sweepgen to indicate it's not cached but needs + // sweeping and can't be allocated from. sweep will + // set s.sweepgen to indicate s is swept. + atomic.Store(&s.sweepgen, sg-1) + } else { + // Indicate that s is no longer cached. + atomic.Store(&s.sweepgen, sg) + } + + n := int(s.nelems) - int(s.allocCount) if n > 0 { - c.empty.remove(s) - c.nonempty.insert(s) - // mCentral_CacheSpan conservatively counted - // unallocated slots in heap_live. Undo this. - atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize)) // cacheSpan updated alloc assuming all objects on s // were going to be allocated. Adjust for any that - // weren't. + // weren't. We must do this before potentially + // sweeping the span. atomic.Xadd64(&c.nmalloc, -int64(n)) + + lock(&c.lock) + c.empty.remove(s) + c.nonempty.insert(s) + if !stale { + // mCentral_CacheSpan conservatively counted + // unallocated slots in heap_live. Undo this. + // + // If this span was cached before sweep, then + // heap_live was totally recomputed since + // caching this span, so we don't do this for + // stale spans. + atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize)) + } + unlock(&c.lock) + } + + if stale { + // Now that s is in the right mcentral list, we can + // sweep it. + s.sweep(false) } - unlock(&c.lock) } // freeSpan updates c and s after sweeping s. @@ -179,17 +203,17 @@ func (c *mcentral) uncacheSpan(s *mspan) { // and, based on the number of free objects in s, // moves s to the appropriate list of c or returns it // to the heap. -// freeSpan returns true if s was returned to the heap. 
+// freeSpan reports whether s was returned to the heap. // If preserve=true, it does not move s (the caller // must take care of it). func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool { - if s.incache { + if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 { throw("freeSpan given cached span") } s.needzero = 1 if preserve { - // preserve is set only when called from MCentral_CacheSpan above, + // preserve is set only when called from (un)cacheSpan above, // the span must be in the empty list. if !s.inList() { throw("can't preserve unlinked span") @@ -207,7 +231,7 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool { } // delay updating sweepgen until here. This is the signal that - // the span may be used in an MCache, so it must come after the + // the span may be used in an mcache, so it must come after the // linked list operations above (actually, just after the // lock of c above.) atomic.Store(&s.sweepgen, mheap_.sweepgen) @@ -219,7 +243,7 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool { c.nonempty.remove(s) unlock(&c.lock) - mheap_.freeSpan(s, 0) + mheap_.freeSpan(s, false) return true } diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_aix.go similarity index 51% rename from src/runtime/mem_darwin.go rename to src/runtime/mem_aix.go index 75c59f9cdd77e..f11f0aba52c18 100644 --- a/src/runtime/mem_darwin.go +++ b/src/runtime/mem_aix.go @@ -1,26 +1,36 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime -import "unsafe" +import ( + "unsafe" +) -// Don't split the stack as this function may be invoked without a valid G, -// which prevents us from allocating more stack. 
+// Don't split the stack as this method may be invoked without a valid G, which +// prevents us from allocating more stack. //go:nosplit func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { - v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANONYMOUS|_MAP_PRIVATE, -1, 0) if err != 0 { + if err == _EACCES { + print("runtime: mmap: access denied\n") + exit(2) + } + if err == _EAGAIN { + print("runtime: mmap: too much locked memory (check 'ulimit -l').\n") + exit(2) + } + //println("sysAlloc failed: ", err) return nil } mSysStatInc(sysStat, n) - return v + return p } func sysUnused(v unsafe.Pointer, n uintptr) { - // Linux's MADV_DONTNEED is like BSD's MADV_FREE. - madvise(v, n, _MADV_FREE) + madvise(v, n, _MADV_DONTNEED) } func sysUsed(v unsafe.Pointer, n uintptr) { @@ -32,27 +42,29 @@ func sysUsed(v unsafe.Pointer, n uintptr) { func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { mSysStatDec(sysStat, n) munmap(v, n) + } func sysFault(v unsafe.Pointer, n uintptr) { - mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) + mmap(v, n, _PROT_NONE, _MAP_ANONYMOUS|_MAP_PRIVATE|_MAP_FIXED, -1, 0) } func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { - p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + p, err := mmap(v, n, _PROT_NONE, _MAP_ANONYMOUS|_MAP_PRIVATE, -1, 0) if err != 0 { return nil } return p } -const ( - _ENOMEM = 12 -) - func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { mSysStatInc(sysStat, n) - p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + + // AIX does not allow mapping a range that is already mapped. + // So always unmap first even if it is already unmapped. 
+ munmap(v, n) + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANONYMOUS|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM { throw("runtime: out of memory") } diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index cc70e806ead1f..84238d7279fb5 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build dragonfly freebsd nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd nacl netbsd openbsd solaris package runtime @@ -42,7 +42,19 @@ func sysFault(v unsafe.Pointer, n uintptr) { } func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { - p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + flags := int32(_MAP_ANON | _MAP_PRIVATE) + if raceenabled && GOOS == "darwin" { + // Currently the race detector expects memory to live within a certain + // range, and on Darwin 10.10 mmap is prone to ignoring hints, moreso + // than later versions and other BSDs (#26475). So, even though it's + // potentially dangerous to MAP_FIXED, we do it in the race detection + // case because it'll help maintain the race detector's invariants. + // + // TODO(mknyszek): Drop this once support for Darwin 10.10 is dropped, + // and reconsider this when #24133 is addressed. 
+ flags |= _MAP_FIXED + } + p, err := mmap(v, n, _PROT_NONE, flags, -1, 0) if err != 0 { return nil } diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 7aa48170a1164..1e45ed6301568 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -5,6 +5,7 @@ package runtime import ( + "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -34,10 +35,12 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { return p } +var adviseUnused = uint32(_MADV_FREE) + func sysUnused(v unsafe.Pointer, n uintptr) { // By default, Linux's "transparent huge page" support will // merge pages into a huge page if there's even a single - // present regular page, undoing the effects of the DONTNEED + // present regular page, undoing the effects of madvise(adviseUnused) // below. On amd64, that means khugepaged can turn a single // 4KB page to 2MB, bloating the process's RSS by as much as // 512X. (See issue #8832 and Linux kernel bug @@ -102,7 +105,18 @@ func sysUnused(v unsafe.Pointer, n uintptr) { throw("unaligned sysUnused") } - madvise(v, n, _MADV_DONTNEED) + var advise uint32 + if debug.madvdontneed != 0 { + advise = _MADV_DONTNEED + } else { + advise = atomic.Load(&adviseUnused) + } + if errno := madvise(v, n, int32(advise)); advise == _MADV_FREE && errno != 0 { + // MADV_FREE was added in Linux 4.5. Fall back to MADV_DONTNEED if it is + // not supported. 
+ atomic.Store(&adviseUnused, _MADV_DONTNEED) + madvise(v, n, _MADV_DONTNEED) + } } func sysUsed(v unsafe.Pointer, n uintptr) { diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go index 690f55eb5c83a..fc52ec59a05e0 100644 --- a/src/runtime/mem_windows.go +++ b/src/runtime/mem_windows.go @@ -61,7 +61,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) { func sysUsed(v unsafe.Pointer, n uintptr) { r := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE) - if r == uintptr(v) { + if r != 0 { return } diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s index a6703b3641114..65f7196312cec 100644 --- a/src/runtime/memclr_386.s +++ b/src/runtime/memclr_386.s @@ -4,6 +4,7 @@ // +build !plan9 +#include "go_asm.h" #include "textflag.h" // NOTE: Windows externalthreadhandler expects memclr to preserve DX. @@ -28,7 +29,7 @@ tail: JBE _5through8 CMPL BX, $16 JBE _9through16 - CMPB runtime·support_sse2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 JNE nosse2 PXOR X0, X0 CMPL BX, $32 diff --git a/src/runtime/memclr_ppc64x.s b/src/runtime/memclr_ppc64x.s index 3b23ce89d87b1..072963f75687d 100644 --- a/src/runtime/memclr_ppc64x.s +++ b/src/runtime/memclr_ppc64x.s @@ -14,34 +14,68 @@ TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT|NOFRAME, $0-16 // Determine if there are doublewords to clear check: ANDCC $7, R4, R5 // R5: leftover bytes to clear - SRAD $3, R4, R6 // R6: double words to clear + SRD $3, R4, R6 // R6: double words to clear CMP R6, $0, CR1 // CR1[EQ] set if no double words - BC 12, 6, nozerolarge // only single bytes - MOVD R6, CTR // R6 = number of double words - SRADCC $2, R6, R7 // 32 byte chunks? 
- BNE zero32setup + BC 12, 6, nozerolarge // only single bytes + CMP R4, $512 + BLT under512 // special case for < 512 + ANDCC $127, R3, R8 // check for 128 alignment of address + BEQ zero512setup + + ANDCC $7, R3, R15 + BEQ zero512xsetup // at least 8 byte aligned + + // zero bytes up to 8 byte alignment + + ANDCC $1, R3, R15 // check for byte alignment + BEQ byte2 + MOVB R0, 0(R3) // zero 1 byte + ADD $1, R3 // bump ptr by 1 + ADD $-1, R4 + +byte2: + ANDCC $2, R3, R15 // check for 2 byte alignment + BEQ byte4 + MOVH R0, 0(R3) // zero 2 bytes + ADD $2, R3 // bump ptr by 2 + ADD $-2, R4 + +byte4: + ANDCC $4, R3, R15 // check for 4 byte alignment + BEQ zero512xsetup + MOVW R0, 0(R3) // zero 4 bytes + ADD $4, R3 // bump ptr by 4 + ADD $-4, R4 + BR zero512xsetup // ptr should now be 8 byte aligned + +under512: + MOVD R6, CTR // R6 = number of double words + SRDCC $2, R6, R7 // 32 byte chunks? + BNE zero32setup // Clear double words zero8: MOVD R0, 0(R3) // double word ADD $8, R3 + ADD $-8, R4 BC 16, 0, zero8 // dec ctr, br zero8 if ctr not 0 - BR nozerolarge // handle remainder + BR nozerolarge // handle leftovers // Prepare to clear 32 bytes at a time. 
zero32setup: - DCBTST (R3) // prepare data cache - MOVD R7, CTR // number of 32 byte chunks + DCBTST (R3) // prepare data cache + XXLXOR VS32, VS32, VS32 // clear VS32 (V0) + MOVD R7, CTR // number of 32 byte chunks + MOVD $16, R8 zero32: - MOVD R0, 0(R3) // clear 4 double words - MOVD R0, 8(R3) - MOVD R0, 16(R3) - MOVD R0, 24(R3) + STXVD2X VS32, (R3+R0) // store 16 bytes + STXVD2X VS32, (R3+R8) ADD $32, R3 + ADD $-32, R4 BC 16, 0, zero32 // dec ctr, br zero32 if ctr not 0 RLDCLCC $61, R4, $3, R6 // remaining doublewords BEQ nozerolarge @@ -49,8 +83,8 @@ zero32: BR zero8 nozerolarge: - CMP R5, $0 // any remaining bytes - BC 4, 1, LR // ble lr + ANDCC $7, R4, R5 // any remaining bytes + BC 4, 1, LR // ble lr zerotail: MOVD R5, CTR // set up to clear tail bytes @@ -60,3 +94,70 @@ zerotailloop: ADD $1, R3 BC 16, 0, zerotailloop // dec ctr, br zerotailloop if ctr not 0 RET + +zero512xsetup: // 512 chunk with extra needed + ANDCC $8, R3, R11 // 8 byte alignment? + BEQ zero512setup16 + MOVD R0, 0(R3) // clear 8 bytes + ADD $8, R3 // update ptr to next 8 + ADD $-8, R4 // dec count by 8 + +zero512setup16: + ANDCC $127, R3, R14 // < 128 byte alignment + BEQ zero512setup // handle 128 byte alignment + MOVD $128, R15 + SUB R14, R15, R14 // find increment to 128 alignment + SRD $4, R14, R15 // number of 16 byte chunks + +zero512presetup: + MOVD R15, CTR // loop counter of 16 bytes + XXLXOR VS32, VS32, VS32 // clear VS32 (V0) + +zero512preloop: // clear up to 128 alignment + STXVD2X VS32, (R3+R0) // clear 16 bytes + ADD $16, R3 // update ptr + ADD $-16, R4 // dec count + BC 16, 0, zero512preloop + +zero512setup: // setup for dcbz loop + CMP R4, $512 // check if at least 512 + BLT remain + SRD $9, R4, R8 // loop count for 512 chunks + MOVD R8, CTR // set up counter + MOVD $128, R9 // index regs for 128 bytes + MOVD $256, R10 + MOVD $384, R11 + +zero512: + DCBZ (R3+R0) // clear first chunk + DCBZ (R3+R9) // clear second chunk + DCBZ (R3+R10) // clear third chunk + DCBZ (R3+R11) 
// clear fourth chunk + ADD $512, R3 + ADD $-512, R4 + BC 16, 0, zero512 + +remain: + CMP R4, $128 // check if 128 byte chunks left + BLT smaller + DCBZ (R3+R0) // clear 128 + ADD $128, R3 + ADD $-128, R4 + BR remain + +smaller: + ANDCC $127, R4, R7 // find leftovers + BEQ done + CMP R7, $64 // more than 64, do 32 at a time + BLT zero8setup // less than 64, do 8 at a time + SRD $5, R7, R7 // set up counter for 32 + BR zero32setup + +zero8setup: + SRDCC $3, R7, R7 // less than 8 bytes + BEQ nozerolarge + MOVD R7, CTR + BR zero8 + +done: + RET diff --git a/src/runtime/memmove_386.s b/src/runtime/memmove_386.s index 172ea40820b30..7b54070f595cc 100644 --- a/src/runtime/memmove_386.s +++ b/src/runtime/memmove_386.s @@ -25,6 +25,7 @@ // +build !plan9 +#include "go_asm.h" #include "textflag.h" // func memmove(to, from unsafe.Pointer, n uintptr) @@ -51,7 +52,7 @@ tail: JBE move_5through8 CMPL BX, $16 JBE move_9through16 - CMPB runtime·support_sse2(SB), $1 + CMPB internal∕cpu·X86+const_offsetX86HasSSE2(SB), $1 JNE nosse2 CMPL BX, $32 JBE move_17through32 @@ -72,7 +73,7 @@ nosse2: */ forward: // If REP MOVSB isn't fast, don't use it - CMPB runtime·support_erms(SB), $1 // enhanced REP MOVSB/STOSB + CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB JNE fwdBy4 // Check alignment diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s index cb5cd02e45b92..b4243a833b6e6 100644 --- a/src/runtime/memmove_amd64.s +++ b/src/runtime/memmove_amd64.s @@ -25,6 +25,7 @@ // +build !plan9 +#include "go_asm.h" #include "textflag.h" // func memmove(to, from unsafe.Pointer, n uintptr) @@ -83,7 +84,7 @@ forward: JLS move_256through2048 // If REP MOVSB isn't fast, don't use it - CMPB runtime·support_erms(SB), $1 // enhanced REP MOVSB/STOSB + CMPB internal∕cpu·X86+const_offsetX86HasERMS(SB), $1 // enhanced REP MOVSB/STOSB JNE fwdBy8 // Check alignment diff --git a/src/runtime/memmove_amd64p32.s b/src/runtime/memmove_amd64p32.s index 
8e9fdd14c5e3c..114077311ca06 100644 --- a/src/runtime/memmove_amd64p32.s +++ b/src/runtime/memmove_amd64p32.s @@ -34,7 +34,7 @@ back: ADDL BX, DI ADDL BX, SI STD - + MOVL BX, CX SHRL $2, CX ANDL $3, BX diff --git a/src/runtime/memmove_arm.s b/src/runtime/memmove_arm.s index 324b21bf7a5fd..8352fb7860979 100644 --- a/src/runtime/memmove_arm.s +++ b/src/runtime/memmove_arm.s @@ -138,7 +138,7 @@ _f32loop: CMP TMP, TS BHS _f4tail - MOVM.IA.W (FROM), [R1-R8] + MOVM.IA.W (FROM), [R1-R8] MOVM.IA.W [R1-R8], (TS) B _f32loop diff --git a/src/runtime/memmove_plan9_386.s b/src/runtime/memmove_plan9_386.s index 7ff01940a2d4c..65dec93f6bba7 100644 --- a/src/runtime/memmove_plan9_386.s +++ b/src/runtime/memmove_plan9_386.s @@ -56,7 +56,7 @@ tail: /* * forward copy loop */ -forward: +forward: MOVL BX, CX SHRL $2, CX ANDL $3, BX diff --git a/src/runtime/memmove_plan9_amd64.s b/src/runtime/memmove_plan9_amd64.s index f18b59f3d22ef..b729c7c0e7d94 100644 --- a/src/runtime/memmove_plan9_amd64.s +++ b/src/runtime/memmove_plan9_amd64.s @@ -73,7 +73,7 @@ back: ADDQ BX, CX CMPQ CX, DI JLS forward - + /* * whole thing backwards has * adjusted addresses diff --git a/src/runtime/memmove_ppc64x.s b/src/runtime/memmove_ppc64x.s index b79f76d38874d..60cbcc41ec569 100644 --- a/src/runtime/memmove_ppc64x.s +++ b/src/runtime/memmove_ppc64x.s @@ -16,7 +16,7 @@ TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 // copy so a more efficient move can be done check: ANDCC $7, R5, R7 // R7: bytes to copy - SRAD $3, R5, R6 // R6: double words to copy + SRD $3, R5, R6 // R6: double words to copy CMP R6, $0, CR1 // CR1[EQ] set if no double words to copy // Determine overlap by subtracting dest - src and comparing against the @@ -31,9 +31,9 @@ check: // Copying forward if no overlap. BC 12, 6, noforwardlarge // "BEQ CR1, noforwardlarge" - MOVD R6,CTR // R6 = number of double words - SRADCC $2,R6,R8 // 32 byte chunks? + SRDCC $2,R6,R8 // 32 byte chunks? 
BNE forward32setup // + MOVD R6,CTR // R6 = number of double words // Move double words @@ -51,17 +51,14 @@ forward32setup: DCBTST (R3) // prepare data cache DCBT (R4) MOVD R8, CTR // double work count + MOVD $16, R8 forward32: - MOVD 0(R4), R8 // load 4 double words - MOVD 8(R4), R9 - MOVD 16(R4), R14 - MOVD 24(R4), R15 - ADD $32,R4 - MOVD R8, 0(R3) // store those 4 - MOVD R9, 8(R3) - MOVD R14,16(R3) - MOVD R15,24(R3) + LXVD2X (R4+R0), VS32 // load 16 bytes + LXVD2X (R4+R8), VS33 + ADD $32, R4 + STXVD2X VS32, (R3+R0) // store 16 bytes + STXVD2X VS33, (R3+R8) ADD $32,R3 // bump up for next set BC 16, 0, forward32 // continue RLDCLCC $61,R5,$3,R6 // remaining doublewords @@ -71,7 +68,7 @@ forward32: noforwardlarge: CMP R7,$0 // any remaining bytes - BC 4, 1, LR + BC 4, 1, LR // ble lr forwardtail: MOVD R7, CTR // move tail bytes @@ -101,19 +98,39 @@ backwardtailloop: SUB $1,R4 MOVBZ R8, -1(R3) SUB $1,R3 - BC 16, 0, backwardtailloop + BC 16, 0, backwardtailloop // bndz nobackwardtail: - CMP R6,$0 - BC 4, 5, LR + BC 4, 5, LR // ble CR1 lr backwardlarge: MOVD R6, CTR + SUB R3, R4, R9 // Use vsx if moving + CMP R9, $32 // at least 32 byte chunks + BLT backwardlargeloop // and distance >= 32 + SRDCC $2,R6,R8 // 32 byte chunks + BNE backward32setup backwardlargeloop: MOVD -8(R4), R8 SUB $8,R4 MOVD R8, -8(R3) SUB $8,R3 - BC 16, 0, backwardlargeloop // + BC 16, 0, backwardlargeloop // bndz RET + +backward32setup: + MOVD R8, CTR // set up loop ctr + MOVD $16, R8 // 32 bytes at at time + +backward32loop: + SUB $32, R4 + SUB $32, R3 + LXVD2X (R4+R0), VS32 // load 16 bytes + LXVD2X (R4+R8), VS33 + STXVD2X VS32, (R3+R0) // store 16 bytes + STXVD2X VS33, (R3+R8) + BC 16, 0, backward32loop // bndz + BC 4, 5, LR // ble CR1 lr + MOVD R6, CTR + BR backwardlargeloop diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go index 1febe782bb6e4..f9dd6ca474dfc 100644 --- a/src/runtime/mfixalloc.go +++ b/src/runtime/mfixalloc.go @@ -12,7 +12,7 @@ import "unsafe" // FixAlloc is a 
simple free-list allocator for fixed size objects. // Malloc uses a FixAlloc wrapped around sysAlloc to manage its -// MCache and MSpan objects. +// mcache and mspan objects. // // Memory returned by fixalloc.alloc is zeroed by default, but the // caller may take responsibility for zeroing allocations by setting diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 6a3219de73d33..4d4cdc14ca810 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -28,8 +28,7 @@ // b. Sweep any unswept spans. There will only be unswept spans if // this GC cycle was forced before the expected time. // -// 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are -// allowed to locally cache parts of the work queue. +// 2. GC performs the mark phase. // // a. Prepare for the mark phase by setting gcphase to _GCmark // (from _GCoff), enabling the write barrier, enabling mutator @@ -54,28 +53,21 @@ // object to black and shading all pointers found in the object // (which in turn may add those pointers to the work queue). // -// 3. Once the global work queue is empty (but local work queue caches -// may still contain work), GC performs the "mark 2" sub-phase. +// e. Because GC work is spread across local caches, GC uses a +// distributed termination algorithm to detect when there are no +// more root marking jobs or grey objects (see gcMarkDone). At this +// point, GC transitions to mark termination. // -// a. GC stops all workers, disables local work queue caches, -// flushes each P's local work queue cache to the global work queue -// cache, and reenables workers. -// -// b. GC again drains the work queue, as in 2d above. -// -// 4. Once the work queue is empty, GC performs mark termination. +// 3. GC performs mark termination. // // a. Stop the world. // // b. Set gcphase to _GCmarktermination, and disable workers and // assists. // -// c. Drain any remaining work from the work queue (typically there -// will be none). -// -// d. 
Perform other housekeeping like flushing mcaches. +// c. Perform housekeeping like flushing mcaches. // -// 5. GC performs the sweep phase. +// 4. GC performs the sweep phase. // // a. Prepare for the sweep phase by setting gcphase to _GCoff, // setting up sweep state and disabling the write barrier. @@ -86,7 +78,7 @@ // c. GC does concurrent sweeping in the background and in response // to allocation. See description below. // -// 6. When sufficient allocation has taken place, replay the sequence +// 5. When sufficient allocation has taken place, replay the sequence // starting with 1 above. See discussion of GC rate below. // Concurrent sweep. @@ -137,8 +129,8 @@ package runtime import ( + "internal/cpu" "runtime/internal/atomic" - "runtime/internal/sys" "unsafe" ) @@ -261,21 +253,6 @@ var writeBarrier struct { // gcphase == _GCmark. var gcBlackenEnabled uint32 -// gcBlackenPromptly indicates that optimizations that may -// hide work from the global work queue should be disabled. -// -// If gcBlackenPromptly is true, per-P gcWork caches should -// be flushed immediately and new objects should be allocated black. -// -// There is a tension between allocating objects white and -// allocating them black. If white and the objects die before being -// marked they can be collected during this GC cycle. On the other -// hand allocating them black will reduce _GCmarktermination latency -// since more work is done in the mark phase. This tension is resolved -// by allocating white until the mark phase is approaching its end and -// then allocating black for the remainder of the mark phase. -var gcBlackenPromptly bool - const ( _GCoff = iota // GC not running; sweeping in background, write barrier disabled _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED @@ -407,14 +384,14 @@ type gcControllerState struct { // each P that isn't running a dedicated worker. 
// // For example, if the utilization goal is 25% and there are - // no dedicated workers, this will be 0.25. If there goal is + // no dedicated workers, this will be 0.25. If the goal is // 25%, there is one dedicated worker, and GOMAXPROCS is 5, // this will be 0.05 to make up the missing 5%. // // If this is zero, no fractional workers are needed. fractionalUtilizationGoal float64 - _ [sys.CacheLineSize]byte + _ cpu.CacheLinePad } // startCycle resets the GC controller's state and computes estimates @@ -478,6 +455,12 @@ func (c *gcControllerState) startCycle() { c.fractionalUtilizationGoal = 0 } + // In STW mode, we just want dedicated workers. + if debug.gcstoptheworld > 0 { + c.dedicatedMarkWorkersNeeded = int64(gomaxprocs) + c.fractionalUtilizationGoal = 0 + } + // Clear per-P state for _, p := range allp { p.gcAssistTime = 0 @@ -752,7 +735,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g { return gp } -// pollFractionalWorkerExit returns true if a fractional mark worker +// pollFractionalWorkerExit reports whether a fractional mark worker // should self-preempt. It assumes it is called from the fractional // worker. func pollFractionalWorkerExit() bool { @@ -806,7 +789,7 @@ func gcSetTriggerRatio(triggerRatio float64) { trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio)) // Don't trigger below the minimum heap size. minTrigger := heapminimum - if !gosweepdone() { + if !isSweepDone() { // Concurrent sweep happens in the heap growth // from heap_live to gc_trigger, so ensure // that concurrent sweep has some heap growth @@ -851,7 +834,7 @@ func gcSetTriggerRatio(triggerRatio float64) { } // Update sweep pacing. - if gosweepdone() { + if isSweepDone() { mheap_.sweepPagesPerByte = 0 } else { // Concurrent sweep needs to sweep all of the in-use @@ -901,7 +884,7 @@ const gcGoalUtilization = 0.30 // mutator latency. 
const gcBackgroundUtilization = 0.25 -// gcCreditSlack is the amount of scan work credit that can can +// gcCreditSlack is the amount of scan work credit that can // accumulate locally before updating gcController.scanWork and, // optionally, gcController.bgScanCredit. Lower values give a more // accurate assist ratio and make it more likely that assists will @@ -919,9 +902,9 @@ const gcAssistTimeSlack = 5000 const gcOverAssistWork = 64 << 10 var work struct { - full lfstack // lock-free list of full blocks workbuf - empty lfstack // lock-free list of empty blocks workbuf - pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait + full lfstack // lock-free list of full blocks workbuf + empty lfstack // lock-free list of empty blocks workbuf + pad0 cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait wbufSpans struct { lock mutex @@ -955,32 +938,15 @@ var work struct { markrootNext uint32 // next markroot job markrootJobs uint32 // number of markroot jobs - nproc uint32 - tstart int64 - nwait uint32 - ndone uint32 - alldone note - - // helperDrainBlock indicates that GC mark termination helpers - // should pass gcDrainBlock to gcDrain to block in the - // getfull() barrier. Otherwise, they should pass gcDrainNoBlock. - // - // TODO: This is a temporary fallback to work around races - // that cause early mark termination. - helperDrainBlock bool + nproc uint32 + tstart int64 + nwait uint32 + ndone uint32 // Number of roots of various root types. Set by gcMarkRootPrepare. nFlushCacheRoots int nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int - // markrootDone indicates that roots have been marked at least - // once during the current GC cycle. This is checked by root - // marking operations that have to happen only during the - // first root marking pass, whether that's during the - // concurrent mark phase in current GC or mark termination in - // STW GC. 
- markrootDone bool - // Each type of GC state transition is protected by a lock. // Since multiple threads can simultaneously detect the state // transition condition, any thread that detects a transition @@ -996,8 +962,7 @@ var work struct { // startSema protects the transition from "off" to mark or // mark termination. startSema uint32 - // markDoneSema protects transitions from mark 1 to mark 2 and - // from mark 2 to mark termination. + // markDoneSema protects transitions from mark to mark termination. markDoneSema uint32 bgMarkReady note // signal background mark worker has started @@ -1023,15 +988,15 @@ var work struct { // there was neither enough credit to steal or enough work to // do. assistQueue struct { - lock mutex - head, tail guintptr + lock mutex + q gQueue } // sweepWaiters is a list of blocked goroutines to wake when // we transition from mark termination to sweep. sweepWaiters struct { lock mutex - head guintptr + list gList } // cycles is the number of completed GC cycles, where a GC @@ -1087,7 +1052,7 @@ func GC() { // We're now in sweep N or later. Trigger GC cycle N+1, which // will first finish sweep N if necessary and then enter sweep // termination N+1. - gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1}) + gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1}) // Wait for mark termination N+1 to complete. gcWaitOnMark(n + 1) @@ -1096,7 +1061,7 @@ func GC() { // complete the cycle and because runtime.GC() is often used // as part of tests and benchmarks to get the system into a // relatively stable and isolated state. - for atomic.Load(&work.cycles) == n+1 && gosweepone() != ^uintptr(0) { + for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) { sweep.nbgsweep++ Gosched() } @@ -1146,9 +1111,7 @@ func gcWaitOnMark(n uint32) { // Wait until sweep termination, mark, and mark // termination of cycle N complete. 
- gp := getg() - gp.schedlink = work.sweepWaiters.head - work.sweepWaiters.head.set(gp) + work.sweepWaiters.list.push(getg()) goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1) } } @@ -1194,7 +1157,7 @@ const ( gcTriggerCycle ) -// test returns true if the trigger condition is satisfied, meaning +// test reports whether the trigger condition is satisfied, meaning // that the exit condition for the _GCoff phase has been met. The exit // condition should be tested when allocating. func (t gcTrigger) test() bool { @@ -1227,13 +1190,13 @@ func (t gcTrigger) test() bool { return true } -// gcStart transitions the GC from _GCoff to _GCmark (if -// !mode.stwMark) or _GCmarktermination (if mode.stwMark) by -// performing sweep termination and GC initialization. +// gcStart starts the GC. It transitions from _GCoff to _GCmark (if +// debug.gcstoptheworld == 0) or performs all of GC (if +// debug.gcstoptheworld != 0). // // This may return without performing this transition in some cases, // such as when called on a system stack or with locks held. -func gcStart(mode gcMode, trigger gcTrigger) { +func gcStart(trigger gcTrigger) { // Since this is called from malloc and malloc is called in // the guts of a number of libraries that might be holding // locks, don't attempt to start GC in non-preemptible or @@ -1256,7 +1219,7 @@ func gcStart(mode gcMode, trigger gcTrigger) { // // We check the transition condition continuously here in case // this G gets delayed in to the next GC cycle. - for trigger.test() && gosweepone() != ^uintptr(0) { + for trigger.test() && sweepone() != ^uintptr(0) { sweep.nbgsweep++ } @@ -1276,12 +1239,11 @@ func gcStart(mode gcMode, trigger gcTrigger) { // We do this after re-checking the transition condition so // that multiple goroutines that detect the heap trigger don't // start multiple STW GCs. 
- if mode == gcBackgroundMode { - if debug.gcstoptheworld == 1 { - mode = gcForceMode - } else if debug.gcstoptheworld == 2 { - mode = gcForceBlockMode - } + mode := gcBackgroundMode + if debug.gcstoptheworld == 1 { + mode = gcForceMode + } else if debug.gcstoptheworld == 2 { + mode = gcForceBlockMode } // Ok, we're doing it! Stop everybody else @@ -1291,10 +1253,16 @@ func gcStart(mode gcMode, trigger gcTrigger) { traceGCStart() } - if mode == gcBackgroundMode { - gcBgMarkStartWorkers() + // Check that all Ps have finished deferred mcache flushes. + for _, p := range allp { + if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen { + println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen) + throw("p mcache not flushed") + } } + gcBgMarkStartWorkers() + gcResetMarkState() work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs @@ -1323,199 +1291,299 @@ func gcStart(mode gcMode, trigger gcTrigger) { clearpools() work.cycles++ - if mode == gcBackgroundMode { // Do as much work concurrently as possible - gcController.startCycle() - work.heapGoal = memstats.next_gc - // Enter concurrent mark phase and enable - // write barriers. - // - // Because the world is stopped, all Ps will - // observe that write barriers are enabled by - // the time we start the world and begin - // scanning. - // - // Write barriers must be enabled before assists are - // enabled because they must be enabled before - // any non-leaf heap objects are marked. Since - // allocations are blocked until assists can - // happen, we want enable assists as early as - // possible. - setGCPhase(_GCmark) - - gcBgMarkPrepare() // Must happen before assist enable. - gcMarkRootPrepare() - - // Mark all active tinyalloc blocks. Since we're - // allocating from these, they need to be black like - // other allocations. The alternative is to blacken - // the tiny block on every allocation from it, which - // would slow down the tiny allocator. 
- gcMarkTinyAllocs() - - // At this point all Ps have enabled the write - // barrier, thus maintaining the no white to - // black invariant. Enable mutator assists to - // put back-pressure on fast allocating - // mutators. - atomic.Store(&gcBlackenEnabled, 1) - - // Assists and workers can start the moment we start - // the world. - gcController.markStartTime = now - - // Concurrent mark. - systemstack(func() { - now = startTheWorldWithSema(trace.enabled) - }) + gcController.startCycle() + work.heapGoal = memstats.next_gc + + // In STW mode, disable scheduling of user Gs. This may also + // disable scheduling of this goroutine, so it may block as + // soon as we start the world again. + if mode != gcBackgroundMode { + schedEnableUser(false) + } + + // Enter concurrent mark phase and enable + // write barriers. + // + // Because the world is stopped, all Ps will + // observe that write barriers are enabled by + // the time we start the world and begin + // scanning. + // + // Write barriers must be enabled before assists are + // enabled because they must be enabled before + // any non-leaf heap objects are marked. Since + // allocations are blocked until assists can + // happen, we want enable assists as early as + // possible. + setGCPhase(_GCmark) + + gcBgMarkPrepare() // Must happen before assist enable. + gcMarkRootPrepare() + + // Mark all active tinyalloc blocks. Since we're + // allocating from these, they need to be black like + // other allocations. The alternative is to blacken + // the tiny block on every allocation from it, which + // would slow down the tiny allocator. + gcMarkTinyAllocs() + + // At this point all Ps have enabled the write + // barrier, thus maintaining the no white to + // black invariant. Enable mutator assists to + // put back-pressure on fast allocating + // mutators. + atomic.Store(&gcBlackenEnabled, 1) + + // Assists and workers can start the moment we start + // the world. 
+ gcController.markStartTime = now + + // Concurrent mark. + systemstack(func() { + now = startTheWorldWithSema(trace.enabled) work.pauseNS += now - work.pauseStart work.tMark = now - } else { - if trace.enabled { - // Switch to mark termination STW. - traceGCSTWDone() - traceGCSTWStart(0) - } - t := nanotime() - work.tMark, work.tMarkTerm = t, t - work.heapGoal = work.heap0 - - // Perform mark termination. This will restart the world. - gcMarkTermination(memstats.triggerRatio) + }) + // In STW mode, we could block the instant systemstack + // returns, so don't do anything important here. Make sure we + // block rather than returning to user code. + if mode != gcBackgroundMode { + Gosched() } semrelease(&work.startSema) } -// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2 -// to mark termination. +// gcMarkDoneFlushed counts the number of P's with flushed work. // -// This should be called when all mark work has been drained. In mark -// 1, this includes all root marking jobs, global work buffers, and -// active work buffers in assists and background workers; however, -// work may still be cached in per-P work buffers. In mark 2, per-P -// caches are disabled. +// Ideally this would be a captured local in gcMarkDone, but forEachP +// escapes its callback closure, so it can't capture anything. +// +// This is protected by markDoneSema. +var gcMarkDoneFlushed uint32 + +// debugCachedWork enables extra checks for debugging premature mark +// termination. +// +// For debugging issue #27993. +const debugCachedWork = false + +// gcWorkPauseGen is for debugging the mark completion algorithm. +// gcWork put operations spin while gcWork.pauseGen == gcWorkPauseGen. +// Only used if debugCachedWork is true. +// +// For debugging issue #27993. 
+var gcWorkPauseGen uint32 = 1 + +// gcMarkDone transitions the GC from mark to mark termination if all +// reachable objects have been marked (that is, there are no grey +// objects and can be no more in the future). Otherwise, it flushes +// all local work to the global queues where it can be discovered by +// other workers. +// +// This should be called when all local mark work has been drained and +// there are no remaining workers. Specifically, when +// +// work.nwait == work.nproc && !gcMarkWorkAvailable(p) // // The calling context must be preemptible. // -// Note that it is explicitly okay to have write barriers in this -// function because completion of concurrent mark is best-effort -// anyway. Any work created by write barriers here will be cleaned up -// by mark termination. +// Flushing local work is important because idle Ps may have local +// work queued. This is the only way to make that work visible and +// drive GC to completion. +// +// It is explicitly okay to have write barriers in this function. If +// it does transition to mark termination, then all reachable objects +// have been marked, so the write barrier cannot shade any more +// objects. func gcMarkDone() { -top: + // Ensure only one thread is running the ragged barrier at a + // time. semacquire(&work.markDoneSema) +top: // Re-check transition condition under transition lock. + // + // It's critical that this checks the global work queues are + // empty before performing the ragged barrier. Otherwise, + // there could be global work that a P could take after the P + // has passed the ragged barrier. if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) { semrelease(&work.markDoneSema) return } - // Disallow starting new workers so that any remaining workers - // in the current mark phase will drain out. - // - // TODO(austin): Should dedicated workers keep an eye on this - // and exit gcDrain promptly? 
- atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff) - prevFractionalGoal := gcController.fractionalUtilizationGoal - gcController.fractionalUtilizationGoal = 0 - - if !gcBlackenPromptly { - // Transition from mark 1 to mark 2. - // - // The global work list is empty, but there can still be work - // sitting in the per-P work caches. - // Flush and disable work caches. - - // Disallow caching workbufs and indicate that we're in mark 2. - gcBlackenPromptly = true - - // Prevent completion of mark 2 until we've flushed - // cached workbufs. - atomic.Xadd(&work.nwait, -1) - - // GC is set up for mark 2. Let Gs blocked on the - // transition lock go while we flush caches. - semrelease(&work.markDoneSema) - - systemstack(func() { - // Flush all currently cached workbufs and - // ensure all Ps see gcBlackenPromptly. This - // also blocks until any remaining mark 1 - // workers have exited their loop so we can - // start new mark 2 workers. - forEachP(func(_p_ *p) { - wbBufFlush1(_p_) - _p_.gcw.dispose() - }) + // Flush all local buffers and collect flushedWork flags. + gcMarkDoneFlushed = 0 + systemstack(func() { + gp := getg().m.curg + // Mark the user stack as preemptible so that it may be scanned. + // Otherwise, our attempt to force all P's to a safepoint could + // result in a deadlock as we attempt to preempt a worker that's + // trying to preempt us (e.g. for a stack scan). + casgstatus(gp, _Grunning, _Gwaiting) + forEachP(func(_p_ *p) { + // Flush the write barrier buffer, since this may add + // work to the gcWork. + wbBufFlush1(_p_) + // For debugging, shrink the write barrier + // buffer so it flushes immediately. + // wbBuf.reset will keep it at this size as + // long as throwOnGCWork is set. + if debugCachedWork { + b := &_p_.wbBuf + b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers])) + b.debugGen = gcWorkPauseGen + } + // Flush the gcWork, since this may create global work + // and set the flushedWork flag. 
+ // + // TODO(austin): Break up these workbufs to + // better distribute work. + _p_.gcw.dispose() + // Collect the flushedWork flag. + if _p_.gcw.flushedWork { + atomic.Xadd(&gcMarkDoneFlushed, 1) + _p_.gcw.flushedWork = false + } else if debugCachedWork { + // For debugging, freeze the gcWork + // until we know whether we've reached + // completion or not. If we think + // we've reached completion, but + // there's a paused gcWork, then + // that's a bug. + _p_.gcw.pauseGen = gcWorkPauseGen + // Capture the G's stack. + for i := range _p_.gcw.pauseStack { + _p_.gcw.pauseStack[i] = 0 + } + callers(1, _p_.gcw.pauseStack[:]) + } }) + casgstatus(gp, _Gwaiting, _Grunning) + }) - // Check that roots are marked. We should be able to - // do this before the forEachP, but based on issue - // #16083 there may be a (harmless) race where we can - // enter mark 2 while some workers are still scanning - // stacks. The forEachP ensures these scans are done. - // - // TODO(austin): Figure out the race and fix this - // properly. - gcMarkRootCheck() + if gcMarkDoneFlushed != 0 { + if debugCachedWork { + // Release paused gcWorks. + atomic.Xadd(&gcWorkPauseGen, 1) + } + // More grey objects were discovered since the + // previous termination check, so there may be more + // work to do. Keep going. It's possible the + // transition condition became true again during the + // ragged barrier, so re-check it. + goto top + } - // Now we can start up mark 2 workers. - atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff) - gcController.fractionalUtilizationGoal = prevFractionalGoal + if debugCachedWork { + throwOnGCWork = true + // Release paused gcWorks. If there are any, they + // should now observe throwOnGCWork and panic. 
+ atomic.Xadd(&gcWorkPauseGen, 1) + } - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { - // This loop will make progress because - // gcBlackenPromptly is now true, so it won't - // take this same "if" branch. - goto top + // There was no global work, no local work, and no Ps + // communicated work since we took markDoneSema. Therefore + // there are no grey objects and no more objects can be + // shaded. Transition to mark termination. + now := nanotime() + work.tMarkTerm = now + work.pauseStart = now + getg().m.preemptoff = "gcing" + if trace.enabled { + traceGCSTWStart(0) + } + systemstack(stopTheWorldWithSema) + // The gcphase is _GCmark, it will transition to _GCmarktermination + // below. The important thing is that the wb remains active until + // all marking is complete. This includes writes made by the GC. + + if debugCachedWork { + // For debugging, double check that no work was added after we + // went around above and disable write barrier buffering. + for _, p := range allp { + gcw := &p.gcw + if !gcw.empty() { + printlock() + print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork) + if gcw.wbuf1 == nil { + print(" wbuf1=") + } else { + print(" wbuf1.n=", gcw.wbuf1.nobj) + } + if gcw.wbuf2 == nil { + print(" wbuf2=") + } else { + print(" wbuf2.n=", gcw.wbuf2.nobj) + } + print("\n") + if gcw.pauseGen == gcw.putGen { + println("runtime: checkPut already failed at this generation") + } + throw("throwOnGCWork") + } } } else { - // Transition to mark termination. - now := nanotime() - work.tMarkTerm = now - work.pauseStart = now - getg().m.preemptoff = "gcing" - if trace.enabled { - traceGCSTWStart(0) + // For unknown reasons (see issue #27993), there is + // sometimes work left over when we enter mark + // termination. Detect this and resume concurrent + // mark. This is obviously unfortunate. 
+ // + // Switch to the system stack to call wbBufFlush1, + // though in this case it doesn't matter because we're + // non-preemptible anyway. + restart := false + systemstack(func() { + for _, p := range allp { + wbBufFlush1(p) + if !p.gcw.empty() { + restart = true + break + } + } + }) + if restart { + getg().m.preemptoff = "" + systemstack(func() { + now := startTheWorldWithSema(true) + work.pauseNS += now - work.pauseStart + }) + goto top } - systemstack(stopTheWorldWithSema) - // The gcphase is _GCmark, it will transition to _GCmarktermination - // below. The important thing is that the wb remains active until - // all marking is complete. This includes writes made by the GC. + } - // Record that one root marking pass has completed. - work.markrootDone = true + // Disable assists and background workers. We must do + // this before waking blocked assists. + atomic.Store(&gcBlackenEnabled, 0) - // Disable assists and background workers. We must do - // this before waking blocked assists. - atomic.Store(&gcBlackenEnabled, 0) + // Wake all blocked assists. These will run when we + // start the world again. + gcWakeAllAssists() - // Wake all blocked assists. These will run when we - // start the world again. - gcWakeAllAssists() + // Likewise, release the transition lock. Blocked + // workers and assists will run when we start the + // world again. + semrelease(&work.markDoneSema) - // Likewise, release the transition lock. Blocked - // workers and assists will run when we start the - // world again. - semrelease(&work.markDoneSema) + // In STW mode, re-enable user goroutines. These will be + // queued to run after we start the world. + schedEnableUser(true) - // endCycle depends on all gcWork cache stats being - // flushed. This is ensured by mark 2. - nextTriggerRatio := gcController.endCycle() + // endCycle depends on all gcWork cache stats being flushed. + // The termination algorithm above ensured that up to + // allocations since the ragged barrier. 
+ nextTriggerRatio := gcController.endCycle() - // Perform mark termination. This will restart the world. - gcMarkTermination(nextTriggerRatio) - } + // Perform mark termination. This will restart the world. + gcMarkTermination(nextTriggerRatio) } func gcMarkTermination(nextTriggerRatio float64) { // World is stopped. // Start marktermination which includes enabling the write barrier. atomic.Store(&gcBlackenEnabled, 0) - gcBlackenPromptly = false setGCPhase(_GCmarktermination) work.heap1 = memstats.heap_live @@ -1548,35 +1616,22 @@ func gcMarkTermination(nextTriggerRatio float64) { systemstack(func() { work.heap2 = work.bytesMarked if debug.gccheckmark > 0 { - // Run a full stop-the-world mark using checkmark bits, - // to check that we didn't forget to mark anything during - // the concurrent mark process. + // Run a full non-parallel, stop-the-world + // mark using checkmark bits, to check that we + // didn't forget to mark anything during the + // concurrent mark process. gcResetMarkState() initCheckmarks() - gcMark(startTime) + gcw := &getg().m.p.ptr().gcw + gcDrain(gcw, 0) + wbBufFlush1(getg().m.p.ptr()) + gcw.dispose() clearCheckmarks() } // marking is complete so we can turn the write barrier off setGCPhase(_GCoff) gcSweep(work.mode) - - if debug.gctrace > 1 { - startTime = nanotime() - // The g stacks have been scanned so - // they have gcscanvalid==true and gcworkdone==true. - // Reset these so that all stacks will be rescanned. - gcResetMarkState() - finishsweep_m() - - // Still in STW but gcphase is _GCoff, reset to _GCmarktermination - // At this point all objects will be found during the gcMark which - // does a complete STW mark and object scan. - setGCPhase(_GCmarktermination) - gcMark(startTime) - setGCPhase(_GCoff) // marking is done, turn off wb. - gcSweep(work.mode) - } }) _g_.m.traceback = 0 @@ -1632,8 +1687,7 @@ func gcMarkTermination(nextTriggerRatio float64) { // Bump GC cycle count and wake goroutines waiting on sweep. 
lock(&work.sweepWaiters.lock) memstats.numgc++ - injectglist(work.sweepWaiters.head.ptr()) - work.sweepWaiters.head = 0 + injectglist(&work.sweepWaiters.list) unlock(&work.sweepWaiters.lock) // Finish the current heap profiling cycle and start a new @@ -1655,6 +1709,16 @@ func gcMarkTermination(nextTriggerRatio float64) { // Free stack spans. This must be done between GC cycles. systemstack(freeStackSpans) + // Ensure all mcaches are flushed. Each P will flush its own + // mcache before allocating, but idle Ps may not. Since this + // is necessary to sweep all spans, we need to ensure all + // mcaches are flushed before we start the next GC cycle. + systemstack(func() { + forEachP(func(_p_ *p) { + _p_.mcache.prepareForSweep() + }) + }) + // Print gctrace before dropping worldsema. As soon as we drop // worldsema another cycle could start and smash the stats // we're trying to print. @@ -1855,7 +1919,7 @@ func gcBgMarkWorker(_p_ *p) { } // Go back to draining, this time // without preemption. - gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit) + gcDrain(&_p_.gcw, gcDrainFlushBgCredit) case gcMarkWorkerFractionalMode: gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit) case gcMarkWorkerIdleMode: @@ -1864,16 +1928,6 @@ func gcBgMarkWorker(_p_ *p) { casgstatus(gp, _Gwaiting, _Grunning) }) - // If we are nearing the end of mark, dispose - // of the cache promptly. We must do this - // before signaling that we're no longer - // working so that other workers can't observe - // no workers and no work while we have this - // cached, and before we compute done. - if gcBlackenPromptly { - _p_.gcw.dispose() - } - // Account for time. duration := nanotime() - startTime switch _p_.gcMarkWorkerMode { @@ -1920,7 +1974,7 @@ func gcBgMarkWorker(_p_ *p) { } } -// gcMarkWorkAvailable returns true if executing a mark worker +// gcMarkWorkAvailable reports whether executing a mark worker // on p is potentially useful. 
p may be nil, in which case it only // checks the global sources of work. func gcMarkWorkAvailable(p *p) bool { @@ -1950,50 +2004,11 @@ func gcMark(start_time int64) { } work.tstart = start_time - // Queue root marking jobs. - gcMarkRootPrepare() - - work.nwait = 0 - work.ndone = 0 - work.nproc = uint32(gcprocs()) - - if work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots == 0 { - // There's no work on the work queue and no root jobs - // that can produce work, so don't bother entering the - // getfull() barrier. - // - // This will be the situation the vast majority of the - // time after concurrent mark. However, we still need - // a fallback for STW GC and because there are some - // known races that occasionally leave work around for - // mark termination. - // - // We're still hedging our bets here: if we do - // accidentally produce some work, we'll still process - // it, just not necessarily in parallel. - // - // TODO(austin): Fix the races and and remove - // work draining from mark termination so we don't - // need the fallback path. - work.helperDrainBlock = false - } else { - work.helperDrainBlock = true - } - - if work.nproc > 1 { - noteclear(&work.alldone) - helpgc(int32(work.nproc)) - } - - gchelperstart() - - gcw := &getg().m.p.ptr().gcw - if work.helperDrainBlock { - gcDrain(gcw, gcDrainBlock) - } else { - gcDrain(gcw, gcDrainNoBlock) + // Check that there's no marking work remaining. 
+ if work.full != 0 || work.markrootNext < work.markrootJobs { + print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") + panic("non-empty mark queue after concurrent mark") } - gcw.dispose() if debug.gccheckmark > 0 { // This is expensive when there's a large number of @@ -2004,25 +2019,52 @@ func gcMark(start_time int64) { throw("work.full != 0") } - if work.nproc > 1 { - notesleep(&work.alldone) - } - - // Record that at least one root marking pass has completed. - work.markrootDone = true - - // Double-check that all gcWork caches are empty. This should - // be ensured by mark 2 before we enter mark termination. + // Clear out buffers and double-check that all gcWork caches + // are empty. This should be ensured by gcMarkDone before we + // enter mark termination. + // + // TODO: We could clear out buffers just before mark if this + // has a non-negligible impact on STW time. for _, p := range allp { + // The write barrier may have buffered pointers since + // the gcMarkDone barrier. However, since the barrier + // ensured all reachable objects were marked, all of + // these must be pointers to black objects. Hence we + // can just discard the write barrier buffer. + if debug.gccheckmark > 0 || throwOnGCWork { + // For debugging, flush the buffer and make + // sure it really was all marked. 
+ wbBufFlush1(p) + } else { + p.wbBuf.reset() + } + gcw := &p.gcw if !gcw.empty() { + printlock() + print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork) + if gcw.wbuf1 == nil { + print(" wbuf1=") + } else { + print(" wbuf1.n=", gcw.wbuf1.nobj) + } + if gcw.wbuf2 == nil { + print(" wbuf2=") + } else { + print(" wbuf2.n=", gcw.wbuf2.nobj) + } + print("\n") throw("P has cached GC work at end of mark termination") } - if gcw.scanWork != 0 || gcw.bytesMarked != 0 { - throw("P has unflushed stats at end of mark termination") - } + // There may still be cached empty buffers, which we + // need to flush since we're going to free them. Also, + // there may be non-zero stats because we allocated + // black after the gcMarkDone barrier. + gcw.dispose() } + throwOnGCWork = false + cachestats() // Update the marked heap stat. @@ -2054,6 +2096,9 @@ func gcSweep(mode gcMode) { throw("non-empty swept list") } mheap_.pagesSwept = 0 + mheap_.sweepArenas = mheap_.allArenas + mheap_.reclaimIndex = 0 + mheap_.reclaimCredit = 0 unlock(&mheap_.lock) if !_ConcurrentSweep || mode == gcForceBlockMode { @@ -2103,9 +2148,20 @@ func gcResetMarkState() { } unlock(&allglock) + // Clear page marks. This is just 1MB per 64GB of heap, so the + // time here is pretty trivial. + lock(&mheap_.lock) + arenas := mheap_.allArenas + unlock(&mheap_.lock) + for _, ai := range arenas { + ha := mheap_.arenas[ai.l1()][ai.l2()] + for i := range ha.pageMarks { + ha.pageMarks[i] = 0 + } + } + work.bytesMarked = 0 work.initialHeapLive = atomic.Load64(&memstats.heap_live) - work.markrootDone = false } // Hooks for other packages @@ -2152,48 +2208,6 @@ func clearpools() { unlock(&sched.deferlock) } -// gchelper runs mark termination tasks on Ps other than the P -// coordinating mark termination. -// -// The caller is responsible for ensuring that this has a P to run on, -// even though it's running during STW. Because of this, it's allowed -// to have write barriers. 
-// -//go:yeswritebarrierrec -func gchelper() { - _g_ := getg() - _g_.m.traceback = 2 - gchelperstart() - - // Parallel mark over GC roots and heap - if gcphase == _GCmarktermination { - gcw := &_g_.m.p.ptr().gcw - if work.helperDrainBlock { - gcDrain(gcw, gcDrainBlock) // blocks in getfull - } else { - gcDrain(gcw, gcDrainNoBlock) - } - gcw.dispose() - } - - nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone - if atomic.Xadd(&work.ndone, +1) == nproc-1 { - notewakeup(&work.alldone) - } - _g_.m.traceback = 0 -} - -func gchelperstart() { - _g_ := getg() - - if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc { - throw("gchelperstart: bad m->helpgc") - } - if _g_ != _g_.m.g0 { - throw("gchelper not running on g0 stack") - } -} - // Timing // itoaDiv formats val/(10**dec) into buf. diff --git a/src/runtime/mgclarge.go b/src/runtime/mgclarge.go index e7fa831937aca..7b01a117808fc 100644 --- a/src/runtime/mgclarge.go +++ b/src/runtime/mgclarge.go @@ -46,13 +46,57 @@ type treapNode struct { priority uint32 // random number used by treap algorithm to keep tree probabilistically balanced } -func (t *treapNode) init() { - t.right = nil - t.left = nil - t.parent = nil - t.spanKey = nil - t.npagesKey = 0 - t.priority = 0 +func (t *treapNode) pred() *treapNode { + if t.left != nil { + // If it has a left child, its predecessor will be + // its right most left (grand)child. + t = t.left + for t.right != nil { + t = t.right + } + return t + } + // If it has no left child, its predecessor will be + // the first grandparent who's right child is its + // ancestor. + // + // We compute this by walking up the treap until the + // current node's parent is its parent's right child. + // + // If we find at any point walking up the treap + // that the current node doesn't have a parent, + // we've hit the root. This means that t is already + // the left-most node in the treap and therefore + // has no predecessor. 
+ for t.parent != nil && t.parent.right != t { + if t.parent.left != t { + println("runtime: predecessor t=", t, "t.spanKey=", t.spanKey) + throw("node is not its parent's child") + } + t = t.parent + } + return t.parent +} + +func (t *treapNode) succ() *treapNode { + if t.right != nil { + // If it has a right child, its successor will be + // its left-most right (grand)child. + t = t.right + for t.left != nil { + t = t.left + } + return t + } + // See pred. + for t.parent != nil && t.parent.left != t { + if t.parent.right != t { + println("runtime: predecessor t=", t, "t.spanKey=", t.spanKey) + throw("node is not its parent's child") + } + t = t.parent + } + return t.parent } // isSpanInTreap is handy for debugging. One should hold the heap lock, usually @@ -109,6 +153,68 @@ func checkTreapNode(t *treapNode) { } } +// treapIter is a bidirectional iterator type which may be used to iterate over a +// an mTreap in-order forwards (increasing order) or backwards (decreasing order). +// Its purpose is to hide details about the treap from users when trying to iterate +// over it. +// +// To create iterators over the treap, call start or end on an mTreap. +type treapIter struct { + t *treapNode +} + +// span returns the span at the current position in the treap. +// If the treap is not valid, span will panic. +func (i *treapIter) span() *mspan { + return i.t.spanKey +} + +// valid returns whether the iterator represents a valid position +// in the mTreap. +func (i *treapIter) valid() bool { + return i.t != nil +} + +// next moves the iterator forward by one. Once the iterator +// ceases to be valid, calling next will panic. +func (i treapIter) next() treapIter { + i.t = i.t.succ() + return i +} + +// prev moves the iterator backwards by one. Once the iterator +// ceases to be valid, calling prev will panic. 
+func (i treapIter) prev() treapIter { + i.t = i.t.pred() + return i +} + +// start returns an iterator which points to the start of the treap (the +// left-most node in the treap). +func (root *mTreap) start() treapIter { + t := root.treap + if t == nil { + return treapIter{} + } + for t.left != nil { + t = t.left + } + return treapIter{t: t} +} + +// end returns an iterator which points to the end of the treap (the +// right-most node in the treap). +func (root *mTreap) end() treapIter { + t := root.treap + if t == nil { + return treapIter{} + } + for t.right != nil { + t = t.right + } + return treapIter{t: t} +} + // insert adds span to the large span treap. func (root *mTreap) insert(span *mspan) { npages := span.npages @@ -120,10 +226,10 @@ func (root *mTreap) insert(span *mspan) { pt = &t.right } else if t.npagesKey > npages { pt = &t.left - } else if uintptr(unsafe.Pointer(t.spanKey)) < uintptr(unsafe.Pointer(span)) { + } else if t.spanKey.base() < span.base() { // t.npagesKey == npages, so sort on span addresses. pt = &t.right - } else if uintptr(unsafe.Pointer(t.spanKey)) > uintptr(unsafe.Pointer(span)) { + } else if t.spanKey.base() > span.base() { pt = &t.left } else { throw("inserting span already in treap") @@ -140,7 +246,6 @@ func (root *mTreap) insert(span *mspan) { // https://faculty.washington.edu/aragon/pubs/rst89.pdf t := (*treapNode)(mheap_.treapalloc.alloc()) - t.init() t.npagesKey = span.npages t.priority = fastrand() t.spanKey = span @@ -168,7 +273,6 @@ func (root *mTreap) removeNode(t *treapNode) { if t.spanKey.npages != t.npagesKey { throw("span and treap node npages do not match") } - // Rotate t down to be leaf of tree for removal, respecting priorities. for t.right != nil || t.left != nil { if t.right == nil || t.left != nil && t.left.priority < t.right.priority { @@ -188,19 +292,16 @@ func (root *mTreap) removeNode(t *treapNode) { root.treap = nil } // Return the found treapNode's span after freeing the treapNode. 
- t.spanKey = nil - t.npagesKey = 0 mheap_.treapalloc.free(unsafe.Pointer(t)) } -// remove searches for, finds, removes from the treap, and returns the smallest -// span that can hold npages. If no span has at least npages return nil. +// find searches for, finds, and returns the treap node containing the +// smallest span that can hold npages. If no span has at least npages +// it returns nil. // This is slightly more complicated than a simple binary tree search // since if an exact match is not found the next larger node is // returned. -// If the last node inspected > npagesKey not holding -// a left node (a smaller npages) is the "best fit" node. -func (root *mTreap) remove(npages uintptr) *mspan { +func (root *mTreap) find(npages uintptr) *treapNode { t := root.treap for t != nil { if t.spanKey == nil { @@ -211,9 +312,7 @@ func (root *mTreap) remove(npages uintptr) *mspan { } else if t.left != nil && t.left.npagesKey >= npages { t = t.left } else { - result := t.spanKey - root.removeNode(t) - return result + return t } } return nil @@ -231,24 +330,21 @@ func (root *mTreap) removeSpan(span *mspan) { t = t.right } else if t.npagesKey > npages { t = t.left - } else if uintptr(unsafe.Pointer(t.spanKey)) < uintptr(unsafe.Pointer(span)) { + } else if t.spanKey.base() < span.base() { t = t.right - } else if uintptr(unsafe.Pointer(t.spanKey)) > uintptr(unsafe.Pointer(span)) { + } else if t.spanKey.base() > span.base() { t = t.left } } root.removeNode(t) } -// scavengetreap visits each node in the treap and scavenges the -// treapNode's span. -func scavengetreap(treap *treapNode, now, limit uint64) uintptr { - if treap == nil { - return 0 - } - return scavengeTreapNode(treap, now, limit) + - scavengetreap(treap.left, now, limit) + - scavengetreap(treap.right, now, limit) +// erase removes the element referred to by the current position of the +// iterator. This operation consumes the given iterator, so it should no +// longer be used. 
It is up to the caller to get the next or previous +// iterator before calling erase, if need be. +func (root *mTreap) erase(i treapIter) { + root.removeNode(i.t) } // rotateLeft rotates the tree rooted at node x. diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index e8cfdce4fc1b3..86416caab5288 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -52,11 +52,7 @@ const ( // //go:nowritebarrier func gcMarkRootPrepare() { - if gcphase == _GCmarktermination { - work.nFlushCacheRoots = int(gomaxprocs) - } else { - work.nFlushCacheRoots = 0 - } + work.nFlushCacheRoots = 0 // Compute how many data and BSS root blocks there are. nBlocks := func(bytes uintptr) int { @@ -66,63 +62,42 @@ func gcMarkRootPrepare() { work.nDataRoots = 0 work.nBSSRoots = 0 - // Only scan globals once per cycle; preferably concurrently. - if !work.markrootDone { - for _, datap := range activeModules() { - nDataRoots := nBlocks(datap.edata - datap.data) - if nDataRoots > work.nDataRoots { - work.nDataRoots = nDataRoots - } - } - - for _, datap := range activeModules() { - nBSSRoots := nBlocks(datap.ebss - datap.bss) - if nBSSRoots > work.nBSSRoots { - work.nBSSRoots = nBSSRoots - } + // Scan globals. + for _, datap := range activeModules() { + nDataRoots := nBlocks(datap.edata - datap.data) + if nDataRoots > work.nDataRoots { + work.nDataRoots = nDataRoots } } - if !work.markrootDone { - // On the first markroot, we need to scan span roots. - // In concurrent GC, this happens during concurrent - // mark and we depend on addfinalizer to ensure the - // above invariants for objects that get finalizers - // after concurrent mark. In STW GC, this will happen - // during mark termination. - // - // We're only interested in scanning the in-use spans, - // which will all be swept at this point. More spans - // may be added to this list during concurrent GC, but - // we only care about spans that were allocated before - // this mark phase. 
- work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() - - // On the first markroot, we need to scan all Gs. Gs - // may be created after this point, but it's okay that - // we ignore them because they begin life without any - // roots, so there's nothing to scan, and any roots - // they create during the concurrent phase will be - // scanned during mark termination. During mark - // termination, allglen isn't changing, so we'll scan - // all Gs. - work.nStackRoots = int(atomic.Loaduintptr(&allglen)) - } else { - // We've already scanned span roots and kept the scan - // up-to-date during concurrent mark. - work.nSpanRoots = 0 - - // The hybrid barrier ensures that stacks can't - // contain pointers to unmarked objects, so on the - // second markroot, there's no need to scan stacks. - work.nStackRoots = 0 - - if debug.gcrescanstacks > 0 { - // Scan stacks anyway for debugging. - work.nStackRoots = int(atomic.Loaduintptr(&allglen)) + for _, datap := range activeModules() { + nBSSRoots := nBlocks(datap.ebss - datap.bss) + if nBSSRoots > work.nBSSRoots { + work.nBSSRoots = nBSSRoots } } + // Scan span roots for finalizer specials. + // + // We depend on addfinalizer to mark objects that get + // finalizers after root marking. + // + // We're only interested in scanning the in-use spans, + // which will all be swept at this point. More spans + // may be added to this list during concurrent GC, but + // we only care about spans that were allocated before + // this mark phase. + work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() + + // Scan stacks. + // + // Gs may be created after this point, but it's okay that we + // ignore them because they begin life without any roots, so + // there's nothing to scan, and any roots they create during + // the concurrent phase will be scanned during mark + // termination. 
+ work.nStackRoots = int(atomic.Loaduintptr(&allglen)) + work.markrootNext = 0 work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) } @@ -138,19 +113,10 @@ func gcMarkRootCheck() { lock(&allglock) // Check that stacks have been scanned. var gp *g - if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 { - for i := 0; i < len(allgs); i++ { - gp = allgs[i] - if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead { - goto fail - } - } - } else { - for i := 0; i < work.nStackRoots; i++ { - gp = allgs[i] - if !gp.gcscandone { - goto fail - } + for i := 0; i < work.nStackRoots; i++ { + gp = allgs[i] + if !gp.gcscandone { + goto fail } } unlock(&allglock) @@ -201,27 +167,18 @@ func markroot(gcw *gcWork, i uint32) { } case i == fixedRootFinalizers: - // Only do this once per GC cycle since we don't call - // queuefinalizer during marking. - if work.markrootDone { - break - } for fb := allfin; fb != nil; fb = fb.alllink { cnt := uintptr(atomic.Load(&fb.cnt)) - scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil) } case i == fixedRootFreeGStacks: - // Only do this once per GC cycle; preferably - // concurrently. - if !work.markrootDone { - // Switch to the system stack so we can call - // stackfree. - systemstack(markrootFreeGStacks) - } + // Switch to the system stack so we can call + // stackfree. + systemstack(markrootFreeGStacks) case baseSpans <= i && i < baseStacks: - // mark MSpan.specials + // mark mspan.specials markrootSpans(gcw, int(i-baseSpans)) default: @@ -291,7 +248,7 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) { } // Scan this shard. - scanblock(b, n, ptrmask, gcw) + scanblock(b, n, ptrmask, gcw, nil) } // markrootFreeGStacks frees stacks of dead Gs. 
@@ -302,26 +259,27 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) { //TODO go:nowritebarrier func markrootFreeGStacks() { // Take list of dead Gs with stacks. - lock(&sched.gflock) - list := sched.gfreeStack - sched.gfreeStack = nil - unlock(&sched.gflock) - if list == nil { + lock(&sched.gFree.lock) + list := sched.gFree.stack + sched.gFree.stack = gList{} + unlock(&sched.gFree.lock) + if list.empty() { return } // Free stacks. - tail := list - for gp := list; gp != nil; gp = gp.schedlink.ptr() { + q := gQueue{list.head, list.head} + for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { shrinkstack(gp) - tail = gp + // Manipulate the queue directly since the Gs are + // already all linked the right way. + q.tail.set(gp) } // Put Gs back on the free list. - lock(&sched.gflock) - tail.schedlink.set(sched.gfreeNoStack) - sched.gfreeNoStack = list - unlock(&sched.gflock) + lock(&sched.gFree.lock) + sched.gFree.noStack.pushAll(q) + unlock(&sched.gFree.lock) } // markrootSpans marks roots for one shard of work.spans. @@ -341,10 +299,6 @@ func markrootSpans(gcw *gcWork, shard int) { // TODO(austin): There are several ideas for making this more // efficient in issue #11485. - if work.markrootDone { - throw("markrootSpans during second markroot") - } - sg := mheap_.sweepgen spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard) // Note that work.spans may not include spans that were @@ -357,7 +311,8 @@ func markrootSpans(gcw *gcWork, shard int) { if s.state != mSpanInUse { continue } - if !useCheckmark && s.sweepgen != sg { + // Check that this span was swept (it may be cached or uncached). + if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) { // sweepgen was updated (+2) during non-checkmark GC pass print("sweep ", s.sweepgen, " ", sg, "\n") throw("gc: unswept span") @@ -394,7 +349,7 @@ func markrootSpans(gcw *gcWork, shard int) { scanobject(p, gcw) // The special itself is a root. 
- scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil) } unlock(&s.speciallock) @@ -555,11 +510,6 @@ func gcAssistAlloc1(gp *g, scanWork int64) { // will be more cache friendly. gcw := &getg().m.p.ptr().gcw workDone := gcDrainN(gcw, scanWork) - // If we are near the end of the mark phase - // dispose of the gcw. - if gcBlackenPromptly { - gcw.dispose() - } casgstatus(gp, _Gwaiting, _Grunning) @@ -576,8 +526,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) { incnwait := atomic.Xadd(&work.nwait, +1) if incnwait > work.nproc { println("runtime: work.nwait=", incnwait, - "work.nproc=", work.nproc, - "gcBlackenPromptly=", gcBlackenPromptly) + "work.nproc=", work.nproc) throw("work.nwait > work.nproc") } @@ -602,15 +551,14 @@ func gcAssistAlloc1(gp *g, scanWork int64) { // new assists from going to sleep after this point. func gcWakeAllAssists() { lock(&work.assistQueue.lock) - injectglist(work.assistQueue.head.ptr()) - work.assistQueue.head.set(nil) - work.assistQueue.tail.set(nil) + list := work.assistQueue.q.popList() + injectglist(&list) unlock(&work.assistQueue.lock) } // gcParkAssist puts the current goroutine on the assist queue and parks. // -// gcParkAssist returns whether the assist is now satisfied. If it +// gcParkAssist reports whether the assist is now satisfied. If it // returns false, the caller must retry the assist. // //go:nowritebarrier @@ -625,24 +573,17 @@ func gcParkAssist() bool { } gp := getg() - oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail - if oldHead == 0 { - work.assistQueue.head.set(gp) - } else { - oldTail.ptr().schedlink.set(gp) - } - work.assistQueue.tail.set(gp) - gp.schedlink.set(nil) + oldList := work.assistQueue.q + work.assistQueue.q.pushBack(gp) // Recheck for background credit now that this G is in // the queue, but can still back out. 
This avoids a // race in case background marking has flushed more // credit since we checked above. if atomic.Loadint64(&gcController.bgScanCredit) > 0 { - work.assistQueue.head = oldHead - work.assistQueue.tail = oldTail - if oldTail != 0 { - oldTail.ptr().schedlink.set(nil) + work.assistQueue.q = oldList + if oldList.tail != 0 { + oldList.tail.ptr().schedlink.set(nil) } unlock(&work.assistQueue.lock) return false @@ -663,7 +604,7 @@ func gcParkAssist() bool { // //go:nowritebarrierrec func gcFlushBgCredit(scanWork int64) { - if work.assistQueue.head == 0 { + if work.assistQueue.q.empty() { // Fast path; there are no blocked assists. There's a // small window here where an assist may add itself to // the blocked queue and park. If that happens, we'll @@ -675,23 +616,21 @@ func gcFlushBgCredit(scanWork int64) { scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork) lock(&work.assistQueue.lock) - gp := work.assistQueue.head.ptr() - for gp != nil && scanBytes > 0 { + for !work.assistQueue.q.empty() && scanBytes > 0 { + gp := work.assistQueue.q.pop() // Note that gp.gcAssistBytes is negative because gp // is in debt. Think carefully about the signs below. if scanBytes+gp.gcAssistBytes >= 0 { // Satisfy this entire assist debt. scanBytes += gp.gcAssistBytes gp.gcAssistBytes = 0 - xgp := gp - gp = gp.schedlink.ptr() - // It's important that we *not* put xgp in + // It's important that we *not* put gp in // runnext. Otherwise, it's possible for user // code to exploit the GC worker's high // scheduler priority to get itself always run // before other goroutines and always in the // fresh quantum started by GC. - ready(xgp, 0, false) + ready(gp, 0, false) } else { // Partially satisfy this assist. gp.gcAssistBytes += scanBytes @@ -700,23 +639,10 @@ func gcFlushBgCredit(scanWork int64) { // back of the queue so that large assists // can't clog up the assist queue and // substantially delay small assists. 
- xgp := gp - gp = gp.schedlink.ptr() - if gp == nil { - // gp is the only assist in the queue. - gp = xgp - } else { - xgp.schedlink = 0 - work.assistQueue.tail.ptr().schedlink.set(xgp) - work.assistQueue.tail.set(xgp) - } + work.assistQueue.q.pushBack(gp) break } } - work.assistQueue.head.set(gp) - if gp == nil { - work.assistQueue.tail.set(nil) - } if scanBytes > 0 { // Convert from scan bytes back to work. @@ -759,53 +685,140 @@ func scanstack(gp *g, gcw *gcWork) { if gp == getg() { throw("can't scan our own stack") } - mp := gp.m - if mp != nil && mp.helpgc != 0 { - throw("can't scan gchelper stack") - } - // Shrink the stack if not much of it is being used. During - // concurrent GC, we can do this during concurrent mark. - if !work.markrootDone { - shrinkstack(gp) + // Shrink the stack if not much of it is being used. + shrinkstack(gp) + + var state stackScanState + state.stack = gp.stack + + if stackTraceDebug { + println("stack trace goroutine", gp.goid) } // Scan the saved context register. This is effectively a live // register that gets moved back and forth between the // register and sched.ctxt without a write barrier. if gp.sched.ctxt != nil { - scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state) } - // Scan the stack. - var cache pcvalueCache + // Scan the stack. Accumulate a list of stack objects. scanframe := func(frame *stkframe, unused unsafe.Pointer) bool { - scanframeworker(frame, &cache, gcw) + scanframeworker(frame, &state, gcw) return true } gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0) tracebackdefers(gp, scanframe, nil) + + // Find and scan all reachable stack objects. + state.buildIndex() + for { + p := state.getPtr() + if p == 0 { + break + } + obj := state.findObject(p) + if obj == nil { + continue + } + t := obj.typ + if t == nil { + // We've already scanned this object. 
+ continue + } + obj.setType(nil) // Don't scan it again. + if stackTraceDebug { + println(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string()) + } + gcdata := t.gcdata + var s *mspan + if t.kind&kindGCProg != 0 { + // This path is pretty unlikely, an object large enough + // to have a GC program allocated on the stack. + // We need some space to unpack the program into a straight + // bitmask, which we allocate/free here. + // TODO: it would be nice if there were a way to run a GC + // program without having to store all its bits. We'd have + // to change from a Lempel-Ziv style program to something else. + // Or we can forbid putting objects on stacks if they require + // a gc program (see issue 27447). + s = materializeGCProg(t.ptrdata, gcdata) + gcdata = (*byte)(unsafe.Pointer(s.startAddr)) + } + + scanblock(state.stack.lo+uintptr(obj.off), t.ptrdata, gcdata, gcw, &state) + + if s != nil { + dematerializeGCProg(s) + } + } + + // Deallocate object buffers. + // (Pointer buffers were all deallocated in the loop above.) + for state.head != nil { + x := state.head + state.head = x.next + if stackTraceDebug { + for _, obj := range x.obj[:x.nobj] { + if obj.typ == nil { // reachable + continue + } + println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string()) + // Note: not necessarily really dead - only reachable-from-ptr dead. + } + } + x.nobj = 0 + putempty((*workbuf)(unsafe.Pointer(x))) + } + if state.buf != nil || state.freeBuf != nil { + throw("remaining pointer buffers") + } + gp.gcscanvalid = true } // Scan a stack frame: local variables and function arguments/results. 
//go:nowritebarrier -func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) { +func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) { if _DebugGC > 1 && frame.continpc != 0 { print("scanframe ", funcname(frame.fn), "\n") } - locals, args := getStackMap(frame, cache, false) + locals, args, objs := getStackMap(frame, &state.cache, false) // Scan local variables if stack frame has been allocated. if locals.n > 0 { size := uintptr(locals.n) * sys.PtrSize - scanblock(frame.varp-size, size, locals.bytedata, gcw) + scanblock(frame.varp-size, size, locals.bytedata, gcw, state) } // Scan arguments. if args.n > 0 { - scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw) + scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state) + } + + // Add all stack objects to the stack object list. + if frame.varp != 0 { + // varp is 0 for defers, where there are no locals. + // In that case, there can't be a pointer to its args, either. + // (And all args would be scanned above anyway.) + for _, obj := range objs { + off := obj.off + base := frame.varp // locals base pointer + if off >= 0 { + base = frame.argp // arguments and return values base pointer + } + ptr := base + uintptr(off) + if ptr < frame.sp { + // object hasn't been allocated in the frame yet. + continue + } + if stackTraceDebug { + println("stkobj at", hex(ptr), "of type", obj.typ.string()) + } + state.addObject(ptr, obj.typ) + } } } @@ -813,34 +826,26 @@ type gcDrainFlags int const ( gcDrainUntilPreempt gcDrainFlags = 1 << iota - gcDrainNoBlock gcDrainFlushBgCredit gcDrainIdle gcDrainFractional - - // gcDrainBlock means neither gcDrainUntilPreempt or - // gcDrainNoBlock. It is the default, but callers should use - // the constant for documentation purposes. - gcDrainBlock gcDrainFlags = 0 ) // gcDrain scans roots and objects in work buffers, blackening grey -// objects until all roots and work buffers have been drained. 
+// objects until it is unable to get more work. It may return before +// GC is done; it's the caller's responsibility to balance work from +// other Ps. // // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt -// is set. This implies gcDrainNoBlock. +// is set. // // If flags&gcDrainIdle != 0, gcDrain returns when there is other work -// to do. This implies gcDrainNoBlock. +// to do. // // If flags&gcDrainFractional != 0, gcDrain self-preempts when // pollFractionalWorkerExit() returns true. This implies // gcDrainNoBlock. // -// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is -// unable to get more work. Otherwise, it will block until all -// blocking calls are blocked in gcDrain. -// // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work // credit to gcController.bgScanCredit every gcCreditSlack units of // scan work. @@ -853,7 +858,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { gp := getg().m.curg preemptible := flags&gcDrainUntilPreempt != 0 - blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0 flushBgCredit := flags&gcDrainFlushBgCredit != 0 idle := flags&gcDrainIdle != 0 @@ -897,17 +901,19 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { gcw.balance() } - var b uintptr - if blocking { - b = gcw.get() - } else { - b = gcw.tryGetFast() + b := gcw.tryGetFast() + if b == 0 { + b = gcw.tryGet() if b == 0 { + // Flush the write barrier + // buffer; this may create + // more work. + wbBufFlush(nil, 0) b = gcw.tryGet() } } if b == 0 { - // work barrier reached or tryGet failed. + // Unable to get work. break } scanobject(b, gcw) @@ -933,10 +939,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { } } - // In blocking mode, write barriers are not allowed after this - // point because we must preserve the condition that the work - // buffers are empty. - done: // Flush remaining scan work credit. 
if gcw.scanWork > 0 { @@ -985,6 +987,12 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { b := gcw.tryGetFast() if b == 0 { b = gcw.tryGet() + if b == 0 { + // Flush the write barrier buffer; + // this may create more work. + wbBufFlush(nil, 0) + b = gcw.tryGet() + } } if b == 0 { @@ -1025,8 +1033,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { // This is used to scan non-heap roots, so it does not update // gcw.bytesMarked or gcw.scanWork. // +// If stk != nil, possible stack pointers are also reported to stk.putPtr. //go:nowritebarrier -func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) { +func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) { // Use local copies of original parameters, so that a stack trace // due to one of the throws below shows the original block // base and extent. @@ -1043,10 +1052,12 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) { for j := 0; j < 8 && i < n; j++ { if bits&1 != 0 { // Same work as in scanobject; see comments there. - obj := *(*uintptr)(unsafe.Pointer(b + i)) - if obj != 0 { - if obj, span, objIndex := findObject(obj, b, i); obj != 0 { + p := *(*uintptr)(unsafe.Pointer(b + i)) + if p != 0 { + if obj, span, objIndex := findObject(p, b, i); obj != 0 { greyobject(obj, b, i, span, gcw, objIndex) + } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi { + stk.putPtr(p) } } } @@ -1164,11 +1175,6 @@ func shade(b uintptr) { if obj, span, objIndex := findObject(b, 0, 0); obj != 0 { gcw := &getg().m.p.ptr().gcw greyobject(obj, 0, 0, span, gcw, objIndex) - if gcphase == _GCmarktermination || gcBlackenPromptly { - // Ps aren't allowed to cache work during mark - // termination. - gcw.dispose() - } } } @@ -1222,8 +1228,14 @@ func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintp if mbits.isMarked() { return } - // mbits.setMarked() // Avoid extra call overhead with manual inlining. 
- atomic.Or8(mbits.bytep, mbits.mask) + mbits.setMarked() + + // Mark span. + arena, pageIdx, pageMask := pageIndexOf(span.base()) + if arena.pageMarks[pageIdx]&pageMask == 0 { + atomic.Or8(&arena.pageMarks[pageIdx], pageMask) + } + // If this is a noscan object, fast-track it to black // instead of greying it. if span.spanclass.noscan() { @@ -1261,7 +1273,7 @@ func gcDumpObject(label string, obj, off uintptr) { skipped := false size := s.elemsize - if s.state == _MSpanManual && size == 0 { + if s.state == mSpanManual && size == 0 { // We're printing something from a stack frame. We // don't know how big it is, so just show up to an // including off. @@ -1298,18 +1310,13 @@ func gcDumpObject(label string, obj, off uintptr) { //go:nowritebarrier //go:nosplit func gcmarknewobject(obj, size, scanSize uintptr) { - if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen. + if useCheckmark { // The world should be stopped so this should not happen. throw("gcmarknewobject called while doing checkmark") } markBitsForAddr(obj).setMarked() gcw := &getg().m.p.ptr().gcw gcw.bytesMarked += uint64(size) gcw.scanWork += int64(scanSize) - if gcBlackenPromptly { - // There shouldn't be anything in the work queue, but - // we still need to flush stats. - gcw.dispose() - } } // gcMarkTinyAllocs greys all active tiny alloc blocks. 
@@ -1324,9 +1331,6 @@ func gcMarkTinyAllocs() { _, span, objIndex := findObject(c.tiny, 0, 0) gcw := &p.gcw greyobject(c.tiny, 0, 0, span, gcw, objIndex) - if gcBlackenPromptly { - gcw.dispose() - } } } @@ -1357,7 +1361,7 @@ var useCheckmark = false func initCheckmarks() { useCheckmark = true for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout()) } } @@ -1366,7 +1370,7 @@ func initCheckmarks() { func clearCheckmarks() { useCheckmark = false for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout()) } } diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go new file mode 100644 index 0000000000000..86e60d43815ef --- /dev/null +++ b/src/runtime/mgcstack.go @@ -0,0 +1,330 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: stack objects and stack tracing +// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing +// Also see issue 22350. + +// Stack tracing solves the problem of determining which parts of the +// stack are live and should be scanned. It runs as part of scanning +// a single goroutine stack. +// +// Normally determining which parts of the stack are live is easy to +// do statically, as user code has explicit references (reads and +// writes) to stack variables. The compiler can do a simple dataflow +// analysis to determine liveness of stack variables at every point in +// the code. See cmd/compile/internal/gc/plive.go for that analysis. +// +// However, when we take the address of a stack variable, determining +// whether that variable is still live is less clear. 
We can still +// look for static accesses, but accesses through a pointer to the +// variable are difficult in general to track statically. That pointer +// can be passed among functions on the stack, conditionally retained, +// etc. +// +// Instead, we will track pointers to stack variables dynamically. +// All pointers to stack-allocated variables will themselves be on the +// stack somewhere (or in associated locations, like defer records), so +// we can find them all efficiently. +// +// Stack tracing is organized as a mini garbage collection tracing +// pass. The objects in this garbage collection are all the variables +// on the stack whose address is taken, and which themselves contain a +// pointer. We call these variables "stack objects". +// +// We begin by determining all the stack objects on the stack and all +// the statically live pointers that may point into the stack. We then +// process each pointer to see if it points to a stack object. If it +// does, we scan that stack object. It may contain pointers into the +// heap, in which case those pointers are passed to the main garbage +// collection. It may also contain pointers into the stack, in which +// case we add them to our set of stack pointers. +// +// Once we're done processing all the pointers (including the ones we +// added during processing), we've found all the stack objects that +// are live. Any dead stack objects are not scanned and their contents +// will not keep heap objects live. Unlike the main garbage +// collection, we can't sweep the dead stack objects; they live on in +// a moribund state until the stack frame that contains them is +// popped. 
+// +// A stack can look like this: +// +// +----------+ +// | foo() | +// | +------+ | +// | | A | | <---\ +// | +------+ | | +// | | | +// | +------+ | | +// | | B | | | +// | +------+ | | +// | | | +// +----------+ | +// | bar() | | +// | +------+ | | +// | | C | | <-\ | +// | +----|-+ | | | +// | | | | | +// | +----v-+ | | | +// | | D ---------/ +// | +------+ | | +// | | | +// +----------+ | +// | baz() | | +// | +------+ | | +// | | E -------/ +// | +------+ | +// | ^ | +// | F: --/ | +// | | +// +----------+ +// +// foo() calls bar() calls baz(). Each has a frame on the stack. +// foo() has stack objects A and B. +// bar() has stack objects C and D, with C pointing to D and D pointing to A. +// baz() has a stack object E pointing to C, and a local variable F pointing to E. +// +// Starting from the pointer in local variable F, we will eventually +// scan all of E, C, D, and A (in that order). B is never scanned +// because there is no live pointer to it. If B is also statically +// dead (meaning that foo() never accesses B again after it calls +// bar()), then B's pointers into the heap are not considered live. + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +const stackTraceDebug = false + +// Buffer for pointers found during stack tracing. +// Must be smaller than or equal to workbuf. +// +//go:notinheap +type stackWorkBuf struct { + stackWorkBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr +} + +// Header declaration must come after the buf declaration above, because of issue #14620. +// +//go:notinheap +type stackWorkBufHdr struct { + workbufhdr + next *stackWorkBuf // linked list of workbufs + // Note: we could theoretically repurpose lfnode.next as this next pointer. + // It would save 1 word, but that probably isn't worth busting open + // the lfnode API. +} + +// Buffer for stack objects found on a goroutine stack. +// Must be smaller than or equal to workbuf. 
+// +//go:notinheap +type stackObjectBuf struct { + stackObjectBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject +} + +//go:notinheap +type stackObjectBufHdr struct { + workbufhdr + next *stackObjectBuf +} + +func init() { + if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackWorkBuf too big") + } + if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackObjectBuf too big") + } +} + +// A stackObject represents a variable on the stack that has had +// its address taken. +// +//go:notinheap +type stackObject struct { + off uint32 // offset above stack.lo + size uint32 // size of object + typ *_type // type info (for ptr/nonptr bits). nil if object has been scanned. + left *stackObject // objects with lower addresses + right *stackObject // objects with higher addresses +} + +// obj.typ = typ, but with no write barrier. +//go:nowritebarrier +func (obj *stackObject) setType(typ *_type) { + // Types of stack objects are always in read-only memory, not the heap. + // So not using a write barrier is ok. + *(*uintptr)(unsafe.Pointer(&obj.typ)) = uintptr(unsafe.Pointer(typ)) +} + +// A stackScanState keeps track of the state used during the GC walk +// of a goroutine. +// +//go:notinheap +type stackScanState struct { + cache pcvalueCache + + // stack limits + stack stack + + // buf contains the set of possible pointers to stack objects. + // Organized as a LIFO linked list of buffers. + // All buffers except possibly the head buffer are full. + buf *stackWorkBuf + freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis + + // list of stack objects + // Objects are in increasing address order. + head *stackObjectBuf + tail *stackObjectBuf + nobjs int + + // root of binary tree for fast object lookup by address + // Initialized by buildIndex. + root *stackObject +} + +// Add p as a potential pointer to a stack object. +// p must be a stack address. 
+func (s *stackScanState) putPtr(p uintptr) { + if p < s.stack.lo || p >= s.stack.hi { + throw("address not a stack address") + } + buf := s.buf + if buf == nil { + // Initial setup. + buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) + buf.nobj = 0 + buf.next = nil + s.buf = buf + } else if buf.nobj == len(buf.obj) { + if s.freeBuf != nil { + buf = s.freeBuf + s.freeBuf = nil + } else { + buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) + } + buf.nobj = 0 + buf.next = s.buf + s.buf = buf + } + buf.obj[buf.nobj] = p + buf.nobj++ +} + +// Remove and return a potential pointer to a stack object. +// Returns 0 if there are no more pointers available. +func (s *stackScanState) getPtr() uintptr { + buf := s.buf + if buf == nil { + // Never had any data. + return 0 + } + if buf.nobj == 0 { + if s.freeBuf != nil { + // Free old freeBuf. + putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) + } + // Move buf to the freeBuf. + s.freeBuf = buf + buf = buf.next + s.buf = buf + if buf == nil { + // No more data. + putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) + s.freeBuf = nil + return 0 + } + } + buf.nobj-- + return buf.obj[buf.nobj] +} + +// addObject adds a stack object at addr of type typ to the set of stack objects. +func (s *stackScanState) addObject(addr uintptr, typ *_type) { + x := s.tail + if x == nil { + // initial setup + x = (*stackObjectBuf)(unsafe.Pointer(getempty())) + x.next = nil + s.head = x + s.tail = x + } + if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size { + throw("objects added out of order or overlapping") + } + if x.nobj == len(x.obj) { + // full buffer - allocate a new buffer, add to end of linked list + y := (*stackObjectBuf)(unsafe.Pointer(getempty())) + y.next = nil + x.next = y + s.tail = y + x = y + } + obj := &x.obj[x.nobj] + x.nobj++ + obj.off = uint32(addr - s.stack.lo) + obj.size = uint32(typ.size) + obj.setType(typ) + // obj.left and obj.right will be initalized by buildIndex before use. 
+ s.nobjs++ +} + +// buildIndex initializes s.root to a binary search tree. +// It should be called after all addObject calls but before +// any call of findObject. +func (s *stackScanState) buildIndex() { + s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs) +} + +// Build a binary search tree with the n objects in the list +// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ... +// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx]. +// (The first object that was not included in the binary search tree.) +// If n == 0, returns nil, x. +func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) { + if n == 0 { + return nil, x, idx + } + var left, right *stackObject + left, x, idx = binarySearchTree(x, idx, n/2) + root = &x.obj[idx] + idx++ + if idx == len(x.obj) { + x = x.next + idx = 0 + } + right, x, idx = binarySearchTree(x, idx, n-n/2-1) + root.left = left + root.right = right + return root, x, idx +} + +// findObject returns the stack object containing address a, if any. +// Must have called buildIndex previously. +func (s *stackScanState) findObject(a uintptr) *stackObject { + off := uint32(a - s.stack.lo) + obj := s.root + for { + if obj == nil { + return nil + } + if off < obj.off { + obj = obj.left + continue + } + if off >= obj.off+obj.size { + obj = obj.right + continue + } + return obj + } +} diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index c7baa455fe19e..edb9fcac09026 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -4,6 +4,24 @@ // Garbage collector: sweeping +// The sweeper consists of two different algorithms: +// +// * The object reclaimer finds and frees unmarked slots in spans. It +// can free a whole span if none of the objects are marked, but that +// isn't its goal. 
This can be driven either synchronously by +// mcentral.cacheSpan for mcentral spans, or asynchronously by +// sweepone from the list of all in-use spans in mheap_.sweepSpans. +// +// * The span reclaimer looks for spans that contain no marked objects +// and frees whole spans. This is a separate algorithm because +// freeing whole spans is the hardest task for the object reclaimer, +// but is critical when allocating new spans. The entry point for +// this is mheap_.reclaim and it's driven by a sequential scan of +// the page marks bitmap in the heap arenas. +// +// Both algorithms ultimately call mspan.sweep, which sweeps a single +// heap span. + package runtime import ( @@ -52,7 +70,7 @@ func bgsweep(c chan int) { goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1) for { - for gosweepone() != ^uintptr(0) { + for sweepone() != ^uintptr(0) { sweep.nbgsweep++ Gosched() } @@ -60,7 +78,7 @@ func bgsweep(c chan int) { Gosched() } lock(&sweep.lock) - if !gosweepdone() { + if !isSweepDone() { // This can happen if a GC runs between // gosweepone returning ^0 above // and the lock being acquired. @@ -72,9 +90,8 @@ func bgsweep(c chan int) { } } -// sweeps one span -// returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep -//go:nowritebarrier +// sweepone sweeps some unswept heap span and returns the number of pages returned +// to the heap, or ^uintptr(0) if there was nothing to sweep. func sweepone() uintptr { _g_ := getg() sweepRatio := mheap_.sweepPagesPerByte // For debugging @@ -88,10 +105,11 @@ func sweepone() uintptr { } atomic.Xadd(&mheap_.sweepers, +1) - npages := ^uintptr(0) + // Find a span to sweep. 
+ var s *mspan sg := mheap_.sweepgen for { - s := mheap_.sweepSpans[1-sg/2%2].pop() + s = mheap_.sweepSpans[1-sg/2%2].pop() if s == nil { atomic.Store(&mheap_.sweepdone, 1) break @@ -100,23 +118,32 @@ func sweepone() uintptr { // This can happen if direct sweeping already // swept this span, but in that case the sweep // generation should always be up-to-date. - if s.sweepgen != sg { + if !(s.sweepgen == sg || s.sweepgen == sg+3) { print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n") throw("non in-use span in unswept list") } continue } - if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) { - continue + if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) { + break } + } + + // Sweep the span we found. + npages := ^uintptr(0) + if s != nil { npages = s.npages - if !s.sweep(false) { + if s.sweep(false) { + // Whole span was freed. Count it toward the + // page reclaimer credit since these pages can + // now be used for span allocation. + atomic.Xadduintptr(&mheap_.reclaimCredit, npages) + } else { // Span is still in-use, so this returned no // pages to the heap and the span needs to // move to the swept in-use list. npages = 0 } - break } // Decrement the number of active sweepers and if this is the @@ -130,17 +157,13 @@ func sweepone() uintptr { return npages } -//go:nowritebarrier -func gosweepone() uintptr { - var ret uintptr - systemstack(func() { - ret = sweepone() - }) - return ret -} - -//go:nowritebarrier -func gosweepdone() bool { +// isSweepDone reports whether all spans are swept or currently being swept. +// +// Note that this condition may transition from false to true at any +// time as the sweeper runs. It may transition from true to false if a +// GC runs; to prevent that the caller must be non-preemptible or must +// somehow block GC progress. 
+func isSweepDone() bool { return mheap_.sweepdone != 0 } @@ -152,20 +175,25 @@ func (s *mspan) ensureSwept() { // (if GC is triggered on another goroutine). _g_ := getg() if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 { - throw("MSpan_EnsureSwept: m is not locked") + throw("mspan.ensureSwept: m is not locked") } sg := mheap_.sweepgen - if atomic.Load(&s.sweepgen) == sg { + spangen := atomic.Load(&s.sweepgen) + if spangen == sg || spangen == sg+3 { return } - // The caller must be sure that the span is a MSpanInUse span. + // The caller must be sure that the span is a mSpanInUse span. if atomic.Cas(&s.sweepgen, sg-2, sg-1) { s.sweep(false) return } // unfortunate condition, and we don't have efficient means to wait - for atomic.Load(&s.sweepgen) != sg { + for { + spangen := atomic.Load(&s.sweepgen) + if spangen == sg || spangen == sg+3 { + break + } osyield() } } @@ -173,7 +201,7 @@ func (s *mspan) ensureSwept() { // Sweep frees or collects finalizers for blocks not marked in the mark phase. // It clears the mark bits in preparation for the next GC round. // Returns true if the span was returned to heap. -// If preserve=true, don't return it to heap nor relink in MCentral lists; +// If preserve=true, don't return it to heap nor relink in mcentral lists; // caller takes care of it. //TODO go:nowritebarrier func (s *mspan) sweep(preserve bool) bool { @@ -181,12 +209,12 @@ func (s *mspan) sweep(preserve bool) bool { // GC must not start while we are in the middle of this function. 
_g_ := getg() if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 { - throw("MSpan_Sweep: m is not locked") + throw("mspan.sweep: m is not locked") } sweepgen := mheap_.sweepgen if s.state != mSpanInUse || s.sweepgen != sweepgen-1 { - print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") - throw("MSpan_Sweep: bad span state") + print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") + throw("mspan.sweep: bad span state") } if trace.enabled { @@ -322,8 +350,8 @@ func (s *mspan) sweep(preserve bool) bool { // The span must be in our exclusive ownership until we update sweepgen, // check for potential races. if s.state != mSpanInUse || s.sweepgen != sweepgen-1 { - print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") - throw("MSpan_Sweep: bad span state after sweep") + print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n") + throw("mspan.sweep: bad span state after sweep") } // Serialization point. // At this point the mark bits are cleared and allocation ready @@ -334,29 +362,29 @@ func (s *mspan) sweep(preserve bool) bool { if nfreed > 0 && spc.sizeclass() != 0 { c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed) res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty) - // MCentral_FreeSpan updates sweepgen + // mcentral.freeSpan updates sweepgen } else if freeToHeap { // Free large span to heap // NOTE(rsc,dvyukov): The original implementation of efence - // in CL 22060046 used SysFree instead of SysFault, so that + // in CL 22060046 used sysFree instead of sysFault, so that // the operating system would eventually give the memory // back to us again, so that an efence program could run // longer without running out of memory. 
Unfortunately, - // calling SysFree here without any kind of adjustment of the + // calling sysFree here without any kind of adjustment of the // heap data structures means that when the memory does // come back to us, we have the wrong metadata for it, either in - // the MSpan structures or in the garbage collection bitmap. - // Using SysFault here means that the program will run out of + // the mspan structures or in the garbage collection bitmap. + // Using sysFault here means that the program will run out of // memory fairly quickly in efence mode, but at least it won't // have mysterious crashes due to confused memory reuse. - // It should be possible to switch back to SysFree if we also - // implement and then call some kind of MHeap_DeleteSpan. + // It should be possible to switch back to sysFree if we also + // implement and then call some kind of mheap.deleteSpan. if debug.efence > 0 { s.limit = 0 // prevent mlookup from finding this span sysFault(unsafe.Pointer(s.base()), size) } else { - mheap_.freeSpan(s, 1) + mheap_.freeSpan(s, true) } c.local_nlargefree++ c.local_largefree += size @@ -404,7 +432,7 @@ retry: newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages) for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) { - if gosweepone() == ^uintptr(0) { + if sweepone() == ^uintptr(0) { mheap_.sweepPagesPerByte = 0 break } diff --git a/src/runtime/mgcsweepbuf.go b/src/runtime/mgcsweepbuf.go index 6c1118e3857cc..0491f7ccf6c98 100644 --- a/src/runtime/mgcsweepbuf.go +++ b/src/runtime/mgcsweepbuf.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/cpu" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -83,7 +84,7 @@ retry: if newCap == 0 { newCap = gcSweepBufInitSpineCap } - newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys) + newSpine := 
persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys) if b.spineCap != 0 { // Blocks are allocated off-heap, so // no write barriers. @@ -102,7 +103,7 @@ retry: } // Allocate a new block and add it to the spine. - block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys)) + block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys)) blockp := add(b.spine, sys.PtrSize*top) // Blocks are allocated off-heap, so no write barrier. atomic.StorepNoWB(blockp, unsafe.Pointer(block)) diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go index 99771e2e57f2c..f2c16d7d8c4df 100644 --- a/src/runtime/mgcwork.go +++ b/src/runtime/mgcwork.go @@ -22,6 +22,13 @@ const ( workbufAlloc = 32 << 10 ) +// throwOnGCWork causes any operations that add pointers to a gcWork +// buffer to throw. +// +// TODO(austin): This is a temporary debugging measure for issue +// #27993. To be removed before release. +var throwOnGCWork bool + func init() { if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 { throw("bad workbufAlloc") @@ -46,10 +53,7 @@ func init() { // // (preemption must be disabled) // gcw := &getg().m.p.ptr().gcw -// .. call gcw.put() to produce and gcw.get() to consume .. -// if gcBlackenPromptly { -// gcw.dispose() -// } +// .. call gcw.put() to produce and gcw.tryGet() to consume .. // // It's important that any use of gcWork during the mark phase prevent // the garbage collector from transitioning to mark termination since @@ -83,6 +87,23 @@ type gcWork struct { // Scan work performed on this gcWork. This is aggregated into // gcController by dispose and may also be flushed by callers. scanWork int64 + + // flushedWork indicates that a non-empty work buffer was + // flushed to the global work list since the last gcMarkDone + // termination check. Specifically, this indicates that this + // gcWork may have communicated work to another gcWork. 
+ flushedWork bool + + // pauseGen causes put operations to spin while pauseGen == + // gcWorkPauseGen if debugCachedWork is true. + pauseGen uint32 + + // putGen is the pauseGen of the last putGen. + putGen uint32 + + // pauseStack is the stack at which this P was paused if + // debugCachedWork is true. + pauseStack [16]uintptr } // Most of the methods of gcWork are go:nowritebarrierrec because the @@ -101,10 +122,60 @@ func (w *gcWork) init() { w.wbuf2 = wbuf2 } +func (w *gcWork) checkPut(ptr uintptr, ptrs []uintptr) { + if debugCachedWork { + alreadyFailed := w.putGen == w.pauseGen + w.putGen = w.pauseGen + if m := getg().m; m.locks > 0 || m.mallocing != 0 || m.preemptoff != "" || m.p.ptr().status != _Prunning { + // If we were to spin, the runtime may + // deadlock: the condition above prevents + // preemption (see newstack), which could + // prevent gcMarkDone from finishing the + // ragged barrier and releasing the spin. + return + } + for atomic.Load(&gcWorkPauseGen) == w.pauseGen { + } + if throwOnGCWork { + printlock() + if alreadyFailed { + println("runtime: checkPut already failed at this generation") + } + println("runtime: late gcWork put") + if ptr != 0 { + gcDumpObject("ptr", ptr, ^uintptr(0)) + } + for _, ptr := range ptrs { + gcDumpObject("ptrs", ptr, ^uintptr(0)) + } + println("runtime: paused at") + for _, pc := range w.pauseStack { + if pc == 0 { + break + } + f := findfunc(pc) + if f.valid() { + // Obviously this doesn't + // relate to ancestor + // tracebacks, but this + // function prints what we + // want. + printAncestorTracebackFuncInfo(f, pc) + } else { + println("\tunknown PC ", hex(pc), "\n") + } + } + throw("throwOnGCWork") + } + } +} + // put enqueues a pointer for the garbage collector to trace. // obj must point to the beginning of a heap object or an oblet. 
//go:nowritebarrierrec func (w *gcWork) put(obj uintptr) { + w.checkPut(obj, nil) + flushed := false wbuf := w.wbuf1 if wbuf == nil { @@ -116,6 +187,7 @@ func (w *gcWork) put(obj uintptr) { wbuf = w.wbuf1 if wbuf.nobj == len(wbuf.obj) { putfull(wbuf) + w.flushedWork = true wbuf = getempty() w.wbuf1 = wbuf flushed = true @@ -134,10 +206,12 @@ func (w *gcWork) put(obj uintptr) { } } -// putFast does a put and returns true if it can be done quickly +// putFast does a put and reports whether it can be done quickly // otherwise it returns false and the caller needs to call put. //go:nowritebarrierrec func (w *gcWork) putFast(obj uintptr) bool { + w.checkPut(obj, nil) + wbuf := w.wbuf1 if wbuf == nil { return false @@ -159,6 +233,8 @@ func (w *gcWork) putBatch(obj []uintptr) { return } + w.checkPut(0, obj) + flushed := false wbuf := w.wbuf1 if wbuf == nil { @@ -169,6 +245,7 @@ func (w *gcWork) putBatch(obj []uintptr) { for len(obj) > 0 { for wbuf.nobj == len(wbuf.obj) { putfull(wbuf) + w.flushedWork = true w.wbuf1, w.wbuf2 = w.wbuf2, getempty() wbuf = w.wbuf1 flushed = true @@ -231,37 +308,6 @@ func (w *gcWork) tryGetFast() uintptr { return wbuf.obj[wbuf.nobj] } -// get dequeues a pointer for the garbage collector to trace, blocking -// if necessary to ensure all pointers from all queues and caches have -// been retrieved. get returns 0 if there are no pointers remaining. -//go:nowritebarrierrec -func (w *gcWork) get() uintptr { - wbuf := w.wbuf1 - if wbuf == nil { - w.init() - wbuf = w.wbuf1 - // wbuf is empty at this point. - } - if wbuf.nobj == 0 { - w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1 - wbuf = w.wbuf1 - if wbuf.nobj == 0 { - owbuf := wbuf - wbuf = getfull() - if wbuf == nil { - return 0 - } - putempty(owbuf) - w.wbuf1 = wbuf - } - } - - // TODO: This might be a good place to add prefetch code - - wbuf.nobj-- - return wbuf.obj[wbuf.nobj] -} - // dispose returns any cached pointers to the global queue. 
// The buffers are being put on the full queue so that the // write barriers will not simply reacquire them before the @@ -275,6 +321,7 @@ func (w *gcWork) dispose() { putempty(wbuf) } else { putfull(wbuf) + w.flushedWork = true } w.wbuf1 = nil @@ -283,6 +330,7 @@ func (w *gcWork) dispose() { putempty(wbuf) } else { putfull(wbuf) + w.flushedWork = true } w.wbuf2 = nil } @@ -308,10 +356,14 @@ func (w *gcWork) balance() { return } if wbuf := w.wbuf2; wbuf.nobj != 0 { + w.checkPut(0, wbuf.obj[:wbuf.nobj]) putfull(wbuf) + w.flushedWork = true w.wbuf2 = getempty() } else if wbuf := w.wbuf1; wbuf.nobj > 4 { + w.checkPut(0, wbuf.obj[:wbuf.nobj]) w.wbuf1 = handoff(wbuf) + w.flushedWork = true // handoff did putfull } else { return } @@ -321,7 +373,7 @@ func (w *gcWork) balance() { } } -// empty returns true if w has no mark work available. +// empty reports whether w has no mark work available. //go:nowritebarrierrec func (w *gcWork) empty() bool { return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0) @@ -440,61 +492,6 @@ func trygetfull() *workbuf { return b } -// Get a full work buffer off the work.full list. -// If nothing is available wait until all the other gc helpers have -// finished and then return nil. -// getfull acts as a barrier for work.nproc helpers. As long as one -// gchelper is actively marking objects it -// may create a workbuffer that the other helpers can work on. -// The for loop either exits when a work buffer is found -// or when _all_ of the work.nproc GC helpers are in the loop -// looking for work and thus not capable of creating new work. -// This is in fact the termination condition for the STW mark -// phase. 
-//go:nowritebarrier -func getfull() *workbuf { - b := (*workbuf)(work.full.pop()) - if b != nil { - b.checknonempty() - return b - } - - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait > work.nproc { - println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - for i := 0; ; i++ { - if work.full != 0 { - decnwait := atomic.Xadd(&work.nwait, -1) - if decnwait == work.nproc { - println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - b = (*workbuf)(work.full.pop()) - if b != nil { - b.checknonempty() - return b - } - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait > work.nproc { - println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - } - if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs { - return nil - } - if i < 10 { - procyield(20) - } else if i < 20 { - osyield() - } else { - usleep(100) - } - } -} - //go:nowritebarrier func handoff(b *workbuf) *workbuf { // Make new buffer with half of b's pointers. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index b11853ca18dcc..47e3a33391141 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -9,6 +9,7 @@ package runtime import ( + "internal/cpu" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -20,7 +21,7 @@ import ( const minPhysPageSize = 4096 // Main malloc heap. -// The heap itself is the "free[]" and "large" arrays, +// The heap itself is the "free" and "scav" treaps, // but all the other global data is here too. 
// // mheap must not be heap-allocated because it contains mSpanLists, @@ -29,13 +30,11 @@ const minPhysPageSize = 4096 //go:notinheap type mheap struct { lock mutex - free [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList - freelarge mTreap // free treap of length >= _MaxMHeapList - busy [_MaxMHeapList]mSpanList // busy lists of large spans of given length - busylarge mSpanList // busy lists of large spans length >= _MaxMHeapList - sweepgen uint32 // sweep generation, see comment in mspan - sweepdone uint32 // all spans are swept - sweepers uint32 // number of active sweepone calls + free mTreap // free and non-scavenged spans + scav mTreap // free and scavenged spans + sweepgen uint32 // sweep generation, see comment in mspan + sweepdone uint32 // all spans are swept + sweepers uint32 // number of active sweepone calls // allspans is a slice of all mspans ever created. Each mspan // appears exactly once. @@ -61,7 +60,7 @@ type mheap struct { // on the swept stack. sweepSpans [2]gcSweepBuf - //_ uint32 // align uint64 fields on 32-bit for atomics + _ uint32 // align uint64 fields on 32-bit for atomics // Proportional sweep // @@ -81,7 +80,7 @@ type mheap struct { // accounting for current progress. If we could only adjust // the slope, it would create a discontinuity in debt if any // progress has already been made. - pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock + pagesInUse uint64 // pages of spans in stats mSpanInUse; R/W with mheap.lock pagesSwept uint64 // pages swept this cycle; updated atomically pagesSweptBasis uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without @@ -89,6 +88,33 @@ type mheap struct { // TODO(austin): pagesInUse should be a uintptr, but the 386 // compiler can't 8-byte align fields. 
+ // Page reclaimer state + + // reclaimIndex is the page index in allArenas of next page to + // reclaim. Specifically, it refers to page (i % + // pagesPerArena) of arena allArenas[i / pagesPerArena]. + // + // If this is >= 1<<63, the page reclaimer is done scanning + // the page marks. + // + // This is accessed atomically. + reclaimIndex uint64 + // reclaimCredit is spare credit for extra pages swept. Since + // the page reclaimer works in large chunks, it may reclaim + // more than requested. Any spare pages released go to this + // credit pool. + // + // This is accessed atomically. + reclaimCredit uintptr + + // scavengeCredit is spare credit for extra bytes scavenged. + // Since the scavenging mechanisms operate on spans, it may + // scavenge more than requested. Any spare pages released + // go to this credit pool. + // + // This is protected by the mheap lock. + scavengeCredit uintptr + // Malloc stats. largealloc uint64 // bytes allocated for large objects nlargealloc uint64 // number of large object allocations @@ -133,21 +159,35 @@ type mheap struct { // (the actual arenas). This is only used on 32-bit. arena linearAlloc - //_ uint32 // ensure 64-bit alignment of central + // allArenas is the arenaIndex of every mapped arena. This can + // be used to iterate through the address space. + // + // Access is protected by mheap_.lock. However, since this is + // append-only and old backing arrays are never freed, it is + // safe to acquire mheap_.lock, copy the slice header, and + // then release mheap_.lock. + allArenas []arenaIdx + + // sweepArenas is a snapshot of allArenas taken at the + // beginning of the sweep cycle. This can be read safely by + // simply blocking GC (by disabling preemption). + sweepArenas []arenaIdx + + // _ uint32 // ensure 64-bit alignment of central // central free lists for small size classes. 
- // the padding makes sure that the MCentrals are - // spaced CacheLineSize bytes apart, so that each MCentral.lock + // the padding makes sure that the mcentrals are + // spaced CacheLinePadSize bytes apart, so that each mcentral.lock // gets its own cache line. // central is indexed by spanClass. central [numSpanClasses]struct { mcentral mcentral - pad [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte + pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte } spanalloc fixalloc // allocator for span* cachealloc fixalloc // allocator for mcache* - treapalloc fixalloc // allocator for treapNodes* used by large objects + treapalloc fixalloc // allocator for treapNodes* specialfinalizeralloc fixalloc // allocator for specialfinalizer* specialprofilealloc fixalloc // allocator for specialprofile* speciallock mutex // lock for special record allocators. @@ -184,6 +224,29 @@ type heapArena struct { // must not be a safe-point between establishing that an // address is live and looking it up in the spans array. spans [pagesPerArena]*mspan + + // pageInUse is a bitmap that indicates which spans are in + // state mSpanInUse. This bitmap is indexed by page number, + // but only the bit corresponding to the first page in each + // span is used. + // + // Writes are protected by mheap_.lock. + pageInUse [pagesPerArena / 8]uint8 + + // pageMarks is a bitmap that indicates which spans have any + // marked objects on them. Like pageInUse, only the bit + // corresponding to the first page in each span is used. + // + // Writes are done atomically during marking. Reads are + // non-atomic and lock-free since they only occur during + // sweeping (and hence never race with writes). + // + // This is used to quickly find whole spans that can be freed. + // + // TODO(austin): It would be nice if this was uint64 for + // faster scanning, but we don't have 64-bit atomic bit + // operations. 
+ pageMarks [pagesPerArena / 8]uint8 } // arenaHint is a hint for where to grow the heap arenas. See @@ -196,20 +259,21 @@ type arenaHint struct { next *arenaHint } -// An MSpan is a run of pages. +// An mspan is a run of pages. // -// When a MSpan is in the heap free list, state == MSpanFree +// When a mspan is in the heap free treap, state == mSpanFree // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. +// If the mspan is in the heap scav treap, then in addition to the +// above scavenged == true. scavenged == false in all other cases. // -// When a MSpan is allocated, state == MSpanInUse or MSpanManual +// When a mspan is allocated, state == mSpanInUse or mSpanManual // and heapmap(i) == span for all s->start <= i < s->start+s->npages. -// Every MSpan is in one doubly-linked list, -// either one of the MHeap's free lists or one of the -// MCentral's span lists. +// Every mspan is in one doubly-linked list, either in the mheap's +// busy list or one of the mcentral's span lists. -// An MSpan representing actual memory has state _MSpanInUse, -// _MSpanManual, or _MSpanFree. Transitions between these states are +// An mspan representing actual memory has state mSpanInUse, +// mSpanManual, or mSpanFree. Transitions between these states are // constrained as follows: // // * A span may transition from free to in-use or manual during any GC @@ -225,19 +289,19 @@ type arenaHint struct { type mSpanState uint8 const ( - _MSpanDead mSpanState = iota - _MSpanInUse // allocated for garbage collected heap - _MSpanManual // allocated for manual management (e.g., stack allocator) - _MSpanFree + mSpanDead mSpanState = iota + mSpanInUse // allocated for garbage collected heap + mSpanManual // allocated for manual management (e.g., stack allocator) + mSpanFree ) // mSpanStateNames are the names of the span states, indexed by // mSpanState. 
var mSpanStateNames = []string{ - "_MSpanDead", - "_MSpanInUse", - "_MSpanManual", - "_MSpanFree", + "mSpanDead", + "mSpanInUse", + "mSpanManual", + "mSpanFree", } // mSpanList heads a linked list of spans. @@ -257,7 +321,7 @@ type mspan struct { startAddr uintptr // address of first byte of span aka s.base() npages uintptr // number of pages in span - manualFreeList gclinkptr // list of free objects in _MSpanManual spans + manualFreeList gclinkptr // list of free objects in mSpanManual spans // freeindex is the slot index between 0 and nelems at which to begin scanning // for the next free object in this span. @@ -316,6 +380,8 @@ type mspan struct { // if sweepgen == h->sweepgen - 2, the span needs sweeping // if sweepgen == h->sweepgen - 1, the span is currently being swept // if sweepgen == h->sweepgen, the span is swept and ready to use + // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping + // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached // h->sweepgen is incremented by 2 after every GC sweepgen uint32 @@ -323,14 +389,13 @@ type mspan struct { baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base allocCount uint16 // number of allocated objects spanclass spanClass // size class and noscan (uint8) - incache bool // being used by an mcache state mSpanState // mspaninuse etc needzero uint8 // needs to be zeroed before allocation divShift uint8 // for divide by elemsize - divMagic.shift divShift2 uint8 // for divide by elemsize - divMagic.shift2 + scavenged bool // whether this span has had its pages released to the OS elemsize uintptr // computed from sizeclass or from npages unusedsince int64 // first time spotted by gc in mspanfree state - npreleased uintptr // number of pages released to the os limit uintptr // end of data in span speciallock mutex // guards specials list specials *special // linked list of special 
records sorted by offset. @@ -349,6 +414,154 @@ func (s *mspan) layout() (size, n, total uintptr) { return } +// physPageBounds returns the start and end of the span +// rounded in to the physical page size. +func (s *mspan) physPageBounds() (uintptr, uintptr) { + start := s.base() + end := start + s.npages<<_PageShift + if physPageSize > _PageSize { + // Round start and end in. + start = (start + physPageSize - 1) &^ (physPageSize - 1) + end &^= physPageSize - 1 + } + return start, end +} + +func (h *mheap) coalesce(s *mspan) { + // We scavenge s at the end after coalescing if s or anything + // it merged with is marked scavenged. + needsScavenge := false + prescavenged := s.released() // number of bytes already scavenged. + + // merge is a helper which merges other into s, deletes references to other + // in heap metadata, and then discards it. other must be adjacent to s. + merge := func(other *mspan) { + // Adjust s via base and npages and also in heap metadata. + s.npages += other.npages + s.needzero |= other.needzero + if other.startAddr < s.startAddr { + s.startAddr = other.startAddr + h.setSpan(s.base(), s) + } else { + h.setSpan(s.base()+s.npages*pageSize-1, s) + } + + // If before or s are scavenged, then we need to scavenge the final coalesced span. + needsScavenge = needsScavenge || other.scavenged || s.scavenged + prescavenged += other.released() + + // The size is potentially changing so the treap needs to delete adjacent nodes and + // insert back as a combined node. + if other.scavenged { + h.scav.removeSpan(other) + } else { + h.free.removeSpan(other) + } + other.state = mSpanDead + h.spanalloc.free(unsafe.Pointer(other)) + } + + // realign is a helper which shrinks other and grows s such that their + // boundary is on a physical page boundary. + realign := func(a, b, other *mspan) { + // Caller must ensure a.startAddr < b.startAddr and that either a or + // b is s. a and b must be adjacent. other is whichever of the two is + // not s. 
+ + // If pageSize <= physPageSize then spans are always aligned + // to physical page boundaries, so just exit. + if pageSize <= physPageSize { + return + } + // Since we're resizing other, we must remove it from the treap. + if other.scavenged { + h.scav.removeSpan(other) + } else { + h.free.removeSpan(other) + } + // Round boundary to the nearest physical page size, toward the + // scavenged span. + boundary := b.startAddr + if a.scavenged { + boundary &^= (physPageSize - 1) + } else { + boundary = (boundary + physPageSize - 1) &^ (physPageSize - 1) + } + a.npages = (boundary - a.startAddr) / pageSize + b.npages = (b.startAddr + b.npages*pageSize - boundary) / pageSize + b.startAddr = boundary + + h.setSpan(boundary-1, a) + h.setSpan(boundary, b) + + // Re-insert other now that it has a new size. + if other.scavenged { + h.scav.insert(other) + } else { + h.free.insert(other) + } + } + + // Coalesce with earlier, later spans. + if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree { + if s.scavenged == before.scavenged { + merge(before) + } else { + realign(before, s, before) + } + } + + // Now check to see if next (greater addresses) span is free and can be coalesced. + if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree { + if s.scavenged == after.scavenged { + merge(after) + } else { + realign(s, after, after) + } + } + + if needsScavenge { + // When coalescing spans, some physical pages which + // were not returned to the OS previously because + // they were only partially covered by the span suddenly + // become available for scavenging. We want to make sure + // those holes are filled in, and the span is properly + // scavenged. Rather than trying to detect those holes + // directly, we collect how many bytes were already + // scavenged above and subtract that from heap_released + // before re-scavenging the entire newly-coalesced span, + // which will implicitly bump up heap_released. 
+ memstats.heap_released -= uint64(prescavenged) + s.scavenge() + } +} + +func (s *mspan) scavenge() uintptr { + // start and end must be rounded in, otherwise madvise + // will round them *out* and release more memory + // than we want. + start, end := s.physPageBounds() + if end <= start { + // start and end don't span a whole physical page. + return 0 + } + released := end - start + memstats.heap_released += uint64(released) + s.scavenged = true + sysUnused(unsafe.Pointer(start), released) + return released +} + +// released returns the number of bytes in this span +// which were returned back to the OS. +func (s *mspan) released() uintptr { + if !s.scavenged { + return 0 + } + start, end := s.physPageBounds() + return end - start +} + // recordspan adds a newly allocated span to h.allspans. // // This only happens the first time a span is allocated from @@ -457,7 +670,7 @@ func (i arenaIdx) l2() uint { } // inheap reports whether b is a pointer into a (potentially dead) heap object. -// It returns false for pointers into _MSpanManual spans. +// It returns false for pointers into mSpanManual spans. // Non-preemptible because it is used by write barriers. //go:nowritebarrier //go:nosplit @@ -476,7 +689,7 @@ func inHeapOrStack(b uintptr) bool { return false } switch s.state { - case mSpanInUse, _MSpanManual: + case mSpanInUse, mSpanManual: return b < s.limit default: return false @@ -550,6 +763,16 @@ func spanOfHeap(p uintptr) *mspan { return s } +// pageIndexOf returns the arena, page index, and page mask for pointer p. +// The caller must ensure p is in the heap. +func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) { + ai := arenaIndex(p) + arena = mheap_.arenas[ai.l1()][ai.l2()] + pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse)) + pageMask = byte(1 << ((p / pageSize) % 8)) + return +} + // Initialize the heap. 
func (h *mheap) init() { h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys) @@ -569,120 +792,182 @@ func (h *mheap) init() { h.spanalloc.zero = false // h->mapcache needs no init - for i := range h.free { - h.free[i].init() - h.busy[i].init() - } - h.busylarge.init() for i := range h.central { h.central[i].mcentral.init(spanClass(i)) } } -// Sweeps spans in list until reclaims at least npages into heap. -// Returns the actual number of pages reclaimed. -func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr { - n := uintptr(0) - sg := mheap_.sweepgen -retry: - for s := list.first; s != nil; s = s.next { - if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) { - list.remove(s) - // swept spans are at the end of the list - list.insertBack(s) // Puts it back on a busy list. s is not in the treap at this point. - unlock(&h.lock) - snpages := s.npages - if s.sweep(false) { - n += snpages +// reclaim sweeps and reclaims at least npage pages into the heap. +// It is called before allocating npage pages to keep growth in check. +// +// reclaim implements the page-reclaimer half of the sweeper. +// +// h must NOT be locked. +func (h *mheap) reclaim(npage uintptr) { + // This scans pagesPerChunk at a time. Higher values reduce + // contention on h.reclaimPos, but increase the minimum + // latency of performing a reclaim. + // + // Must be a multiple of the pageInUse bitmap element size. + // + // The time required by this can vary a lot depending on how + // many spans are actually freed. Experimentally, it can scan + // for pages at ~300 GB/ms on a 2.6GHz Core i7, but can only + // free spans at ~32 MB/ms. Using 512 pages bounds this at + // roughly 100µs. + // + // TODO(austin): Half of the time spent freeing spans is in + // locking/unlocking the heap (even with low contention). We + // could make the slow path here several times faster by + // batching heap frees. 
+ const pagesPerChunk = 512 + + // Bail early if there's no more reclaim work. + if atomic.Load64(&h.reclaimIndex) >= 1<<63 { + return + } + + // Disable preemption so the GC can't start while we're + // sweeping, so we can read h.sweepArenas, and so + // traceGCSweepStart/Done pair on the P. + mp := acquirem() + + if trace.enabled { + traceGCSweepStart() + } + + arenas := h.sweepArenas + locked := false + for npage > 0 { + // Pull from accumulated credit first. + if credit := atomic.Loaduintptr(&h.reclaimCredit); credit > 0 { + take := credit + if take > npage { + // Take only what we need. + take = npage } - lock(&h.lock) - if n >= npages { - return n + if atomic.Casuintptr(&h.reclaimCredit, credit, credit-take) { + npage -= take } - // the span could have been moved elsewhere - goto retry - } - if s.sweepgen == sg-1 { - // the span is being swept by background sweeper, skip continue } - // already swept empty span, - // all subsequent ones must also be either swept or in process of sweeping - break - } - return n -} -// Sweeps and reclaims at least npage pages into heap. -// Called before allocating npage pages. -func (h *mheap) reclaim(npage uintptr) { - // First try to sweep busy spans with large objects of size >= npage, - // this has good chances of reclaiming the necessary space. - for i := int(npage); i < len(h.busy); i++ { - if h.reclaimList(&h.busy[i], npage) != 0 { - return // Bingo! + // Claim a chunk of work. + idx := uintptr(atomic.Xadd64(&h.reclaimIndex, pagesPerChunk) - pagesPerChunk) + if idx/pagesPerArena >= uintptr(len(arenas)) { + // Page reclaiming is done. + atomic.Store64(&h.reclaimIndex, 1<<63) + break } - } - // Then -- even larger objects. - if h.reclaimList(&h.busylarge, npage) != 0 { - return // Bingo! - } + if !locked { + // Lock the heap for reclaimChunk. + lock(&h.lock) + locked = true + } - // Now try smaller objects. - // One such object is not enough, so we need to reclaim several of them. 
- reclaimed := uintptr(0) - for i := 0; i < int(npage) && i < len(h.busy); i++ { - reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed) - if reclaimed >= npage { - return + // Scan this chunk. + nfound := h.reclaimChunk(arenas, idx, pagesPerChunk) + if nfound <= npage { + npage -= nfound + } else { + // Put spare pages toward global credit. + atomic.Xadduintptr(&h.reclaimCredit, nfound-npage) + npage = 0 } } + if locked { + unlock(&h.lock) + } - // Now sweep everything that is not yet swept. - unlock(&h.lock) - for { - n := sweepone() - if n == ^uintptr(0) { // all spans are swept - break + if trace.enabled { + traceGCSweepDone() + } + releasem(mp) +} + +// reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n). +// It returns the number of pages returned to the heap. +// +// h.lock must be held and the caller must be non-preemptible. +func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr { + // The heap lock must be held because this accesses the + // heapArena.spans arrays using potentially non-live pointers. + // In particular, if a span were freed and merged concurrently + // with this probing heapArena.spans, it would be possible to + // observe arbitrary, stale span pointers. + n0 := n + var nFreed uintptr + sg := h.sweepgen + for n > 0 { + ai := arenas[pageIdx/pagesPerArena] + ha := h.arenas[ai.l1()][ai.l2()] + + // Get a chunk of the bitmap to work on. + arenaPage := uint(pageIdx % pagesPerArena) + inUse := ha.pageInUse[arenaPage/8:] + marked := ha.pageMarks[arenaPage/8:] + if uintptr(len(inUse)) > n/8 { + inUse = inUse[:n/8] + marked = marked[:n/8] } - reclaimed += n - if reclaimed >= npage { - break + + // Scan this bitmap chunk for spans that are in-use + // but have no marked objects on them. 
+ for i := range inUse { + inUseUnmarked := inUse[i] &^ marked[i] + if inUseUnmarked == 0 { + continue + } + + for j := uint(0); j < 8; j++ { + if inUseUnmarked&(1< ts.spanKey.npages) { + s = ts.spanKey + h.scav.removeNode(ts) + } + return s +} + // Allocates a span of the given size. h must be locked. // The returned span has been removed from the -// free list, but its state is still MSpanFree. +// free structures, but its state is still mSpanFree. func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan { - var list *mSpanList var s *mspan - // Try in fixed-size lists up to max. - for i := int(npage); i < len(h.free); i++ { - list = &h.free[i] - if !list.isEmpty() { - s = list.first - list.remove(s) - goto HaveSpan - } + s = h.pickFreeSpan(npage) + if s != nil { + goto HaveSpan } - // Best fit in list of large spans. - s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us - if s == nil { - if !h.grow(npage) { - return nil - } - s = h.allocLarge(npage) - if s == nil { - return nil - } + // On failure, grow the heap and try again. + if !h.grow(npage) { + return nil } + s = h.pickFreeSpan(npage) + if s != nil { + goto HaveSpan + } + throw("grew heap, but no adequate free span found") HaveSpan: // Mark span in use. - if s.state != _MSpanFree { - throw("MHeap_AllocLocked - MSpan not free") + if s.state != mSpanFree { + throw("candidate mspan for allocation is not free") } if s.npages < npage { - throw("MHeap_AllocLocked - bad npages") - } - if s.npreleased > 0 { - sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift) - memstats.heap_released -= uint64(s.npreleased << _PageShift) - s.npreleased = 0 + throw("candidate mspan for allocation is too small") } + // First, subtract any memory that was released back to + // the OS from s. We will re-scavenge the trimmed section + // if necessary. + memstats.heap_released -= uint64(s.released()) + if s.npages > npage { // Trim extra and put it back in the heap. 
t := (*mspan)(h.spanalloc.alloc()) @@ -877,10 +1179,35 @@ HaveSpan: h.setSpan(t.base(), t) h.setSpan(t.base()+t.npages*pageSize-1, t) t.needzero = s.needzero - s.state = _MSpanManual // prevent coalescing with s - t.state = _MSpanManual + // If s was scavenged, then t may be scavenged. + start, end := t.physPageBounds() + if s.scavenged && start < end { + memstats.heap_released += uint64(end - start) + t.scavenged = true + } + s.state = mSpanManual // prevent coalescing with s + t.state = mSpanManual h.freeSpanLocked(t, false, false, s.unusedsince) - s.state = _MSpanFree + s.state = mSpanFree + } + // "Unscavenge" s only AFTER splitting so that + // we only sysUsed whatever we actually need. + if s.scavenged { + // sysUsed all the pages that are actually available + // in the span. Note that we don't need to decrement + // heap_released since we already did so earlier. + sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift) + s.scavenged = false + + // Since we allocated out of a scavenged span, we just + // grew the RSS. Mitigate this by scavenging enough free + // space to make up for it. + // + // Also, scavengeLargest may cause coalescing, so prevent + // coalescing with s by temporarily changing its state. + s.state = mSpanManual + h.scavengeLargest(s.npages * pageSize) + s.state = mSpanFree } s.unusedsince = 0 @@ -896,21 +1223,6 @@ HaveSpan: return s } -// Large spans have a minimum size of 1MByte. The maximum number of large spans to support -// 1TBytes is 1 million, experimentation using random sizes indicates that the depth of -// the tree is less that 2x that of a perfectly balanced tree. For 1TByte can be referenced -// by a perfectly balanced tree with a depth of 20. Twice that is an acceptable 40. -func (h *mheap) isLargeSpan(npages uintptr) bool { - return npages >= uintptr(len(h.free)) -} - -// allocLarge allocates a span of at least npage pages from the treap of large spans. -// Returns nil if no such span currently exists. 
-func (h *mheap) allocLarge(npage uintptr) *mspan { - // Search treap for smallest span with >= npage pages. - return h.freelarge.remove(npage) -} - // Try to add at least npage pages of memory to the heap, // returning whether it worked. // @@ -923,20 +1235,31 @@ func (h *mheap) grow(npage uintptr) bool { return false } + // Scavenge some pages out of the free treap to make up for + // the virtual memory space we just allocated. We prefer to + // scavenge the largest spans first since the cost of scavenging + // is proportional to the number of sysUnused() calls rather than + // the number of pages released, so we make fewer of those calls + // with larger spans. + h.scavengeLargest(size) + // Create a fake "in use" span and free it, so that the // right coalescing happens. s := (*mspan)(h.spanalloc.alloc()) s.init(uintptr(v), size/pageSize) h.setSpans(s.base(), s.npages, s) atomic.Store(&s.sweepgen, h.sweepgen) - s.state = _MSpanInUse + s.state = mSpanInUse h.pagesInUse += uint64(s.npages) h.freeSpanLocked(s, false, true, 0) return true } // Free the span back into the heap. -func (h *mheap) freeSpan(s *mspan, acct int32) { +// +// large must match the value of large passed to mheap.alloc. This is +// used for accounting. +func (h *mheap) freeSpan(s *mspan, large bool) { systemstack(func() { mp := getg().m lock(&h.lock) @@ -950,7 +1273,8 @@ func (h *mheap) freeSpan(s *mspan, acct int32) { bytes := s.npages << _PageShift msanfree(base, bytes) } - if acct != 0 { + if large { + // Match accounting done in mheap.alloc. memstats.heap_objects-- } if gcBlackenEnabled != 0 { @@ -982,21 +1306,25 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) { unlock(&h.lock) } -// s must be on a busy list (h.busy or h.busylarge) or unlinked. +// s must be on the busy list or unlinked. 
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) { switch s.state { - case _MSpanManual: + case mSpanManual: if s.allocCount != 0 { - throw("MHeap_FreeSpanLocked - invalid stack free") + throw("mheap.freeSpanLocked - invalid stack free") } - case _MSpanInUse: + case mSpanInUse: if s.allocCount != 0 || s.sweepgen != h.sweepgen { - print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") - throw("MHeap_FreeSpanLocked - invalid free") + print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") + throw("mheap.freeSpanLocked - invalid free") } h.pagesInUse -= uint64(s.npages) + + // Clear in-use bit in arena page bitmap. + arena, pageIdx, pageMask := pageIndexOf(s.base()) + arena.pageInUse[pageIdx] &^= pageMask default: - throw("MHeap_FreeSpanLocked - invalid span state") + throw("mheap.freeSpanLocked - invalid span state") } if acctinuse { @@ -1005,10 +1333,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i if acctidle { memstats.heap_idle += uint64(s.npages << _PageShift) } - s.state = _MSpanFree - if s.inList() { - h.busyList(s.npages).remove(s) - } + s.state = mSpanFree // Stamp newly unused spans. The scavenger will use that // info to potentially give back some pages to the OS. @@ -1016,133 +1341,88 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i if unusedsince == 0 { s.unusedsince = nanotime() } - s.npreleased = 0 - // Coalesce with earlier, later spans. - if before := spanOf(s.base() - 1); before != nil && before.state == _MSpanFree { - // Now adjust s. 
- s.startAddr = before.startAddr - s.npages += before.npages - s.npreleased = before.npreleased // absorb released pages - s.needzero |= before.needzero - h.setSpan(before.base(), s) - // The size is potentially changing so the treap needs to delete adjacent nodes and - // insert back as a combined node. - if h.isLargeSpan(before.npages) { - // We have a t, it is large so it has to be in the treap so we can remove it. - h.freelarge.removeSpan(before) - } else { - h.freeList(before.npages).remove(before) - } - before.state = _MSpanDead - h.spanalloc.free(unsafe.Pointer(before)) - } + // Coalesce span with neighbors. + h.coalesce(s) - // Now check to see if next (greater addresses) span is free and can be coalesced. - if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == _MSpanFree { - s.npages += after.npages - s.npreleased += after.npreleased - s.needzero |= after.needzero - h.setSpan(s.base()+s.npages*pageSize-1, s) - if h.isLargeSpan(after.npages) { - h.freelarge.removeSpan(after) - } else { - h.freeList(after.npages).remove(after) - } - after.state = _MSpanDead - h.spanalloc.free(unsafe.Pointer(after)) - } - - // Insert s into appropriate list or treap. - if h.isLargeSpan(s.npages) { - h.freelarge.insert(s) + // Insert s into the appropriate treap. + if s.scavenged { + h.scav.insert(s) } else { - h.freeList(s.npages).insert(s) - } -} - -func (h *mheap) freeList(npages uintptr) *mSpanList { - return &h.free[npages] -} - -func (h *mheap) busyList(npages uintptr) *mSpanList { - if npages < uintptr(len(h.busy)) { - return &h.busy[npages] - } - return &h.busylarge -} - -func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr { - s := t.spanKey - var sumreleased uintptr - if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages { - start := s.base() - end := start + s.npages<<_PageShift - if physPageSize > _PageSize { - // We can only release pages in - // physPageSize blocks, so round start - // and end in. 
(Otherwise, madvise - // will round them *out* and release - // more memory than we want.) - start = (start + physPageSize - 1) &^ (physPageSize - 1) - end &^= physPageSize - 1 - if end <= start { - // start and end don't span a - // whole physical page. - return sumreleased - } - } - len := end - start - released := len - (s.npreleased << _PageShift) - if physPageSize > _PageSize && released == 0 { - return sumreleased - } - memstats.heap_released += uint64(released) - sumreleased += released - s.npreleased = len >> _PageShift - sysUnused(unsafe.Pointer(start), len) + h.free.insert(s) } - return sumreleased } -func scavengelist(list *mSpanList, now, limit uint64) uintptr { - if list.isEmpty() { - return 0 +// scavengeLargest scavenges nbytes worth of spans in unscav +// starting from the largest span and working down. It then takes those spans +// and places them in scav. h must be locked. +func (h *mheap) scavengeLargest(nbytes uintptr) { + // Use up scavenge credit if there's any available. + if nbytes > h.scavengeCredit { + nbytes -= h.scavengeCredit + h.scavengeCredit = 0 + } else { + h.scavengeCredit -= nbytes + return } - - var sumreleased uintptr - for s := list.first; s != nil; s = s.next { - if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages { - continue + // Iterate over the treap backwards (from largest to smallest) scavenging spans + // until we've reached our quota of nbytes. + released := uintptr(0) + for t := h.free.end(); released < nbytes && t.valid(); { + s := t.span() + r := s.scavenge() + if r == 0 { + // Since we're going in order of largest-to-smallest span, this + // means all other spans are no bigger than s. There's a high + // chance that the other spans don't even cover a full page, + // (though they could) but iterating further just for a handful + // of pages probably isn't worth it, so just stop here. 
+ // + // This check also preserves the invariant that spans that have + // `scavenged` set are only ever in the `scav` treap, and + // those which have it unset are only in the `free` treap. + return } - start := s.base() - end := start + s.npages<<_PageShift - if physPageSize > _PageSize { - // We can only release pages in - // physPageSize blocks, so round start - // and end in. (Otherwise, madvise - // will round them *out* and release - // more memory than we want.) - start = (start + physPageSize - 1) &^ (physPageSize - 1) - end &^= physPageSize - 1 - if end <= start { - // start and end don't span a - // whole physical page. - continue + n := t.prev() + h.free.erase(t) + // Now that s is scavenged, we must eagerly coalesce it + // with its neighbors to prevent having two spans with + // the same scavenged state adjacent to each other. + h.coalesce(s) + t = n + h.scav.insert(s) + released += r + } + // If we over-scavenged, turn that extra amount into credit. + if released > nbytes { + h.scavengeCredit += released - nbytes + } +} + +// scavengeAll visits each node in the unscav treap and scavenges the +// treapNode's span. It then removes the scavenged span from +// unscav and adds it into scav before continuing. h must be locked. +func (h *mheap) scavengeAll(now, limit uint64) uintptr { + // Iterate over the treap scavenging spans if unused for at least limit time. + released := uintptr(0) + for t := h.free.start(); t.valid(); { + s := t.span() + n := t.next() + if (now - uint64(s.unusedsince)) > limit { + r := s.scavenge() + if r != 0 { + h.free.erase(t) + // Now that s is scavenged, we must eagerly coalesce it + // with its neighbors to prevent having two spans with + // the same scavenged state adjacent to each other. 
+ h.coalesce(s) + h.scav.insert(s) + released += r } } - len := end - start - - released := len - (s.npreleased << _PageShift) - if physPageSize > _PageSize && released == 0 { - continue - } - memstats.heap_released += uint64(released) - sumreleased += released - s.npreleased = len >> _PageShift - sysUnused(unsafe.Pointer(start), len) + t = n } - return sumreleased + return released } func (h *mheap) scavenge(k int32, now, limit uint64) { @@ -1152,17 +1432,13 @@ func (h *mheap) scavenge(k int32, now, limit uint64) { gp := getg() gp.m.mallocing++ lock(&h.lock) - var sumreleased uintptr - for i := 0; i < len(h.free); i++ { - sumreleased += scavengelist(&h.free[i], now, limit) - } - sumreleased += scavengetreap(h.freelarge.treap, now, limit) + released := h.scavengeAll(now, limit) unlock(&h.lock) gp.m.mallocing-- if debug.gctrace > 0 { - if sumreleased > 0 { - print("scvg", k, ": ", sumreleased>>20, " MB released\n") + if released > 0 { + print("scvg", k, ": ", released>>20, " MB released\n") } print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n") } @@ -1184,11 +1460,10 @@ func (span *mspan) init(base uintptr, npages uintptr) { span.npages = npages span.allocCount = 0 span.spanclass = 0 - span.incache = false span.elemsize = 0 - span.state = _MSpanDead + span.state = mSpanDead span.unusedsince = 0 - span.npreleased = 0 + span.scavenged = false span.speciallock.key = 0 span.specials = nil span.needzero = 0 @@ -1209,9 +1484,9 @@ func (list *mSpanList) init() { func (list *mSpanList) remove(span *mspan) { if span.list != list { - print("runtime: failed MSpanList_Remove span.npages=", span.npages, + print("runtime: failed mSpanList.remove span.npages=", span.npages, " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n") - throw("MSpanList_Remove") + 
throw("mSpanList.remove") } if list.first == span { list.first = span.next @@ -1234,8 +1509,8 @@ func (list *mSpanList) isEmpty() bool { func (list *mSpanList) insert(span *mspan) { if span.next != nil || span.prev != nil || span.list != nil { - println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list) - throw("MSpanList_Insert") + println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list) + throw("mSpanList.insert") } span.next = list.first if list.first != nil { @@ -1252,8 +1527,8 @@ func (list *mSpanList) insert(span *mspan) { func (list *mSpanList) insertBack(span *mspan) { if span.next != nil || span.prev != nil || span.list != nil { - println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list) - throw("MSpanList_InsertBack") + println("runtime: failed mSpanList.insertBack", span, span.next, span.prev, span.list) + throw("mSpanList.insertBack") } span.prev = list.last if list.last != nil { @@ -1436,10 +1711,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p scanobject(base, gcw) // Mark the finalizer itself, since the // special isn't part of the GC'd heap. - scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw) - if gcBlackenPromptly { - gcw.dispose() - } + scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } return true @@ -1484,7 +1756,7 @@ func setprofilebucket(p unsafe.Pointer, b *bucket) { } // Do whatever cleanup needs to be done to deallocate s. It has -// already been unlinked from the MSpan specials list. +// already been unlinked from the mspan specials list. 
func freespecial(s *special, p unsafe.Pointer, size uintptr) { switch s.kind { case _KindSpecialFinalizer: diff --git a/src/runtime/mkfastlog2table.go b/src/runtime/mkfastlog2table.go index 587ebf476d388..305c84a7c1169 100644 --- a/src/runtime/mkfastlog2table.go +++ b/src/runtime/mkfastlog2table.go @@ -20,7 +20,7 @@ import ( func main() { var buf bytes.Buffer - fmt.Fprintln(&buf, "// AUTO-GENERATED by mkfastlog2table.go") + fmt.Fprintln(&buf, "// Code generated by mkfastlog2table.go; DO NOT EDIT.") fmt.Fprintln(&buf, "// Run go generate from src/runtime to update.") fmt.Fprintln(&buf, "// See mkfastlog2table.go for comments.") fmt.Fprintln(&buf) diff --git a/src/runtime/mknacl.sh b/src/runtime/mknacl.sh index 3454b624d6ea2..306ae3d9c15f7 100644 --- a/src/runtime/mknacl.sh +++ b/src/runtime/mknacl.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright 2013 The Go Authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go index fe09e7029e955..2868f3fd4e597 100644 --- a/src/runtime/mmap.go +++ b/src/runtime/mmap.go @@ -10,6 +10,7 @@ // +build !linux !arm64 // +build !js // +build !darwin +// +build !aix package runtime diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 43e4810d97878..2bd41b650f4f7 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -723,7 +723,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { isOK := func(gp1 *g) bool { // Checking isSystemGoroutine here makes GoroutineProfile // consistent with both NumGoroutine and Stack. 
- return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1) + return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false) } stopTheWorld("profile") diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index f67d05414daf2..9250865ed180e 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -38,24 +38,10 @@ type mstats struct { heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above) heap_sys uint64 // virtual address space obtained from system for GC'd heap heap_idle uint64 // bytes in idle spans - heap_inuse uint64 // bytes in _MSpanInUse spans + heap_inuse uint64 // bytes in mSpanInUse spans heap_released uint64 // bytes released to the os heap_objects uint64 // total number of allocated objects - // TODO(austin): heap_released is both useless and inaccurate - // in its current form. It's useless because, from the user's - // and OS's perspectives, there's no difference between a page - // that has not yet been faulted in and a page that has been - // released back to the OS. We could fix this by considering - // newly mapped spans to be "released". It's inaccurate - // because when we split a large span for allocation, we - // "unrelease" all pages in the large span and not just the - // ones we split off for use. This is trickier to fix because - // we currently don't know which pages of a span we've - // released. We could fix it by separating "free" and - // "released" spans, but then we have to allocate from runs of - // free and released spans. - // Statistics about allocation of low-level fixed-size structures. // Protected by FixAlloc locks. stacks_inuse uint64 // bytes in manually-managed stack spans @@ -543,7 +529,7 @@ func updatememstats() { memstats.by_size[i].nfree = 0 } - // Flush MCache's to MCentral. + // Flush mcache's to mcentral. systemstack(flushallmcaches) // Aggregate local stats. 
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go index 4df16d55b8b69..f444452bab502 100644 --- a/src/runtime/mwbbuf.go +++ b/src/runtime/mwbbuf.go @@ -23,6 +23,7 @@ package runtime import ( + "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -56,6 +57,12 @@ type wbBuf struct { // on. This must be a multiple of wbBufEntryPointers because // the write barrier only checks for overflow once per entry. buf [wbBufEntryPointers * wbBufEntries]uintptr + + // debugGen causes the write barrier buffer to flush after + // every write barrier if equal to gcWorkPauseGen. This is for + // debugging #27993. This is only set if debugCachedWork is + // set. + debugGen uint32 } const ( @@ -79,7 +86,7 @@ const ( func (b *wbBuf) reset() { start := uintptr(unsafe.Pointer(&b.buf[0])) b.next = start - if gcBlackenPromptly || writeBarrier.cgo { + if writeBarrier.cgo || (debugCachedWork && (throwOnGCWork || b.debugGen == atomic.Load(&gcWorkPauseGen))) { // Effectively disable the buffer by forcing a flush // on every barrier. b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers])) @@ -107,6 +114,11 @@ func (b *wbBuf) discard() { b.next = uintptr(unsafe.Pointer(&b.buf[0])) } +// empty reports whether b contains no pointers. +func (b *wbBuf) empty() bool { + return b.next == uintptr(unsafe.Pointer(&b.buf[0])) +} + // putFast adds old and new to the write barrier buffer and returns // false if a flush is necessary. Callers should use this as: // @@ -192,10 +204,32 @@ func wbBufFlush(dst *uintptr, src uintptr) { // Switch to the system stack so we don't have to worry about // the untyped stack slots or safe points. systemstack(func() { - wbBufFlush1(getg().m.p.ptr()) + if debugCachedWork { + // For debugging, include the old value of the + // slot and some other data in the traceback. + wbBuf := &getg().m.p.ptr().wbBuf + var old uintptr + if dst != nil { + // dst may be nil in direct calls to wbBufFlush. 
+ old = *dst + } + wbBufFlush1Debug(old, wbBuf.buf[0], wbBuf.buf[1], &wbBuf.buf[0], wbBuf.next) + } else { + wbBufFlush1(getg().m.p.ptr()) + } }) } +// wbBufFlush1Debug is a temporary function for debugging issue +// #27993. It exists solely to add some context to the traceback. +// +//go:nowritebarrierrec +//go:systemstack +//go:noinline +func wbBufFlush1Debug(old, buf1, buf2 uintptr, start *uintptr, next uintptr) { + wbBufFlush1(getg().m.p.ptr()) +} + // wbBufFlush1 flushes p's write barrier buffer to the GC work queue. // // This must not have write barriers because it is part of the write @@ -212,14 +246,16 @@ func wbBufFlush1(_p_ *p) { n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0]) ptrs := _p_.wbBuf.buf[:n] - // Reset the buffer. - _p_.wbBuf.reset() + // Poison the buffer to make extra sure nothing is enqueued + // while we're processing the buffer. + _p_.wbBuf.next = 0 if useCheckmark { // Slow path for checkmark mode. for _, ptr := range ptrs { shade(ptr) } + _p_.wbBuf.reset() return } @@ -270,9 +306,6 @@ func wbBufFlush1(_p_ *p) { // Enqueue the greyed objects. gcw.putBatch(ptrs[:pos]) - if gcphase == _GCmarktermination || gcBlackenPromptly { - // Ps aren't allowed to cache work during mark - // termination. - gcw.dispose() - } + + _p_.wbBuf.reset() } diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index c8fb95d3aaf5d..71ca993cc0565 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package runtime @@ -56,14 +56,15 @@ type pollDesc struct { lock mutex // protects the following fields fd uintptr closing bool - seq uintptr // protects from stale timers and ready notifications + user uint32 // user settable cookie + rseq uintptr // protects from stale read timers rg uintptr // pdReady, pdWait, G waiting for read or nil rt timer // read deadline timer (set if rt.f != nil) rd int64 // read deadline + wseq uintptr // protects from stale write timers wg uintptr // pdReady, pdWait, G waiting for write or nil wt timer // write deadline timer wd int64 // write deadline - user uint32 // user settable cookie } type pollCache struct { @@ -92,12 +93,19 @@ func netpollinited() bool { return atomic.Load(&netpollInited) != 0 } -//go:linkname poll_runtime_pollServerDescriptor internal/poll.runtime_pollServerDescriptor +//go:linkname poll_runtime_isPollServerDescriptor internal/poll.runtime_isPollServerDescriptor -// poll_runtime_pollServerDescriptor returns the descriptor being used, -// or ^uintptr(0) if the system does not use a poll descriptor. -func poll_runtime_pollServerDescriptor() uintptr { - return netpolldescriptor() +// poll_runtime_isPollServerDescriptor reports whether fd is a +// descriptor being used by netpoll. +func poll_runtime_isPollServerDescriptor(fd uintptr) bool { + fds := netpolldescriptor() + if GOOS != "aix" { + return fd == fds + } else { + // AIX have a pipe in its netpoll implementation. + // Therefore, two fd are returned by netpolldescriptor using a mask. 
+ return fd == fds&0xFFFF || fd == (fds>>16)&0xFFFF + } } //go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen @@ -112,9 +120,10 @@ func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) { } pd.fd = fd pd.closing = false - pd.seq++ + pd.rseq++ pd.rg = 0 pd.rd = 0 + pd.wseq++ pd.wg = 0 pd.wd = 0 unlock(&pd.lock) @@ -166,8 +175,8 @@ func poll_runtime_pollWait(pd *pollDesc, mode int) int { if err != 0 { return err } - // As for now only Solaris uses level-triggered IO. - if GOOS == "solaris" { + // As for now only Solaris and AIX use level-triggered IO. + if GOOS == "solaris" || GOOS == "aix" { netpollarm(pd, mode) } for !netpollblock(pd, int32(mode), false) { @@ -197,19 +206,15 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { unlock(&pd.lock) return } - pd.seq++ // invalidate current timers - // Reset current timers. - if pd.rt.f != nil { - deltimer(&pd.rt) - pd.rt.f = nil - } - if pd.wt.f != nil { - deltimer(&pd.wt) - pd.wt.f = nil - } - // Setup new timers. - if d != 0 && d <= nanotime() { - d = -1 + rd0, wd0 := pd.rd, pd.wd + combo0 := rd0 > 0 && rd0 == wd0 + if d > 0 { + d += nanotime() + if d <= 0 { + // If the user has a deadline in the future, but the delay calculation + // overflows, then set the deadline to the maximum possible value. + d = 1<<63 - 1 + } } if mode == 'r' || mode == 'r'+'w' { pd.rd = d @@ -217,39 +222,58 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { if mode == 'w' || mode == 'r'+'w' { pd.wd = d } - if pd.rd > 0 && pd.rd == pd.wd { - pd.rt.f = netpollDeadline - pd.rt.when = pd.rd - // Copy current seq into the timer arg. - // Timer func will check the seq against current descriptor seq, - // if they differ the descriptor was reused or timers were reset. 
- pd.rt.arg = pd - pd.rt.seq = pd.seq - addtimer(&pd.rt) - } else { + combo := pd.rd > 0 && pd.rd == pd.wd + rtf := netpollReadDeadline + if combo { + rtf = netpollDeadline + } + if pd.rt.f == nil { if pd.rd > 0 { - pd.rt.f = netpollReadDeadline + pd.rt.f = rtf pd.rt.when = pd.rd + // Copy current seq into the timer arg. + // Timer func will check the seq against current descriptor seq, + // if they differ the descriptor was reused or timers were reset. pd.rt.arg = pd - pd.rt.seq = pd.seq + pd.rt.seq = pd.rseq addtimer(&pd.rt) } - if pd.wd > 0 { + } else if pd.rd != rd0 || combo != combo0 { + pd.rseq++ // invalidate current timers + if pd.rd > 0 { + modtimer(&pd.rt, pd.rd, 0, rtf, pd, pd.rseq) + } else { + deltimer(&pd.rt) + pd.rt.f = nil + } + } + if pd.wt.f == nil { + if pd.wd > 0 && !combo { pd.wt.f = netpollWriteDeadline pd.wt.when = pd.wd pd.wt.arg = pd - pd.wt.seq = pd.seq + pd.wt.seq = pd.wseq addtimer(&pd.wt) } + } else if pd.wd != wd0 || combo != combo0 { + pd.wseq++ // invalidate current timers + if pd.wd > 0 && !combo { + modtimer(&pd.wt, pd.wd, 0, netpollWriteDeadline, pd, pd.wseq) + } else { + deltimer(&pd.wt) + pd.wt.f = nil + } } // If we set the new deadline in the past, unblock currently pending IO if any. 
var rg, wg *g - atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock - if pd.rd < 0 { - rg = netpollunblock(pd, 'r', false) - } - if pd.wd < 0 { - wg = netpollunblock(pd, 'w', false) + if pd.rd < 0 || pd.wd < 0 { + atomic.StorepNoWB(noescape(unsafe.Pointer(&wg)), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock + if pd.rd < 0 { + rg = netpollunblock(pd, 'r', false) + } + if pd.wd < 0 { + wg = netpollunblock(pd, 'w', false) + } } unlock(&pd.lock) if rg != nil { @@ -267,9 +291,10 @@ func poll_runtime_pollUnblock(pd *pollDesc) { throw("runtime: unblock on closing polldesc") } pd.closing = true - pd.seq++ + pd.rseq++ + pd.wseq++ var rg, wg *g - atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock + atomic.StorepNoWB(noescape(unsafe.Pointer(&rg)), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock rg = netpollunblock(pd, 'r', false) wg = netpollunblock(pd, 'w', false) if pd.rt.f != nil { @@ -289,24 +314,22 @@ func poll_runtime_pollUnblock(pd *pollDesc) { } } -// make pd ready, newly runnable goroutines (if any) are returned in rg/wg +// make pd ready, newly runnable goroutines (if any) are added to toRun. // May run during STW, so write barriers are not allowed. 
//go:nowritebarrier -func netpollready(gpp *guintptr, pd *pollDesc, mode int32) { - var rg, wg guintptr +func netpollready(toRun *gList, pd *pollDesc, mode int32) { + var rg, wg *g if mode == 'r' || mode == 'r'+'w' { - rg.set(netpollunblock(pd, 'r', true)) + rg = netpollunblock(pd, 'r', true) } if mode == 'w' || mode == 'r'+'w' { - wg.set(netpollunblock(pd, 'w', true)) + wg = netpollunblock(pd, 'w', true) } - if rg != 0 { - rg.ptr().schedlink = *gpp - *gpp = rg + if rg != nil { + toRun.push(rg) } - if wg != 0 { - wg.ptr().schedlink = *gpp - *gpp = wg + if wg != nil { + toRun.push(wg) } } @@ -406,7 +429,11 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) { lock(&pd.lock) // Seq arg is seq when the timer was set. // If it's stale, ignore the timer event. - if seq != pd.seq { + currentSeq := pd.rseq + if !read { + currentSeq = pd.wseq + } + if seq != currentSeq { // The descriptor was reused or timers were reset. unlock(&pd.lock) return @@ -417,7 +444,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) { throw("runtime: inconsistent read deadline") } pd.rd = -1 - atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock + atomic.StorepNoWB(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock rg = netpollunblock(pd, 'r', false) } var wg *g @@ -426,7 +453,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) { throw("runtime: inconsistent write deadline") } pd.wd = -1 - atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock + atomic.StorepNoWB(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock wg = netpollunblock(pd, 'w', false) } unlock(&pd.lock) diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go new file mode 100644 index 0000000000000..1e886dae949e9 
--- /dev/null +++ b/src/runtime/netpoll_aix.go @@ -0,0 +1,247 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// This is based on the former libgo/runtime/netpoll_select.c implementation +// except that it uses poll instead of select and is written in Go. +// It's also based on Solaris implementation for the arming mechanisms + +//go:cgo_import_dynamic libc_poll poll "libc.a/shr_64.o" +//go:linkname libc_poll libc_poll + +var libc_poll libFunc + +//go:nosplit +func poll(pfds *pollfd, npfds uintptr, timeout uintptr) (int32, int32) { + r, err := syscall3(&libc_poll, uintptr(unsafe.Pointer(pfds)), npfds, timeout) + return int32(r), int32(err) +} + +//go:nosplit +func fcntl(fd, cmd int32, arg uintptr) int32 { + r, _ := syscall3(&libc_fcntl, uintptr(fd), uintptr(cmd), arg) + return int32(r) +} + +// pollfd represents the poll structure for AIX operating system. +type pollfd struct { + fd int32 + events int16 + revents int16 +} + +const _POLLIN = 0x0001 +const _POLLOUT = 0x0002 +const _POLLHUP = 0x2000 +const _POLLERR = 0x4000 +const _O_NONBLOCK = 0x4 + +var ( + pfds []pollfd + pds []*pollDesc + mtxpoll mutex + mtxset mutex + rdwake int32 + wrwake int32 + pendingUpdates int32 +) + +const pollVerbose = false + +func netpollinit() { + var p [2]int32 + + // Create the pipe we use to wakeup poll. + if err := pipe(&p[0]); err < 0 { + throw("netpollinit: failed to create pipe") + } + rdwake = p[0] + wrwake = p[1] + + fl := uintptr(fcntl(rdwake, _F_GETFL, 0)) + fcntl(rdwake, _F_SETFL, fl|_O_NONBLOCK) + fcntl(rdwake, _F_SETFD, _FD_CLOEXEC) + + fl = uintptr(fcntl(wrwake, _F_GETFL, 0)) + fcntl(wrwake, _F_SETFL, fl|_O_NONBLOCK) + fcntl(wrwake, _F_SETFD, _FD_CLOEXEC) + + // Pre-allocate array of pollfd structures for poll. 
+ if pollVerbose { + println("*** allocating") + } + pfds = make([]pollfd, 1, 128) + if pollVerbose { + println("*** allocating done", &pfds[0]) + } + + // Poll the read side of the pipe. + pfds[0].fd = rdwake + pfds[0].events = _POLLIN + + pds = make([]*pollDesc, 1, 128) + pds[0] = nil +} + +func netpolldescriptor() uintptr { + // Both fd must be returned + if rdwake > 0xFFFF || wrwake > 0xFFFF { + throw("netpolldescriptor: invalid fd number") + } + return uintptr(rdwake<<16 | wrwake) +} + +// netpollwakeup writes on wrwake to wakeup poll before any changes. +func netpollwakeup() { + if pendingUpdates == 0 { + pendingUpdates = 1 + if pollVerbose { + println("*** writing 1 byte") + } + b := [1]byte{0} + write(uintptr(wrwake), unsafe.Pointer(&b[0]), 1) + } +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + if pollVerbose { + println("*** netpollopen", fd) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + pd.user = uint32(len(pfds)) + pfds = append(pfds, pollfd{fd: int32(fd)}) + pds = append(pds, pd) + unlock(&mtxset) + return 0 +} + +func netpollclose(fd uintptr) int32 { + if pollVerbose { + println("*** netpollclose", fd) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + for i := 0; i < len(pfds); i++ { + if pfds[i].fd == int32(fd) { + pfds[i] = pfds[len(pfds)-1] + pfds = pfds[:len(pfds)-1] + + pds[i] = pds[len(pds)-1] + pds[i].user = uint32(i) + pds = pds[:len(pds)-1] + break + } + } + unlock(&mtxset) + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { + if pollVerbose { + println("*** netpollarm", pd.fd, mode) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + switch mode { + case 'r': + pfds[pd.user].events |= _POLLIN + case 'w': + pfds[pd.user].events |= _POLLOUT + } + unlock(&mtxset) +} + +//go:nowritebarrierrec +func netpoll(block bool) gList { + timeout := ^uintptr(0) + if !block { + timeout = 0 + return gList{} + } + if pollVerbose { + println("*** 
netpoll", block) + } +retry: + lock(&mtxpoll) + lock(&mtxset) + pendingUpdates = 0 + unlock(&mtxpoll) + + if pollVerbose { + println("*** netpoll before poll") + } + n, e := poll(&pfds[0], uintptr(len(pfds)), timeout) + if pollVerbose { + println("*** netpoll after poll", n) + } + if n < 0 { + if e != _EINTR { + println("errno=", e, " len(pfds)=", len(pfds)) + throw("poll failed") + } + if pollVerbose { + println("*** poll failed") + } + unlock(&mtxset) + goto retry + } + // Check if some descriptors need to be changed + if n != 0 && pfds[0].revents&(_POLLIN|_POLLHUP|_POLLERR) != 0 { + var b [1]byte + for read(rdwake, unsafe.Pointer(&b[0]), 1) == 1 { + if pollVerbose { + println("*** read 1 byte from pipe") + } + } + // Do not look at the other fds in this case as the mode may have changed + // XXX only additions of flags are made, so maybe it is ok + unlock(&mtxset) + goto retry + } + var toRun gList + for i := 0; i < len(pfds) && n > 0; i++ { + pfd := &pfds[i] + + var mode int32 + if pfd.revents&(_POLLIN|_POLLHUP|_POLLERR) != 0 { + mode += 'r' + pfd.events &= ^_POLLIN + } + if pfd.revents&(_POLLOUT|_POLLHUP|_POLLERR) != 0 { + mode += 'w' + pfd.events &= ^_POLLOUT + } + if mode != 0 { + if pollVerbose { + println("*** netpollready i=", i, "revents=", pfd.revents, "events=", pfd.events, "pd=", pds[i]) + } + netpollready(&toRun, pds[i], mode) + n-- + } + } + unlock(&mtxset) + if block && toRun.empty() { + goto retry + } + if pollVerbose { + println("*** netpoll returning end") + } + return toRun +} diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go index 1908220ebbd9b..f764d6ff7c886 100644 --- a/src/runtime/netpoll_epoll.go +++ b/src/runtime/netpoll_epoll.go @@ -58,9 +58,9 @@ func netpollarm(pd *pollDesc, mode int) { // polls for ready network connections // returns list of goroutines that become runnable -func netpoll(block bool) *g { +func netpoll(block bool) gList { if epfd == -1 { - return nil + return gList{} } waitms := int32(-1) if 
!block { @@ -76,7 +76,7 @@ retry: } goto retry } - var gp guintptr + var toRun gList for i := int32(0); i < n; i++ { ev := &events[i] if ev.events == 0 { @@ -92,11 +92,11 @@ retry: if mode != 0 { pd := *(**pollDesc)(unsafe.Pointer(&ev.data)) - netpollready(&gp, pd, mode) + netpollready(&toRun, pd, mode) } } - if block && gp == 0 { + if block && toRun.empty() { goto retry } - return gp.ptr() + return toRun } diff --git a/src/runtime/netpoll_fake.go b/src/runtime/netpoll_fake.go index aab18dc8468cd..5b1a63a8787d0 100644 --- a/src/runtime/netpoll_fake.go +++ b/src/runtime/netpoll_fake.go @@ -27,6 +27,6 @@ func netpollclose(fd uintptr) int32 { func netpollarm(pd *pollDesc, mode int) { } -func netpoll(block bool) *g { - return nil +func netpoll(block bool) gList { + return gList{} } diff --git a/src/runtime/netpoll_kqueue.go b/src/runtime/netpoll_kqueue.go index 0f73bf385e48d..fdaa1cd80debd 100644 --- a/src/runtime/netpoll_kqueue.go +++ b/src/runtime/netpoll_kqueue.go @@ -59,9 +59,9 @@ func netpollarm(pd *pollDesc, mode int) { // Polls for ready network connections. // Returns list of goroutines that become runnable. 
-func netpoll(block bool) *g { +func netpoll(block bool) gList { if kq == -1 { - return nil + return gList{} } var tp *timespec var ts timespec @@ -78,7 +78,7 @@ retry: } goto retry } - var gp guintptr + var toRun gList for i := 0; i < int(n); i++ { ev := &events[i] var mode int32 @@ -102,11 +102,11 @@ retry: mode += 'w' } if mode != 0 { - netpollready(&gp, (*pollDesc)(unsafe.Pointer(ev.udata)), mode) + netpollready(&toRun, (*pollDesc)(unsafe.Pointer(ev.udata)), mode) } } - if block && gp == 0 { + if block && toRun.empty() { goto retry } - return gp.ptr() + return toRun } diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go index 853e5f63e3bed..6bd484afaaec8 100644 --- a/src/runtime/netpoll_solaris.go +++ b/src/runtime/netpoll_solaris.go @@ -180,9 +180,9 @@ func netpollarm(pd *pollDesc, mode int) { // polls for ready network connections // returns list of goroutines that become runnable -func netpoll(block bool) *g { +func netpoll(block bool) gList { if portfd == -1 { - return nil + return gList{} } var wait *timespec @@ -202,7 +202,7 @@ retry: goto retry } - var gp guintptr + var toRun gList for i := 0; i < int(n); i++ { ev := &events[i] @@ -233,12 +233,12 @@ retry: } if mode != 0 { - netpollready(&gp, pd, mode) + netpollready(&toRun, pd, mode) } } - if block && gp == 0 { + if block && toRun.empty() { goto retry } - return gp.ptr() + return toRun } diff --git a/src/runtime/netpoll_stub.go b/src/runtime/netpoll_stub.go index a4d6b4608ac63..f585333579dab 100644 --- a/src/runtime/netpoll_stub.go +++ b/src/runtime/netpoll_stub.go @@ -10,10 +10,10 @@ var netpollWaiters uint32 // Polls for ready network connections. // Returns list of goroutines that become runnable. -func netpoll(block bool) (gp *g) { +func netpoll(block bool) gList { // Implementation for platforms that do not support // integrated network poller. 
- return + return gList{} } func netpollinited() bool { diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go index 134071f5e3ca1..07ef15ce2f389 100644 --- a/src/runtime/netpoll_windows.go +++ b/src/runtime/netpoll_windows.go @@ -63,17 +63,17 @@ func netpollarm(pd *pollDesc, mode int) { // Polls for completed network IO. // Returns list of goroutines that become runnable. -func netpoll(block bool) *g { +func netpoll(block bool) gList { var entries [64]overlappedEntry var wait, qty, key, flags, n, i uint32 var errno int32 var op *net_op - var gp guintptr + var toRun gList mp := getg().m if iocphandle == _INVALID_HANDLE_VALUE { - return nil + return gList{} } wait = 0 if block { @@ -92,7 +92,7 @@ retry: mp.blocked = false errno = int32(getlasterror()) if !block && errno == _WAIT_TIMEOUT { - return nil + return gList{} } println("runtime: GetQueuedCompletionStatusEx failed (errno=", errno, ")") throw("runtime: netpoll failed") @@ -105,7 +105,7 @@ retry: if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 { errno = int32(getlasterror()) } - handlecompletion(&gp, op, errno, qty) + handlecompletion(&toRun, op, errno, qty) } } else { op = nil @@ -118,7 +118,7 @@ retry: mp.blocked = false errno = int32(getlasterror()) if !block && errno == _WAIT_TIMEOUT { - return nil + return gList{} } if op == nil { println("runtime: GetQueuedCompletionStatus failed (errno=", errno, ")") @@ -127,15 +127,15 @@ retry: // dequeued failed IO packet, so report that } mp.blocked = false - handlecompletion(&gp, op, errno, qty) + handlecompletion(&toRun, op, errno, qty) } - if block && gp == 0 { + if block && toRun.empty() { goto retry } - return gp.ptr() + return toRun } -func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) { +func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) { if op == nil { println("runtime: 
GetQueuedCompletionStatus returned op == nil") throw("runtime: netpoll failed") @@ -147,5 +147,5 @@ func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) { } op.errno = errno op.qty = qty - netpollready(gpp, op.pd, mode) + netpollready(toRun, op.pd, mode) } diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go new file mode 100644 index 0000000000000..d0349191c6d30 --- /dev/null +++ b/src/runtime/os2_aix.go @@ -0,0 +1,621 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains main runtime AIX syscalls. +// Pollset syscalls are in netpoll_aix.go. +// The implementation is based on Solaris and Windows. +// Each syscall is made by calling its libc symbol using asmcgocall and asmsyscall6 +// asssembly functions. + +package runtime + +import ( + "unsafe" +) + +// Symbols imported for __start function. + +//go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o" +//go:linkname libc___n_pthreads libc___n_pthread +//go:linkname libc___mod_init libc___mod_init + +var ( + libc___n_pthread, + libc___mod_init libFunc +) + +// Syscalls + +//go:cgo_import_dynamic libc__Errno _Errno "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_close close "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_exit exit "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_kill kill "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_malloc malloc "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mmap mmap "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o" 
+//go:cgo_import_dynamic libc_open open "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_raise raise "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_read read "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sem_init sem_init "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sem_post sem_post "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sem_timedwait sem_timedwait "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sem_wait sem_wait "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setitimer setitimer "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sigaction sigaction "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sysconf sysconf "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_usleep usleep "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_write write "libc.a/shr_64.o" + +//go:cgo_import_dynamic libpthread___pth_init __pth_init "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_destroy pthread_attr_destroy "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_init pthread_attr_init "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_getstacksize pthread_attr_getstacksize "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_setstacksize pthread_attr_setstacksize "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_setdetachstate pthread_attr_setdetachstate "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_attr_setstackaddr pthread_attr_setstackaddr "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_create pthread_create "libpthread.a/shr_xpg5_64.o" +//go:cgo_import_dynamic libpthread_sigthreadmask sigthreadmask "libpthread.a/shr_xpg5_64.o" + +//go:linkname libc__Errno libc__Errno +//go:linkname libc_clock_gettime libc_clock_gettime +//go:linkname libc_close 
libc_close +//go:linkname libc_exit libc_exit +//go:linkname libc_getpid libc_getpid +//go:linkname libc_getsystemcfg libc_getsystemcfg +//go:linkname libc_kill libc_kill +//go:linkname libc_madvise libc_madvise +//go:linkname libc_malloc libc_malloc +//go:linkname libc_mmap libc_mmap +//go:linkname libc_munmap libc_munmap +//go:linkname libc_open libc_open +//go:linkname libc_pipe libc_pipe +//go:linkname libc_raise libc_raise +//go:linkname libc_read libc_read +//go:linkname libc_sched_yield libc_sched_yield +//go:linkname libc_sem_init libc_sem_init +//go:linkname libc_sem_post libc_sem_post +//go:linkname libc_sem_timedwait libc_sem_timedwait +//go:linkname libc_sem_wait libc_sem_wait +//go:linkname libc_setitimer libc_setitimer +//go:linkname libc_sigaction libc_sigaction +//go:linkname libc_sigaltstack libc_sigaltstack +//go:linkname libc_sysconf libc_sysconf +//go:linkname libc_usleep libc_usleep +//go:linkname libc_write libc_write + +//go:linkname libpthread___pth_init libpthread___pth_init +//go:linkname libpthread_attr_destroy libpthread_attr_destroy +//go:linkname libpthread_attr_init libpthread_attr_init +//go:linkname libpthread_attr_getstacksize libpthread_attr_getstacksize +//go:linkname libpthread_attr_setstacksize libpthread_attr_setstacksize +//go:linkname libpthread_attr_setdetachstate libpthread_attr_setdetachstate +//go:linkname libpthread_attr_setstackaddr libpthread_attr_setstackaddr +//go:linkname libpthread_create libpthread_create +//go:linkname libpthread_sigthreadmask libpthread_sigthreadmask + +var ( + //libc + libc__Errno, + libc_clock_gettime, + libc_close, + libc_exit, + libc_getpid, + libc_getsystemcfg, + libc_kill, + libc_madvise, + libc_malloc, + libc_mmap, + libc_munmap, + libc_open, + libc_pipe, + libc_raise, + libc_read, + libc_sched_yield, + libc_sem_init, + libc_sem_post, + libc_sem_timedwait, + libc_sem_wait, + libc_setitimer, + libc_sigaction, + libc_sigaltstack, + libc_sysconf, + libc_usleep, + libc_write, + //libpthread 
+ libpthread___pth_init, + libpthread_attr_destroy, + libpthread_attr_init, + libpthread_attr_getstacksize, + libpthread_attr_setstacksize, + libpthread_attr_setdetachstate, + libpthread_attr_setstackaddr, + libpthread_create, + libpthread_sigthreadmask libFunc +) + +type libFunc uintptr + +// asmsyscall6 calls the libc symbol using a C convention. +// It's defined in sys_aix_ppc64.go. +var asmsyscall6 libFunc + +//go:nowritebarrier +//go:nosplit +func syscall0(fn *libFunc) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 0 + c.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall1(fn *libFunc, a0 uintptr) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 1 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall2(fn *libFunc, a0, a1 uintptr) (r, err uintptr) { + gp := 
getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 2 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall3(fn *libFunc, a0, a1, a2 uintptr) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 3 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall4(fn *libFunc, a0, a1, a2, a3 uintptr) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 4 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + 
if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall5(fn *libFunc, a0, a1, a2, a3, a4 uintptr) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 5 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall6(fn *libFunc, a0, a1, a2, a3, a4, a5 uintptr) (r, err uintptr) { + gp := getg() + var mp *m + if gp != nil { + mp = gp.m + } + if mp != nil && mp.libcallsp == 0 { + mp.libcallg.set(gp) + mp.libcallpc = getcallerpc() + // sp must be the last, because once async cpu profiler finds + // all three values to be non-zero, it will use them + mp.libcallsp = getcallersp() + } else { + mp = nil // See comment in sys_darwin.go:libcCall + } + + c := &gp.m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 6 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + if mp != nil { + mp.libcallsp = 0 + } + + return c.r1, c.err +} + +//go:nosplit +func exit(code int32) { + syscall1(&libc_exit, uintptr(code)) +} + +//go:nosplit +func write(fd uintptr, p unsafe.Pointer, n int32) int32 { + r, _ := syscall3(&libc_write, uintptr(fd), uintptr(p), uintptr(n)) + return int32(r) + +} + +//go:nosplit +func read(fd int32, p unsafe.Pointer, n int32) int32 { + r, _ := syscall3(&libc_read, uintptr(fd), uintptr(p), uintptr(n)) + return int32(r) +} + +//go:nosplit +func open(name *byte, mode, perm int32) 
int32 { + r, _ := syscall3(&libc_open, uintptr(unsafe.Pointer(name)), uintptr(mode), uintptr(perm)) + return int32(r) +} + +//go:nosplit +func closefd(fd int32) int32 { + r, _ := syscall1(&libc_close, uintptr(fd)) + return int32(r) +} + +//go:nosplit +func pipe(fd *int32) int32 { + r, _ := syscall1(&libc_pipe, uintptr(unsafe.Pointer(fd))) + return int32(r) +} + +// mmap calls the mmap system call. +// We only pass the lower 32 bits of file offset to the +// assembly routine; the higher bits (if required), should be provided +// by the assembly routine as 0. +// The err result is an OS error code such as ENOMEM. +//go:nosplit +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) { + r, err0 := syscall6(&libc_mmap, uintptr(addr), uintptr(n), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(off)) + return unsafe.Pointer(r), int(err0) +} + +//go:nosplit +func munmap(addr unsafe.Pointer, n uintptr) { + r, err := syscall2(&libc_munmap, uintptr(addr), uintptr(n)) + if int32(r) == -1 { + println("syscall munmap failed: ", hex(err)) + throw("syscall munmap") + } +} + +//go:nosplit +func madvise(addr unsafe.Pointer, n uintptr, flags int32) { + r, err := syscall3(&libc_madvise, uintptr(addr), uintptr(n), uintptr(flags)) + if int32(r) == -1 { + println("syscall madvise failed: ", hex(err)) + throw("syscall madvise") + } +} + +//go:nosplit +func sigaction(sig uintptr, new, old *sigactiont) { + r, err := syscall3(&libc_sigaction, sig, uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("Sigaction failed for sig: ", sig, " with error:", hex(err)) + throw("syscall sigaction") + } +} + +//go:nosplit +func sigaltstack(new, old *stackt) { + r, err := syscall2(&libc_sigaltstack, uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("syscall sigaltstack failed: ", hex(err)) + throw("syscall sigaltstack") + } +} + +//go:nosplit +func getsystemcfg(label 
uint) uintptr { + r, _ := syscall1(&libc_getsystemcfg, uintptr(label)) + return r +} + +//go:nosplit +func usleep(us uint32) { + r, err := syscall1(&libc_usleep, uintptr(us)) + if int32(r) == -1 { + println("syscall usleep failed: ", hex(err)) + throw("syscall usleep") + } +} + +//go:nosplit +func clock_gettime(clockid int32, tp *timespec) int32 { + r, _ := syscall2(&libc_clock_gettime, uintptr(clockid), uintptr(unsafe.Pointer(tp))) + return int32(r) +} + +//go:nosplit +func setitimer(mode int32, new, old *itimerval) { + r, err := syscall3(&libc_setitimer, uintptr(mode), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("syscall setitimer failed: ", hex(err)) + throw("syscall setitimer") + } +} + +//go:nosplit +func malloc(size uintptr) unsafe.Pointer { + r, _ := syscall1(&libc_malloc, size) + return unsafe.Pointer(r) +} + +//go:nosplit +func sem_init(sem *semt, pshared int32, value uint32) int32 { + r, _ := syscall3(&libc_sem_init, uintptr(unsafe.Pointer(sem)), uintptr(pshared), uintptr(value)) + return int32(r) +} + +//go:nosplit +func sem_wait(sem *semt) (int32, int32) { + r, err := syscall1(&libc_sem_wait, uintptr(unsafe.Pointer(sem))) + return int32(r), int32(err) +} + +//go:nosplit +func sem_post(sem *semt) int32 { + r, _ := syscall1(&libc_sem_post, uintptr(unsafe.Pointer(sem))) + return int32(r) +} + +//go:nosplit +func sem_timedwait(sem *semt, timeout *timespec) (int32, int32) { + r, err := syscall2(&libc_sem_timedwait, uintptr(unsafe.Pointer(sem)), uintptr(unsafe.Pointer(timeout))) + return int32(r), int32(err) +} + +//go:nosplit +func raise(sig uint32) { + r, err := syscall1(&libc_raise, uintptr(sig)) + if int32(r) == -1 { + println("syscall raise failed: ", hex(err)) + throw("syscall raise") + } +} + +//go:nosplit +func raiseproc(sig uint32) { + pid, err := syscall0(&libc_getpid) + if int32(pid) == -1 { + println("syscall getpid failed: ", hex(err)) + throw("syscall raiseproc") + } + + syscall2(&libc_kill, pid, 
uintptr(sig)) +} + +func osyield1() + +//go:nosplit +func osyield() { + _g_ := getg() + + // Check the validity of m because we might be called in cgo callback + // path early enough where there isn't a m available yet. + if _g_ != nil && _g_.m != nil { + r, err := syscall0(&libc_sched_yield) + if int32(r) == -1 { + println("syscall osyield failed: ", hex(err)) + throw("syscall osyield") + } + return + } + osyield1() +} + +//go:nosplit +func sysconf(name int32) uintptr { + r, _ := syscall1(&libc_sysconf, uintptr(name)) + if int32(r) == -1 { + throw("syscall sysconf") + } + return r + +} + +// pthread functions returns its error code in the main return value +// Therefore, err returns by syscall means nothing and must not be used + +//go:nosplit +func pthread_attr_destroy(attr *pthread_attr) int32 { + r, _ := syscall1(&libpthread_attr_destroy, uintptr(unsafe.Pointer(attr))) + return int32(r) +} + +//go:nosplit +func pthread_attr_init(attr *pthread_attr) int32 { + r, _ := syscall1(&libpthread_attr_init, uintptr(unsafe.Pointer(attr))) + return int32(r) +} + +//go:nosplit +func pthread_attr_setdetachstate(attr *pthread_attr, state int32) int32 { + r, _ := syscall2(&libpthread_attr_setdetachstate, uintptr(unsafe.Pointer(attr)), uintptr(state)) + return int32(r) +} + +//go:nosplit +func pthread_attr_setstackaddr(attr *pthread_attr, stk unsafe.Pointer) int32 { + r, _ := syscall2(&libpthread_attr_setstackaddr, uintptr(unsafe.Pointer(attr)), uintptr(stk)) + return int32(r) +} + +//go:nosplit +func pthread_attr_getstacksize(attr *pthread_attr, size *uint64) int32 { + r, _ := syscall2(&libpthread_attr_getstacksize, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(size))) + return int32(r) +} + +//go:nosplit +func pthread_attr_setstacksize(attr *pthread_attr, size uint64) int32 { + r, _ := syscall2(&libpthread_attr_setstacksize, uintptr(unsafe.Pointer(attr)), uintptr(size)) + return int32(r) +} + +//go:nosplit +func pthread_create(tid *pthread, attr *pthread_attr, fn 
*funcDescriptor, arg unsafe.Pointer) int32 { + r, _ := syscall4(&libpthread_create, uintptr(unsafe.Pointer(tid)), uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(fn)), uintptr(arg)) + return int32(r) +} + +// On multi-thread program, sigprocmask must not be called. +// It's replaced by sigthreadmask. +//go:nosplit +func sigprocmask(how int32, new, old *sigset) { + r, err := syscall3(&libpthread_sigthreadmask, uintptr(how), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) != 0 { + println("syscall sigthreadmask failed: ", hex(err)) + throw("syscall sigthreadmask") + } +} diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go index 0e3a4c8024e19..15ca3359d2bf7 100644 --- a/src/runtime/os3_plan9.go +++ b/src/runtime/os3_plan9.go @@ -44,7 +44,7 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int { // level by the program but will otherwise be ignored. flags = _SigNotify for sig, t = range sigtable { - if hasprefix(notestr, t.name) { + if hasPrefix(notestr, t.name) { flags = t.flags break } diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go new file mode 100644 index 0000000000000..141ce3bb11956 --- /dev/null +++ b/src/runtime/os_aix.go @@ -0,0 +1,283 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix + +package runtime + +import ( + "internal/cpu" + "unsafe" +) + +const ( + threadStackSize = 0x100000 // size of a thread stack allocated by OS +) + +// funcDescriptor is a structure representing a function descriptor +// A variable with this type is always created in assembler +type funcDescriptor struct { + fn uintptr + toc uintptr + envPointer uintptr // unused in Golang +} + +type mOS struct { + waitsema uintptr // semaphore for parking on locks + perrno uintptr // pointer to tls errno +} + +//go:nosplit +func semacreate(mp *m) { + if mp.waitsema != 0 { + return + } + + var sem *semt + + // Call libc's malloc rather than malloc. This will + // allocate space on the C heap. We can't call mallocgc + // here because it could cause a deadlock. + sem = (*semt)(malloc(unsafe.Sizeof(*sem))) + if sem_init(sem, 0, 0) != 0 { + throw("sem_init") + } + mp.waitsema = uintptr(unsafe.Pointer(sem)) +} + +//go:nosplit +func semasleep(ns int64) int32 { + _m_ := getg().m + if ns >= 0 { + var ts timespec + + if clock_gettime(_CLOCK_REALTIME, &ts) != 0 { + throw("clock_gettime") + } + ts.tv_sec += ns / 1e9 + ts.tv_nsec += ns % 1e9 + if ts.tv_nsec >= 1e9 { + ts.tv_sec++ + ts.tv_nsec -= 1e9 + } + + if r, err := sem_timedwait((*semt)(unsafe.Pointer(_m_.waitsema)), &ts); r != 0 { + if err == _ETIMEDOUT || err == _EAGAIN || err == _EINTR { + return -1 + } + println("sem_timedwait err ", err, " ts.tv_sec ", ts.tv_sec, " ts.tv_nsec ", ts.tv_nsec, " ns ", ns, " id ", _m_.id) + throw("sem_timedwait") + } + return 0 + } + for { + r1, err := sem_wait((*semt)(unsafe.Pointer(_m_.waitsema))) + if r1 == 0 { + break + } + if err == _EINTR { + continue + } + throw("sem_wait") + } + return 0 +} + +//go:nosplit +func semawakeup(mp *m) { + if sem_post((*semt)(unsafe.Pointer(mp.waitsema))) != 0 { + throw("sem_post") + } +} + +func osinit() { + ncpu = int32(sysconf(__SC_NPROCESSORS_ONLN)) + physPageSize = sysconf(__SC_PAGE_SIZE) + setupSystemConf() +} + +// Ms related functions +func 
mpreinit(mp *m) { + mp.gsignal = malg(32 * 1024) // AIX wants >= 8K + mp.gsignal.m = mp +} + +// errno address must be retrieved by calling _Errno libc function. +// This will return a pointer to errno +func miniterrno() { + mp := getg().m + r, _ := syscall0(&libc__Errno) + mp.perrno = r + +} + +func minit() { + miniterrno() + minitSignals() +} + +func unminit() { + unminitSignals() +} + +// tstart is a function descriptor to _tstart defined in assembly. +var tstart funcDescriptor + +func newosproc(mp *m) { + var ( + attr pthread_attr + oset sigset + tid pthread + ) + + if pthread_attr_init(&attr) != 0 { + throw("pthread_attr_init") + } + + if pthread_attr_setstacksize(&attr, threadStackSize) != 0 { + throw("pthread_attr_getstacksize") + } + + if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 { + throw("pthread_attr_setdetachstate") + } + + // Disable signals during create, so that the new thread starts + // with signals disabled. It will enable them in minit. + sigprocmask(_SIG_SETMASK, &sigset_all, &oset) + var ret int32 + for tries := 0; tries < 20; tries++ { + // pthread_create can fail with EAGAIN for no reasons + // but it will be ok if it retries. + ret = pthread_create(&tid, &attr, &tstart, unsafe.Pointer(mp)) + if ret != _EAGAIN { + break + } + usleep(uint32(tries+1) * 1000) // Milliseconds. + } + sigprocmask(_SIG_SETMASK, &oset, nil) + if ret != 0 { + print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n") + if ret == _EAGAIN { + println("runtime: may need to increase max user processes (ulimit -u)") + } + throw("newosproc") + } + +} + +func exitThread(wait *uint32) { + // We should never reach exitThread on AIX because we let + // libc clean up threads. 
+ throw("exitThread") +} + +var urandom_dev = []byte("/dev/urandom\x00") + +//go:nosplit +func getRandomData(r []byte) { + fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) + n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) + closefd(fd) + extendRandom(r, int(n)) +} + +func goenvs() { + goenvs_unix() +} + +/* SIGNAL */ + +const ( + _NSIG = 256 +) + +// sigtramp is a function descriptor to _sigtramp defined in assembly +var sigtramp funcDescriptor + +//go:nosplit +//go:nowritebarrierrec +func setsig(i uint32, fn uintptr) { + var sa sigactiont + sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART + sa.sa_mask = sigset_all + if fn == funcPC(sighandler) { + fn = uintptr(unsafe.Pointer(&sigtramp)) + } + sa.sa_handler = fn + sigaction(uintptr(i), &sa, nil) + +} + +//go:nosplit +//go:nowritebarrierrec +func setsigstack(i uint32) { + throw("Not yet implemented\n") +} + +//go:nosplit +//go:nowritebarrierrec +func getsig(i uint32) uintptr { + var sa sigactiont + sigaction(uintptr(i), nil, &sa) + return sa.sa_handler +} + +// setSignaltstackSP sets the ss_sp field of a stackt. 
+//go:nosplit +func setSignalstackSP(s *stackt, sp uintptr) { + *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp +} + +func (c *sigctxt) fixsigcode(sig uint32) { +} + +func sigaddset(mask *sigset, i int) { + (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63) +} + +func sigdelset(mask *sigset, i int) { + (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63) +} + +const ( + _CLOCK_REALTIME = 9 + _CLOCK_MONOTONIC = 10 +) + +//go:nosplit +func nanotime() int64 { + tp := ×pec{} + if clock_gettime(_CLOCK_REALTIME, tp) != 0 { + throw("syscall clock_gettime failed") + } + return tp.tv_sec*1000000000 + tp.tv_nsec +} + +func walltime() (sec int64, nsec int32) { + ts := ×pec{} + if clock_gettime(_CLOCK_REALTIME, ts) != 0 { + throw("syscall clock_gettime failed") + } + return ts.tv_sec, int32(ts.tv_nsec) +} + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +// setupSystemConf retrieves information about the CPU and updates +// cpu.HWCap variables. +func setupSystemConf() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + cpu.HWCap2 |= cpu.PPC_FEATURE2_ARCH_2_07 + } + if impl&_IMPL_POWER9 != 0 { + cpu.HWCap2 |= cpu.PPC_FEATURE2_ARCH_3_00 + } +} diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index d2144edf2ebdf..26b02820cd32c 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -34,6 +34,10 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { + var start int64 + if ns >= 0 { + start = nanotime() + } mp := getg().m pthread_mutex_lock(&mp.mutex) for { @@ -43,8 +47,13 @@ func semasleep(ns int64) int32 { return 0 } if ns >= 0 { + spent := nanotime() - start + if spent >= ns { + pthread_mutex_unlock(&mp.mutex) + return -1 + } var t timespec - t.set_nsec(ns) + t.set_nsec(ns - spent) err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t) if err == _ETIMEDOUT { pthread_mutex_unlock(&mp.mutex) diff --git a/src/runtime/os_darwin_arm.go 
b/src/runtime/os_darwin_arm.go index 8eb5655969c71..ee1bd174f1be6 100644 --- a/src/runtime/os_darwin_arm.go +++ b/src/runtime/os_darwin_arm.go @@ -4,8 +4,6 @@ package runtime -var hardDiv bool // TODO: set if a hardware divider is available - func checkgoarm() { // TODO(minux): FP checks like in os_linux_arm.go. diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go index 631dc20ab46ac..08f7b0ecf047f 100644 --- a/src/runtime/os_freebsd.go +++ b/src/runtime/os_freebsd.go @@ -389,6 +389,7 @@ const ( _AT_PAGESZ = 6 // Page size in bytes _AT_TIMEKEEP = 22 // Pointer to timehands. _AT_HWCAP = 25 // CPU feature flags + _AT_HWCAP2 = 26 // CPU feature flags 2 ) func sysauxv(auxv []uintptr) { diff --git a/src/runtime/os_freebsd_arm.go b/src/runtime/os_freebsd_arm.go index d2dc26f0c4f40..eb4de9bc2123d 100644 --- a/src/runtime/os_freebsd_arm.go +++ b/src/runtime/os_freebsd_arm.go @@ -4,22 +4,29 @@ package runtime +import "internal/cpu" + const ( _HWCAP_VFP = 1 << 6 _HWCAP_VFPv3 = 1 << 13 - _HWCAP_IDIVA = 1 << 17 ) -var hwcap = ^uint32(0) // set by archauxv -var hardDiv bool // set if a hardware divider is available +// AT_HWCAP is not available on FreeBSD-11.1-RELEASE or earlier. +// Default to mandatory VFP hardware support for arm being available. +// If AT_HWCAP is available goarmHWCap will be updated in archauxv. +// TODO(moehrmann) remove once all go supported FreeBSD versions support _AT_HWCAP. +var goarmHWCap uint = (_HWCAP_VFP | _HWCAP_VFPv3) func checkgoarm() { - if goarm > 5 && hwcap&_HWCAP_VFP == 0 { + // Update cpu.HWCap to match goarmHWCap in case they were not updated in archauxv. + cpu.HWCap = goarmHWCap + + if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 { print("runtime: this CPU has no floating point hardware, so it cannot run\n") print("this GOARM=", goarm, " binary. 
Recompile using GOARM=5.\n") exit(1) } - if goarm > 6 && hwcap&_HWCAP_VFPv3 == 0 { + if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 { print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n") print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n") exit(1) @@ -35,9 +42,11 @@ func checkgoarm() { func archauxv(tag, val uintptr) { switch tag { - case _AT_HWCAP: // CPU capability bit flags - hwcap = uint32(val) - hardDiv = (hwcap & _HWCAP_IDIVA) != 0 + case _AT_HWCAP: + cpu.HWCap = uint(val) + goarmHWCap = cpu.HWCap + case _AT_HWCAP2: + cpu.HWCap2 = uint(val) } } diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go index 14f1cfeaefb5d..207b0e4d4d125 100644 --- a/src/runtime/os_linux_arm.go +++ b/src/runtime/os_linux_arm.go @@ -4,20 +4,14 @@ package runtime -import "unsafe" +import "internal/cpu" const ( - _AT_PLATFORM = 15 // introduced in at least 2.6.11 - _HWCAP_VFP = 1 << 6 // introduced in at least 2.6.11 _HWCAP_VFPv3 = 1 << 13 // introduced in 2.6.30 - _HWCAP_IDIVA = 1 << 17 ) var randomNumber uint32 -var armArch uint8 = 6 // we default to ARMv6 -var hwcap uint32 // set by archauxv -var hardDiv bool // set if a hardware divider is available func checkgoarm() { // On Android, /proc/self/auxv might be unreadable and hwcap won't @@ -26,12 +20,12 @@ func checkgoarm() { if GOOS == "android" { return } - if goarm > 5 && hwcap&_HWCAP_VFP == 0 { + if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 { print("runtime: this CPU has no floating point hardware, so it cannot run\n") print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n") exit(1) } - if goarm > 6 && hwcap&_HWCAP_VFPv3 == 0 { + if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 { print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n") print("this GOARM=", goarm, " binary. 
Recompile using GOARM=5 or GOARM=6.\n") exit(1) @@ -47,15 +41,10 @@ func archauxv(tag, val uintptr) { randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 | uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24 - case _AT_PLATFORM: // v5l, v6l, v7l - t := *(*uint8)(unsafe.Pointer(val + 1)) - if '5' <= t && t <= '7' { - armArch = t - '0' - } - - case _AT_HWCAP: // CPU capability bit flags - hwcap = uint32(val) - hardDiv = (hwcap & _HWCAP_IDIVA) != 0 + case _AT_HWCAP: + cpu.HWCap = uint(val) + case _AT_HWCAP2: + cpu.HWCap2 = uint(val) } } diff --git a/src/runtime/os_linux_arm64.go b/src/runtime/os_linux_arm64.go index 28a0319f10736..2d6f68bdd9e50 100644 --- a/src/runtime/os_linux_arm64.go +++ b/src/runtime/os_linux_arm64.go @@ -6,20 +6,10 @@ package runtime -// For go:linkname -import _ "unsafe" +import "internal/cpu" var randomNumber uint32 -// arm64 doesn't have a 'cpuid' instruction equivalent and relies on -// HWCAP/HWCAP2 bits for hardware capabilities. - -//go:linkname cpu_hwcap internal/cpu.hwcap -var cpu_hwcap uint - -//go:linkname cpu_hwcap2 internal/cpu.hwcap2 -var cpu_hwcap2 uint - func archauxv(tag, val uintptr) { switch tag { case _AT_RANDOM: @@ -28,10 +18,21 @@ func archauxv(tag, val uintptr) { // it as a byte array. randomNumber = uint32(startupRandomData[4]) | uint32(startupRandomData[5])<<8 | uint32(startupRandomData[6])<<16 | uint32(startupRandomData[7])<<24 + case _AT_HWCAP: - cpu_hwcap = uint(val) + // arm64 doesn't have a 'cpuid' instruction equivalent and relies on + // HWCAP/HWCAP2 bits for hardware capabilities. + hwcap := uint(val) + if GOOS == "android" { + // The Samsung S9+ kernel reports support for atomics, but not all cores + // actually support them, resulting in SIGILL. See issue #28431. + // TODO(elias.naur): Only disable the optimization on bad chipsets. 
+ const hwcap_ATOMICS = 1 << 8 + hwcap &= ^uint(hwcap_ATOMICS) + } + cpu.HWCap = hwcap case _AT_HWCAP2: - cpu_hwcap2 = uint(val) + cpu.HWCap2 = uint(val) } } diff --git a/src/runtime/os_linux_novdso.go b/src/runtime/os_linux_novdso.go index ee4a7a95c2aff..e54c1c4dc138f 100644 --- a/src/runtime/os_linux_novdso.go +++ b/src/runtime/os_linux_novdso.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build linux -// +build !386,!amd64,!arm,!arm64 +// +build !386,!amd64,!arm,!arm64,!ppc64,!ppc64le package runtime diff --git a/src/runtime/os_linux_ppc64x.go b/src/runtime/os_linux_ppc64x.go index 2c67864a96df8..cc79cc4a66c31 100644 --- a/src/runtime/os_linux_ppc64x.go +++ b/src/runtime/os_linux_ppc64x.go @@ -7,23 +7,16 @@ package runtime -// For go:linkname -import _ "unsafe" - -// ppc64x doesn't have a 'cpuid' instruction equivalent and relies on -// HWCAP/HWCAP2 bits for hardware capabilities. - -//go:linkname cpu_hwcap internal/cpu.hwcap -var cpu_hwcap uint - -//go:linkname cpu_hwcap2 internal/cpu.hwcap2 -var cpu_hwcap2 uint +import "internal/cpu" func archauxv(tag, val uintptr) { switch tag { case _AT_HWCAP: - cpu_hwcap = uint(val) + // ppc64x doesn't have a 'cpuid' instruction + // equivalent and relies on HWCAP/HWCAP2 bits for + // hardware capabilities. 
+ cpu.HWCap = uint(val) case _AT_HWCAP2: - cpu_hwcap2 = uint(val) + cpu.HWCap2 = uint(val) } } diff --git a/src/runtime/os_nacl.go b/src/runtime/os_nacl.go index 23ab03b953e96..155b763c3d7fe 100644 --- a/src/runtime/os_nacl.go +++ b/src/runtime/os_nacl.go @@ -197,23 +197,23 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { var ret int32 - systemstack(func() { _g_ := getg() if nacl_mutex_lock(_g_.m.waitsemalock) < 0 { throw("semasleep") } - + var ts timespec + if ns >= 0 { + end := ns + nanotime() + ts.tv_sec = end / 1e9 + ts.tv_nsec = int32(end % 1e9) + } for _g_.m.waitsemacount == 0 { if ns < 0 { if nacl_cond_wait(_g_.m.waitsema, _g_.m.waitsemalock) < 0 { throw("semasleep") } } else { - var ts timespec - end := ns + nanotime() - ts.tv_sec = end / 1e9 - ts.tv_nsec = int32(end % 1e9) r := nacl_cond_timed_wait_abs(_g_.m.waitsema, _g_.m.waitsemalock, &ts) if r == -_ETIMEDOUT { nacl_mutex_unlock(_g_.m.waitsemalock) @@ -317,3 +317,12 @@ int8 nacl_irt_thread_v0_1_str[] = "nacl-irt-thread-0.1"; void *nacl_irt_thread_v0_1[3]; // thread_create, thread_exit, thread_nice int32 nacl_irt_thread_v0_1_size = sizeof(nacl_irt_thread_v0_1); */ + +// The following functions are implemented in runtime assembly. +// Provide a Go declaration to go with its assembly definitions. + +//go:linkname syscall_naclWrite syscall.naclWrite +func syscall_naclWrite(fd int, b []byte) int + +//go:linkname syscall_now syscall.now +func syscall_now() (sec int64, nsec int32) diff --git a/src/runtime/os_nacl_arm.go b/src/runtime/os_nacl_arm.go index c64ebf31d3562..8669ee75b46c9 100644 --- a/src/runtime/os_nacl_arm.go +++ b/src/runtime/os_nacl_arm.go @@ -4,8 +4,6 @@ package runtime -var hardDiv bool // TODO: set if a hardware divider is available - func checkgoarm() { // TODO(minux): FP checks like in os_linux_arm.go. 
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index a9bf407a36388..7deab3ed03569 100644 --- a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -126,15 +126,9 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { _g_ := getg() - - // Compute sleep deadline. - var tsp *timespec - var ts timespec + var deadline int64 if ns >= 0 { - var nsec int32 - ts.set_sec(timediv(ns, 1000000000, &nsec)) - ts.set_nsec(nsec) - tsp = &ts + deadline = nanotime() + ns } for { @@ -147,18 +141,21 @@ func semasleep(ns int64) int32 { } // Sleep until unparked by semawakeup or timeout. + var tsp *timespec + var ts timespec + if ns >= 0 { + wait := deadline - nanotime() + if wait <= 0 { + return -1 + } + var nsec int32 + ts.set_sec(timediv(wait, 1000000000, &nsec)) + ts.set_nsec(nsec) + tsp = &ts + } ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil) if ret == _ETIMEDOUT { return -1 - } else if ret == _EINTR && ns >= 0 { - // Avoid sleeping forever if we keep getting - // interrupted (for example by the profiling - // timer). It would be if tsp upon return had the - // remaining time to sleep, but this is good enough. - var nsec int32 - ns /= 2 - ts.set_sec(timediv(ns, 1000000000, &nsec)) - ts.set_nsec(nsec) } } } diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go index b02e36a73ab46..95603da64394b 100644 --- a/src/runtime/os_netbsd_arm.go +++ b/src/runtime/os_netbsd_arm.go @@ -6,8 +6,6 @@ package runtime import "unsafe" -var hardDiv bool // TODO: set if a hardware divider is available - func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr) { // Machine dependent mcontext initialisation for LWP. 
mc.__gregs[_REG_R15] = uint32(funcPC(lwp_tramp)) diff --git a/src/runtime/os_openbsd_arm.go b/src/runtime/os_openbsd_arm.go index c318578ab50e2..be2e1e9959da6 100644 --- a/src/runtime/os_openbsd_arm.go +++ b/src/runtime/os_openbsd_arm.go @@ -4,8 +4,6 @@ package runtime -var hardDiv bool // TODO: set if a hardware divider is available - func checkgoarm() { // TODO(minux): FP checks like in os_linux_arm.go. diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go index 9f41c5ac83a06..5469114a2b7db 100644 --- a/src/runtime/os_plan9.go +++ b/src/runtime/os_plan9.go @@ -112,20 +112,20 @@ func sigpanic() { } func atolwhex(p string) int64 { - for hasprefix(p, " ") || hasprefix(p, "\t") { + for hasPrefix(p, " ") || hasPrefix(p, "\t") { p = p[1:] } neg := false - if hasprefix(p, "-") || hasprefix(p, "+") { + if hasPrefix(p, "-") || hasPrefix(p, "+") { neg = p[0] == '-' p = p[1:] - for hasprefix(p, " ") || hasprefix(p, "\t") { + for hasPrefix(p, " ") || hasPrefix(p, "\t") { p = p[1:] } } var n int64 switch { - case hasprefix(p, "0x"), hasprefix(p, "0X"): + case hasPrefix(p, "0x"), hasPrefix(p, "0X"): p = p[2:] for ; len(p) > 0; p = p[1:] { if '0' <= p[0] && p[0] <= '9' { @@ -138,7 +138,7 @@ func atolwhex(p string) int64 { break } } - case hasprefix(p, "0"): + case hasPrefix(p, "0"): for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] { n = n*8 + int64(p[0]-'0') } diff --git a/src/runtime/os_plan9_arm.go b/src/runtime/os_plan9_arm.go index 1ce0141ce25b8..fdce1e7a352d6 100644 --- a/src/runtime/os_plan9_arm.go +++ b/src/runtime/os_plan9_arm.go @@ -4,8 +4,6 @@ package runtime -var hardDiv bool // TODO: set if a hardware divider is available - func checkgoarm() { return // TODO(minux) } diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 5607bf95c1d59..2e1ec58a0d9be 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -43,6 +43,7 @@ const ( //go:cgo_import_dynamic runtime._SetWaitableTimer SetWaitableTimer%6 "kernel32.dll" 
//go:cgo_import_dynamic runtime._SuspendThread SuspendThread%1 "kernel32.dll" //go:cgo_import_dynamic runtime._SwitchToThread SwitchToThread%0 "kernel32.dll" +//go:cgo_import_dynamic runtime._TlsAlloc TlsAlloc%0 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualAlloc VirtualAlloc%4 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualFree VirtualFree%3 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualQuery VirtualQuery%3 "kernel32.dll" @@ -91,6 +92,7 @@ var ( _SetWaitableTimer, _SuspendThread, _SwitchToThread, + _TlsAlloc, _VirtualAlloc, _VirtualFree, _VirtualQuery, @@ -196,6 +198,13 @@ func loadOptionalSyscalls() { } _NtWaitForSingleObject = windowsFindfunc(n32, []byte("NtWaitForSingleObject\000")) + if GOARCH == "arm" { + _QueryPerformanceCounter = windowsFindfunc(k32, []byte("QueryPerformanceCounter\000")) + if _QueryPerformanceCounter == nil { + throw("could not find QPC syscalls") + } + } + if windowsFindfunc(n32, []byte("wine_get_version\000")) != nil { // running on Wine initWine(k32) @@ -856,18 +865,28 @@ func profileloop() var profiletimer uintptr -func profilem(mp *m) { +func profilem(mp *m, thread uintptr) { var r *context rbuf := make([]byte, unsafe.Sizeof(*r)+15) - tls := &mp.tls[0] - gp := *((**g)(unsafe.Pointer(tls))) - // align Context to 16 bytes r = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&rbuf[15]))) &^ 15)) r.contextflags = _CONTEXT_CONTROL - stdcall2(_GetThreadContext, mp.thread, uintptr(unsafe.Pointer(r))) - sigprof(r.ip(), r.sp(), 0, gp, mp) + stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(r))) + + var gp *g + switch GOARCH { + default: + panic("unsupported architecture") + case "arm": + tls := &mp.tls[0] + gp = **((***g)(unsafe.Pointer(tls))) + case "386", "amd64": + tls := &mp.tls[0] + gp = *((**g)(unsafe.Pointer(tls))) + } + + sigprof(r.ip(), r.sp(), r.lr(), gp, mp) } func profileloop1(param uintptr) uint32 { @@ -884,9 +903,16 @@ func profileloop1(param uintptr) uint32 { if thread == 0 || mp.profilehz == 
0 || mp.blocked { continue } - stdcall1(_SuspendThread, thread) + // mp may exit between the load above and the + // SuspendThread, so be careful. + if int32(stdcall1(_SuspendThread, thread)) == -1 { + // The thread no longer exists. + continue + } if mp.profilehz != 0 && !mp.blocked { - profilem(mp) + // Pass the thread handle in case mp + // was in the process of shutting down. + profilem(mp, thread) } stdcall1(_ResumeThread, thread) } diff --git a/src/runtime/os_windows_arm.go b/src/runtime/os_windows_arm.go new file mode 100644 index 0000000000000..10aff75e31191 --- /dev/null +++ b/src/runtime/os_windows_arm.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +//go:nosplit +func cputicks() int64 { + var counter int64 + stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) + return counter +} + +func checkgoarm() { + if goarm < 7 { + print("Need atomic synchronization instructions, coprocessor ", + "access instructions. Recompile using GOARM=7.\n") + exit(1) + } +} diff --git a/src/runtime/panic.go b/src/runtime/panic.go index a5287a0b86e41..bb83be4715d3a 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -37,7 +37,7 @@ var indexError = error(errorString("index out of range")) // entire runtime stack for easier debugging. 
func panicindex() { - if hasprefix(funcname(findfunc(getcallerpc())), "runtime.") { + if hasPrefix(funcname(findfunc(getcallerpc())), "runtime.") { throw(string(indexError.(errorString))) } panicCheckMalloc(indexError) @@ -47,7 +47,7 @@ func panicindex() { var sliceError = error(errorString("slice bounds out of range")) func panicslice() { - if hasprefix(funcname(findfunc(getcallerpc())), "runtime.") { + if hasPrefix(funcname(findfunc(getcallerpc())), "runtime.") { throw(string(sliceError.(errorString))) } panicCheckMalloc(sliceError) @@ -241,6 +241,15 @@ func newdefer(siz int32) *_defer { total := roundupsize(totaldefersize(uintptr(siz))) d = (*_defer)(mallocgc(total, deferType, true)) }) + if debugCachedWork { + // Duplicate the tail below so if there's a + // crash in checkPut we can tell if d was just + // allocated or came from the pool. + d.siz = siz + d.link = gp._defer + gp._defer = d + return d + } } d.siz = siz d.link = gp._defer @@ -720,10 +729,13 @@ func fatalpanic(msgs *_panic) { // It returns true if panic messages should be printed, or false if // the runtime is in bad shape and should just print stacks. // -// It can have write barriers because the write barrier explicitly -// ignores writes once dying > 0. +// It must not have write barriers even though the write barrier +// explicitly ignores writes once dying > 0. Write barriers still +// assume that g.m.p != nil, and this function may not have P +// in some contexts (e.g. a panic in a signal handler for a signal +// sent to an M with no P). // -//go:yeswritebarrierrec +//go:nowritebarrierrec func startpanic_m() bool { _g_ := getg() if mheap_.cachealloc.size == 0 { // very early @@ -743,8 +755,8 @@ func startpanic_m() bool { switch _g_.m.dying { case 0: + // Setting dying >0 has the side-effect of disabling this G's writebuf. 
_g_.m.dying = 1 - _g_.writebuf = nil atomic.Xadd(&panicking, 1) lock(&paniclk) if debug.schedtrace > 0 || debug.scheddetail > 0 { @@ -849,7 +861,7 @@ func canpanic(gp *g) bool { return true } -// shouldPushSigpanic returns true if pc should be used as sigpanic's +// shouldPushSigpanic reports whether pc should be used as sigpanic's // return PC (pushing a frame for the call). Otherwise, it should be // left alone so that LR is used as sigpanic's return PC, effectively // replacing the top-most frame with sigpanic. This is used by @@ -887,7 +899,7 @@ func shouldPushSigpanic(gp *g, pc, lr uintptr) bool { return true } -// isAbortPC returns true if pc is the program counter at which +// isAbortPC reports whether pc is the program counter at which // runtime.abort raises a signal. // // It is nosplit because it's part of the isgoexception diff --git a/src/runtime/pprof/internal/profile/filter.go b/src/runtime/pprof/internal/profile/filter.go index 1baa096a49c80..9cad866df8c7f 100644 --- a/src/runtime/pprof/internal/profile/filter.go +++ b/src/runtime/pprof/internal/profile/filter.go @@ -55,7 +55,7 @@ func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, i return } -// matchesName returns whether the function name or file in the +// matchesName reports whether the function name or file in the // location matches the regular expression. func (loc *Location) matchesName(re *regexp.Regexp) bool { for _, ln := range loc.Line { diff --git a/src/runtime/pprof/internal/profile/profile.go b/src/runtime/pprof/internal/profile/profile.go index 64c3e3f054d38..a6f8354b1e8cc 100644 --- a/src/runtime/pprof/internal/profile/profile.go +++ b/src/runtime/pprof/internal/profile/profile.go @@ -200,7 +200,7 @@ var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) // first. 
func (p *Profile) setMain() { for i := 0; i < len(p.Mapping); i++ { - file := strings.TrimSpace(strings.Replace(p.Mapping[i].File, "(deleted)", "", -1)) + file := strings.TrimSpace(strings.ReplaceAll(p.Mapping[i].File, "(deleted)", "")) if len(file) == 0 { continue } @@ -415,16 +415,16 @@ func (p *Profile) String() string { for _, m := range p.Mapping { bits := "" if m.HasFunctions { - bits = bits + "[FN]" + bits += "[FN]" } if m.HasFilenames { - bits = bits + "[FL]" + bits += "[FL]" } if m.HasLineNumbers { - bits = bits + "[LN]" + bits += "[LN]" } if m.HasInlineFrames { - bits = bits + "[IN]" + bits += "[IN]" } ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", m.ID, @@ -573,7 +573,7 @@ func (p *Profile) Demangle(d Demangler) error { return nil } -// Empty returns true if the profile contains no samples. +// Empty reports whether the profile contains no samples. func (p *Profile) Empty() bool { return len(p.Sample) == 0 } diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index c1024c99ed26a..74cdd15cfbd16 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -28,6 +28,7 @@ // if err != nil { // log.Fatal("could not create CPU profile: ", err) // } +// defer f.Close() // if err := pprof.StartCPUProfile(f); err != nil { // log.Fatal("could not start CPU profile: ", err) // } @@ -41,11 +42,11 @@ // if err != nil { // log.Fatal("could not create memory profile: ", err) // } +// defer f.Close() // runtime.GC() // get up-to-date statistics // if err := pprof.WriteHeapProfile(f); err != nil { // log.Fatal("could not write memory profile: ", err) // } -// f.Close() // } // } // diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 44d514393ea44..7c6043ffdb2e8 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !nacl,!js +// +build !aix,!nacl,!js package pprof @@ -72,15 +72,24 @@ func cpuHog2(x int) int { return foo } +// Return a list of functions that we don't want to ever appear in CPU +// profiles. For gccgo, that list includes the sigprof handler itself. +func avoidFunctions() []string { + if runtime.Compiler == "gccgo" { + return []string{"runtime.sigprof"} + } + return nil +} + func TestCPUProfile(t *testing.T) { - testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) { + testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) { cpuHogger(cpuHog1, &salt1, dur) }) } func TestCPUProfileMultithreaded(t *testing.T) { defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) - testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) { + testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) { c := make(chan int) go func() { cpuHogger(cpuHog1, &salt1, dur) @@ -92,7 +101,7 @@ func TestCPUProfileMultithreaded(t *testing.T) { } func TestCPUProfileInlining(t *testing.T) { - testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) { + testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) { cpuHogger(inlinedCaller, &salt1, dur) }) } @@ -130,7 +139,9 @@ func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Loca } } -func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) { +// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need, +// as interpreted by matches. 
+func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) { switch runtime.GOOS { case "darwin": switch runtime.GOARCH { @@ -169,7 +180,7 @@ func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) { f(duration) StopCPUProfile() - if profileOk(t, need, prof, duration) { + if profileOk(t, matches, need, avoid, prof, duration) { return } @@ -182,6 +193,10 @@ func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) { switch runtime.GOOS { case "darwin", "dragonfly", "netbsd", "solaris": t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS) + case "openbsd": + if runtime.GOARCH == "arm" { + t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH) + } } // Ignore the failure if the tests are running in a QEMU-based emulator, // QEMU is not perfect at emulating everything. @@ -202,29 +217,43 @@ func contains(slice []string, s string) bool { return false } -func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) { +// stackContains matches if a function named spec appears anywhere in the stack trace. +func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool { + for _, loc := range stk { + for _, line := range loc.Line { + if strings.Contains(line.Function.Name, spec) { + return true + } + } + } + return false +} + +type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool + +func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (ok bool) { ok = true - // Check that profile is well formed and contains need. + // Check that profile is well formed, contains 'need', and does not contain + // anything from 'avoid'. 
have := make([]uintptr, len(need)) + avoidSamples := make([]uintptr, len(avoid)) var samples uintptr var buf bytes.Buffer parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) { fmt.Fprintf(&buf, "%d:", count) fprintStack(&buf, stk) samples += count - for i, name := range need { - if semi := strings.Index(name, ";"); semi > -1 { - kv := strings.SplitN(name[semi+1:], "=", 2) - if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) { - continue - } - name = name[:semi] + for i, spec := range need { + if matches(spec, count, stk, labels) { + have[i] += count } + } + for i, name := range avoid { for _, loc := range stk { for _, line := range loc.Line { if strings.Contains(line.Function.Name, name) { - have[i] += count + avoidSamples[i] += count } } } @@ -251,6 +280,14 @@ func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Dur ok = false } + for i, name := range avoid { + bad := avoidSamples[i] + if bad != 0 { + t.Logf("found %d samples in avoid-function %s\n", bad, name) + ok = false + } + } + if len(need) == 0 { return ok } @@ -288,6 +325,10 @@ func TestCPUProfileWithFork(t *testing.T) { // Use smaller size for Android to avoid crash. heap = 100 << 20 } + if runtime.GOOS == "windows" && runtime.GOARCH == "arm" { + // Use smaller heap for Windows/ARM to avoid crash. + heap = 100 << 20 + } if testing.Short() { heap = 100 << 20 } @@ -318,6 +359,9 @@ func TestCPUProfileWithFork(t *testing.T) { // If it did, it would see inconsistent state and would either record an incorrect stack // or crash because the stack was malformed. func TestGoroutineSwitch(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("not applicable for gccgo") + } // How much to try. These defaults take about 1 seconds // on a 2012 MacBook Pro. The ones in short mode take // about 0.1 seconds. 
@@ -377,7 +421,7 @@ func fprintStack(w io.Writer, stk []*profile.Location) { // Test that profiling of division operations is okay, especially on ARM. See issue 6681. func TestMathBigDivide(t *testing.T) { - testCPUProfile(t, nil, func(duration time.Duration) { + testCPUProfile(t, nil, nil, nil, func(duration time.Duration) { t := time.After(duration) pi := new(big.Int) for { @@ -395,6 +439,48 @@ func TestMathBigDivide(t *testing.T) { }) } +// stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace. +func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool { + for _, f := range strings.Split(spec, ",") { + if !stackContains(f, count, stk, labels) { + return false + } + } + return true +} + +func TestMorestack(t *testing.T) { + testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) { + t := time.After(duration) + c := make(chan bool) + for { + go func() { + growstack1() + c <- true + }() + select { + case <-t: + return + case <-c: + } + } + }) +} + +//go:noinline +func growstack1() { + growstack() +} + +//go:noinline +func growstack() { + var buf [8 << 10]byte + use(buf) +} + +//go:noinline +func use(x [8 << 10]byte) {} + func TestBlockProfile(t *testing.T) { type TestCase struct { name string @@ -524,7 +610,7 @@ func TestBlockProfile(t *testing.T) { } for _, test := range tests { - if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) { + if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) { t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof) } } @@ -848,8 +934,25 @@ func TestEmptyCallStack(t *testing.T) { } } +// stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key +// and value and has funcname somewhere in the stack. 
+func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool { + semi := strings.Index(spec, ";") + if semi == -1 { + panic("no semicolon in key/value spec") + } + kv := strings.SplitN(spec[semi+1:], "=", 2) + if len(kv) != 2 { + panic("missing = in key/value spec") + } + if !contains(labels[kv[0]], kv[1]) { + return false + } + return stackContains(spec[:semi], count, stk, labels) +} + func TestCPUProfileLabel(t *testing.T) { - testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) { + testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) { Do(context.Background(), Labels("key", "value"), func(context.Context) { cpuHogger(cpuHog1, &salt1, dur) }) @@ -860,7 +963,7 @@ func TestLabelRace(t *testing.T) { // Test the race detector annotations for synchronization // between settings labels and consuming them from the // profile. - testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) { + testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) { start := time.Now() var wg sync.WaitGroup for time.Since(start) < dur { @@ -907,3 +1010,38 @@ func TestAtomicLoadStore64(t *testing.T) { atomic.StoreUint64(&flag, 1) <-done } + +func TestTracebackAll(t *testing.T) { + // With gccgo, if a profiling signal arrives at the wrong time + // during traceback, it may crash or hang. See issue #29448. 
+	f, err := ioutil.TempFile("", "proftraceback")
+	if err != nil {
+		t.Fatalf("TempFile: %v", err)
+	}
+	defer os.Remove(f.Name())
+	defer f.Close()
+
+	if err := StartCPUProfile(f); err != nil {
+		t.Fatal(err)
+	}
+	defer StopCPUProfile()
+
+	ch := make(chan int)
+	defer close(ch)
+
+	count := 10
+	for i := 0; i < count; i++ {
+		go func() {
+			<-ch // block
+		}()
+	}
+
+	N := 10000
+	if testing.Short() {
+		N = 500
+	}
+	buf := make([]byte, 10*1024)
+	for i := 0; i < N; i++ {
+		runtime.Stack(buf, true)
+	}
+}
diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go
index cbd0b83376416..7864dd79ad0c6 100644
--- a/src/runtime/pprof/proto.go
+++ b/src/runtime/pprof/proto.go
@@ -208,7 +208,7 @@ func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file
 }
 
 // locForPC returns the location ID for addr.
-// addr must be a return PC. This returns the location of the call.
+// addr must be a return PC or 1 + the PC of an inline marker. This returns the location of the corresponding call.
 // It may emit to b.pb, so there must be no message encoding in progress.
 func (b *profileBuilder) locForPC(addr uintptr) uint64 {
 	id := uint64(b.locs[addr])
@@ -524,6 +524,14 @@ func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file,
 			continue
 		}
 		file := string(line)
+
+		// Trim deleted file marker.
+		deletedStr := " (deleted)"
+		deletedLen := len(deletedStr)
+		if len(file) >= deletedLen && file[len(file)-deletedLen:] == deletedStr {
+			file = file[:len(file)-deletedLen]
+		}
+
 		if len(inode) == 1 && inode[0] == '0' && file == "" {
 			// Huge-page text mappings list the initial fragment of
 			// mapped but unpopulated memory as being inode 0.
diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go index 76bd46da028db..4452d5123158e 100644 --- a/src/runtime/pprof/proto_test.go +++ b/src/runtime/pprof/proto_test.go @@ -216,24 +216,89 @@ c000000000-c000036000 rw-p 00000000 00:00 0 07000000 07093000 06c00000 /path/to/gobench_server_main ` +var profSelfMapsTestsWithDeleted = ` +00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat (deleted) +0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat (deleted) +0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat (deleted) +014ab000-014cc000 rw-p 00000000 00:00 0 [heap] +7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive +7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0 +7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0 +7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0 +7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0 +7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack] +7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 r-xp 00000090 00:00 0 [vsyscall] +-> +00400000 0040b000 00000000 /bin/cat +7f7d7797c000 7f7d77b36000 00000000 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d41000 7f7d77d64000 00000000 /lib/x86_64-linux-gnu/ld-2.19.so +7ffc34343000 7ffc34345000 00000000 [vdso] +ffffffffff600000 ffffffffff601000 00000090 [vsyscall] + +00400000-0040b000 r-xp 
00000000 fc:01 787766 /bin/cat with space +0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat with space +0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat with space +014ab000-014cc000 rw-p 00000000 00:00 0 [heap] +7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive +7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0 +7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0 +7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0 +7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0 +7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack] +7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 r-xp 00000090 00:00 0 [vsyscall] +-> +00400000 0040b000 00000000 /bin/cat with space +7f7d7797c000 7f7d77b36000 00000000 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d41000 7f7d77d64000 00000000 /lib/x86_64-linux-gnu/ld-2.19.so +7ffc34343000 7ffc34345000 00000000 [vdso] +ffffffffff600000 ffffffffff601000 00000090 [vsyscall] +` + func TestProcSelfMaps(t *testing.T) { - for tx, tt := range strings.Split(profSelfMapsTests, "\n\n") { - i := strings.Index(tt, "->\n") - if i < 0 { - t.Fatal("malformed test case") - } - in, out := tt[:i], tt[i+len("->\n"):] - if len(out) > 0 && out[len(out)-1] != '\n' { - out += "\n" - } - var buf bytes.Buffer - parseProcSelfMaps([]byte(in), func(lo, hi, offset uint64, file, buildID string) 
{ - fmt.Fprintf(&buf, "%08x %08x %08x %s\n", lo, hi, offset, file) - }) - if buf.String() != out { - t.Errorf("#%d: have:\n%s\nwant:\n%s\n%q\n%q", tx, buf.String(), out, buf.String(), out) + + f := func(t *testing.T, input string) { + for tx, tt := range strings.Split(input, "\n\n") { + i := strings.Index(tt, "->\n") + if i < 0 { + t.Fatal("malformed test case") + } + in, out := tt[:i], tt[i+len("->\n"):] + if len(out) > 0 && out[len(out)-1] != '\n' { + out += "\n" + } + var buf bytes.Buffer + parseProcSelfMaps([]byte(in), func(lo, hi, offset uint64, file, buildID string) { + fmt.Fprintf(&buf, "%08x %08x %08x %s\n", lo, hi, offset, file) + }) + if buf.String() != out { + t.Errorf("#%d: have:\n%s\nwant:\n%s\n%q\n%q", tx, buf.String(), out, buf.String(), out) + } } } + + t.Run("Normal", func(t *testing.T) { + f(t, profSelfMapsTests) + }) + + t.Run("WithDeletedFile", func(t *testing.T) { + f(t, profSelfMapsTestsWithDeleted) + }) } // TestMapping checkes the mapping section of CPU profiles diff --git a/src/runtime/pprof/protomem.go b/src/runtime/pprof/protomem.go index 82565d5245bfe..1c88aae43a0aa 100644 --- a/src/runtime/pprof/protomem.go +++ b/src/runtime/pprof/protomem.go @@ -56,8 +56,8 @@ func writeHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64, defau values[0], values[1] = scaleHeapSample(r.AllocObjects, r.AllocBytes, rate) values[2], values[3] = scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate) var blockSize int64 - if values[0] > 0 { - blockSize = values[1] / values[0] + if r.AllocObjects > 0 { + blockSize = r.AllocBytes / r.AllocObjects } b.pbSample(values, locs, func() { if blockSize != 0 { diff --git a/src/runtime/pprof/protomem_test.go b/src/runtime/pprof/protomem_test.go index 315d5f0b4d800..471b1ae9c3291 100644 --- a/src/runtime/pprof/protomem_test.go +++ b/src/runtime/pprof/protomem_test.go @@ -48,7 +48,7 @@ func TestConvertMemProfile(t *testing.T) { {ID: 3, Mapping: map2, Address: addr2 + 1}, {ID: 4, Mapping: map2, Address: addr2 + 
2}, }, - NumLabel: map[string][]int64{"bytes": {829411}}, + NumLabel: map[string][]int64{"bytes": {512 * 1024}}, }, { Value: []int64{1, 829411, 0, 0}, @@ -57,7 +57,7 @@ func TestConvertMemProfile(t *testing.T) { {ID: 6, Mapping: map1, Address: addr1 + 2}, {ID: 7, Mapping: map2, Address: addr2 + 3}, }, - NumLabel: map[string][]int64{"bytes": {829411}}, + NumLabel: map[string][]int64{"bytes": {512 * 1024}}, }, } for _, tc := range []struct { diff --git a/src/runtime/print.go b/src/runtime/print.go index 7b2e4f40ffe06..e605eb34cb90a 100644 --- a/src/runtime/print.go +++ b/src/runtime/print.go @@ -89,7 +89,12 @@ func gwrite(b []byte) { } recordForPanic(b) gp := getg() - if gp == nil || gp.writebuf == nil { + // Don't use the writebuf if gp.m is dying. We want anything + // written through gwrite to appear in the terminal rather + // than be written to in some buffer, if we're in a panicking state. + // Note that we can't just clear writebuf in the gp.m.dying case + // because a panic isn't allowed to have any write barriers. + if gp == nil || gp.writebuf == nil || gp.m.dying > 0 { writeErr(b) return } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index f82014eb92f08..6e56b4b1d1b6f 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -157,8 +157,7 @@ func main() { } }() - // Record when the world started. Must be after runtime_init - // because nanotime on some platforms depends on startNano. + // Record when the world started. runtimeInitTime = nanotime() gcenable() @@ -254,7 +253,7 @@ func forcegchelper() { println("GC forced") } // Time-triggered, fully concurrent. 
- gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()}) + gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) } } @@ -477,23 +476,18 @@ const ( _GoidCacheBatch = 16 ) -//go:linkname internal_cpu_initialize internal/cpu.initialize -func internal_cpu_initialize(env string) - -//go:linkname internal_cpu_debugOptions internal/cpu.debugOptions -var internal_cpu_debugOptions bool - -// cpuinit extracts the environment variable GODEBUGCPU from the environment on -// Linux and Darwin if the GOEXPERIMENT debugcpu was set and calls internal/cpu.initialize. +// cpuinit extracts the environment variable GODEBUG from the environment on +// Unix-like operating systems and calls internal/cpu.Initialize. func cpuinit() { - const prefix = "GODEBUGCPU=" + const prefix = "GODEBUG=" var env string - if haveexperiment("debugcpu") && (GOOS == "linux" || GOOS == "darwin") { - internal_cpu_debugOptions = true + switch GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "solaris", "linux": + cpu.DebugOptions = true // Similar to goenv_unix but extracts the environment value for - // GODEBUGCPU directly. + // GODEBUG directly. // TODO(moehrmann): remove when general goenvs() can be called before cpuinit() n := int32(0) for argv_index(argv, argc+1+n) != nil { @@ -504,21 +498,21 @@ func cpuinit() { p := argv_index(argv, argc+1+i) s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)})) - if hasprefix(s, prefix) { + if hasPrefix(s, prefix) { env = gostring(p)[len(prefix):] break } } } - internal_cpu_initialize(env) + cpu.Initialize(env) - support_erms = cpu.X86.HasERMS - support_popcnt = cpu.X86.HasPOPCNT - support_sse2 = cpu.X86.HasSSE2 - support_sse41 = cpu.X86.HasSSE41 + // Support cpu feature variables are used in code generated by the compiler + // to guard execution of instructions that can not be assumed to be always supported. 
+ x86HasPOPCNT = cpu.X86.HasPOPCNT + x86HasSSE41 = cpu.X86.HasSSE41 - arm64_support_atomics = cpu.ARM64.HasATOMICS + arm64HasATOMICS = cpu.ARM64.HasATOMICS } // The bootstrap sequence is: @@ -669,59 +663,6 @@ func ready(gp *g, traceskip int, next bool) { } } -func gcprocs() int32 { - // Figure out how many CPUs to use during GC. - // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. - lock(&sched.lock) - n := gomaxprocs - if n > ncpu { - n = ncpu - } - if n > _MaxGcproc { - n = _MaxGcproc - } - if n > sched.nmidle+1 { // one M is currently running - n = sched.nmidle + 1 - } - unlock(&sched.lock) - return n -} - -func needaddgcproc() bool { - lock(&sched.lock) - n := gomaxprocs - if n > ncpu { - n = ncpu - } - if n > _MaxGcproc { - n = _MaxGcproc - } - n -= sched.nmidle + 1 // one M is currently running - unlock(&sched.lock) - return n > 0 -} - -func helpgc(nproc int32) { - _g_ := getg() - lock(&sched.lock) - pos := 0 - for n := int32(1); n < nproc; n++ { // one M is currently running - if allp[pos].mcache == _g_.m.mcache { - pos++ - } - mp := mget() - if mp == nil { - throw("gcprocs inconsistency") - } - mp.helpgc = n - mp.p.set(allp[pos]) - mp.mcache = allp[pos].mcache - pos++ - notewakeup(&mp.park) - } - unlock(&sched.lock) -} - // freezeStopWait is a large value that freezetheworld sets // sched.stopwait to in order to request that all Gs permanently stop. 
const freezeStopWait = 0x7fffffff
@@ -1138,20 +1079,14 @@ func stopTheWorldWithSema() {
 	}
 }
 
-func mhelpgc() {
-	_g_ := getg()
-	_g_.m.helpgc = -1
-}
-
 func startTheWorldWithSema(emitTraceEvent bool) int64 {
 	_g_ := getg()
 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
 	if netpollinited() {
-		gp := netpoll(false) // non-blocking
-		injectglist(gp)
+		list := netpoll(false) // non-blocking
+		injectglist(&list)
 	}
-	add := needaddgcproc()
 	lock(&sched.lock)
 
 	procs := gomaxprocs
@@ -1181,7 +1116,6 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
 		} else {
 			// Start M to run P. Do not start another M below.
 			newm(nil, p)
-			add = false
 		}
 	}
 
@@ -1198,16 +1132,6 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
 		wakep()
 	}
 
-	if add {
-		// If GC could have used another helper proc, start one now,
-		// in the hope that it will be available next time.
-		// It would have been even better to start it before the collection,
-		// but doing so requires allocating memory, so it's tricky to
-		// coordinate. This lazy approach works out in practice:
-		// we don't mind if the first couple gc rounds don't have quite
-		// the maximum number of procs.
-		newm(mhelpgc, nil)
-	}
 	_g_.m.locks--
 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
 		_g_.stackguard0 = stackPreempt
@@ -1248,8 +1172,8 @@ func mstart() {
 	mstart1()
 
 	// Exit this thread.
-	if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" || GOOS == "darwin" {
-		// Window, Solaris, Darwin and Plan 9 always system-allocate
+	if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" {
+		// Windows, Solaris, Darwin, AIX and Plan 9 always system-allocate
 		// the stack, but put it in _g_.stack before mstart,
 		// so the logic above hasn't set osStack yet.
osStack = true @@ -1282,10 +1206,7 @@ func mstart1() { fn() } - if _g_.m.helpgc != 0 { - _g_.m.helpgc = 0 - stopm() - } else if _g_.m != &m0 { + if _g_.m != &m0 { acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } @@ -1605,7 +1526,7 @@ func allocm(_p_ *p, fn func()) *m { // the following strategy: there is a stack of available m's // that can be stolen. Using compare-and-swap // to pop from the stack has ABA races, so we simulate -// a lock by doing an exchange (via casp) to steal the stack +// a lock by doing an exchange (via Casuintptr) to steal the stack // head and replace the top pointer with MLOCKED (1). // This serves as a simple spin lock that we can use even // without an m. The thread that locks the stack in this way @@ -1957,7 +1878,7 @@ func startTemplateThread() { // templateThread is a thread in a known-good state that exists solely // to start new threads in known-good states when the calling thread -// may not be a a good state. +// may not be in a good state. // // Many programs never need this, so templateThread is started lazily // when we first enter a state that might lead to running on a thread @@ -2009,21 +1930,11 @@ func stopm() { throw("stopm spinning") } -retry: lock(&sched.lock) mput(_g_.m) unlock(&sched.lock) notesleep(&_g_.m.park) noteclear(&_g_.m.park) - if _g_.m.helpgc != 0 { - // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P. - gchelper() - // Undo the effects of helpgc(). - _g_.m.helpgc = 0 - _g_.m.mcache = nil - _g_.m.p = 0 - goto retry - } acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } @@ -2312,9 +2223,9 @@ top: // not set lastpoll yet), this thread will do blocking netpoll below // anyway. if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { - if gp := netpoll(false); gp != nil { // non-blocking - // netpoll returns list of goroutines linked by schedlink. 
- injectglist(gp.schedlink.ptr()) + if list := netpoll(false); !list.empty() { // non-blocking + gp := list.pop() + injectglist(&list) casgstatus(gp, _Gwaiting, _Grunnable) if trace.enabled { traceGoUnpark(gp, 0) @@ -2369,10 +2280,10 @@ stop: } // wasm only: - // Check if a goroutine is waiting for a callback from the WebAssembly host. - // If yes, pause the execution until a callback was triggered. - if pauseSchedulerUntilCallback() { - // A callback was triggered and caused at least one goroutine to wake up. + // If a callback returned and no other goroutine is awake, + // then pause execution until a callback was triggered. + if beforeIdle() { + // At least one goroutine got woken. goto top } @@ -2466,29 +2377,30 @@ stop: if _g_.m.spinning { throw("findrunnable: netpoll with spinning") } - gp := netpoll(true) // block until new work is available + list := netpoll(true) // block until new work is available atomic.Store64(&sched.lastpoll, uint64(nanotime())) - if gp != nil { + if !list.empty() { lock(&sched.lock) _p_ = pidleget() unlock(&sched.lock) if _p_ != nil { acquirep(_p_) - injectglist(gp.schedlink.ptr()) + gp := list.pop() + injectglist(&list) casgstatus(gp, _Gwaiting, _Grunnable) if trace.enabled { traceGoUnpark(gp, 0) } return gp, false } - injectglist(gp) + injectglist(&list) } } stopm() goto top } -// pollWork returns true if there is non-background work this P could +// pollWork reports whether there is non-background work this P could // be doing. This is a fairly lightweight check to be used for // background work loops, like idle GC. It checks a subset of the // conditions checked by the actual scheduler. 
@@ -2501,8 +2413,8 @@ func pollWork() bool { return true } if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { - if gp := netpoll(false); gp != nil { - injectglist(gp) + if list := netpoll(false); !list.empty() { + injectglist(&list) return true } } @@ -2527,22 +2439,21 @@ func resetspinning() { } } -// Injects the list of runnable G's into the scheduler. +// Injects the list of runnable G's into the scheduler and clears glist. // Can run concurrently with GC. -func injectglist(glist *g) { - if glist == nil { +func injectglist(glist *gList) { + if glist.empty() { return } if trace.enabled { - for gp := glist; gp != nil; gp = gp.schedlink.ptr() { + for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { traceGoUnpark(gp, 0) } } lock(&sched.lock) var n int - for n = 0; glist != nil; n++ { - gp := glist - glist = gp.schedlink.ptr() + for n = 0; !glist.empty(); n++ { + gp := glist.pop() casgstatus(gp, _Gwaiting, _Grunnable) globrunqput(gp) } @@ -2550,6 +2461,7 @@ func injectglist(glist *g) { for ; n != 0 && sched.npidle != 0; n-- { startm(nil, false) } + *glist = gList{} } // One round of scheduler: find a runnable goroutine and execute it. @@ -2620,6 +2532,23 @@ top: resetspinning() } + if sched.disable.user && !schedEnabled(gp) { + // Scheduling of this goroutine is disabled. Put it on + // the list of pending runnable goroutines for when we + // re-enable user scheduling and look again. + lock(&sched.lock) + if schedEnabled(gp) { + // Something re-enabled scheduling while we + // were acquiring the lock. + unlock(&sched.lock) + } else { + sched.disable.runnable.pushBack(gp) + sched.disable.n++ + unlock(&sched.lock) + goto top + } + } + if gp.lockedm != 0 { // Hands off own p to the locked m, // then blocks waiting for a new p. 
@@ -2735,7 +2664,7 @@ func goexit0(gp *g) { _g_ := getg() casgstatus(gp, _Grunning, _Gdead) - if isSystemGoroutine(gp) { + if isSystemGoroutine(gp, false) { atomic.Xadd(&sched.ngsys, -1) } gp.m = nil @@ -2774,7 +2703,6 @@ func goexit0(gp *g) { print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") throw("internal lockOSThread error") } - _g_.m.lockedExt = 0 gfput(_g_.m.p.ptr(), gp) if locked { // The goroutine may have locked this thread because @@ -2785,6 +2713,10 @@ func goexit0(gp *g) { // the thread. if GOOS != "plan9" { // See golang.org/issue/22227. gogo(&_g_.m.g0.sched) + } else { + // Clear lockedExt on plan9 since we may end up re-using + // this thread. + _g_.m.lockedExt = 0 } } schedule() @@ -2899,8 +2831,11 @@ func reentersyscall(pc, sp uintptr) { _g_.m.syscalltick = _g_.m.p.ptr().syscalltick _g_.sysblocktraced = true _g_.m.mcache = nil - _g_.m.p.ptr().m = 0 - atomic.Store(&_g_.m.p.ptr().status, _Psyscall) + pp := _g_.m.p.ptr() + pp.m = 0 + _g_.m.oldp.set(pp) + _g_.m.p = 0 + atomic.Store(&pp.status, _Psyscall) if sched.gcwaiting != 0 { systemstack(entersyscall_gcwait) save(pc, sp) @@ -2926,7 +2861,7 @@ func entersyscall_sysmon() { func entersyscall_gcwait() { _g_ := getg() - _p_ := _g_.m.p.ptr() + _p_ := _g_.m.oldp.ptr() lock(&sched.lock) if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { @@ -3011,8 +2946,9 @@ func exitsyscall() { } _g_.waitsince = 0 - oldp := _g_.m.p.ptr() - if exitsyscallfast() { + oldp := _g_.m.oldp.ptr() + _g_.m.oldp = 0 + if exitsyscallfast(oldp) { if _g_.m.mcache == nil { throw("lost mcache") } @@ -3038,6 +2974,12 @@ func exitsyscall() { _g_.stackguard0 = _g_.stack.lo + _StackGuard } _g_.throwsplit = false + + if sched.disable.user && !schedEnabled(_g_) { + // Scheduling of this goroutine is disabled. 
+ Gosched() + } + return } @@ -3076,27 +3018,23 @@ func exitsyscall() { } //go:nosplit -func exitsyscallfast() bool { +func exitsyscallfast(oldp *p) bool { _g_ := getg() // Freezetheworld sets stopwait but does not retake P's. if sched.stopwait == freezeStopWait { - _g_.m.mcache = nil - _g_.m.p = 0 return false } // Try to re-acquire the last P. - if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { + if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { // There's a cpu for us, so we can run. + wirep(oldp) exitsyscallfast_reacquired() return true } // Try to get any other idle P. - oldp := _g_.m.p.ptr() - _g_.m.mcache = nil - _g_.m.p = 0 if sched.pidle != 0 { var ok bool systemstack(func() { @@ -3123,15 +3061,9 @@ func exitsyscallfast() bool { // has successfully reacquired the P it was running on before the // syscall. // -// This function is allowed to have write barriers because exitsyscall -// has acquired a P at this point. -// -//go:yeswritebarrierrec //go:nosplit func exitsyscallfast_reacquired() { _g_ := getg() - _g_.m.mcache = _g_.m.p.ptr().mcache - _g_.m.p.ptr().m.set(_g_.m) if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { if trace.enabled { // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). @@ -3173,7 +3105,10 @@ func exitsyscall0(gp *g) { casgstatus(gp, _Gsyscall, _Grunnable) dropg() lock(&sched.lock) - _p_ := pidleget() + var _p_ *p + if schedEnabled(_g_) { + _p_ = pidleget() + } if _p_ == nil { globrunqput(gp) } else if atomic.Load(&sched.sysmonwait) != 0 { @@ -3368,9 +3303,11 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintpt if writeBarrier.needed && !_g_.m.curg.gcscandone { f := findfunc(fn.fn) stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) - // We're in the prologue, so it's always stack map index 0. 
- bv := stackmapdata(stkmap, 0) - bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata) + if stkmap.nbit > 0 { + // We're in the prologue, so it's always stack map index 0. + bv := stackmapdata(stkmap, 0) + bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata) + } } } @@ -3386,7 +3323,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintpt if _g_.m.curg != nil { newg.labels = _g_.m.curg.labels } - if isSystemGoroutine(newg) { + if isSystemGoroutine(newg, false) { atomic.Xadd(&sched.ngsys, +1) } newg.gcscanvalid = false @@ -3470,25 +3407,21 @@ func gfput(_p_ *p, gp *g) { gp.stackguard0 = 0 } - gp.schedlink.set(_p_.gfree) - _p_.gfree = gp - _p_.gfreecnt++ - if _p_.gfreecnt >= 64 { - lock(&sched.gflock) - for _p_.gfreecnt >= 32 { - _p_.gfreecnt-- - gp = _p_.gfree - _p_.gfree = gp.schedlink.ptr() + _p_.gFree.push(gp) + _p_.gFree.n++ + if _p_.gFree.n >= 64 { + lock(&sched.gFree.lock) + for _p_.gFree.n >= 32 { + _p_.gFree.n-- + gp = _p_.gFree.pop() if gp.stack.lo == 0 { - gp.schedlink.set(sched.gfreeNoStack) - sched.gfreeNoStack = gp + sched.gFree.noStack.push(gp) } else { - gp.schedlink.set(sched.gfreeStack) - sched.gfreeStack = gp + sched.gFree.stack.push(gp) } - sched.ngfree++ + sched.gFree.n++ } - unlock(&sched.gflock) + unlock(&sched.gFree.lock) } } @@ -3496,44 +3429,42 @@ func gfput(_p_ *p, gp *g) { // If local list is empty, grab a batch from global list. func gfget(_p_ *p) *g { retry: - gp := _p_.gfree - if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { - lock(&sched.gflock) - for _p_.gfreecnt < 32 { - if sched.gfreeStack != nil { - // Prefer Gs with stacks. 
- gp = sched.gfreeStack - sched.gfreeStack = gp.schedlink.ptr() - } else if sched.gfreeNoStack != nil { - gp = sched.gfreeNoStack - sched.gfreeNoStack = gp.schedlink.ptr() - } else { - break + if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { + lock(&sched.gFree.lock) + // Move a batch of free Gs to the P. + for _p_.gFree.n < 32 { + // Prefer Gs with stacks. + gp := sched.gFree.stack.pop() + if gp == nil { + gp = sched.gFree.noStack.pop() + if gp == nil { + break + } } - _p_.gfreecnt++ - sched.ngfree-- - gp.schedlink.set(_p_.gfree) - _p_.gfree = gp + sched.gFree.n-- + _p_.gFree.push(gp) + _p_.gFree.n++ } - unlock(&sched.gflock) + unlock(&sched.gFree.lock) goto retry } - if gp != nil { - _p_.gfree = gp.schedlink.ptr() - _p_.gfreecnt-- - if gp.stack.lo == 0 { - // Stack was deallocated in gfput. Allocate a new one. - systemstack(func() { - gp.stack = stackalloc(_FixedStack) - }) - gp.stackguard0 = gp.stack.lo + _StackGuard - } else { - if raceenabled { - racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) - } - if msanenabled { - msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) - } + gp := _p_.gFree.pop() + if gp == nil { + return nil + } + _p_.gFree.n-- + if gp.stack.lo == 0 { + // Stack was deallocated in gfput. Allocate a new one. + systemstack(func() { + gp.stack = stackalloc(_FixedStack) + }) + gp.stackguard0 = gp.stack.lo + _StackGuard + } else { + if raceenabled { + racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) + } + if msanenabled { + msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) } } return gp @@ -3541,21 +3472,18 @@ retry: // Purge all cached G's from gfree list to the global list. 
func gfpurge(_p_ *p) { - lock(&sched.gflock) - for _p_.gfreecnt != 0 { - _p_.gfreecnt-- - gp := _p_.gfree - _p_.gfree = gp.schedlink.ptr() + lock(&sched.gFree.lock) + for !_p_.gFree.empty() { + gp := _p_.gFree.pop() + _p_.gFree.n-- if gp.stack.lo == 0 { - gp.schedlink.set(sched.gfreeNoStack) - sched.gfreeNoStack = gp + sched.gFree.noStack.push(gp) } else { - gp.schedlink.set(sched.gfreeStack) - sched.gfreeStack = gp + sched.gFree.stack.push(gp) } - sched.ngfree++ + sched.gFree.n++ } - unlock(&sched.gflock) + unlock(&sched.gFree.lock) } // Breakpoint executes a breakpoint trap. @@ -3668,9 +3596,9 @@ func badunlockosthread() { } func gcount() int32 { - n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) + n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) for _, _p_ := range allp { - n -= _p_.gfreecnt + n -= _p_.gFree.n } // All these variables can be changed concurrently, so the result can be inconsistent. @@ -3716,7 +3644,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { if f := findfunc(pc); f.valid() { - if hasprefix(funcname(f), "runtime/internal/atomic") { + if hasPrefix(funcname(f), "runtime/internal/atomic") { lostAtomic64Count++ return } @@ -3819,6 +3747,9 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { // Collect Go stack that leads to the cgo call. 
n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) + if n > 0 { + n += cgoOff + } } else if traceback { n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) } @@ -3845,7 +3776,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { pc = funcPC(_ExternalCode) + sys.PCQuantum } stk[0] = pc - if mp.preemptoff != "" || mp.helpgc != 0 { + if mp.preemptoff != "" { stk[1] = funcPC(_GC) + sys.PCQuantum } else { stk[1] = funcPC(_System) + sys.PCQuantum @@ -4107,6 +4038,7 @@ func procresize(nprocs int32) *p { if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { // continue to use the current P _g_.m.p.ptr().status = _Prunning + _g_.m.p.ptr().mcache.prepareForSweep() } else { // release the current P and acquire allp[0] if _g_.m.p != 0 { @@ -4151,36 +4083,40 @@ func procresize(nprocs int32) *p { //go:yeswritebarrierrec func acquirep(_p_ *p) { // Do the part that isn't allowed to have write barriers. - acquirep1(_p_) + wirep(_p_) - // have p; write barriers now allowed - _g_ := getg() - _g_.m.mcache = _p_.mcache + // Have p; write barriers now allowed. + + // Perform deferred mcache flush before this P can allocate + // from a potentially stale mcache. + _p_.mcache.prepareForSweep() if trace.enabled { traceProcStart() } } -// acquirep1 is the first step of acquirep, which actually acquires -// _p_. This is broken out so we can disallow write barriers for this -// part, since we don't yet have a P. +// wirep is the first step of acquirep, which actually associates the +// current M to _p_. This is broken out so we can disallow write +// barriers for this part, since we don't yet have a P. 
// //go:nowritebarrierrec -func acquirep1(_p_ *p) { +//go:nosplit +func wirep(_p_ *p) { _g_ := getg() if _g_.m.p != 0 || _g_.m.mcache != nil { - throw("acquirep: already in go") + throw("wirep: already in go") } if _p_.m != 0 || _p_.status != _Pidle { id := int64(0) if _p_.m != 0 { id = _p_.m.ptr().id } - print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") - throw("acquirep: invalid p state") + print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") + throw("wirep: invalid p state") } + _g_.m.mcache = _p_.mcache _g_.m.p.set(_p_) _p_.m.set(_g_.m) _p_.status = _Prunning @@ -4258,7 +4194,7 @@ func checkdead() { lock(&allglock) for i := 0; i < len(allgs); i++ { gp := allgs[i] - if isSystemGoroutine(gp) { + if isSystemGoroutine(gp, false) { continue } s := readgstatus(gp) @@ -4387,8 +4323,8 @@ func sysmon() { now := nanotime() if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) - gp := netpoll(false) // non-blocking - returns list of goroutines - if gp != nil { + list := netpoll(false) // non-blocking - returns list of goroutines + if !list.empty() { // Need to decrement number of idle locked M's // (pretending that one more is running) before injectglist. // Otherwise it can lead to the following situation: @@ -4397,7 +4333,7 @@ func sysmon() { // observes that there is no work to do and no other running M's // and reports deadlock. 
incidlelocked(-1) - injectglist(gp) + injectglist(&list) incidlelocked(1) } } @@ -4412,8 +4348,9 @@ func sysmon() { if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { lock(&forcegc.lock) forcegc.idle = 0 - forcegc.g.schedlink = 0 - injectglist(forcegc.g) + var list gList + list.push(forcegc.g) + injectglist(&list) unlock(&forcegc.lock) } // scavenge heap once in a while @@ -4580,7 +4517,7 @@ func schedtrace(detailed bool) { if mp != nil { id = mp.id } - print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") + print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n") } else { // In non-detailed mode format lengths of per-P run queues as: // [len1 len2 len3 len4] @@ -4616,7 +4553,7 @@ func schedtrace(detailed bool) { if lockedg != nil { id3 = lockedg.goid } - print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") + print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") } lock(&allglock) @@ -4638,6 +4575,40 @@ func schedtrace(detailed bool) { unlock(&sched.lock) } +// schedEnableUser enables or disables the scheduling of user +// goroutines. +// +// This does not stop already running user goroutines, so the caller +// should first stop the world when disabling user goroutines. 
+func schedEnableUser(enable bool) { + lock(&sched.lock) + if sched.disable.user == !enable { + unlock(&sched.lock) + return + } + sched.disable.user = !enable + if enable { + n := sched.disable.n + sched.disable.n = 0 + globrunqputbatch(&sched.disable.runnable, n) + unlock(&sched.lock) + for ; n != 0 && sched.npidle != 0; n-- { + startm(nil, false) + } + } else { + unlock(&sched.lock) + } +} + +// schedEnabled reports whether gp should be scheduled. It returns +// false is scheduling of gp is disabled. +func schedEnabled(gp *g) bool { + if sched.disable.user { + return isSystemGoroutine(gp, true) + } + return true +} + // Put mp on midle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. @@ -4667,13 +4638,7 @@ func mget() *m { // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func globrunqput(gp *g) { - gp.schedlink = 0 - if sched.runqtail != 0 { - sched.runqtail.ptr().schedlink.set(gp) - } else { - sched.runqhead.set(gp) - } - sched.runqtail.set(gp) + sched.runq.pushBack(gp) sched.runqsize++ } @@ -4682,25 +4647,17 @@ func globrunqput(gp *g) { // May run during STW, so write barriers are not allowed. //go:nowritebarrierrec func globrunqputhead(gp *g) { - gp.schedlink = sched.runqhead - sched.runqhead.set(gp) - if sched.runqtail == 0 { - sched.runqtail.set(gp) - } + sched.runq.push(gp) sched.runqsize++ } // Put a batch of runnable goroutines on the global runnable queue. +// This clears *batch. // Sched must be locked. -func globrunqputbatch(ghead *g, gtail *g, n int32) { - gtail.schedlink = 0 - if sched.runqtail != 0 { - sched.runqtail.ptr().schedlink.set(ghead) - } else { - sched.runqhead.set(ghead) - } - sched.runqtail.set(gtail) +func globrunqputbatch(batch *gQueue, n int32) { + sched.runq.pushBackAll(*batch) sched.runqsize += n + *batch = gQueue{} } // Try get a batch of G's from the global runnable queue. 
@@ -4722,16 +4679,11 @@ func globrunqget(_p_ *p, max int32) *g { } sched.runqsize -= n - if sched.runqsize == 0 { - sched.runqtail = 0 - } - gp := sched.runqhead.ptr() - sched.runqhead = gp.schedlink + gp := sched.runq.pop() n-- for ; n > 0; n-- { - gp1 := sched.runqhead.ptr() - sched.runqhead = gp1.schedlink + gp1 := sched.runq.pop() runqput(_p_, gp1, false) } return gp @@ -4763,7 +4715,7 @@ func pidleget() *p { return _p_ } -// runqempty returns true if _p_ has no Gs on its local run queue. +// runqempty reports whether _p_ has no Gs on its local run queue. // It never returns true spuriously. func runqempty(_p_ *p) bool { // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, @@ -4815,11 +4767,11 @@ func runqput(_p_ *p, gp *g, next bool) { } retry: - h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers + h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers t := _p_.runqtail if t-h < uint32(len(_p_.runq)) { _p_.runq[t%uint32(len(_p_.runq))].set(gp) - atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption + atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption return } if runqputslow(_p_, gp, h, t) { @@ -4843,7 +4795,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool { for i := uint32(0); i < n; i++ { batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() } - if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume + if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume return false } batch[n] = gp @@ -4859,10 +4811,13 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool { for i := uint32(0); i < n; i++ { batch[i].schedlink.set(batch[i+1]) } + var q gQueue + q.head.set(batch[0]) + q.tail.set(batch[n]) // Now put the batch on global queue. 
lock(&sched.lock) - globrunqputbatch(batch[0], batch[n], int32(n+1)) + globrunqputbatch(&q, int32(n+1)) unlock(&sched.lock) return true } @@ -4884,13 +4839,13 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) { } for { - h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers + h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers t := _p_.runqtail if t == h { return nil, false } gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() - if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume + if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume return gp, false } } @@ -4902,8 +4857,8 @@ func runqget(_p_ *p) (gp *g, inheritTime bool) { // Can be executed by any P. func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { for { - h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers - t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer + h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers + t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer n := t - h n = n - n/2 if n == 0 { @@ -4946,7 +4901,7 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool g := _p_.runq[(h+i)%uint32(len(_p_.runq))] batch[(batchHead+i)%uint32(len(batch))] = g } - if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume + if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume return n } } @@ -4966,11 +4921,112 @@ func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { if n == 0 { return gp } - h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers + h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers if t-h+n >= uint32(len(_p_.runq)) { throw("runqsteal: runq overflow") } - atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for 
consumption + atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption + return gp +} + +// A gQueue is a dequeue of Gs linked through g.schedlink. A G can only +// be on one gQueue or gList at a time. +type gQueue struct { + head guintptr + tail guintptr +} + +// empty reports whether q is empty. +func (q *gQueue) empty() bool { + return q.head == 0 +} + +// push adds gp to the head of q. +func (q *gQueue) push(gp *g) { + gp.schedlink = q.head + q.head.set(gp) + if q.tail == 0 { + q.tail.set(gp) + } +} + +// pushBack adds gp to the tail of q. +func (q *gQueue) pushBack(gp *g) { + gp.schedlink = 0 + if q.tail != 0 { + q.tail.ptr().schedlink.set(gp) + } else { + q.head.set(gp) + } + q.tail.set(gp) +} + +// pushBackAll adds all Gs in l2 to the tail of q. After this q2 must +// not be used. +func (q *gQueue) pushBackAll(q2 gQueue) { + if q2.tail == 0 { + return + } + q2.tail.ptr().schedlink = 0 + if q.tail != 0 { + q.tail.ptr().schedlink = q2.head + } else { + q.head = q2.head + } + q.tail = q2.tail +} + +// pop removes and returns the head of queue q. It returns nil if +// q is empty. +func (q *gQueue) pop() *g { + gp := q.head.ptr() + if gp != nil { + q.head = gp.schedlink + if q.head == 0 { + q.tail = 0 + } + } + return gp +} + +// popList takes all Gs in q and returns them as a gList. +func (q *gQueue) popList() gList { + stack := gList{q.head} + *q = gQueue{} + return stack +} + +// A gList is a list of Gs linked through g.schedlink. A G can only be +// on one gQueue or gList at a time. +type gList struct { + head guintptr +} + +// empty reports whether l is empty. +func (l *gList) empty() bool { + return l.head == 0 +} + +// push adds gp to the head of l. +func (l *gList) push(gp *g) { + gp.schedlink = l.head + l.head.set(gp) +} + +// pushAll prepends all Gs in q to l. +func (l *gList) pushAll(q gQueue) { + if !q.empty() { + q.tail.ptr().schedlink = l.head + l.head = q.head + } +} + +// pop removes and returns the head of l. 
If l is empty, it returns nil. +func (l *gList) pop() *g { + gp := l.head.ptr() + if gp != nil { + l.head = gp.schedlink + } return gp } diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go index ad325987ac4d1..1715324aa09d2 100644 --- a/src/runtime/proc_test.go +++ b/src/runtime/proc_test.go @@ -891,11 +891,22 @@ func testLockOSThreadExit(t *testing.T, prog string) { output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1") want := "OK\n" if output != want { - t.Errorf("want %s, got %s\n", want, output) + t.Errorf("want %q, got %q", want, output) } output = runTestProg(t, prog, "LockOSThreadAlt") if output != want { - t.Errorf("want %s, got %s\n", want, output) + t.Errorf("want %q, got %q", want, output) + } +} + +func TestLockOSThreadAvoidsStatePropagation(t *testing.T) { + want := "OK\n" + skip := "unshare not permitted\n" + output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1") + if output == skip { + t.Skip("unshare syscall not permitted on this system") + } else if output != want { + t.Errorf("want %q, got %q", want, output) } } diff --git a/src/runtime/race.go b/src/runtime/race.go index 0124e231fa89f..adb2198c55470 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -156,7 +156,7 @@ func racecallback(cmd uintptr, ctx unsafe.Pointer) { } func raceSymbolizeCode(ctx *symbolizeCodeContext) { - f := FuncForPC(ctx.pc) + f := findfunc(ctx.pc)._Func() if f != nil { file, line := f.FileLine(ctx.pc) if line != 0 { @@ -294,6 +294,10 @@ var racearenaend uintptr func racefuncenter(uintptr) func racefuncenterfp() func racefuncexit() +func raceread(uintptr) +func racewrite(uintptr) +func racereadrange(addr, size uintptr) +func racewriterange(addr, size uintptr) func racereadrangepc1(uintptr, uintptr, uintptr) func racewriterangepc1(uintptr, uintptr, uintptr) func racecallbackthunk(uintptr) @@ -496,3 +500,76 @@ func racereleasemergeg(gp *g, addr unsafe.Pointer) { func racefingo() { 
racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0) } + +// The declarations below generate ABI wrappers for functions +// implemented in assembly in this package but declared in another +// package. + +//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32 +func abigen_sync_atomic_LoadInt32(addr *int32) (val int32) + +//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64 +func abigen_sync_atomic_LoadInt64(addr *int64) (val int64) + +//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32 +func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32) + +//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64 +func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64) + +//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr +func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr) + +//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer +func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) + +//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32 +func abigen_sync_atomic_StoreInt32(addr *int32, val int32) + +//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64 +func abigen_sync_atomic_StoreInt64(addr *int64, val int64) + +//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32 +func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32) + +//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64 +func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64) + +//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32 +func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32) + +//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64 +func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64) + +//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32 +func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32) + +//go:linkname 
abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64 +func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64) + +//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32 +func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32) + +//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32 +func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32) + +//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64 +func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64) + +//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64 +func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64) + +//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr +func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr) + +//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32 +func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool) + +//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64 +func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool) + +//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32 +func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool) + +//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64 +func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool) diff --git a/src/runtime/race/README b/src/runtime/race/README index 1c66c636956a8..be53b4c37c9a2 100644 --- a/src/runtime/race/README +++ b/src/runtime/race/README @@ -10,3 +10,4 @@ race_linux_amd64.syso built with LLVM fe2c72c59aa7f4afa45e3f65a5d16a374b6cce26 a race_linux_ppc64le.syso built with LLVM fe2c72c59aa7f4afa45e3f65a5d16a374b6cce26 and Go 323c85862a7afbde66a3bba0776bf4ba6cd7c030. 
race_netbsd_amd64.syso built with LLVM fe2c72c59aa7f4afa45e3f65a5d16a374b6cce26 and Go 323c85862a7afbde66a3bba0776bf4ba6cd7c030. race_windows_amd64.syso built with LLVM ae08a22cc215448aa3ad5a6fb099f6df77e9fa01 and Go 323c85862a7afbde66a3bba0776bf4ba6cd7c030. +race_linux_arm64.syso built with LLVM 3aa2b775d08f903f804246af10b80a439c16b436 and Go ef2c48659880c7e8a989e6721a21f018790f7793. diff --git a/src/runtime/race/race.go b/src/runtime/race/race.go index 95e965411b76d..d298e805cfaec 100644 --- a/src/runtime/race/race.go +++ b/src/runtime/race/race.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build race,linux,amd64 race,freebsd,amd64 race,netbsd,amd64 race,darwin,amd64 race,windows,amd64 race,linux,ppc64le +// +build race,linux,amd64 race,freebsd,amd64 race,netbsd,amd64 race,darwin,amd64 race,windows,amd64 race,linux,ppc64le race,linux,arm64 package race diff --git a/src/runtime/race/race_linux_arm64.syso b/src/runtime/race/race_linux_arm64.syso new file mode 100644 index 0000000000000..65bc1ececa5a7 Binary files /dev/null and b/src/runtime/race/race_linux_arm64.syso differ diff --git a/src/runtime/race/testdata/chan_test.go b/src/runtime/race/testdata/chan_test.go index 7f349c42ed783..60e55ed66a4b0 100644 --- a/src/runtime/race/testdata/chan_test.go +++ b/src/runtime/race/testdata/chan_test.go @@ -577,18 +577,32 @@ func TestRaceChanItselfCap(t *testing.T) { <-compl } -func TestRaceChanCloseLen(t *testing.T) { - v := 0 - _ = v +func TestNoRaceChanCloseLen(t *testing.T) { c := make(chan int, 10) - c <- 0 + r := make(chan int, 10) + go func() { + r <- len(c) + }() go func() { - v = 1 close(c) + r <- 0 }() - time.Sleep(1e7) - _ = len(c) - v = 2 + <-r + <-r +} + +func TestNoRaceChanCloseCap(t *testing.T) { + c := make(chan int, 10) + r := make(chan int, 10) + go func() { + r <- cap(c) + }() + go func() { + close(c) + r <- 0 + }() + <-r + <-r } func TestRaceChanCloseSend(t *testing.T) { 
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s new file mode 100644 index 0000000000000..48b119f8c4b76 --- /dev/null +++ b/src/runtime/race_arm64.s @@ -0,0 +1,471 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build race + +#include "go_asm.h" +#include "funcdata.h" +#include "textflag.h" +#include "tls_arm64.h" + +// The following thunks allow calling the gcc-compiled race runtime directly +// from Go code without going all the way through cgo. +// First, it's much faster (up to 50% speedup for real Go programs). +// Second, it eliminates race-related special cases from cgocall and scheduler. +// Third, in long-term it will allow to remove cyclic runtime/race dependency on cmd/go. + +// A brief recap of the arm64 calling convention. +// Arguments are passed in R0...R7, the rest is on stack. +// Callee-saved registers are: R19...R28. +// Temporary registers are: R9...R15 +// SP must be 16-byte aligned. + +// When calling racecalladdr, R9 is the call target address. + +// The race ctx, ThreadState *thr below, is passed in R0 and loaded in racecalladdr. + +#define load_g \ + MRS_TPIDR_R0 \ + MOVD runtime·tls_g(SB), R11 \ + ADD R11, R0 \ + MOVD 0(R0), g + +// func runtime·raceread(addr uintptr) +// Called from instrumented code. +TEXT runtime·raceread(SB), NOSPLIT, $0-8 + MOVD addr+0(FP), R1 + MOVD LR, R2 + // void __tsan_read(ThreadState *thr, void *addr, void *pc); + MOVD $__tsan_read(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·RaceRead(addr uintptr) +TEXT runtime·RaceRead(SB), NOSPLIT, $0-8 + // This needs to be a tail call, because raceread reads caller pc. 
+ JMP runtime·raceread(SB) + +// func runtime·racereadpc(void *addr, void *callpc, void *pc) +TEXT runtime·racereadpc(SB), NOSPLIT, $0-24 + MOVD addr+0(FP), R1 + MOVD callpc+8(FP), R2 + MOVD pc+16(FP), R3 + // void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc); + MOVD $__tsan_read_pc(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·racewrite(addr uintptr) +// Called from instrumented code. +TEXT runtime·racewrite(SB), NOSPLIT, $0-8 + MOVD addr+0(FP), R1 + MOVD LR, R2 + // void __tsan_write(ThreadState *thr, void *addr, void *pc); + MOVD $__tsan_write(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·RaceWrite(addr uintptr) +TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8 + // This needs to be a tail call, because racewrite reads caller pc. + JMP runtime·racewrite(SB) + +// func runtime·racewritepc(void *addr, void *callpc, void *pc) +TEXT runtime·racewritepc(SB), NOSPLIT, $0-24 + MOVD addr+0(FP), R1 + MOVD callpc+8(FP), R2 + MOVD pc+16(FP), R3 + // void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc); + MOVD $__tsan_write_pc(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·racereadrange(addr, size uintptr) +// Called from instrumented code. +TEXT runtime·racereadrange(SB), NOSPLIT, $0-16 + MOVD addr+0(FP), R1 + MOVD size+8(FP), R2 + MOVD LR, R3 + // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc); + MOVD $__tsan_read_range(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·RaceReadRange(addr, size uintptr) +TEXT runtime·RaceReadRange(SB), NOSPLIT, $0-16 + // This needs to be a tail call, because racereadrange reads caller pc. + JMP runtime·racereadrange(SB) + +// func runtime·racereadrangepc1(void *addr, uintptr sz, void *pc) +TEXT runtime·racereadrangepc1(SB), NOSPLIT, $0-24 + MOVD addr+0(FP), R1 + MOVD size+8(FP), R2 + MOVD pc+16(FP), R3 + ADD $4, R3 // pc is function start, tsan wants return address. 
+ // void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc); + MOVD $__tsan_read_range(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·racewriterange(addr, size uintptr) +// Called from instrumented code. +TEXT runtime·racewriterange(SB), NOSPLIT, $0-16 + MOVD addr+0(FP), R1 + MOVD size+8(FP), R2 + MOVD LR, R3 + // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc); + MOVD $__tsan_write_range(SB), R9 + JMP racecalladdr<>(SB) + +// func runtime·RaceWriteRange(addr, size uintptr) +TEXT runtime·RaceWriteRange(SB), NOSPLIT, $0-16 + // This needs to be a tail call, because racewriterange reads caller pc. + JMP runtime·racewriterange(SB) + +// func runtime·racewriterangepc1(void *addr, uintptr sz, void *pc) +TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24 + MOVD addr+0(FP), R1 + MOVD size+8(FP), R2 + MOVD pc+16(FP), R3 + ADD $4, R3 // pc is function start, tsan wants return address. + // void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc); + MOVD $__tsan_write_range(SB), R9 + JMP racecalladdr<>(SB) + +// If addr (R1) is out of range, do nothing. +// Otherwise, setup goroutine context and invoke racecall. Other arguments already set. +TEXT racecalladdr<>(SB), NOSPLIT, $0-0 + load_g + MOVD g_racectx(g), R0 + // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend). + MOVD runtime·racearenastart(SB), R10 + CMP R10, R1 + BLT data + MOVD runtime·racearenaend(SB), R10 + CMP R10, R1 + BLT call +data: + MOVD runtime·racedatastart(SB), R10 + CMP R10, R1 + BLT ret + MOVD runtime·racedataend(SB), R10 + CMP R10, R1 + BGT ret +call: + JMP racecall<>(SB) +ret: + RET + +// func runtime·racefuncenterfp(fp uintptr) +// Called from instrumented code. 
+// Like racefuncenter but doesn't passes an arg, uses the caller pc +// from the first slot on the stack +TEXT runtime·racefuncenterfp(SB), NOSPLIT, $0-0 + MOVD 0(RSP), R9 + JMP racefuncenter<>(SB) + +// func runtime·racefuncenter(pc uintptr) +// Called from instrumented code. +TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8 + MOVD callpc+0(FP), R9 + JMP racefuncenter<>(SB) + +// Common code for racefuncenter/racefuncenterfp +// R9 = caller's return address +TEXT racefuncenter<>(SB), NOSPLIT, $0-0 + load_g + MOVD g_racectx(g), R0 // goroutine racectx + MOVD R9, R1 + // void __tsan_func_enter(ThreadState *thr, void *pc); + MOVD $__tsan_func_enter(SB), R9 + BL racecall<>(SB) + RET + +// func runtime·racefuncexit() +// Called from instrumented code. +TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0 + load_g + MOVD g_racectx(g), R0 // race context + // void __tsan_func_exit(ThreadState *thr); + MOVD $__tsan_func_exit(SB), R9 + JMP racecall<>(SB) + +// Atomic operations for sync/atomic package. +// R3 = addr of arguments passed to this function, it can +// be fetched at 40(RSP) in racecallatomic after two times BL +// R0, R1, R2 set in racecallatomic + +// Load +TEXT sync∕atomic·LoadInt32(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic32_load(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·LoadInt64(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic64_load(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·LoadUint32(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·LoadInt32(SB) + +TEXT sync∕atomic·LoadUint64(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·LoadInt64(SB) + +TEXT sync∕atomic·LoadUintptr(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·LoadInt64(SB) + +TEXT sync∕atomic·LoadPointer(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·LoadInt64(SB) + +// Store +TEXT sync∕atomic·StoreInt32(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic32_store(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·StoreInt64(SB), NOSPLIT, $0 + GO_ARGS + MOVD 
$__tsan_go_atomic64_store(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·StoreUint32(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·StoreInt32(SB) + +TEXT sync∕atomic·StoreUint64(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·StoreInt64(SB) + +TEXT sync∕atomic·StoreUintptr(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·StoreInt64(SB) + +// Swap +TEXT sync∕atomic·SwapInt32(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic32_exchange(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·SwapInt64(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic64_exchange(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·SwapUint32(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·SwapInt32(SB) + +TEXT sync∕atomic·SwapUint64(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·SwapInt64(SB) + +TEXT sync∕atomic·SwapUintptr(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·SwapInt64(SB) + +// Add +TEXT sync∕atomic·AddInt32(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic32_fetch_add(SB), R9 + BL racecallatomic<>(SB) + MOVW add+8(FP), R0 // convert fetch_add to add_fetch + MOVW ret+16(FP), R1 + ADD R0, R1, R0 + MOVW R0, ret+16(FP) + RET + +TEXT sync∕atomic·AddInt64(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic64_fetch_add(SB), R9 + BL racecallatomic<>(SB) + MOVD add+8(FP), R0 // convert fetch_add to add_fetch + MOVD ret+16(FP), R1 + ADD R0, R1, R0 + MOVD R0, ret+16(FP) + RET + +TEXT sync∕atomic·AddUint32(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·AddInt32(SB) + +TEXT sync∕atomic·AddUint64(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·AddInt64(SB) + +TEXT sync∕atomic·AddUintptr(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·AddInt64(SB) + +// CompareAndSwap +TEXT sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic32_compare_exchange(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0 + GO_ARGS + MOVD $__tsan_go_atomic64_compare_exchange(SB), R9 + BL racecallatomic<>(SB) + RET + +TEXT 
sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·CompareAndSwapInt32(SB) + +TEXT sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·CompareAndSwapInt64(SB) + +TEXT sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0 + GO_ARGS + JMP sync∕atomic·CompareAndSwapInt64(SB) + +// Generic atomic operation implementation. +// R9 = addr of target function +TEXT racecallatomic<>(SB), NOSPLIT, $0 + // Set up these registers + // R0 = *ThreadState + // R1 = caller pc + // R2 = pc + // R3 = addr of incoming arg list + + // Trigger SIGSEGV early. + MOVD 40(RSP), R3 // 1st arg is addr. after two times BL, get it at 40(RSP) + MOVD (R3), R13 // segv here if addr is bad + // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend). + MOVD runtime·racearenastart(SB), R10 + CMP R10, R3 + BLT racecallatomic_data + MOVD runtime·racearenaend(SB), R10 + CMP R10, R3 + BLT racecallatomic_ok +racecallatomic_data: + MOVD runtime·racedatastart(SB), R10 + CMP R10, R3 + BLT racecallatomic_ignore + MOVD runtime·racedataend(SB), R10 + CMP R10, R3 + BGE racecallatomic_ignore +racecallatomic_ok: + // Addr is within the good range, call the atomic function. + load_g + MOVD g_racectx(g), R0 // goroutine context + MOVD 16(RSP), R1 // caller pc + MOVD R9, R2 // pc + ADD $40, RSP, R3 + JMP racecall<>(SB) // does not return +racecallatomic_ignore: + // Addr is outside the good range. + // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op. + // An attempt to synchronize on the address would cause crash. + MOVD R9, R20 // remember the original function + MOVD $__tsan_go_ignore_sync_begin(SB), R9 + load_g + MOVD g_racectx(g), R0 // goroutine context + BL racecall<>(SB) + MOVD R20, R9 // restore the original function + // Call the atomic function. 
+ // racecall will call LLVM race code which might clobber R28 (g) + load_g + MOVD g_racectx(g), R0 // goroutine context + MOVD 16(RSP), R1 // caller pc + MOVD R9, R2 // pc + ADD $40, RSP, R3 // arguments + BL racecall<>(SB) + // Call __tsan_go_ignore_sync_end. + MOVD $__tsan_go_ignore_sync_end(SB), R9 + MOVD g_racectx(g), R0 // goroutine context + BL racecall<>(SB) + RET + +// func runtime·racecall(void(*f)(...), ...) +// Calls C function f from race runtime and passes up to 4 arguments to it. +// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments. +TEXT runtime·racecall(SB), NOSPLIT, $0-0 + MOVD fn+0(FP), R9 + MOVD arg0+8(FP), R0 + MOVD arg1+16(FP), R1 + MOVD arg2+24(FP), R2 + MOVD arg3+32(FP), R3 + JMP racecall<>(SB) + +// Switches SP to g0 stack and calls (R9). Arguments already set. +TEXT racecall<>(SB), NOSPLIT, $0-0 + MOVD g_m(g), R10 + // Switch to g0 stack. + MOVD RSP, R19 // callee-saved, preserved across the CALL + MOVD m_g0(R10), R11 + CMP R11, g + BEQ call // already on g0 + MOVD (g_sched+gobuf_sp)(R11), R12 + MOVD R12, RSP +call: + BL R9 + MOVD R19, RSP + RET + +// C->Go callback thunk that allows to call runtime·racesymbolize from C code. +// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g. +// The overall effect of Go->C->Go call chain is similar to that of mcall. +// R0 contains command code. R1 contains command-specific context. +// See racecallback for command codes. +TEXT runtime·racecallbackthunk(SB), NOSPLIT|NOFRAME, $0 + // Handle command raceGetProcCmd (0) here. + // First, code below assumes that we are on curg, while raceGetProcCmd + // can be executed on g0. Second, it is called frequently, so will + // benefit from this fast path. + CMP $0, R0 + BNE rest + MOVD g, R13 + load_g + MOVD g_m(g), R0 + MOVD m_p(R0), R0 + MOVD p_racectx(R0), R0 + MOVD R0, (R1) + MOVD R13, g + JMP (LR) +rest: + // Save callee-saved registers (Go code won't respect that). 
+ // 8(RSP) and 16(RSP) are for args passed through racecallback + SUB $96, RSP + MOVD LR, 0(RSP) + STP (R19, R20), 24(RSP) + STP (R21, R22), 40(RSP) + STP (R23, R24), 56(RSP) + STP (R25, R26), 72(RSP) + MOVD R27, 88(RSP) + // Set g = g0. + // load_g will clobber R0, Save R0 + MOVD R0, R13 + load_g + // restore R0 + MOVD R13, R0 + MOVD g_m(g), R13 + MOVD m_g0(R13), g + + MOVD R0, 8(RSP) // func arg + MOVD R1, 16(RSP) // func arg + BL runtime·racecallback(SB) + + // All registers are smashed after Go code, reload. + MOVD g_m(g), R13 + MOVD m_curg(R13), g // g = m->curg + // Restore callee-saved registers. + MOVD 0(RSP), LR + LDP 24(RSP), (R19, R20) + LDP 40(RSP), (R21, R22) + LDP 56(RSP), (R23, R24) + LDP 72(RSP), (R25, R26) + MOVD 88(RSP), R27 + ADD $96, RSP + JMP (LR) + +// tls_g, g value for each thread in TLS +GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8 diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s new file mode 100644 index 0000000000000..0e3d582809faf --- /dev/null +++ b/src/runtime/rt0_aix_ppc64.s @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// _rt0_ppc64_aix is a function descriptor of the entrypoint function +// __start. This name is needed by cmd/link. +DATA _rt0_ppc64_aix+0(SB)/8, $__start<>(SB) +DATA _rt0_ppc64_aix+8(SB)/8, $TOC(SB) +GLOBL _rt0_ppc64_aix(SB), NOPTR, $16 + + +// The starting function must return in the loader to +// initialise some librairies, especially libthread which +// creates the main thread and adds the TLS in R13 +// R19 contains a function descriptor to the loader function +// which needs to be called. 
+// This code is similar to the __start function in C +TEXT __start<>(SB),NOSPLIT,$-8 + XOR R0, R0 + MOVD $libc___n_pthreads(SB), R4 + MOVD 0(R4), R4 + MOVD $libc___mod_init(SB), R5 + MOVD 0(R5), R5 + MOVD 0(R19), R0 + MOVD R2, 40(R1) + MOVD 8(R19), R2 + MOVD R18, R3 + MOVD R0, CTR + BL (CTR) // Return to AIX loader + + // Launch rt0_go + MOVD 40(R1), R2 + MOVD R14, R3 // argc + MOVD R15, R4 // argv + MOVD $runtime·rt0_go(SB), R12 + MOVD R12, CTR + BR (CTR) + diff --git a/src/runtime/rt0_darwin_arm64.s b/src/runtime/rt0_darwin_arm64.s index d039a8e0abab2..e3972f492429a 100644 --- a/src/runtime/rt0_darwin_arm64.s +++ b/src/runtime/rt0_darwin_arm64.s @@ -49,7 +49,9 @@ TEXT _rt0_arm64_darwin_lib(SB),NOSPLIT,$168 MOVD _cgo_sys_thread_create(SB), R4 MOVD $_rt0_arm64_darwin_lib_go(SB), R0 MOVD $0, R1 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) + ADD $16, RSP // Restore callee-save registers. MOVD 24(RSP), R19 diff --git a/src/runtime/rt0_js_wasm.s b/src/runtime/rt0_js_wasm.s index c494b0a34a3ff..50adbe22256da 100644 --- a/src/runtime/rt0_js_wasm.s +++ b/src/runtime/rt0_js_wasm.s @@ -5,53 +5,61 @@ #include "go_asm.h" #include "textflag.h" -// The register RUN indicates the current run state of the program. -// Possible values are: -#define RUN_STARTING 0 -#define RUN_RUNNING 1 -#define RUN_PAUSED 2 -#define RUN_EXITED 3 - -// _rt0_wasm_js does NOT follow the Go ABI. It has two WebAssembly parameters: +// _rt0_wasm_js is not used itself. It only exists to mark the exported functions as alive. +TEXT _rt0_wasm_js(SB),NOSPLIT,$0 + I32Const $wasm_export_run(SB) + Drop + I32Const $wasm_export_resume(SB) + Drop + I32Const $wasm_export_getsp(SB) + Drop + +// wasm_export_run gets called from JavaScript. It initializes the Go runtime and executes Go code until it needs +// to wait for an event. It does NOT follow the Go ABI. 
It has two WebAssembly parameters: // R0: argc (i32) // R1: argv (i32) -TEXT _rt0_wasm_js(SB),NOSPLIT,$0 - Get RUN - I32Const $RUN_STARTING - I32Eq - If - MOVD $runtime·wasmStack+m0Stack__size(SB), SP - - Get SP - Get R0 // argc - I64ExtendUI32 - I64Store $0 - - Get SP - Get R1 // argv - I64ExtendUI32 - I64Store $8 - - I32Const $runtime·rt0_go(SB) - I32Const $16 - I32ShrU - Set PC_F - - I32Const $RUN_RUNNING - Set RUN - Else - Get RUN - I32Const $RUN_PAUSED - I32Eq - If - I32Const $RUN_RUNNING - Set RUN - Else - Unreachable - End - End +TEXT wasm_export_run(SB),NOSPLIT,$0 + MOVD $runtime·wasmStack+m0Stack__size(SB), SP + + Get SP + Get R0 // argc + I64ExtendUI32 + I64Store $0 + + Get SP + Get R1 // argv + I64ExtendUI32 + I64Store $8 + + I32Const $runtime·rt0_go(SB) + I32Const $16 + I32ShrU + Set PC_F + + I32Const $0 + Set PC_B -// Call the function for the current PC_F. Repeat until RUN != 0 indicates pause or exit. + Call wasm_pc_f_loop(SB) + + Return + +// wasm_export_resume gets called from JavaScript. It resumes the execution of Go code until it needs to wait for +// an event. +TEXT wasm_export_resume(SB),NOSPLIT,$0 + I32Const $runtime·handleEvent(SB) + I32Const $16 + I32ShrU + Set PC_F + + I32Const $0 + Set PC_B + + Call wasm_pc_f_loop(SB) + + Return + +TEXT wasm_pc_f_loop(SB),NOSPLIT,$0 +// Call the function for the current PC_F. Repeat until PAUSE != 0 indicates pause or exit. // The WebAssembly stack may unwind, e.g. when switching goroutines. // The Go stack on the linear memory is then used to jump to the correct functions // with this loop, without having to restore the full WebAssembly stack. @@ -61,25 +69,33 @@ loop: CallIndirect $0 Drop - Get RUN - I32Const $RUN_RUNNING - I32Eq + Get PAUSE + I32Eqz BrIf loop End + I32Const $0 + Set PAUSE + + Return + +// wasm_export_getsp gets called from JavaScript to retrieve the SP. 
+TEXT wasm_export_getsp(SB),NOSPLIT,$0 + Get SP Return -TEXT runtime·pause(SB), NOSPLIT, $0 - I32Const $RUN_PAUSED - Set RUN +TEXT runtime·pause(SB), NOSPLIT, $0-8 + MOVD newsp+0(FP), SP + I32Const $1 + Set PAUSE RETUNWIND TEXT runtime·exit(SB), NOSPLIT, $0-4 Call runtime·wasmExit(SB) Drop - I32Const $RUN_EXITED - Set RUN + I32Const $1 + Set PAUSE RETUNWIND -TEXT _rt0_wasm_js_lib(SB),NOSPLIT,$0 +TEXT wasm_export_lib(SB),NOSPLIT,$0 UNDEF diff --git a/src/runtime/rt0_linux_arm64.s b/src/runtime/rt0_linux_arm64.s index 458f082159dae..a6bc99df56930 100644 --- a/src/runtime/rt0_linux_arm64.s +++ b/src/runtime/rt0_linux_arm64.s @@ -48,7 +48,9 @@ TEXT _rt0_arm64_linux_lib(SB),NOSPLIT,$184 BEQ nocgo MOVD $_rt0_arm64_linux_lib_go(SB), R0 MOVD $0, R1 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL (R4) + ADD $16, RSP B restore nocgo: diff --git a/src/runtime/rt0_linux_ppc64.s b/src/runtime/rt0_linux_ppc64.s index f81451543830d..1265b158532f8 100644 --- a/src/runtime/rt0_linux_ppc64.s +++ b/src/runtime/rt0_linux_ppc64.s @@ -6,6 +6,11 @@ TEXT _rt0_ppc64_linux(SB),NOSPLIT,$0 DWORD $0 DWORD $0 +TEXT main(SB),NOSPLIT,$0 + DWORD $_main<>(SB) + DWORD $0 + DWORD $0 + TEXT _main<>(SB),NOSPLIT,$-8 // In a statically linked binary, the stack contains argc, // argv as argc string pointers followed by a NULL, envv as a @@ -13,11 +18,13 @@ TEXT _main<>(SB),NOSPLIT,$-8 // There is no TLS base pointer. 
// // TODO(austin): Support ABI v1 dynamic linking entry point - MOVD 0(R1), R3 // argc - ADD $8, R1, R4 // argv - BR main(SB) - -TEXT main(SB),NOSPLIT,$-8 MOVD $runtime·rt0_go(SB), R12 MOVD R12, CTR + MOVBZ runtime·iscgo(SB), R5 + CMP R5, $0 + BEQ nocgo + BR (CTR) +nocgo: + MOVD 0(R1), R3 // argc + ADD $8, R1, R4 // argv BR (CTR) diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s index 73b9ae392d2b4..54ea9d58f71db 100644 --- a/src/runtime/rt0_linux_ppc64le.s +++ b/src/runtime/rt0_linux_ppc64le.s @@ -12,7 +12,7 @@ TEXT _rt0_ppc64le_linux_lib(SB),NOSPLIT,$-8 MOVW CR, R0 // Save CR in caller's frame MOVD R0, 8(R1) MOVDU R1, -320(R1) // Allocate frame. - + // Preserve callee-save registers. MOVD R14, 24(R1) MOVD R15, 32(R1) diff --git a/src/runtime/rt0_nacl_amd64p32.s b/src/runtime/rt0_nacl_amd64p32.s index 54e4b1de89e7b..38583c58b2786 100644 --- a/src/runtime/rt0_nacl_amd64p32.s +++ b/src/runtime/rt0_nacl_amd64p32.s @@ -11,7 +11,7 @@ // 8(DI) - argc // 12(DI) - argv, then 0, then envv, then 0, then auxv // NaCl entry here is almost the same, except that there -// is no saved caller PC, so 0(FP) is -8(FP) and so on. +// is no saved caller PC, so 0(FP) is -8(FP) and so on. TEXT _rt0_amd64p32_nacl(SB),NOSPLIT,$16 MOVL DI, 0(SP) CALL runtime·nacl_sysinfo(SB) diff --git a/src/runtime/rt0_windows_arm.s b/src/runtime/rt0_windows_arm.s new file mode 100644 index 0000000000000..c5787d0dee003 --- /dev/null +++ b/src/runtime/rt0_windows_arm.s @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// This is the entry point for the program from the +// kernel for an ordinary -buildmode=exe program. 
+TEXT _rt0_arm_windows(SB),NOSPLIT|NOFRAME,$0 + B ·rt0_go(SB) diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py index 510c08c286ca0..48960b7f6133f 100644 --- a/src/runtime/runtime-gdb.py +++ b/src/runtime/runtime-gdb.py @@ -353,7 +353,8 @@ def to_string(self): return "" if dtype is None: # trouble looking up, print something reasonable - return "({0}){0}".format(iface_dtype_name(self.val), self.val['data']) + return "({typename}){data}".format( + typename=iface_dtype_name(self.val), data=self.val['data']) try: return self.val['data'].cast(dtype).dereference() @@ -528,11 +529,17 @@ def invoke(self, arg, _from_tty): save_frame = gdb.selected_frame() gdb.parse_and_eval('$save_sp = $sp') gdb.parse_and_eval('$save_pc = $pc') + # In GDB, assignments to sp must be done from the + # top-most frame, so select frame 0 first. + gdb.execute('select-frame 0') gdb.parse_and_eval('$sp = {0}'.format(str(sp))) gdb.parse_and_eval('$pc = {0}'.format(str(pc))) try: gdb.execute(cmd) finally: + # In GDB, assignments to sp must be done from the + # top-most frame, so select frame 0 first. 
+ gdb.execute('select-frame 0') gdb.parse_and_eval('$sp = $save_sp') gdb.parse_and_eval('$pc = $save_pc') save_frame.select() diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index d9c6f6d22a24b..a988d1d702d95 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -26,16 +26,20 @@ func checkGdbEnvironment(t *testing.T) { case "darwin": t.Skip("gdb does not work on darwin") case "netbsd": - t.Skip("gdb does not work with threads on NetBSD; see golang.org/issue/22893 and gnats.netbsd.org/52548") + t.Skip("gdb does not work with threads on NetBSD; see https://golang.org/issue/22893 and https://gnats.netbsd.org/52548") case "windows": t.Skip("gdb tests fail on Windows: https://golang.org/issue/22687") case "linux": if runtime.GOARCH == "ppc64" { - t.Skip("skipping gdb tests on linux/ppc64; see golang.org/issue/17366") + t.Skip("skipping gdb tests on linux/ppc64; see https://golang.org/issue/17366") } if runtime.GOARCH == "mips" { t.Skip("skipping gdb tests on linux/mips; see https://golang.org/issue/25939") } + case "aix": + t.Skip("gdb does not work on AIX; see https://golang.org/issue/28558") + case "freebsd": + t.Skip("skipping gdb tests on FreeBSD; see https://golang.org/issue/29508") } if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final { t.Skip("gdb test can fail with GOROOT_FINAL pending") @@ -179,12 +183,11 @@ func testGdbPython(t *testing.T, cgo bool) { } args = append(args, "-ex", "set python print-stack full", - "-ex", "br fmt.Println", + "-ex", "br main.go:15", "-ex", "run", "-ex", "echo BEGIN info goroutines\n", "-ex", "info goroutines", "-ex", "echo END\n", - "-ex", "up", // up from fmt.Println to main "-ex", "echo BEGIN print mapvar\n", "-ex", "print mapvar", "-ex", "echo END\n", @@ -194,14 +197,13 @@ func testGdbPython(t *testing.T, cgo bool) { "-ex", "echo BEGIN info locals\n", "-ex", "info locals", "-ex", "echo END\n", - "-ex", "down", // back to fmt.Println 
(goroutine 2 below only works at bottom of stack. TODO: fix that) "-ex", "echo BEGIN goroutine 1 bt\n", "-ex", "goroutine 1 bt", "-ex", "echo END\n", "-ex", "echo BEGIN goroutine 2 bt\n", "-ex", "goroutine 2 bt", "-ex", "echo END\n", - "-ex", "clear fmt.Println", // clear the previous break point + "-ex", "clear main.go:15", // clear the previous break point "-ex", fmt.Sprintf("br main.go:%d", nLines), // new break point at the end of main "-ex", "c", "-ex", "echo BEGIN goroutine 1 bt at the end\n", @@ -242,14 +244,14 @@ func testGdbPython(t *testing.T, cgo bool) { t.Fatalf("info goroutines failed: %s", bl) } - printMapvarRe1 := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`) - printMapvarRe2 := regexp.MustCompile(`\Q = map[string]string = {["ghi"] = "jkl", ["abc"] = "def"}\E$`) + printMapvarRe1 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def", \[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl"}$`) + printMapvarRe2 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl", \[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def"}$`) if bl := blocks["print mapvar"]; !printMapvarRe1.MatchString(bl) && !printMapvarRe2.MatchString(bl) { t.Fatalf("print mapvar failed: %s", bl) } - strVarRe := regexp.MustCompile(`\Q = "abc"\E$`) + strVarRe := regexp.MustCompile(`^\$[0-9]+ = (0x[0-9a-f]+\s+)?"abc"$`) if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) { t.Fatalf("print strvar failed: %s", bl) } @@ -262,13 +264,17 @@ func testGdbPython(t *testing.T, cgo bool) { // However, the newer dwarf location list code reconstituted // aggregates from their fields and reverted their printing // back to its original form. + // Only test that all variables are listed in 'info locals' since + // different versions of gdb print variables in different + // order and with differing amount of information and formats. 
- infoLocalsRe := regexp.MustCompile(`slicevar *= *\[\]string *= *{"def"}`) - if bl := blocks["info locals"]; !infoLocalsRe.MatchString(bl) { + if bl := blocks["info locals"]; !strings.Contains(bl, "slicevar") || + !strings.Contains(bl, "mapvar") || + !strings.Contains(bl, "strvar") { t.Fatalf("info locals failed: %s", bl) } - btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?fmt\.Println.+at`) + btGoroutine1Re := regexp.MustCompile(`(?m)^#0\s+(0x[0-9a-f]+\s+in\s+)?main\.main.+at`) if bl := blocks["goroutine 1 bt"]; !btGoroutine1Re.MatchString(bl) { t.Fatalf("goroutine 1 bt failed: %s", bl) } @@ -425,11 +431,11 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. types := []string{ - "struct []main.astruct;", - "struct bucket;", - "struct hash;", - "struct main.astruct;", - "typedef struct hash * map[string]main.astruct;", + "[]main.astruct;", + "bucket;", + "hash;", + "main.astruct;", + "hash * map[string]main.astruct;", } for _, name := range types { if !strings.Contains(sgot, name) { @@ -484,13 +490,13 @@ func TestGdbConst(t *testing.T) { "-ex", "print main.aConstant", "-ex", "print main.largeConstant", "-ex", "print main.minusOne", - "-ex", "print 'runtime._MSpanInUse'", + "-ex", "print 'runtime.mSpanInUse'", "-ex", "print 'runtime._PageSize'", filepath.Join(dir, "a.exe"), } got, _ := exec.Command("gdb", args...).CombinedOutput() - sgot := strings.Replace(string(got), "\r\n", "\n", -1) + sgot := strings.ReplaceAll(string(got), "\r\n", "\n") t.Logf("output %q", sgot) diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index a0769bbb67c4f..c5667e73adc4d 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -145,7 +145,7 @@ func check() { h uint64 i, i1 float32 j, j1 float64 - k, k1 unsafe.Pointer + k unsafe.Pointer l *uint16 m [4]byte ) @@ -234,21 +234,6 @@ func check() { throw("cas6") } - k = unsafe.Pointer(uintptr(0xfedcb123)) - if sys.PtrSize == 8 { - k = 
unsafe.Pointer(uintptr(k) << 10) - } - if casp(&k, nil, nil) { - throw("casp1") - } - k1 = add(k, 1) - if !casp(&k, k, k1) { - throw("casp2") - } - if k != k1 { - throw("casp3") - } - m = [4]byte{1, 1, 1, 1} atomic.Or8(&m[1], 0xf0) if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 { @@ -320,10 +305,10 @@ var debug struct { gccheckmark int32 gcpacertrace int32 gcshrinkstackoff int32 - gcrescanstacks int32 gcstoptheworld int32 gctrace int32 invalidptr int32 + madvdontneed int32 // for Linux; issue 28466 sbrk int32 scavenge int32 scheddetail int32 @@ -338,10 +323,10 @@ var dbgvars = []dbgVar{ {"gccheckmark", &debug.gccheckmark}, {"gcpacertrace", &debug.gcpacertrace}, {"gcshrinkstackoff", &debug.gcshrinkstackoff}, - {"gcrescanstacks", &debug.gcrescanstacks}, {"gcstoptheworld", &debug.gcstoptheworld}, {"gctrace", &debug.gctrace}, {"invalidptr", &debug.invalidptr}, + {"madvdontneed", &debug.madvdontneed}, {"sbrk", &debug.sbrk}, {"scavenge", &debug.scavenge}, {"scheddetail", &debug.scheddetail}, @@ -431,7 +416,9 @@ func timediv(v int64, div int32, rem *int32) int32 { for bit := 30; bit >= 0; bit-- { if v >= int64(div)<= int64(div) { diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index ad47d1275ebc4..df9cbaef203e7 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/cpu" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -416,6 +417,7 @@ type m struct { caughtsig guintptr // goroutine running during fatal signal p puintptr // attached p for executing go code (nil if not executing go code) nextp puintptr + oldp puintptr // the p that was attached before executing a syscall id int64 mallocing int32 throwing int32 @@ -423,7 +425,6 @@ type m struct { locks int32 dying int32 profilehz int32 - helpgc int32 spinning bool // m is out of work and is actively looking for work blocked bool // m is blocked on a note inwb bool // m is executing a write barrier @@ -506,8 +507,10 @@ type p 
struct { runnext guintptr // Available G's (status == Gdead) - gfree *g - gfreecnt int32 + gFree struct { + gList + n int32 + } sudogcache []*sudog sudogbuf [128]*sudog @@ -546,7 +549,7 @@ type p struct { runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point - pad [sys.CacheLineSize]byte + pad cpu.CacheLinePad } type schedt struct { @@ -574,15 +577,28 @@ type schedt struct { nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go. // Global runnable queue. - runqhead guintptr - runqtail guintptr + runq gQueue runqsize int32 + // disable controls selective disabling of the scheduler. + // + // Use schedEnableUser to control this. + // + // disable is protected by sched.lock. + disable struct { + // user disables scheduling of user goroutines. + user bool + runnable gQueue // pending runnable Gs + n int32 // length of runnable + } + // Global cache of dead G's. - gflock mutex - gfreeStack *g - gfreeNoStack *g - ngfree int32 + gFree struct { + lock mutex + stack gList // Gs with stacks + noStack gList // Gs without stacks + n int32 + } // Central cache of sudog structs. sudoglock mutex @@ -635,14 +651,27 @@ type _func struct { entry uintptr // start pc nameoff int32 // function name - args int32 // in/out args size - funcID funcID // set for certain special runtime functions + args int32 // in/out args size + deferreturn uint32 // offset of a deferreturn block from entry, if any. pcsp int32 pcfile int32 pcln int32 npcdata int32 - nfuncdata int32 + funcID funcID // set for certain special runtime functions + _ [2]int8 // unused + nfuncdata uint8 // must be last +} + +// Pseudo-Func that is returned for PCs that occur in inlined code. +// A *Func can be either a *_func or a *funcinl, and they are distinguished +// by the first uintptr. +type funcinl struct { + zero uintptr // set to 0 to distinguish from _func + entry uintptr // entry of the real (the "outermost") frame. 
+ name string + file string + line int } // layout of Itab known to compilers @@ -833,21 +862,13 @@ var ( newprocs int32 // Information about what cpu features are available. - // Set on startup in runtime.cpuinit. // Packages outside the runtime should not use these // as they are not an external api. - // TODO: deprecate these; use internal/cpu directly. + // Set on startup in asm_{386,amd64,amd64p32}.s processorVersionInfo uint32 isIntel bool lfenceBeforeRdtsc bool - // Set in runtime.cpuinit. - support_erms bool - support_popcnt bool - support_sse2 bool - support_sse41 bool - arm64_support_atomics bool - goarm uint8 // set by cmd/link on arm systems framepointer_enabled bool // set by cmd/link ) diff --git a/src/runtime/runtime_unix_test.go b/src/runtime/runtime_unix_test.go index e91216365efee..b0cbbbe3e60d4 100644 --- a/src/runtime/runtime_unix_test.go +++ b/src/runtime/runtime_unix_test.go @@ -6,7 +6,7 @@ // We need a fast system call to provoke the race, // and Close(-1) is nearly universally fast. -// +build darwin dragonfly freebsd linux netbsd openbsd plan9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd plan9 package runtime_test diff --git a/src/runtime/select.go b/src/runtime/select.go index 3a3ac6b7ac0bd..85be1bc64da0f 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -110,7 +110,7 @@ func block() { // // selectgo returns the index of the chosen scase, which matches the // ordinal position of its respective select{recv,send,default} call. -// Also, if the chosen scase was a receive operation, it returns whether +// Also, if the chosen scase was a receive operation, it reports whether // a value was received. 
func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) { if debugSelect { @@ -245,7 +245,7 @@ loop: case caseSend: if raceenabled { - racereadpc(unsafe.Pointer(c), cas.pc, chansendpc) + racereadpc(c.raceaddr(), cas.pc, chansendpc) } if c.closed != 0 { goto sclose @@ -462,7 +462,7 @@ rclose: typedmemclr(c.elemtype, cas.elem) } if raceenabled { - raceacquire(unsafe.Pointer(c)) + raceacquire(c.raceaddr()) } goto retc diff --git a/src/runtime/sema.go b/src/runtime/sema.go index aba97331275ad..18e0a398ba76c 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -20,8 +20,8 @@ package runtime import ( + "internal/cpu" "runtime/internal/atomic" - "runtime/internal/sys" "unsafe" ) @@ -48,7 +48,7 @@ const semTabSize = 251 var semtable [semTabSize]struct { root semaRoot - pad [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte + pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte } //go:linkname sync_runtime_Semacquire sync.runtime_Semacquire diff --git a/src/runtime/semasleep_test.go b/src/runtime/semasleep_test.go new file mode 100644 index 0000000000000..5b2cc64483f95 --- /dev/null +++ b/src/runtime/semasleep_test.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !nacl,!plan9,!windows,!js + +package runtime_test + +import ( + "internal/testenv" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + "testing" + "time" +) + +// Issue #27250. Spurious wakeups to pthread_cond_timedwait_relative_np +// shouldn't cause semasleep to retry with the same timeout which would +// cause indefinite spinning. 
+func TestSpuriousWakeupsNeverHangSemasleep(t *testing.T) { + testenv.MustHaveGoBuild(t) + tempDir, err := ioutil.TempDir("", "issue-27250") + if err != nil { + t.Fatalf("Failed to create the temp directory: %v", err) + } + defer os.RemoveAll(tempDir) + + repro := ` + package main + + import "time" + + func main() { + <-time.After(1 * time.Second) + } + ` + mainPath := filepath.Join(tempDir, "main.go") + if err := ioutil.WriteFile(mainPath, []byte(repro), 0644); err != nil { + t.Fatalf("Failed to create temp file for repro.go: %v", err) + } + binaryPath := filepath.Join(tempDir, "binary") + + // Build the binary so that we can send the signal to its PID. + out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", binaryPath, mainPath).CombinedOutput() + if err != nil { + t.Fatalf("Failed to compile the binary: err: %v\nOutput: %s\n", err, out) + } + if err := os.Chmod(binaryPath, 0755); err != nil { + t.Fatalf("Failed to chmod binary: %v", err) + } + + // Now run the binary. + cmd := exec.Command(binaryPath) + if err := cmd.Start(); err != nil { + t.Fatalf("Failed to start command: %v", err) + } + doneCh := make(chan error, 1) + go func() { + doneCh <- cmd.Wait() + }() + + // With the repro running, we can continuously send to it + // a non-terminal signal such as SIGIO, to spuriously + // wakeup pthread_cond_timedwait_relative_np. + unfixedTimer := time.NewTimer(2 * time.Second) + for { + select { + case <-time.After(200 * time.Millisecond): + // Send the pesky signal that toggles spinning + // indefinitely if #27520 is not fixed. 
+ cmd.Process.Signal(syscall.SIGIO) + + case <-unfixedTimer.C: + t.Error("Program failed to return on time and has to be killed, issue #27520 still exists") + cmd.Process.Signal(syscall.SIGKILL) + return + + case err := <-doneCh: + if err != nil { + t.Fatalf("The program returned but unfortunately with an error: %v", err) + } + return + } + } +} diff --git a/src/runtime/signal_aix_ppc64.go b/src/runtime/signal_aix_ppc64.go new file mode 100644 index 0000000000000..c17563e2a5fae --- /dev/null +++ b/src/runtime/signal_aix_ppc64.go @@ -0,0 +1,85 @@ +/// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +type sigctxt struct { + info *siginfo + ctxt unsafe.Pointer +} + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) regs() *context64 { return &(*ucontext)(c.ctxt).uc_mcontext } + +func (c *sigctxt) r0() uint64 { return c.regs().gpr[0] } +func (c *sigctxt) r1() uint64 { return c.regs().gpr[1] } +func (c *sigctxt) r2() uint64 { return c.regs().gpr[2] } +func (c *sigctxt) r3() uint64 { return c.regs().gpr[3] } +func (c *sigctxt) r4() uint64 { return c.regs().gpr[4] } +func (c *sigctxt) r5() uint64 { return c.regs().gpr[5] } +func (c *sigctxt) r6() uint64 { return c.regs().gpr[6] } +func (c *sigctxt) r7() uint64 { return c.regs().gpr[7] } +func (c *sigctxt) r8() uint64 { return c.regs().gpr[8] } +func (c *sigctxt) r9() uint64 { return c.regs().gpr[9] } +func (c *sigctxt) r10() uint64 { return c.regs().gpr[10] } +func (c *sigctxt) r11() uint64 { return c.regs().gpr[11] } +func (c *sigctxt) r12() uint64 { return c.regs().gpr[12] } +func (c *sigctxt) r13() uint64 { return c.regs().gpr[13] } +func (c *sigctxt) r14() uint64 { return c.regs().gpr[14] } +func (c *sigctxt) r15() uint64 { return c.regs().gpr[15] } +func (c *sigctxt) r16() uint64 { return c.regs().gpr[16] } +func (c 
*sigctxt) r17() uint64 { return c.regs().gpr[17] } +func (c *sigctxt) r18() uint64 { return c.regs().gpr[18] } +func (c *sigctxt) r19() uint64 { return c.regs().gpr[19] } +func (c *sigctxt) r20() uint64 { return c.regs().gpr[20] } +func (c *sigctxt) r21() uint64 { return c.regs().gpr[21] } +func (c *sigctxt) r22() uint64 { return c.regs().gpr[22] } +func (c *sigctxt) r23() uint64 { return c.regs().gpr[23] } +func (c *sigctxt) r24() uint64 { return c.regs().gpr[24] } +func (c *sigctxt) r25() uint64 { return c.regs().gpr[25] } +func (c *sigctxt) r26() uint64 { return c.regs().gpr[26] } +func (c *sigctxt) r27() uint64 { return c.regs().gpr[27] } +func (c *sigctxt) r28() uint64 { return c.regs().gpr[28] } +func (c *sigctxt) r29() uint64 { return c.regs().gpr[29] } +func (c *sigctxt) r30() uint64 { return c.regs().gpr[30] } +func (c *sigctxt) r31() uint64 { return c.regs().gpr[31] } +func (c *sigctxt) sp() uint64 { return c.regs().gpr[1] } + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) pc() uint64 { return c.regs().iar } + +func (c *sigctxt) ctr() uint64 { return c.regs().ctr } +func (c *sigctxt) link() uint64 { return c.regs().lr } +func (c *sigctxt) xer() uint32 { return c.regs().xer } +func (c *sigctxt) ccr() uint32 { return c.regs().cr } +func (c *sigctxt) fpscr() uint32 { return c.regs().fpscr } +func (c *sigctxt) fpscrx() uint32 { return c.regs().fpscrx } + +// TODO(aix): find trap equivalent +func (c *sigctxt) trap() uint32 { return 0x0 } + +func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } +func (c *sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) } +func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) } + +func (c *sigctxt) set_r0(x uint64) { c.regs().gpr[0] = x } +func (c *sigctxt) set_r12(x uint64) { c.regs().gpr[12] = x } +func (c *sigctxt) set_r30(x uint64) { c.regs().gpr[30] = x } +func (c *sigctxt) set_pc(x uint64) { c.regs().iar = x } +func (c *sigctxt) set_sp(x uint64) { c.regs().gpr[1] = x } +func (c 
*sigctxt) set_link(x uint64) { c.regs().lr = x } + +func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } +func (c *sigctxt) set_sigaddr(x uint64) { + *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x) +} diff --git a/src/runtime/signal_openbsd_arm.go b/src/runtime/signal_openbsd_arm.go index 97bb13b4f3e1f..f796550e60c0d 100644 --- a/src/runtime/signal_openbsd_arm.go +++ b/src/runtime/signal_openbsd_arm.go @@ -45,7 +45,7 @@ func (c *sigctxt) oldmask() uint32 { return 0 } func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } func (c *sigctxt) sigaddr() uint32 { - return *(*uint32)(add(unsafe.Pointer(c.info), 12)) + return *(*uint32)(add(unsafe.Pointer(c.info), 16)) } func (c *sigctxt) set_pc(x uint32) { c.regs().sc_pc = x } @@ -55,5 +55,5 @@ func (c *sigctxt) set_r10(x uint32) { c.regs().sc_r10 = x } func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } func (c *sigctxt) set_sigaddr(x uint32) { - *(*uint32)(add(unsafe.Pointer(c.info), 12)) = x + *(*uint32)(add(unsafe.Pointer(c.info), 16)) = x } diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go index 5a1a5cae60d64..cac1a23c9fbd4 100644 --- a/src/runtime/signal_ppc64x.go +++ b/src/runtime/signal_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux +// +build aix linux // +build ppc64 ppc64le package runtime diff --git a/src/runtime/signal_sighandler.go b/src/runtime/signal_sighandler.go index 5a734f9050926..6e71e41f5211c 100644 --- a/src/runtime/signal_sighandler.go +++ b/src/runtime/signal_sighandler.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package runtime diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 6cd9f8ddb6f58..15f1799801921 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime @@ -773,7 +773,7 @@ func unminitSignals() { } } -// blockableSig returns whether sig may be blocked by the signal mask. +// blockableSig reports whether sig may be blocked by the signal mask. // We never want to block the signals marked _SigUnblock; // these are the synchronous signals that turn into a Go panic. // In a Go program--not a c-archive/c-shared--we never want to block diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go index a63450038d818..3fc1ec5886d8c 100644 --- a/src/runtime/signal_windows.go +++ b/src/runtime/signal_windows.go @@ -27,7 +27,7 @@ func lastcontinuetramp() func initExceptionHandler() { stdcall2(_AddVectoredExceptionHandler, 1, funcPC(exceptiontramp)) - if _AddVectoredContinueHandler == nil || unsafe.Sizeof(&_AddVectoredContinueHandler) == 4 { + if _AddVectoredContinueHandler == nil || GOARCH == "386" { // use SetUnhandledExceptionFilter for windows-386 or // if VectoredContinueHandler is unavailable. // note: SetUnhandledExceptionFilter handler won't be called, if debugging. @@ -38,7 +38,24 @@ func initExceptionHandler() { } } -// isgoexception returns true if this exception should be translated +// isAbort returns true, if context r describes exception raised +// by calling runtime.abort function. 
+// +//go:nosplit +func isAbort(r *context) bool { + switch GOARCH { + case "386", "amd64": + // In the case of an abort, the exception IP is one byte after + // the INT3 (this differs from UNIX OSes). + return isAbortPC(r.ip() - 1) + case "arm": + return isAbortPC(r.ip()) + default: + return false + } +} + +// isgoexception reports whether this exception should be translated // into a Go panic. // // It is nosplit to avoid growing the stack in case we're aborting @@ -53,9 +70,7 @@ func isgoexception(info *exceptionrecord, r *context) bool { return false } - // In the case of an abort, the exception IP is one byte after - // the INT3 (this differs from UNIX OSes). - if isAbortPC(r.ip() - 1) { + if isAbort(r) { // Never turn abort into a panic. return false } @@ -117,10 +132,18 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 { if r.ip() != 0 { sp := unsafe.Pointer(r.sp()) sp = add(sp, ^(unsafe.Sizeof(uintptr(0)) - 1)) // sp-- - *((*uintptr)(sp)) = r.ip() - r.setsp(uintptr(sp)) + r.set_sp(uintptr(sp)) + switch GOARCH { + default: + panic("unsupported architecture") + case "386", "amd64": + *((*uintptr)(sp)) = r.ip() + case "arm": + *((*uintptr)(sp)) = r.lr() + r.set_lr(r.ip()) + } } - r.setip(funcPC(sigpanic)) + r.set_ip(funcPC(sigpanic)) return _EXCEPTION_CONTINUE_EXECUTION } @@ -177,9 +200,15 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 { } print("\n") + // TODO(jordanrh1): This may be needed for 386/AMD64 as well. 
+ if GOARCH == "arm" { + _g_.m.throwing = 1 + _g_.m.caughtsig.set(gp) + } + level, _, docrash := gotraceback() if level > 0 { - tracebacktrap(r.ip(), r.sp(), 0, gp) + tracebacktrap(r.ip(), r.sp(), r.lr(), gp) tracebackothers(gp) dumpregs(r) } diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go index 9f5324095448c..a425433b20a8d 100644 --- a/src/runtime/sigqueue.go +++ b/src/runtime/sigqueue.go @@ -237,8 +237,10 @@ func signal_ignore(s uint32) { atomic.Store(&sig.ignored[s/32], i) } -// sigInitIgnored marks the signal as already ignored. This is called at -// program start by siginit. +// sigInitIgnored marks the signal as already ignored. This is called at +// program start by initsig. In a shared library initsig is called by +// libpreinit, so the runtime may not be initialized yet. +//go:nosplit func sigInitIgnored(s uint32) { i := sig.ignored[s/32] i |= 1 << (s & 31) diff --git a/src/runtime/sigtab_aix.go b/src/runtime/sigtab_aix.go new file mode 100644 index 0000000000000..42e5606ab6db6 --- /dev/null +++ b/src/runtime/sigtab_aix.go @@ -0,0 +1,264 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +var sigtable = [...]sigTabT{ + 0: {0, "SIGNONE: no trap"}, + _SIGHUP: {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"}, + _SIGINT: {_SigNotify + _SigKill, "SIGINT: interrupt"}, + _SIGQUIT: {_SigNotify + _SigThrow, "SIGQUIT: quit"}, + _SIGILL: {_SigThrow + _SigUnblock, "SIGILL: illegal instruction"}, + _SIGTRAP: {_SigThrow + _SigUnblock, "SIGTRAP: trace trap"}, + _SIGABRT: {_SigNotify + _SigThrow, "SIGABRT: abort"}, + _SIGBUS: {_SigPanic + _SigUnblock, "SIGBUS: bus error"}, + _SIGFPE: {_SigPanic + _SigUnblock, "SIGFPE: floating-point exception"}, + _SIGKILL: {0, "SIGKILL: kill"}, + _SIGUSR1: {_SigNotify, "SIGUSR1: user-defined signal 1"}, + _SIGSEGV: {_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"}, + _SIGUSR2: {_SigNotify, "SIGUSR2: user-defined signal 2"}, + _SIGPIPE: {_SigNotify, "SIGPIPE: write to broken pipe"}, + _SIGALRM: {_SigNotify, "SIGALRM: alarm clock"}, + _SIGTERM: {_SigNotify + _SigKill, "SIGTERM: termination"}, + _SIGCHLD: {_SigNotify + _SigUnblock, "SIGCHLD: child status has changed"}, + _SIGCONT: {_SigNotify + _SigDefault, "SIGCONT: continue"}, + _SIGSTOP: {0, "SIGSTOP: stop"}, + _SIGTSTP: {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"}, + _SIGTTIN: {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"}, + _SIGTTOU: {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"}, + _SIGURG: {_SigNotify, "SIGURG: urgent condition on socket"}, + _SIGXCPU: {_SigNotify, "SIGXCPU: cpu limit exceeded"}, + _SIGXFSZ: {_SigNotify, "SIGXFSZ: file size limit exceeded"}, + _SIGVTALRM: {_SigNotify, "SIGVTALRM: virtual alarm clock"}, + _SIGPROF: {_SigNotify + _SigUnblock, "SIGPROF: profiling alarm clock"}, + _SIGWINCH: {_SigNotify, "SIGWINCH: window size change"}, + _SIGSYS: {_SigThrow, "SIGSYS: bad system call"}, + _SIGIO: {_SigNotify, "SIGIO: i/o now possible"}, + _SIGPWR: {_SigNotify, "SIGPWR: power failure restart"}, + _SIGEMT: {_SigThrow, "SIGEMT: emulate instruction executed"}, + _SIGWAITING: {0, 
"SIGWAITING: reserved signal no longer used by"}, + 26: {_SigNotify, "signal 26"}, + 27: {_SigNotify, "signal 27"}, + 33: {_SigNotify, "signal 33"}, + 35: {_SigNotify, "signal 35"}, + 36: {_SigNotify, "signal 36"}, + 37: {_SigNotify, "signal 37"}, + 38: {_SigNotify, "signal 38"}, + 40: {_SigNotify, "signal 40"}, + 41: {_SigNotify, "signal 41"}, + 42: {_SigNotify, "signal 42"}, + 43: {_SigNotify, "signal 43"}, + 44: {_SigNotify, "signal 44"}, + 45: {_SigNotify, "signal 45"}, + 46: {_SigNotify, "signal 46"}, + 47: {_SigNotify, "signal 47"}, + 48: {_SigNotify, "signal 48"}, + 49: {_SigNotify, "signal 49"}, + 50: {_SigNotify, "signal 50"}, + 51: {_SigNotify, "signal 51"}, + 52: {_SigNotify, "signal 52"}, + 53: {_SigNotify, "signal 53"}, + 54: {_SigNotify, "signal 54"}, + 55: {_SigNotify, "signal 55"}, + 56: {_SigNotify, "signal 56"}, + 57: {_SigNotify, "signal 57"}, + 58: {_SigNotify, "signal 58"}, + 59: {_SigNotify, "signal 59"}, + 60: {_SigNotify, "signal 60"}, + 61: {_SigNotify, "signal 61"}, + 62: {_SigNotify, "signal 62"}, + 63: {_SigNotify, "signal 63"}, + 64: {_SigNotify, "signal 64"}, + 65: {_SigNotify, "signal 65"}, + 66: {_SigNotify, "signal 66"}, + 67: {_SigNotify, "signal 67"}, + 68: {_SigNotify, "signal 68"}, + 69: {_SigNotify, "signal 69"}, + 70: {_SigNotify, "signal 70"}, + 71: {_SigNotify, "signal 71"}, + 72: {_SigNotify, "signal 72"}, + 73: {_SigNotify, "signal 73"}, + 74: {_SigNotify, "signal 74"}, + 75: {_SigNotify, "signal 75"}, + 76: {_SigNotify, "signal 76"}, + 77: {_SigNotify, "signal 77"}, + 78: {_SigNotify, "signal 78"}, + 79: {_SigNotify, "signal 79"}, + 80: {_SigNotify, "signal 80"}, + 81: {_SigNotify, "signal 81"}, + 82: {_SigNotify, "signal 82"}, + 83: {_SigNotify, "signal 83"}, + 84: {_SigNotify, "signal 84"}, + 85: {_SigNotify, "signal 85"}, + 86: {_SigNotify, "signal 86"}, + 87: {_SigNotify, "signal 87"}, + 88: {_SigNotify, "signal 88"}, + 89: {_SigNotify, "signal 89"}, + 90: {_SigNotify, "signal 90"}, + 91: {_SigNotify, "signal 91"}, + 
92: {_SigNotify, "signal 92"}, + 93: {_SigNotify, "signal 93"}, + 94: {_SigNotify, "signal 94"}, + 95: {_SigNotify, "signal 95"}, + 96: {_SigNotify, "signal 96"}, + 97: {_SigNotify, "signal 97"}, + 98: {_SigNotify, "signal 98"}, + 99: {_SigNotify, "signal 99"}, + 100: {_SigNotify, "signal 100"}, + 101: {_SigNotify, "signal 101"}, + 102: {_SigNotify, "signal 102"}, + 103: {_SigNotify, "signal 103"}, + 104: {_SigNotify, "signal 104"}, + 105: {_SigNotify, "signal 105"}, + 106: {_SigNotify, "signal 106"}, + 107: {_SigNotify, "signal 107"}, + 108: {_SigNotify, "signal 108"}, + 109: {_SigNotify, "signal 109"}, + 110: {_SigNotify, "signal 110"}, + 111: {_SigNotify, "signal 111"}, + 112: {_SigNotify, "signal 112"}, + 113: {_SigNotify, "signal 113"}, + 114: {_SigNotify, "signal 114"}, + 115: {_SigNotify, "signal 115"}, + 116: {_SigNotify, "signal 116"}, + 117: {_SigNotify, "signal 117"}, + 118: {_SigNotify, "signal 118"}, + 119: {_SigNotify, "signal 119"}, + 120: {_SigNotify, "signal 120"}, + 121: {_SigNotify, "signal 121"}, + 122: {_SigNotify, "signal 122"}, + 123: {_SigNotify, "signal 123"}, + 124: {_SigNotify, "signal 124"}, + 125: {_SigNotify, "signal 125"}, + 126: {_SigNotify, "signal 126"}, + 127: {_SigNotify, "signal 127"}, + 128: {_SigNotify, "signal 128"}, + 129: {_SigNotify, "signal 129"}, + 130: {_SigNotify, "signal 130"}, + 131: {_SigNotify, "signal 131"}, + 132: {_SigNotify, "signal 132"}, + 133: {_SigNotify, "signal 133"}, + 134: {_SigNotify, "signal 134"}, + 135: {_SigNotify, "signal 135"}, + 136: {_SigNotify, "signal 136"}, + 137: {_SigNotify, "signal 137"}, + 138: {_SigNotify, "signal 138"}, + 139: {_SigNotify, "signal 139"}, + 140: {_SigNotify, "signal 140"}, + 141: {_SigNotify, "signal 141"}, + 142: {_SigNotify, "signal 142"}, + 143: {_SigNotify, "signal 143"}, + 144: {_SigNotify, "signal 144"}, + 145: {_SigNotify, "signal 145"}, + 146: {_SigNotify, "signal 146"}, + 147: {_SigNotify, "signal 147"}, + 148: {_SigNotify, "signal 148"}, + 149: {_SigNotify, 
"signal 149"}, + 150: {_SigNotify, "signal 150"}, + 151: {_SigNotify, "signal 151"}, + 152: {_SigNotify, "signal 152"}, + 153: {_SigNotify, "signal 153"}, + 154: {_SigNotify, "signal 154"}, + 155: {_SigNotify, "signal 155"}, + 156: {_SigNotify, "signal 156"}, + 157: {_SigNotify, "signal 157"}, + 158: {_SigNotify, "signal 158"}, + 159: {_SigNotify, "signal 159"}, + 160: {_SigNotify, "signal 160"}, + 161: {_SigNotify, "signal 161"}, + 162: {_SigNotify, "signal 162"}, + 163: {_SigNotify, "signal 163"}, + 164: {_SigNotify, "signal 164"}, + 165: {_SigNotify, "signal 165"}, + 166: {_SigNotify, "signal 166"}, + 167: {_SigNotify, "signal 167"}, + 168: {_SigNotify, "signal 168"}, + 169: {_SigNotify, "signal 169"}, + 170: {_SigNotify, "signal 170"}, + 171: {_SigNotify, "signal 171"}, + 172: {_SigNotify, "signal 172"}, + 173: {_SigNotify, "signal 173"}, + 174: {_SigNotify, "signal 174"}, + 175: {_SigNotify, "signal 175"}, + 176: {_SigNotify, "signal 176"}, + 177: {_SigNotify, "signal 177"}, + 178: {_SigNotify, "signal 178"}, + 179: {_SigNotify, "signal 179"}, + 180: {_SigNotify, "signal 180"}, + 181: {_SigNotify, "signal 181"}, + 182: {_SigNotify, "signal 182"}, + 183: {_SigNotify, "signal 183"}, + 184: {_SigNotify, "signal 184"}, + 185: {_SigNotify, "signal 185"}, + 186: {_SigNotify, "signal 186"}, + 187: {_SigNotify, "signal 187"}, + 188: {_SigNotify, "signal 188"}, + 189: {_SigNotify, "signal 189"}, + 190: {_SigNotify, "signal 190"}, + 191: {_SigNotify, "signal 191"}, + 192: {_SigNotify, "signal 192"}, + 193: {_SigNotify, "signal 193"}, + 194: {_SigNotify, "signal 194"}, + 195: {_SigNotify, "signal 195"}, + 196: {_SigNotify, "signal 196"}, + 197: {_SigNotify, "signal 197"}, + 198: {_SigNotify, "signal 198"}, + 199: {_SigNotify, "signal 199"}, + 200: {_SigNotify, "signal 200"}, + 201: {_SigNotify, "signal 201"}, + 202: {_SigNotify, "signal 202"}, + 203: {_SigNotify, "signal 203"}, + 204: {_SigNotify, "signal 204"}, + 205: {_SigNotify, "signal 205"}, + 206: {_SigNotify, 
"signal 206"}, + 207: {_SigNotify, "signal 207"}, + 208: {_SigNotify, "signal 208"}, + 209: {_SigNotify, "signal 209"}, + 210: {_SigNotify, "signal 210"}, + 211: {_SigNotify, "signal 211"}, + 212: {_SigNotify, "signal 212"}, + 213: {_SigNotify, "signal 213"}, + 214: {_SigNotify, "signal 214"}, + 215: {_SigNotify, "signal 215"}, + 216: {_SigNotify, "signal 216"}, + 217: {_SigNotify, "signal 217"}, + 218: {_SigNotify, "signal 218"}, + 219: {_SigNotify, "signal 219"}, + 220: {_SigNotify, "signal 220"}, + 221: {_SigNotify, "signal 221"}, + 222: {_SigNotify, "signal 222"}, + 223: {_SigNotify, "signal 223"}, + 224: {_SigNotify, "signal 224"}, + 225: {_SigNotify, "signal 225"}, + 226: {_SigNotify, "signal 226"}, + 227: {_SigNotify, "signal 227"}, + 228: {_SigNotify, "signal 228"}, + 229: {_SigNotify, "signal 229"}, + 230: {_SigNotify, "signal 230"}, + 231: {_SigNotify, "signal 231"}, + 232: {_SigNotify, "signal 232"}, + 233: {_SigNotify, "signal 233"}, + 234: {_SigNotify, "signal 234"}, + 235: {_SigNotify, "signal 235"}, + 236: {_SigNotify, "signal 236"}, + 237: {_SigNotify, "signal 237"}, + 238: {_SigNotify, "signal 238"}, + 239: {_SigNotify, "signal 239"}, + 240: {_SigNotify, "signal 240"}, + 241: {_SigNotify, "signal 241"}, + 242: {_SigNotify, "signal 242"}, + 243: {_SigNotify, "signal 243"}, + 244: {_SigNotify, "signal 244"}, + 245: {_SigNotify, "signal 245"}, + 246: {_SigNotify, "signal 246"}, + 247: {_SigNotify, "signal 247"}, + 248: {_SigNotify, "signal 248"}, + 249: {_SigNotify, "signal 249"}, + 250: {_SigNotify, "signal 250"}, + 251: {_SigNotify, "signal 251"}, + 252: {_SigNotify, "signal 252"}, + 253: {_SigNotify, "signal 253"}, + 254: {_SigNotify, "signal 254"}, + 255: {_SigNotify, "signal 255"}, +} diff --git a/src/runtime/slice.go b/src/runtime/slice.go index fd5d08b52c103..2309b1a615ee5 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -5,6 +5,7 @@ package runtime import ( + "runtime/internal/math" "runtime/internal/sys" "unsafe" ) @@ -22,28 
+23,6 @@ type notInHeapSlice struct { cap int } -// maxElems is a lookup table containing the maximum capacity for a slice. -// The index is the size of the slice element. -var maxElems = [...]uintptr{ - ^uintptr(0), - maxAlloc / 1, maxAlloc / 2, maxAlloc / 3, maxAlloc / 4, - maxAlloc / 5, maxAlloc / 6, maxAlloc / 7, maxAlloc / 8, - maxAlloc / 9, maxAlloc / 10, maxAlloc / 11, maxAlloc / 12, - maxAlloc / 13, maxAlloc / 14, maxAlloc / 15, maxAlloc / 16, - maxAlloc / 17, maxAlloc / 18, maxAlloc / 19, maxAlloc / 20, - maxAlloc / 21, maxAlloc / 22, maxAlloc / 23, maxAlloc / 24, - maxAlloc / 25, maxAlloc / 26, maxAlloc / 27, maxAlloc / 28, - maxAlloc / 29, maxAlloc / 30, maxAlloc / 31, maxAlloc / 32, -} - -// maxSliceCap returns the maximum capacity for a slice. -func maxSliceCap(elemsize uintptr) uintptr { - if elemsize < uintptr(len(maxElems)) { - return maxElems[elemsize] - } - return maxAlloc / elemsize -} - func panicmakeslicelen() { panic(errorString("makeslice: len out of range")) } @@ -52,26 +31,25 @@ func panicmakeslicecap() { panic(errorString("makeslice: cap out of range")) } -func makeslice(et *_type, len, cap int) slice { - // NOTE: The len > maxElements check here is not strictly necessary, - // but it produces a 'len out of range' error instead of a 'cap out of range' error - // when someone does make([]T, bignumber). 'cap out of range' is true too, - // but since the cap is only being supplied implicitly, saying len is clearer. - // See issue 4085. - maxElements := maxSliceCap(et.size) - if len < 0 || uintptr(len) > maxElements { - panicmakeslicelen() - } - - if cap < len || uintptr(cap) > maxElements { +func makeslice(et *_type, len, cap int) unsafe.Pointer { + mem, overflow := math.MulUintptr(et.size, uintptr(cap)) + if overflow || mem > maxAlloc || len < 0 || len > cap { + // NOTE: Produce a 'len out of range' error instead of a + // 'cap out of range' error when someone does make([]T, bignumber). 
+ // 'cap out of range' is true too, but since the cap is only being + // supplied implicitly, saying len is clearer. + // See golang.org/issue/4085. + mem, overflow := math.MulUintptr(et.size, uintptr(len)) + if overflow || mem > maxAlloc || len < 0 { + panicmakeslicelen() + } panicmakeslicecap() } - p := mallocgc(et.size*uintptr(cap), et, true) - return slice{p, len, cap} + return mallocgc(mem, et, true) } -func makeslice64(et *_type, len64, cap64 int64) slice { +func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer { len := int(len64) if int64(len) != len64 { panicmakeslicelen() @@ -104,10 +82,11 @@ func growslice(et *_type, old slice, cap int) slice { msanread(old.array, uintptr(old.len*int(et.size))) } + if cap < old.cap { + panic(errorString("growslice: cap out of range")) + } + if et.size == 0 { - if cap < old.cap { - panic(errorString("growslice: cap out of range")) - } // append should not create a slice with nil pointer but non-zero len. // We assume that append doesn't need to preserve old.array in this case. 
return slice{unsafe.Pointer(&zerobase), old.len, cap} @@ -169,15 +148,14 @@ func growslice(et *_type, old slice, cap int) slice { default: lenmem = uintptr(old.len) * et.size newlenmem = uintptr(cap) * et.size - capmem = roundupsize(uintptr(newcap) * et.size) - overflow = uintptr(newcap) > maxSliceCap(et.size) + capmem, overflow = math.MulUintptr(et.size, uintptr(newcap)) + capmem = roundupsize(capmem) newcap = int(capmem / et.size) } - // The check of overflow (uintptr(newcap) > maxSliceCap(et.size)) - // in addition to capmem > _MaxMem is needed to prevent an overflow - // which can be used to trigger a segfault on 32bit architectures - // with this example program: + // The check of overflow in addition to capmem > maxAlloc is needed + // to prevent an overflow which can be used to trigger a segfault + // on 32bit architectures with this example program: // // type T [1<<27 + 1]int64 // @@ -188,28 +166,26 @@ func growslice(et *_type, old slice, cap int) slice { // s = append(s, d, d, d, d) // print(len(s), "\n") // } - if cap < old.cap || overflow || capmem > maxAlloc { + if overflow || capmem > maxAlloc { panic(errorString("growslice: cap out of range")) } var p unsafe.Pointer if et.kind&kindNoPointers != 0 { p = mallocgc(capmem, nil, false) - memmove(p, old.array, lenmem) // The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length). // Only clear the part that will not be overwritten. memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem) } else { // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
p = mallocgc(capmem, et, true) - if !writeBarrier.enabled { - memmove(p, old.array, lenmem) - } else { - for i := uintptr(0); i < lenmem; i += et.size { - typedmemmove(et, add(p, i), add(old.array, i)) - } + if writeBarrier.enabled { + // Only shade the pointers in old.array since we know the destination slice p + // only contains nil pointers because it has been cleared during alloc. + bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem) } } + memmove(p, old.array, lenmem) return slice{p, old.len, newcap} } diff --git a/src/runtime/slice_test.go b/src/runtime/slice_test.go index c2dfb7afd19b8..0463fc70a764b 100644 --- a/src/runtime/slice_test.go +++ b/src/runtime/slice_test.go @@ -10,20 +10,68 @@ import ( const N = 20 -func BenchmarkMakeSlice(b *testing.B) { - var x []byte - for i := 0; i < b.N; i++ { - x = make([]byte, 32) - _ = x - } -} - type ( struct24 struct{ a, b, c int64 } struct32 struct{ a, b, c, d int64 } struct40 struct{ a, b, c, d, e int64 } ) +func BenchmarkMakeSlice(b *testing.B) { + const length = 2 + b.Run("Byte", func(b *testing.B) { + var x []byte + for i := 0; i < b.N; i++ { + x = make([]byte, length, 2*length) + _ = x + } + }) + b.Run("Int16", func(b *testing.B) { + var x []int16 + for i := 0; i < b.N; i++ { + x = make([]int16, length, 2*length) + _ = x + } + }) + b.Run("Int", func(b *testing.B) { + var x []int + for i := 0; i < b.N; i++ { + x = make([]int, length, 2*length) + _ = x + } + }) + b.Run("Ptr", func(b *testing.B) { + var x []*byte + for i := 0; i < b.N; i++ { + x = make([]*byte, length, 2*length) + _ = x + } + }) + b.Run("Struct", func(b *testing.B) { + b.Run("24", func(b *testing.B) { + var x []struct24 + for i := 0; i < b.N; i++ { + x = make([]struct24, length, 2*length) + _ = x + } + }) + b.Run("32", func(b *testing.B) { + var x []struct32 + for i := 0; i < b.N; i++ { + x = make([]struct32, length, 2*length) + _ = x + } + }) + b.Run("40", func(b *testing.B) { + var x []struct40 + for i := 0; i < b.N; i++ { + x = 
make([]struct40, length, 2*length) + _ = x + } + }) + + }) +} + func BenchmarkGrowSlice(b *testing.B) { b.Run("Byte", func(b *testing.B) { x := make([]byte, 9) diff --git a/src/runtime/stack.go b/src/runtime/stack.go index c7bfc0434bae4..85902a6b68eea 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -211,7 +211,7 @@ func stackpoolalloc(order uint8) gclinkptr { // Adds stack x to the free pool. Must be called with stackpoolmu held. func stackpoolfree(x gclinkptr, order uint8) { s := spanOfUnchecked(uintptr(x)) - if s.state != _MSpanManual { + if s.state != mSpanManual { throw("freeing stack not in a stack span") } if s.manualFreeList.ptr() == nil { @@ -350,7 +350,7 @@ func stackalloc(n uint32) stack { } var x gclinkptr c := thisg.m.mcache - if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 { + if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" { // c == nil can happen in the guts of exitsyscall or // procresize. Just get a stack from the global pool. // Also don't touch stackcache during gc @@ -445,7 +445,7 @@ func stackfree(stk stack) { } x := gclinkptr(v) c := gp.m.mcache - if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 { + if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" { lock(&stackpoolmu) stackpoolfree(x, order) unlock(&stackpoolmu) @@ -459,7 +459,7 @@ func stackfree(stk stack) { } } else { s := spanOfUnchecked(uintptr(v)) - if s.state != _MSpanManual { + if s.state != mSpanManual { println(hex(s.base()), v) throw("bad span state") } @@ -625,7 +625,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { return true } - locals, args := getStackMap(frame, &adjinfo.cache, true) + locals, args, objs := getStackMap(frame, &adjinfo.cache, true) // Adjust local variables if stack frame has been allocated. 
if locals.n > 0 { @@ -663,6 +663,42 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { } adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{}) } + + // Adjust pointers in all stack objects (whether they are live or not). + // See comments in mgcmark.go:scanframeworker. + if frame.varp != 0 { + for _, obj := range objs { + off := obj.off + base := frame.varp // locals base pointer + if off >= 0 { + base = frame.argp // arguments and return values base pointer + } + p := base + uintptr(off) + if p < frame.sp { + // Object hasn't been allocated in the frame yet. + // (Happens when the stack bounds check fails and + // we call into morestack.) + continue + } + t := obj.typ + gcdata := t.gcdata + var s *mspan + if t.kind&kindGCProg != 0 { + // See comments in mgcmark.go:scanstack + s = materializeGCProg(t.ptrdata, gcdata) + gcdata = (*byte)(unsafe.Pointer(s.startAddr)) + } + for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize { + if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 { + adjustpointer(adjinfo, unsafe.Pointer(p+i)) + } + } + if s != nil { + dematerializeGCProg(s) + } + } + } + return true } @@ -981,9 +1017,6 @@ func newstack() { // system stack. gcw := &gp.m.p.ptr().gcw scanstack(gp, gcw) - if gcBlackenPromptly { - gcw.dispose() - } gp.gcscandone = true } gp.preemptscan = false @@ -1139,9 +1172,9 @@ func freeStackSpans() { unlock(&stackLarge.lock) } -// getStackMap returns the locals and arguments live pointer maps for -// frame. -func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector) { +// getStackMap returns the locals and arguments live pointer maps, and +// stack object list for frame. +func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) { targetpc := frame.continpc if targetpc == 0 { // Frame is dead. Return empty bitvectors. 
@@ -1221,7 +1254,14 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args // Arguments. if frame.arglen > 0 { if frame.argmap != nil { + // argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall. + // In this case, arglen specifies how much of the args section is actually live. + // (It could be either all the args + results, or just the args.) args = *frame.argmap + n := int32(frame.arglen / sys.PtrSize) + if n < args.n { + args.n = n // Don't use more of the arguments than arglen. + } } else { stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) if stackmap == nil || stackmap.n <= 0 { @@ -1238,9 +1278,33 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args } } } + + // stack objects. + p := funcdata(f, _FUNCDATA_StackObjects) + if p != nil { + n := *(*uintptr)(p) + p = add(p, sys.PtrSize) + *(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)} + // Note: the noescape above is needed to keep + // getStackMap from "leaking param content: + // frame". That leak propagates up to getgcmask, then + // GCMask, then verifyGCInfo, which converts the stack + // gcinfo tests into heap gcinfo tests :( + } + return } +// A stackObjectRecord is generated by the compiler for each stack object in a stack frame. +// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects. 
+type stackObjectRecord struct { + // offset in frame + // if negative, offset from varp + // if non-negative, offset from argp + off int + typ *_type +} + //go:nosplit func morestackc() { throw("attempt to execute system stack code on user stack") diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go index dc653951412b9..f52381710dff6 100644 --- a/src/runtime/stack_test.go +++ b/src/runtime/stack_test.go @@ -595,6 +595,9 @@ func (s structWithMethod) callers() []uintptr { return pc[:Callers(0, pc)] } +// The noinline prevents this function from being inlined +// into a wrapper. TODO: remove this when issue 28640 is fixed. +//go:noinline func (s structWithMethod) stack() string { buf := make([]byte, 4<<10) return string(buf[:Stack(buf, false)]) diff --git a/src/runtime/string.go b/src/runtime/string.go index 6e42483b13d67..839e882cdcd19 100644 --- a/src/runtime/string.go +++ b/src/runtime/string.go @@ -135,7 +135,8 @@ func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) { // and otherwise intrinsified by the compiler. // // Some internal compiler optimizations use this function. -// - Used for m[string(k)] lookup where m is a string-keyed map and k is a []byte. +// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)] +// where k is []byte, T1 to Tn is a nesting of struct and array literals. // - Used for "<"+string(b)+">" concatenation where b is []byte. // - Used for string(b)=="foo" comparison where b is []byte. 
func slicebytetostringtmp(b []byte) string { @@ -333,7 +334,7 @@ func index(s, t string) int { return 0 } for i := 0; i < len(s); i++ { - if s[i] == t[0] && hasprefix(s[i:], t) { + if s[i] == t[0] && hasPrefix(s[i:], t) { return i } } @@ -344,8 +345,8 @@ func contains(s, t string) bool { return index(s, t) >= 0 } -func hasprefix(s, t string) bool { - return len(s) >= len(t) && s[:len(t)] == t +func hasPrefix(s, prefix string) bool { + return len(s) >= len(prefix) && s[:len(prefix)] == prefix } const ( diff --git a/src/runtime/string_test.go b/src/runtime/string_test.go index 678ff003636f9..a1716fa32f2ec 100644 --- a/src/runtime/string_test.go +++ b/src/runtime/string_test.go @@ -240,6 +240,34 @@ func TestCompareTempString(t *testing.T) { } } +func TestStringIndexHaystack(t *testing.T) { + // See issue 25864. + haystack := []byte("hello") + needle := "ll" + n := testing.AllocsPerRun(1000, func() { + if strings.Index(string(haystack), needle) != 2 { + t.Fatalf("needle not found") + } + }) + if n != 0 { + t.Fatalf("want 0 allocs, got %v", n) + } +} + +func TestStringIndexNeedle(t *testing.T) { + // See issue 25864. + haystack := "hello" + needle := []byte("ll") + n := testing.AllocsPerRun(1000, func() { + if strings.Index(haystack, string(needle)) != 2 { + t.Fatalf("needle not found") + } + }) + if n != 0 { + t.Fatalf("want 0 allocs, got %v", n) + } +} + func TestStringOnStack(t *testing.T) { s := "" for i := 0; i < 3; i++ { diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index 74b385d5960cd..d4698e805c7c2 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -68,12 +68,12 @@ func badsystemstack() { // used only when the caller knows that *ptr contains no heap pointers // because either: // -// 1. *ptr is initialized memory and its type is pointer-free. +// *ptr is initialized memory and its type is pointer-free, or // -// 2. *ptr is uninitialized memory (e.g., memory that's being reused -// for a new allocation) and hence contains only "junk". 
+// *ptr is uninitialized memory (e.g., memory that's being reused +// for a new allocation) and hence contains only "junk". // -// in memclr_*.s +// The (CPU-specific) implementations of this function are in memclr_*.s. //go:noescape func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) @@ -154,6 +154,8 @@ func breakpoint() // one call that copies results back, in cgocallbackg1, and it does NOT pass a // frame type, meaning there are no write barriers invoked. See that call // site for justification. +// +// Package reflect accesses this symbol through a linkname. func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32) func procyield(cycles uint32) @@ -178,7 +180,7 @@ func goexit(neverCallThisFunction) // cgocallback_gofunc is not called from go, only from cgocallback, // so the arguments will be found via cgocallback's pointer-declared arguments. // See the assembly implementations for more details. -func cgocallback_gofunc(fv uintptr, frame uintptr, framesize, ctxt uintptr) +func cgocallback_gofunc(fv, frame, framesize, ctxt uintptr) // publicationBarrier performs a store/store barrier (a "publication" // or "export" barrier). Some form of synchronization is required @@ -296,7 +298,7 @@ func round(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) } -// checkASM returns whether assembly runtime checks have passed. +// checkASM reports whether assembly runtime checks have passed. 
func checkASM() bool func memequal_varlen(a, b unsafe.Pointer) bool diff --git a/src/runtime/stubs2.go b/src/runtime/stubs2.go index 02249d0aadc67..57134f7354ad1 100644 --- a/src/runtime/stubs2.go +++ b/src/runtime/stubs2.go @@ -8,6 +8,7 @@ // +build !nacl // +build !js // +build !darwin +// +build !aix package runtime @@ -25,7 +26,8 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32 //go:noescape func open(name *byte, mode, perm int32) int32 -func madvise(addr unsafe.Pointer, n uintptr, flags int32) +// return value is only set on linux to be used in osinit() +func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32 // exitThread terminates the current thread, writing *wait = 0 when // the stack is safe to reclaim. diff --git a/src/runtime/stubs3.go b/src/runtime/stubs3.go index 5c0786e411a24..a9ff689e79f8f 100644 --- a/src/runtime/stubs3.go +++ b/src/runtime/stubs3.go @@ -8,6 +8,7 @@ // +build !nacl // +build !freebsd // +build !darwin +// +build !aix package runtime diff --git a/src/runtime/stubs_386.go b/src/runtime/stubs_386.go new file mode 100644 index 0000000000000..01d92d399f63d --- /dev/null +++ b/src/runtime/stubs_386.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +func float64touint32(a float64) uint32 +func uint32tofloat64(a uint32) float64 diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index d90ab86ffa10e..17e342ef699a0 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -13,17 +13,12 @@ import ( // Frames may be used to get function/file/line information for a // slice of PC values returned by Callers. type Frames struct { - // callers is a slice of PCs that have not yet been expanded. + // callers is a slice of PCs that have not yet been expanded to frames. 
callers []uintptr - // stackExpander expands callers into a sequence of Frames, - // tracking the necessary state across PCs. - stackExpander stackExpander - - // elideWrapper indicates that, if the next frame is an - // autogenerated wrapper function, it should be elided from - // the stack. - elideWrapper bool + // frames is a slice of Frames that have yet to be returned. + frames []Frame + frameStore [2]Frame } // Frame is the information returned by Frames for each call frame. @@ -59,224 +54,86 @@ type Frame struct { Entry uintptr } -// stackExpander expands a call stack of PCs into a sequence of -// Frames. It tracks state across PCs necessary to perform this -// expansion. -// -// This is the core of the Frames implementation, but is a separate -// internal API to make it possible to use within the runtime without -// heap-allocating the PC slice. The only difference with the public -// Frames API is that the caller is responsible for threading the PC -// slice between expansion steps in this API. If escape analysis were -// smarter, we may not need this (though it may have to be a lot -// smarter). -type stackExpander struct { - // pcExpander expands the current PC into a sequence of Frames. - pcExpander pcExpander - - // If previous caller in iteration was a panic, then the next - // PC in the call stack is the address of the faulting - // instruction instead of the return address of the call. - wasPanic bool - - // skip > 0 indicates that skip frames in the expansion of the - // first PC should be skipped over and callers[1] should also - // be skipped. - skip int -} - // CallersFrames takes a slice of PC values returned by Callers and // prepares to return function/file/line information. // Do not change the slice until you are done with the Frames. 
func CallersFrames(callers []uintptr) *Frames { - ci := &Frames{} - ci.callers = ci.stackExpander.init(callers) - return ci -} - -func (se *stackExpander) init(callers []uintptr) []uintptr { - if len(callers) >= 1 { - pc := callers[0] - s := pc - skipPC - if s >= 0 && s < sizeofSkipFunction { - // Ignore skip frame callers[0] since this means the caller trimmed the PC slice. - return callers[1:] - } - } - if len(callers) >= 2 { - pc := callers[1] - s := pc - skipPC - if s > 0 && s < sizeofSkipFunction { - // Skip the first s inlined frames when we expand the first PC. - se.skip = int(s) - } - } - return callers + f := &Frames{callers: callers} + f.frames = f.frameStore[:0] + return f } // Next returns frame information for the next caller. // If more is false, there are no more callers (the Frame value is valid). func (ci *Frames) Next() (frame Frame, more bool) { - ci.callers, frame, more = ci.stackExpander.next(ci.callers, ci.elideWrapper) - ci.elideWrapper = elideWrapperCalling(frame.Function) - return -} - -func (se *stackExpander) next(callers []uintptr, elideWrapper bool) (ncallers []uintptr, frame Frame, more bool) { - ncallers = callers -again: - if !se.pcExpander.more { - // Expand the next PC. - if len(ncallers) == 0 { - se.wasPanic = false - return ncallers, Frame{}, false + for len(ci.frames) < 2 { + // Find the next frame. + // We need to look for 2 frames so we know what + // to return for the "more" result. + if len(ci.callers) == 0 { + break } - se.pcExpander.init(ncallers[0], se.wasPanic) - ncallers = ncallers[1:] - se.wasPanic = se.pcExpander.funcInfo.valid() && se.pcExpander.funcInfo.funcID == funcID_sigpanic - if se.skip > 0 { - for ; se.skip > 0; se.skip-- { - se.pcExpander.next() + pc := ci.callers[0] + ci.callers = ci.callers[1:] + funcInfo := findfunc(pc) + if !funcInfo.valid() { + if cgoSymbolizer != nil { + // Pre-expand cgo frames. We could do this + // incrementally, too, but there's no way to + // avoid allocation in this case anyway. 
+ ci.frames = append(ci.frames, expandCgoFrames(pc)...) } - se.skip = 0 - // Drop skipPleaseUseCallersFrames. - ncallers = ncallers[1:] - } - if !se.pcExpander.more { - // No symbolic information for this PC. - // However, we return at least one frame for - // every PC, so return an invalid frame. - return ncallers, Frame{}, len(ncallers) > 0 + continue } - } - - frame = se.pcExpander.next() - if elideWrapper && frame.File == "" { - // Ignore autogenerated functions such as pointer - // method forwarding functions. These are an - // implementation detail that doesn't reflect the - // source code. - goto again - } - return ncallers, frame, se.pcExpander.more || len(ncallers) > 0 -} - -// A pcExpander expands a single PC into a sequence of Frames. -type pcExpander struct { - // more indicates that the next call to next will return a - // valid frame. - more bool - - // pc is the pc being expanded. - pc uintptr - - // frames is a pre-expanded set of Frames to return from the - // iterator. If this is set, then this is everything that will - // be returned from the iterator. - frames []Frame - - // funcInfo is the funcInfo of the function containing pc. - funcInfo funcInfo - - // inlTree is the inlining tree of the function containing pc. - inlTree *[1 << 20]inlinedCall - - // file and line are the file name and line number of the next - // frame. - file string - line int32 - - // inlIndex is the inlining index of the next frame, or -1 if - // the next frame is an outermost frame. - inlIndex int32 -} - -// init initializes this pcExpander to expand pc. It sets ex.more if -// pc expands to any Frames. -// -// A pcExpander can be reused by calling init again. -// -// If pc was a "call" to sigpanic, panicCall should be true. In this -// case, pc is treated as the address of a faulting instruction -// instead of the return address of a call. 
-func (ex *pcExpander) init(pc uintptr, panicCall bool) { - ex.more = false - - ex.funcInfo = findfunc(pc) - if !ex.funcInfo.valid() { - if cgoSymbolizer != nil { - // Pre-expand cgo frames. We could do this - // incrementally, too, but there's no way to - // avoid allocation in this case anyway. - ex.frames = expandCgoFrames(pc) - ex.more = len(ex.frames) > 0 + f := funcInfo._Func() + entry := f.Entry() + if pc > entry { + // We store the pc of the start of the instruction following + // the instruction in question (the call or the inline mark). + // This is done for historical reasons, and to make FuncForPC + // work correctly for entries in the result of runtime.Callers. + pc-- } - return - } - - ex.more = true - entry := ex.funcInfo.entry - ex.pc = pc - if ex.pc > entry && !panicCall { - ex.pc-- - } - - // file and line are the innermost position at pc. - ex.file, ex.line = funcline1(ex.funcInfo, ex.pc, false) - - // Get inlining tree at pc - inldata := funcdata(ex.funcInfo, _FUNCDATA_InlTree) - if inldata != nil { - ex.inlTree = (*[1 << 20]inlinedCall)(inldata) - ex.inlIndex = pcdatavalue(ex.funcInfo, _PCDATA_InlTreeIndex, ex.pc, nil) - } else { - ex.inlTree = nil - ex.inlIndex = -1 - } -} - -// next returns the next Frame in the expansion of pc and sets ex.more -// if there are more Frames to follow. -func (ex *pcExpander) next() Frame { - if !ex.more { - return Frame{} - } - - if len(ex.frames) > 0 { - // Return pre-expended frame. - frame := ex.frames[0] - ex.frames = ex.frames[1:] - ex.more = len(ex.frames) > 0 - return frame - } - - if ex.inlIndex >= 0 { - // Return inner inlined frame. 
- call := ex.inlTree[ex.inlIndex] - frame := Frame{ - PC: ex.pc, - Func: nil, // nil for inlined functions - Function: funcnameFromNameoff(ex.funcInfo, call.func_), - File: ex.file, - Line: int(ex.line), - Entry: ex.funcInfo.entry, + name := funcname(funcInfo) + file, line := funcline1(funcInfo, pc, false) + if inldata := funcdata(funcInfo, _FUNCDATA_InlTree); inldata != nil { + inltree := (*[1 << 20]inlinedCall)(inldata) + ix := pcdatavalue(funcInfo, _PCDATA_InlTreeIndex, pc, nil) + if ix >= 0 { + // Note: entry is not modified. It always refers to a real frame, not an inlined one. + f = nil + name = funcnameFromNameoff(funcInfo, inltree[ix].func_) + // File/line is already correct. + // TODO: remove file/line from InlinedCall? + } } - ex.file = funcfile(ex.funcInfo, call.file) - ex.line = call.line - ex.inlIndex = call.parent - return frame + ci.frames = append(ci.frames, Frame{ + PC: pc, + Func: f, + Function: name, + File: file, + Line: int(line), + Entry: entry, + }) } - // No inlining or pre-expanded frames. - ex.more = false - return Frame{ - PC: ex.pc, - Func: ex.funcInfo._Func(), - Function: funcname(ex.funcInfo), - File: ex.file, - Line: int(ex.line), - Entry: ex.funcInfo.entry, - } + // Pop one frame from the frame list. Keep the rest. + // Avoid allocation in the common case, which is 1 or 2 frames. + switch len(ci.frames) { + case 0: // In the rare case when there are no frames at all, we return Frame{}. 
+ case 1: + frame = ci.frames[0] + ci.frames = ci.frameStore[:0] + case 2: + frame = ci.frames[0] + ci.frameStore[0] = ci.frames[1] + ci.frames = ci.frameStore[:1] + default: + frame = ci.frames[0] + ci.frames = ci.frames[1:] + } + more = len(ci.frames) > 0 + return } // expandCgoFrames expands frame information for pc, known to be @@ -348,6 +205,7 @@ const ( _FUNCDATA_LocalsPointerMaps = 1 _FUNCDATA_InlTree = 2 _FUNCDATA_RegPointerMaps = 3 + _FUNCDATA_StackObjects = 4 _ArgsSizeUnknown = -0x80000000 ) @@ -356,7 +214,7 @@ const ( // Note that in some situations involving plugins, there may be multiple // copies of a particular special runtime function. // Note: this list must match the list in cmd/internal/objabi/funcid.go. -type funcID uint32 +type funcID uint8 const ( funcID_normal funcID = iota // not a special function @@ -377,6 +235,9 @@ const ( funcID_gogo funcID_externalthreadhandler funcID_debugCallV1 + funcID_gopanic + funcID_panicwrap + funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.) ) // moduledata records information about the layout of the executable @@ -605,9 +466,32 @@ func moduledataverify1(datap *moduledata) { // given program counter address, or else nil. // // If pc represents multiple functions because of inlining, it returns -// the *Func describing the outermost function. +// the a *Func describing the innermost function, but with an entry +// of the outermost function. func FuncForPC(pc uintptr) *Func { - return findfunc(pc)._Func() + f := findfunc(pc) + if !f.valid() { + return nil + } + if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil { + // Note: strict=false so bad PCs (those between functions) don't crash the runtime. + // We just report the preceeding function in that situation. See issue 29735. + // TODO: Perhaps we should report no function at all in that case. + // The runtime currently doesn't have function end info, alas. 
+ if ix := pcdatavalue1(f, _PCDATA_InlTreeIndex, pc, nil, false); ix >= 0 { + inltree := (*[1 << 20]inlinedCall)(inldata) + name := funcnameFromNameoff(f, inltree[ix].func_) + file, line := funcline(f, pc) + fi := &funcinl{ + entry: f.entry, // entry of the real (the outermost) function. + name: name, + file: file, + line: int(line), + } + return (*Func)(unsafe.Pointer(fi)) + } + } + return f._Func() } // Name returns the name of the function. @@ -615,12 +499,22 @@ func (f *Func) Name() string { if f == nil { return "" } + fn := f.raw() + if fn.entry == 0 { // inlined version + fi := (*funcinl)(unsafe.Pointer(fn)) + return fi.name + } return funcname(f.funcInfo()) } // Entry returns the entry address of the function. func (f *Func) Entry() uintptr { - return f.raw().entry + fn := f.raw() + if fn.entry == 0 { // inlined version + fi := (*funcinl)(unsafe.Pointer(fn)) + return fi.entry + } + return fn.entry } // FileLine returns the file name and line number of the @@ -628,6 +522,11 @@ func (f *Func) Entry() uintptr { // The result will not be accurate if pc is not a program // counter within f. func (f *Func) FileLine(pc uintptr) (file string, line int) { + fn := f.raw() + if fn.entry == 0 { // inlined version + fi := (*funcinl)(unsafe.Pointer(fn)) + return fi.file, fi.line + } // Pass strict=false here, because anyone can call this function, // and they might just be wrong about targetpc belonging to f. file, line32 := funcline1(f.funcInfo(), pc, false) @@ -697,7 +596,7 @@ func findfunc(pc uintptr) funcInfo { } type pcvalueCache struct { - entries [16]pcvalueCacheEnt + entries [2][8]pcvalueCacheEnt } type pcvalueCacheEnt struct { @@ -708,6 +607,14 @@ type pcvalueCacheEnt struct { val int32 } +// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc. +// It must be very cheap to calculate. +// For now, align to sys.PtrSize and reduce mod the number of entries. +// In practice, this appears to be fairly randomly and evenly distributed. 
+func pcvalueCacheKey(targetpc uintptr) uintptr { + return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries)) +} + func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 { if off == 0 { return -1 @@ -720,13 +627,14 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric // cheaper than doing the hashing for a less associative // cache. if cache != nil { - for i := range cache.entries { + x := pcvalueCacheKey(targetpc) + for i := range cache.entries[x] { // We check off first because we're more // likely to have multiple entries with // different offsets for the same targetpc // than the other way around, so we'll usually // fail in the first clause. - ent := &cache.entries[i] + ent := &cache.entries[x][i] if ent.off == off && ent.targetpc == targetpc { return ent.val } @@ -755,9 +663,14 @@ func pcvalue(f funcInfo, off int32, targetpc uintptr, cache *pcvalueCache, stric // replacement prevents a performance cliff if // a recursive stack's cycle is slightly // larger than the cache. + // Put the new element at the beginning, + // since it is the most likely to be newly used. 
if cache != nil { - ci := fastrandn(uint32(len(cache.entries))) - cache.entries[ci] = pcvalueCacheEnt{ + x := pcvalueCacheKey(targetpc) + e := &cache.entries[x] + ci := fastrand() % uint32(len(cache.entries[x])) + e[ci] = e[0] + e[0] = pcvalueCacheEnt{ targetpc: targetpc, off: off, val: val, @@ -847,15 +760,25 @@ func funcspdelta(f funcInfo, targetpc uintptr, cache *pcvalueCache) int32 { return x } +func pcdatastart(f funcInfo, table int32) int32 { + return *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) +} + func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) int32 { if table < 0 || table >= f.npcdata { return -1 } - off := *(*int32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) - return pcvalue(f, off, targetpc, cache, true) + return pcvalue(f, pcdatastart(f, table), targetpc, cache, true) +} + +func pcdatavalue1(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache, strict bool) int32 { + if table < 0 || table >= f.npcdata { + return -1 + } + return pcvalue(f, pcdatastart(f, table), targetpc, cache, strict) } -func funcdata(f funcInfo, i int32) unsafe.Pointer { +func funcdata(f funcInfo, i uint8) unsafe.Pointer { if i < 0 || i >= f.nfuncdata { return nil } @@ -928,8 +851,11 @@ func stackmapdata(stkmap *stackmap, n int32) bitvector { // inlinedCall is the encoding of entries in the FUNCDATA_InlTree table. 
type inlinedCall struct { - parent int32 // index of parent in the inltree, or < 0 - file int32 // fileno index into filetab - line int32 // line number of the call site - func_ int32 // offset into pclntab for name of called function + parent int16 // index of parent in the inltree, or < 0 + funcID funcID // type of the called function + _ byte + file int32 // fileno index into filetab + line int32 // line number of the call site + func_ int32 // offset into pclntab for name of called function + parentPc int32 // position of an instruction whose source position is the call site (offset from entry) } diff --git a/src/runtime/sys_aix_ppc64.s b/src/runtime/sys_aix_ppc64.s new file mode 100644 index 0000000000000..38e60f99eb95c --- /dev/null +++ b/src/runtime/sys_aix_ppc64.s @@ -0,0 +1,201 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix +// +build ppc64 ppc64le + +// +// System calls and other sys.stuff for ppc64, Aix +// + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" +#include "asm_ppc64x.h" + +// This function calls a C function with the function descriptor in R12 +TEXT runtime·callCfunction(SB), NOSPLIT|NOFRAME,$0 + MOVD 0(R12), R12 + MOVD R2, 40(R1) + MOVD 0(R12), R0 + MOVD 8(R12), R2 + MOVD R0, CTR + BR (CTR) + + +// asmsyscall6 calls a library function with a function descriptor +// stored in libcall_fn and store the results in libcall struture +// Up to 6 arguments can be passed to this C function +// Called by runtime.asmcgocall +// It reserves a stack of 288 bytes for the C function. 
+// NOT USING GO CALLING CONVENTION +TEXT runtime·asmsyscall6(SB),NOSPLIT,$256 + MOVD R3, 48(R1) // Save libcall for later + MOVD libcall_fn(R3), R12 + MOVD libcall_args(R3), R9 + MOVD 0(R9), R3 + MOVD 8(R9), R4 + MOVD 16(R9), R5 + MOVD 24(R9), R6 + MOVD 32(R9), R7 + MOVD 40(R9), R8 + BL runtime·callCfunction(SB) + + // Restore R0 and TOC + XOR R0, R0 + MOVD 40(R1), R2 + + // Store result in libcall + MOVD 48(R1), R5 + MOVD R3, (libcall_r1)(R5) + MOVD $-1, R6 + CMP R6, R3 + BNE skiperrno + + // Save errno in libcall + BL runtime·load_g(SB) + MOVD g_m(g), R4 + MOVD (m_mOS + mOS_perrno)(R4), R9 + MOVW 0(R9), R9 + MOVD R9, (libcall_err)(R5) + RET +skiperrno: + // Reset errno if no error has been returned + MOVD R0, (libcall_err)(R5) + RET + + +TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 + MOVW sig+8(FP), R3 + MOVD info+16(FP), R4 + MOVD ctx+24(FP), R5 + MOVD fn+0(FP), R12 + MOVD R12, CTR + BL (CTR) + RET + + +// runtime.sigtramp is a function descriptor to the real sigtramp. +DATA runtime·sigtramp+0(SB)/8, $runtime·_sigtramp(SB) +DATA runtime·sigtramp+8(SB)/8, $TOC(SB) +DATA runtime·sigtramp+16(SB)/8, $0 +GLOBL runtime·sigtramp(SB), NOPTR, $24 + +// This funcion must not have any frame as we want to control how +// every registers are used. +TEXT runtime·_sigtramp(SB),NOSPLIT|NOFRAME,$0 + MOVD LR, R0 + MOVD R0, 16(R1) + // initialize essential registers (just in case) + BL runtime·reginit(SB) + + // Note that we are executing on altsigstack here, so we have + // more stack available than NOSPLIT would have us believe. + // To defeat the linker, we make our own stack frame with + // more space. + SUB $128+FIXED_FRAME, R1 + + // Save registers + MOVD R31, 56(R1) + MOVD g, 64(R1) + MOVD R29, 72(R1) + + BL runtime·load_g(SB) + + // Save m->libcall. We need to do this because we + // might get interrupted by a signal in runtime·asmcgocall. 
+ + // save m->libcall + MOVD g_m(g), R6 + MOVD (m_libcall+libcall_fn)(R6), R7 + MOVD R7, 80(R1) + MOVD (m_libcall+libcall_args)(R6), R7 + MOVD R7, 88(R1) + MOVD (m_libcall+libcall_n)(R6), R7 + MOVD R7, 96(R1) + MOVD (m_libcall+libcall_r1)(R6), R7 + MOVD R7, 104(R1) + MOVD (m_libcall+libcall_r2)(R6), R7 + MOVD R7, 112(R1) + + // save errno, it might be EINTR; stuff we do here might reset it. + MOVD (m_mOS+mOS_perrno)(R6), R8 + MOVD 0(R8), R8 + MOVD R8, 120(R1) + + MOVW R3, FIXED_FRAME+0(R1) + MOVD R4, FIXED_FRAME+8(R1) + MOVD R5, FIXED_FRAME+16(R1) + MOVD $runtime·sigtrampgo(SB), R12 + MOVD R12, CTR + BL (CTR) + + MOVD g_m(g), R6 + // restore libcall + MOVD 80(R1), R7 + MOVD R7, (m_libcall+libcall_fn)(R6) + MOVD 88(R1), R7 + MOVD R7, (m_libcall+libcall_args)(R6) + MOVD 96(R1), R7 + MOVD R7, (m_libcall+libcall_n)(R6) + MOVD 104(R1), R7 + MOVD R7, (m_libcall+libcall_r1)(R6) + MOVD 112(R1), R7 + MOVD R7, (m_libcall+libcall_r2)(R6) + + // restore errno + MOVD (m_mOS+mOS_perrno)(R6), R7 + MOVD 120(R1), R8 + MOVD R8, 0(R7) + + // restore registers + MOVD 56(R1),R31 + MOVD 64(R1),g + MOVD 72(R1),R29 + + // Don't use RET because we need to restore R31 ! + ADD $128+FIXED_FRAME, R1 + MOVD 16(R1), R0 + MOVD R0, LR + BR (LR) + +// runtime.tstart is a function descriptor to the real tstart. +DATA runtime·tstart+0(SB)/8, $runtime·_tstart(SB) +DATA runtime·tstart+8(SB)/8, $TOC(SB) +DATA runtime·tstart+16(SB)/8, $0 +GLOBL runtime·tstart(SB), NOPTR, $24 + +TEXT runtime·_tstart(SB),NOSPLIT,$0 + XOR R0, R0 // reset R0 + + // set g + MOVD m_g0(R3), g + BL runtime·save_g(SB) + MOVD R3, g_m(g) + + // Layout new m scheduler stack on os stack. + MOVD R1, R3 + MOVD R3, (g_stack+stack_hi)(g) + SUB $(const_threadStackSize), R3 // stack size + MOVD R3, (g_stack+stack_lo)(g) + ADD $const__StackGuard, R3 + MOVD R3, g_stackguard0(g) + MOVD R3, g_stackguard1(g) + + BL runtime·mstart(SB) + + MOVD R0, R3 + RET + +// Runs on OS stack, called from runtime·osyield. 
+TEXT runtime·osyield1(SB),NOSPLIT,$0 + MOVD $libc_sched_yield(SB), R12 + MOVD 0(R12), R12 + MOVD R2, 40(R1) + MOVD 0(R12), R0 + MOVD 8(R12), R2 + MOVD R0, CTR + BL (CTR) + MOVD 40(R1), R2 + RET diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go index 7efbef746cd83..f34ac88352465 100644 --- a/src/runtime/sys_darwin.go +++ b/src/runtime/sys_darwin.go @@ -50,6 +50,61 @@ func libcCall(fn, arg unsafe.Pointer) int32 { return res } +// The X versions of syscall expect the libc call to return a 64-bit result. +// Otherwise (the non-X version) expects a 32-bit result. +// This distinction is required because an error is indicated by returning -1, +// and we need to know whether to check 32 or 64 bits of the result. +// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) + +//go:linkname syscall_syscall syscall.syscall +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall() + +//go:linkname syscall_syscall6 syscall.syscall6 +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall6() + +//go:linkname syscall_syscall6X syscall.syscall6X +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall6X() + +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn)) + return +} + +//go:linkname syscall_rawSyscall6 
syscall.rawSyscall6 +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn)) + return +} + // The *_trampoline functions convert from the Go calling convention to the C calling convention // and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s. @@ -370,5 +425,5 @@ func closeonexec(fd int32) { //go:cgo_import_dynamic libc_pthread_cond_signal pthread_cond_signal "/usr/lib/libSystem.B.dylib" // Magic incantation to get libSystem actually dynamically linked. -// TODO: Why does the code require this? See cmd/compile/internal/ld/go.go:210 +// TODO: Why does the code require this? See cmd/link/internal/ld/go.go //go:cgo_import_dynamic _ _ "/usr/lib/libSystem.B.dylib" diff --git a/src/runtime/sys_darwin_32.go b/src/runtime/sys_darwin_32.go new file mode 100644 index 0000000000000..2f17091327dae --- /dev/null +++ b/src/runtime/sys_darwin_32.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin +// +build 386 arm + +package runtime + +import "unsafe" + +//go:linkname syscall_syscall9 syscall.syscall9 +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscall9)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall9() + +//go:linkname syscall_syscallPtr syscall.syscallPtr +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscallPtr)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscallPtr() diff --git a/src/runtime/sys_darwin_386.s b/src/runtime/sys_darwin_386.s index 4bfb9b83623b6..1bc1a63c285fb 100644 --- a/src/runtime/sys_darwin_386.s +++ b/src/runtime/sys_darwin_386.s @@ -625,3 +625,242 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0 MOVL BP, SP POPL BP RET + +// syscall calls a function in libc on behalf of the syscall package. +// syscall takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall(SB),NOSPLIT,$0 + PUSHL BP + MOVL SP, BP + SUBL $24, SP + MOVL 32(SP), CX + MOVL (0*4)(CX), AX // fn + MOVL (1*4)(CX), DX // a1 + MOVL DX, 0(SP) + MOVL (2*4)(CX), DX // a2 + MOVL DX, 4(SP) + MOVL (3*4)(CX), DX // a3 + MOVL DX, 8(SP) + + CALL AX + + MOVL 32(SP), CX + MOVL AX, (4*4)(CX) // r1 + MOVL DX, (5*4)(CX) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPL AX, $-1 + JNE ok + + // Get error code from libc. + CALL libc_error(SB) + MOVL (AX), AX + MOVL 32(SP), CX + MOVL AX, (6*4)(CX) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVL BP, SP + POPL BP + RET + +// Not used on 386. 
+TEXT runtime·syscallPtr(SB),NOSPLIT,$0 + MOVL $0xf1, 0xf1 // crash + RET + +// syscall6 calls a function in libc on behalf of the syscall package. +// syscall6 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6 must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall6(SB),NOSPLIT,$0 + PUSHL BP + MOVL SP, BP + SUBL $24, SP + MOVL 32(SP), CX + MOVL (0*4)(CX), AX // fn + MOVL (1*4)(CX), DX // a1 + MOVL DX, 0(SP) + MOVL (2*4)(CX), DX // a2 + MOVL DX, 4(SP) + MOVL (3*4)(CX), DX // a3 + MOVL DX, 8(SP) + MOVL (4*4)(CX), DX // a4 + MOVL DX, 12(SP) + MOVL (5*4)(CX), DX // a5 + MOVL DX, 16(SP) + MOVL (6*4)(CX), DX // a6 + MOVL DX, 20(SP) + + CALL AX + + MOVL 32(SP), CX + MOVL AX, (7*4)(CX) // r1 + MOVL DX, (8*4)(CX) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPL AX, $-1 + JNE ok + + // Get error code from libc. + CALL libc_error(SB) + MOVL (AX), AX + MOVL 32(SP), CX + MOVL AX, (9*4)(CX) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVL BP, SP + POPL BP + RET + +// syscall6X calls a function in libc on behalf of the syscall package. +// syscall6X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6X must be called on the g0 stack with the +// C calling convention (use libcCall). 
+TEXT runtime·syscall6X(SB),NOSPLIT,$0 + PUSHL BP + MOVL SP, BP + SUBL $24, SP + MOVL 32(SP), CX + MOVL (0*4)(CX), AX // fn + MOVL (1*4)(CX), DX // a1 + MOVL DX, 0(SP) + MOVL (2*4)(CX), DX // a2 + MOVL DX, 4(SP) + MOVL (3*4)(CX), DX // a3 + MOVL DX, 8(SP) + MOVL (4*4)(CX), DX // a4 + MOVL DX, 12(SP) + MOVL (5*4)(CX), DX // a5 + MOVL DX, 16(SP) + MOVL (6*4)(CX), DX // a6 + MOVL DX, 20(SP) + + CALL AX + + MOVL 32(SP), CX + MOVL AX, (7*4)(CX) // r1 + MOVL DX, (8*4)(CX) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPL AX, $-1 + JNE ok + CMPL DX, $-1 + JNE ok + + // Get error code from libc. + CALL libc_error(SB) + MOVL (AX), AX + MOVL 32(SP), CX + MOVL AX, (9*4)(CX) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVL BP, SP + POPL BP + RET + +// syscall9 calls a function in libc on behalf of the syscall package. +// syscall9 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall9 must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall9(SB),NOSPLIT,$0 + PUSHL BP + MOVL SP, BP + SUBL $40, SP + MOVL 48(SP), CX + MOVL (0*4)(CX), AX // fn + MOVL (1*4)(CX), DX // a1 + MOVL DX, 0(SP) + MOVL (2*4)(CX), DX // a2 + MOVL DX, 4(SP) + MOVL (3*4)(CX), DX // a3 + MOVL DX, 8(SP) + MOVL (4*4)(CX), DX // a4 + MOVL DX, 12(SP) + MOVL (5*4)(CX), DX // a5 + MOVL DX, 16(SP) + MOVL (6*4)(CX), DX // a6 + MOVL DX, 20(SP) + MOVL (7*4)(CX), DX // a7 + MOVL DX, 24(SP) + MOVL (8*4)(CX), DX // a8 + MOVL DX, 28(SP) + MOVL (9*4)(CX), DX // a9 + MOVL DX, 32(SP) + + CALL AX + + MOVL 48(SP), CX + MOVL AX, (10*4)(CX) // r1 + MOVL DX, (11*4)(CX) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPL AX, $-1 + JNE ok + + // Get error code from libc. 
+ CALL libc_error(SB) + MOVL (AX), AX + MOVL 48(SP), CX + MOVL AX, (12*4)(CX) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVL BP, SP + POPL BP + RET diff --git a/src/runtime/sys_darwin_64.go b/src/runtime/sys_darwin_64.go new file mode 100644 index 0000000000000..8c128811b9f5d --- /dev/null +++ b/src/runtime/sys_darwin_64.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin +// +build amd64 arm64 + +package runtime + +import "unsafe" + +//go:linkname syscall_syscallX syscall.syscallX +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscallX)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscallX() + +//go:linkname syscall_syscallXPtr syscall.syscallXPtr +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscallXPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscallblock() + libcCall(unsafe.Pointer(funcPC(syscallXPtr)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscallXPtr() diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s index db743526130d3..f99cb00ab8cf0 100644 --- a/src/runtime/sys_darwin_amd64.s +++ b/src/runtime/sys_darwin_amd64.s @@ -26,6 +26,7 @@ TEXT runtime·open_trampoline(SB),NOSPLIT,$0 MOVL 8(DI), SI // arg 2 flags MOVL 12(DI), DX // arg 3 mode MOVQ 0(DI), DI // arg 1 pathname + XORL AX, AX // vararg: say "no float args" CALL libc_open(SB) POPQ BP RET @@ -383,6 +384,7 @@ TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0 MOVL 4(DI), SI // arg 2 cmd MOVL 8(DI), DX // arg 3 arg MOVL 0(DI), DI // arg 1 fd + XORL AX, AX // vararg: say "no float args" CALL libc_fcntl(SB) POPQ BP RET @@ -540,3 +542,202 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0 CALL libc_pthread_cond_signal(SB) POPQ BP RET + +// syscall 
calls a function in libc on behalf of the syscall package. +// syscall takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall(SB),NOSPLIT,$0 + PUSHQ BP + MOVQ SP, BP + SUBQ $16, SP + MOVQ (0*8)(DI), CX // fn + MOVQ (2*8)(DI), SI // a2 + MOVQ (3*8)(DI), DX // a3 + MOVQ DI, (SP) + MOVQ (1*8)(DI), DI // a1 + XORL AX, AX // vararg: say "no float args" + + CALL CX + + MOVQ (SP), DI + MOVQ AX, (4*8)(DI) // r1 + MOVQ DX, (5*8)(DI) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPL AX, $-1 // Note: high 32 bits are junk + JNE ok + + // Get error code from libc. + CALL libc_error(SB) + MOVLQSX (AX), AX + MOVQ (SP), DI + MOVQ AX, (6*8)(DI) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVQ BP, SP + POPQ BP + RET + +// syscallX calls a function in libc on behalf of the syscall package. +// syscallX takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscallX must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscallX(SB),NOSPLIT,$0 + PUSHQ BP + MOVQ SP, BP + SUBQ $16, SP + MOVQ (0*8)(DI), CX // fn + MOVQ (2*8)(DI), SI // a2 + MOVQ (3*8)(DI), DX // a3 + MOVQ DI, (SP) + MOVQ (1*8)(DI), DI // a1 + XORL AX, AX // vararg: say "no float args" + + CALL CX + + MOVQ (SP), DI + MOVQ AX, (4*8)(DI) // r1 + MOVQ DX, (5*8)(DI) // r2 + + // Standard libc functions return -1 on error + // and set errno. + CMPQ AX, $-1 + JNE ok + + // Get error code from libc. + CALL libc_error(SB) + MOVLQSX (AX), AX + MOVQ (SP), DI + MOVQ AX, (6*8)(DI) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVQ BP, SP + POPQ BP + RET + +// Not used on amd64. 
+TEXT runtime·syscallXPtr(SB),NOSPLIT,$0 + MOVL $0xf1, 0xf1 // crash + RET + +// syscall6 calls a function in libc on behalf of the syscall package. +// syscall6 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6 must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall6(SB),NOSPLIT,$0 + PUSHQ BP + MOVQ SP, BP + SUBQ $16, SP + MOVQ (0*8)(DI), R11// fn + MOVQ (2*8)(DI), SI // a2 + MOVQ (3*8)(DI), DX // a3 + MOVQ (4*8)(DI), CX // a4 + MOVQ (5*8)(DI), R8 // a5 + MOVQ (6*8)(DI), R9 // a6 + MOVQ DI, (SP) + MOVQ (1*8)(DI), DI // a1 + XORL AX, AX // vararg: say "no float args" + + CALL R11 + + MOVQ (SP), DI + MOVQ AX, (7*8)(DI) // r1 + MOVQ DX, (8*8)(DI) // r2 + + CMPL AX, $-1 + JNE ok + + CALL libc_error(SB) + MOVLQSX (AX), AX + MOVQ (SP), DI + MOVQ AX, (9*8)(DI) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVQ BP, SP + POPQ BP + RET + +// syscall6X calls a function in libc on behalf of the syscall package. +// syscall6X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6X must be called on the g0 stack with the +// C calling convention (use libcCall). 
+TEXT runtime·syscall6X(SB),NOSPLIT,$0 + PUSHQ BP + MOVQ SP, BP + SUBQ $16, SP + MOVQ (0*8)(DI), R11// fn + MOVQ (2*8)(DI), SI // a2 + MOVQ (3*8)(DI), DX // a3 + MOVQ (4*8)(DI), CX // a4 + MOVQ (5*8)(DI), R8 // a5 + MOVQ (6*8)(DI), R9 // a6 + MOVQ DI, (SP) + MOVQ (1*8)(DI), DI // a1 + XORL AX, AX // vararg: say "no float args" + + CALL R11 + + MOVQ (SP), DI + MOVQ AX, (7*8)(DI) // r1 + MOVQ DX, (8*8)(DI) // r2 + + CMPQ AX, $-1 + JNE ok + + CALL libc_error(SB) + MOVLQSX (AX), AX + MOVQ (SP), DI + MOVQ AX, (9*8)(DI) // err + +ok: + XORL AX, AX // no error (it's ignored anyway) + MOVQ BP, SP + POPQ BP + RET diff --git a/src/runtime/sys_darwin_arm.s b/src/runtime/sys_darwin_arm.s index 7a269cf576335..54c7afbf34395 100644 --- a/src/runtime/sys_darwin_arm.s +++ b/src/runtime/sys_darwin_arm.s @@ -382,3 +382,200 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0 MOVW 0(R0), R0 // arg 1 cond BL libc_pthread_cond_signal(SB) RET + +// syscall calls a function in libc on behalf of the syscall package. +// syscall takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall(SB),NOSPLIT,$0 + MOVW.W R0, -4(R13) // push structure pointer + MOVW 0(R0), R12 // fn + MOVW 8(R0), R1 // a2 + MOVW 12(R0), R2 // a3 + MOVW 4(R0), R0 // a1 + BL (R12) + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 16(R2) // save r1 + MOVW R1, 20(R2) // save r2 + MOVW $-1, R3 + CMP R0, R3 + BNE ok + MOVW.W R2, -4(R13) // push structure pointer + BL libc_error(SB) + MOVW (R0), R0 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 24(R2) // save err +ok: + RET + +// syscallPtr is like syscall except the libc function reports an +// error by returning NULL. 
+TEXT runtime·syscallPtr(SB),NOSPLIT,$0 + MOVW.W R0, -4(R13) // push structure pointer + MOVW 0(R0), R12 // fn + MOVW 8(R0), R1 // a2 + MOVW 12(R0), R2 // a3 + MOVW 4(R0), R0 // a1 + BL (R12) + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 16(R2) // save r1 + MOVW R1, 20(R2) // save r2 + MOVW $0, R3 + CMP R0, R3 + BNE ok + MOVW.W R2, -4(R13) // push structure pointer + BL libc_error(SB) + MOVW (R0), R0 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 24(R2) // save err +ok: + RET + +// syscall6 calls a function in libc on behalf of the syscall package. +// syscall6 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6 must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall6(SB),NOSPLIT,$0 + MOVW.W R0, -4(R13) // push structure pointer + MOVW 0(R0), R12 // fn + MOVW 24(R0), R1 // a6 + MOVW.W R1, -4(R13) + MOVW 20(R0), R1 // a5 + MOVW.W R1, -4(R13) + MOVW 8(R0), R1 // a2 + MOVW 12(R0), R2 // a3 + MOVW 16(R0), R3 // a4 + MOVW 4(R0), R0 // a1 + BL (R12) + ADD $8, R13 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 28(R2) // save r1 + MOVW R1, 32(R2) // save r2 + MOVW $-1, R3 + CMP R0, R3 + BNE ok + MOVW.W R2, -4(R13) // push structure pointer + BL libc_error(SB) + MOVW (R0), R0 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 36(R2) // save err +ok: + RET + +// syscall6X calls a function in libc on behalf of the syscall package. +// syscall6X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6X must be called on the g0 stack with the +// C calling convention (use libcCall). 
+TEXT runtime·syscall6X(SB),NOSPLIT,$0 + MOVW.W R0, -4(R13) // push structure pointer + MOVW 0(R0), R12 // fn + MOVW 24(R0), R1 // a6 + MOVW.W R1, -4(R13) + MOVW 20(R0), R1 // a5 + MOVW.W R1, -4(R13) + MOVW 8(R0), R1 // a2 + MOVW 12(R0), R2 // a3 + MOVW 16(R0), R3 // a4 + MOVW 4(R0), R0 // a1 + BL (R12) + ADD $8, R13 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 28(R2) // save r1 + MOVW R1, 32(R2) // save r2 + MOVW $-1, R3 + CMP R0, R3 + BNE ok + CMP R1, R3 + BNE ok + MOVW.W R2, -4(R13) // push structure pointer + BL libc_error(SB) + MOVW (R0), R0 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 36(R2) // save err +ok: + RET + +// syscall9 calls a function in libc on behalf of the syscall package. +// syscall9 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall9 must be called on the g0 stack with the +// C calling convention (use libcCall). 
+TEXT runtime·syscall9(SB),NOSPLIT,$0 + MOVW.W R0, -4(R13) // push structure pointer + MOVW 0(R0), R12 // fn + MOVW 36(R0), R1 // a9 + MOVW.W R1, -4(R13) + MOVW 32(R0), R1 // a8 + MOVW.W R1, -4(R13) + MOVW 28(R0), R1 // a7 + MOVW.W R1, -4(R13) + MOVW 24(R0), R1 // a6 + MOVW.W R1, -4(R13) + MOVW 20(R0), R1 // a5 + MOVW.W R1, -4(R13) + MOVW 8(R0), R1 // a2 + MOVW 12(R0), R2 // a3 + MOVW 16(R0), R3 // a4 + MOVW 4(R0), R0 // a1 + BL (R12) + ADD $20, R13 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 40(R2) // save r1 + MOVW R1, 44(R2) // save r2 + MOVW $-1, R3 + CMP R0, R3 + BNE ok + MOVW.W R2, -4(R13) // push structure pointer + BL libc_error(SB) + MOVW (R0), R0 + MOVW.P 4(R13), R2 // pop structure pointer + MOVW R0, 48(R2) // save err +ok: + RET diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s index d7ba116b8430b..29951d8ad7247 100644 --- a/src/runtime/sys_darwin_arm64.s +++ b/src/runtime/sys_darwin_arm64.s @@ -16,10 +16,13 @@ TEXT notok<>(SB),NOSPLIT,$0 B 0(PC) TEXT runtime·open_trampoline(SB),NOSPLIT,$0 + SUB $16, RSP MOVW 8(R0), R1 // arg 2 flags MOVW 12(R0), R2 // arg 3 mode + MOVW R2, (RSP) // arg 3 is variadic, pass on stack MOVD 0(R0), R0 // arg 1 pathname - BL libc_open(SB) + BL libc_open(SB) + ADD $16, RSP RET TEXT runtime·close_trampoline(SB),NOSPLIT,$0 @@ -283,10 +286,13 @@ ok: RET TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0 + SUB $16, RSP MOVW 4(R0), R1 // arg 2 cmd MOVW 8(R0), R2 // arg 3 arg + MOVW R2, (RSP) // arg 3 is variadic, pass on stack MOVW 0(R0), R0 // arg 1 fd BL libc_fcntl(SB) + ADD $16, RSP RET // sigaltstack on iOS is not supported and will always @@ -372,3 +378,213 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0 BL libc_pthread_cond_signal(SB) RET +// syscall calls a function in libc on behalf of the syscall package. 
+// syscall takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, 8(RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 8(R0), R0 // a1 + + // If fn is declared as vararg, we have to pass the vararg arguments on the stack. + // (Because ios decided not to adhere to the standard arm64 calling convention, sigh...) + // The only libSystem calls we support that are vararg are open, fcntl, and ioctl, + // which are all of the form fn(x, y, ...). So we just need to put the 3rd arg + // on the stack as well. + // If we ever have other vararg libSystem calls, we might need to handle more cases. + MOVD R2, (RSP) + + BL (R12) + + MOVD 8(RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 32(R2) // save r1 + MOVD R1, 40(R2) // save r2 + CMPW $-1, R0 + BNE ok + SUB $16, RSP // push structure pointer + MOVD R2, 8(RSP) + BL libc_error(SB) + MOVW (R0), R0 + MOVD 8(RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 48(R2) // save err +ok: + RET + +// syscallX calls a function in libc on behalf of the syscall package. +// syscallX takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscallX must be called on the g0 stack with the +// C calling convention (use libcCall). 
+TEXT runtime·syscallX(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, (RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 8(R0), R0 // a1 + BL (R12) + + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 32(R2) // save r1 + MOVD R1, 40(R2) // save r2 + CMP $-1, R0 + BNE ok + SUB $16, RSP // push structure pointer + MOVD R2, (RSP) + BL libc_error(SB) + MOVW (R0), R0 + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 48(R2) // save err +ok: + RET + +// syscallXPtr is like syscallX except that the libc function reports an +// error by returning NULL. +TEXT runtime·syscallXPtr(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, (RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 8(R0), R0 // a1 + BL (R12) + + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 32(R2) // save r1 + MOVD R1, 40(R2) // save r2 + CMP $0, R0 + BNE ok + SUB $16, RSP // push structure pointer + MOVD R2, (RSP) + BL libc_error(SB) + MOVW (R0), R0 + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 48(R2) // save err +ok: + RET + +// syscall6 calls a function in libc on behalf of the syscall package. +// syscall6 takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6 must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall6(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, 8(RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 32(R0), R3 // a4 + MOVD 40(R0), R4 // a5 + MOVD 48(R0), R5 // a6 + MOVD 8(R0), R0 // a1 + + // If fn is declared as vararg, we have to pass the vararg arguments on the stack. + // See syscall above. 
The only function this applies to is openat, for which the 4th + // arg must be on the stack. + MOVD R3, (RSP) + + BL (R12) + + MOVD 8(RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 56(R2) // save r1 + MOVD R1, 64(R2) // save r2 + CMPW $-1, R0 + BNE ok + SUB $16, RSP // push structure pointer + MOVD R2, 8(RSP) + BL libc_error(SB) + MOVW (R0), R0 + MOVD 8(RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 72(R2) // save err +ok: + RET + +// syscall6X calls a function in libc on behalf of the syscall package. +// syscall6X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall6X must be called on the g0 stack with the +// C calling convention (use libcCall). +TEXT runtime·syscall6X(SB),NOSPLIT,$0 + SUB $16, RSP // push structure pointer + MOVD R0, (RSP) + + MOVD 0(R0), R12 // fn + MOVD 16(R0), R1 // a2 + MOVD 24(R0), R2 // a3 + MOVD 32(R0), R3 // a4 + MOVD 40(R0), R4 // a5 + MOVD 48(R0), R5 // a6 + MOVD 8(R0), R0 // a1 + BL (R12) + + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 56(R2) // save r1 + MOVD R1, 64(R2) // save r2 + CMP $-1, R0 + BNE ok + SUB $16, RSP // push structure pointer + MOVD R2, (RSP) + BL libc_error(SB) + MOVW (R0), R0 + MOVD (RSP), R2 // pop structure pointer + ADD $16, RSP + MOVD R0, 72(R2) // save err +ok: + RET diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s index f0eb5f4e21614..b771850aaf5e2 100644 --- a/src/runtime/sys_dragonfly_amd64.s +++ b/src/runtime/sys_dragonfly_amd64.s @@ -9,7 +9,7 @@ #include "go_asm.h" #include "go_tls.h" #include "textflag.h" - + TEXT runtime·sys_umtx_sleep(SB),NOSPLIT,$0 MOVQ addr+0(FP), DI // arg 1 - ptr MOVL val+8(FP), SI // arg 2 - value @@ -260,9 +260,11 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $75, AX // madvise SYSCALL - // ignore failure - maybe 
pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 MOVQ new+0(FP), DI MOVQ old+8(FP), SI diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s index b8f685a32374a..bc309ba4536a6 100644 --- a/src/runtime/sys_freebsd_386.s +++ b/src/runtime/sys_freebsd_386.s @@ -9,7 +9,7 @@ #include "go_asm.h" #include "go_tls.h" #include "textflag.h" - + TEXT runtime·sys_umtx_op(SB),NOSPLIT,$-4 MOVL $454, AX INT $0x80 @@ -39,7 +39,7 @@ TEXT runtime·thr_start(SB),NOSPLIT,$0 POPAL get_tls(CX) MOVL BX, g(CX) - + MOVL AX, g_m(BX) CALL runtime·stackcheck(SB) // smashes AX CALL runtime·mstart(SB) @@ -163,7 +163,9 @@ TEXT runtime·munmap(SB),NOSPLIT,$-4 TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // madvise INT $0x80 - // ignore failure - maybe pages are locked + JAE 2(PC) + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB), NOSPLIT, $-4 diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s index be191a078458f..55959b3e3a333 100644 --- a/src/runtime/sys_freebsd_amd64.s +++ b/src/runtime/sys_freebsd_amd64.s @@ -337,9 +337,11 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $75, AX // madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 MOVQ new+0(FP), DI MOVQ old+8(FP), SI diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s index 93bf569367e92..f347b9fa961b9 100644 --- a/src/runtime/sys_freebsd_arm.s +++ b/src/runtime/sys_freebsd_arm.s @@ -264,14 +264,15 @@ TEXT runtime·munmap(SB),NOSPLIT,$0 RET TEXT runtime·madvise(SB),NOSPLIT,$0 - MOVW addr+0(FP), R0 // arg 1 addr - MOVW n+4(FP), R1 // arg 2 len - MOVW flags+8(FP), R2 // arg 3 flags - MOVW $SYS_madvise, R7 - SWI $0 - // ignore failure - maybe pages are locked + MOVW addr+0(FP), R0 // arg 1 addr + MOVW n+4(FP), R1 // arg 2 len + MOVW flags+8(FP), R2 // arg 3 flags + 
MOVW $SYS_madvise, R7 + SWI $0 + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 MOVW new+0(FP), R0 MOVW old+4(FP), R1 diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s index 8d5a4ff9772d2..40b55a67eb879 100644 --- a/src/runtime/sys_linux_386.s +++ b/src/runtime/sys_linux_386.s @@ -48,7 +48,6 @@ #define SYS_mincore 218 #define SYS_madvise 219 #define SYS_gettid 224 -#define SYS_tkill 238 #define SYS_futex 240 #define SYS_sched_getaffinity 242 #define SYS_set_thread_area 243 @@ -57,6 +56,7 @@ #define SYS_epoll_ctl 255 #define SYS_epoll_wait 256 #define SYS_clock_gettime 265 +#define SYS_tgkill 270 #define SYS_epoll_create1 329 TEXT runtime·exit(SB),NOSPLIT,$0 @@ -155,11 +155,14 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT,$12 + MOVL $SYS_getpid, AX + INVOKE_SYSCALL + MOVL AX, BX // arg 1 pid MOVL $SYS_gettid, AX INVOKE_SYSCALL - MOVL AX, BX // arg 1 tid - MOVL sig+0(FP), CX // arg 2 signal - MOVL $SYS_tkill, AX + MOVL AX, CX // arg 2 tid + MOVL sig+0(FP), DX // arg 3 signal + MOVL $SYS_tgkill, AX INVOKE_SYSCALL RET @@ -424,7 +427,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL n+4(FP), CX MOVL flags+8(FP), DX INVOKE_SYSCALL - // ignore failure - maybe pages are locked + MOVL AX, ret+12(FP) RET // int32 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index 62d80247bea52..b709f77060ad2 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -36,12 +36,12 @@ #define SYS_sigaltstack 131 #define SYS_arch_prctl 158 #define SYS_gettid 186 -#define SYS_tkill 200 #define SYS_futex 202 #define SYS_sched_getaffinity 204 #define SYS_epoll_create 213 #define SYS_exit_group 231 #define SYS_epoll_ctl 233 +#define SYS_tgkill 234 #define SYS_openat 257 #define SYS_faccessat 269 #define SYS_epoll_pwait 281 @@ -137,11 +137,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT 
runtime·raise(SB),NOSPLIT,$0 + MOVL $SYS_getpid, AX + SYSCALL + MOVL AX, R12 MOVL $SYS_gettid, AX SYSCALL - MOVL AX, DI // arg 1 tid - MOVL sig+0(FP), SI // arg 2 - MOVL $SYS_tkill, AX + MOVL AX, SI // arg 2 tid + MOVL R12, DI // arg 1 pid + MOVL sig+0(FP), DX // arg 3 + MOVL $SYS_tgkill, AX SYSCALL RET @@ -515,7 +519,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $SYS_madvise, AX SYSCALL - // ignore failure - maybe pages are locked + MOVL AX, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, @@ -604,7 +608,7 @@ TEXT runtime·settls(SB),NOSPLIT,$32 // Same as in sys_darwin_386.s:/ugliness, different constant. // DI currently holds m->tls, which must be fs:0x1d0. // See cgo/gcc_android_amd64.c for the derivation of the constant. - SUBQ $0x1d0, DI // In android, the tls base + SUBQ $0x1d0, DI // In android, the tls base #else ADDQ $8, DI // ELF wants to use -8(FS) #endif diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s index aa39732cfb66d..43a58335c8068 100644 --- a/src/runtime/sys_linux_arm.s +++ b/src/runtime/sys_linux_arm.s @@ -36,7 +36,7 @@ #define SYS_setitimer (SYS_BASE + 104) #define SYS_mincore (SYS_BASE + 219) #define SYS_gettid (SYS_BASE + 224) -#define SYS_tkill (SYS_BASE + 238) +#define SYS_tgkill (SYS_BASE + 268) #define SYS_sched_yield (SYS_BASE + 158) #define SYS_nanosleep (SYS_BASE + 162) #define SYS_sched_getaffinity (SYS_BASE + 242) @@ -138,11 +138,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVW $SYS_getpid, R7 + SWI $0 + MOVW R0, R4 MOVW $SYS_gettid, R7 SWI $0 - // arg 1 tid already in R0 from gettid - MOVW sig+0(FP), R1 // arg 2 - signal - MOVW $SYS_tkill, R7 + MOVW R0, R1 // arg 2 tid + MOVW R4, R0 // arg 1 pid + MOVW sig+0(FP), R2 // arg 3 + MOVW $SYS_tgkill, R7 SWI $0 RET @@ -191,7 +195,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVW flags+8(FP), R2 MOVW $SYS_madvise, R7 SWI $0 - // ignore failure - maybe pages are locked + MOVW R0, ret+12(FP) 
RET TEXT runtime·setitimer(SB),NOSPLIT,$0 diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s index c6afd76a657e2..8b344be8f83b9 100644 --- a/src/runtime/sys_linux_arm64.s +++ b/src/runtime/sys_linux_arm64.s @@ -36,7 +36,7 @@ #define SYS_getpid 172 #define SYS_gettid 178 #define SYS_kill 129 -#define SYS_tkill 130 +#define SYS_tgkill 131 #define SYS_futex 98 #define SYS_sched_getaffinity 123 #define SYS_exit_group 94 @@ -143,11 +143,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVD $SYS_getpid, R8 + SVC + MOVW R0, R19 MOVD $SYS_gettid, R8 SVC - MOVW R0, R0 // arg 1 tid - MOVW sig+0(FP), R1 // arg 2 - MOVD $SYS_tkill, R8 + MOVW R0, R1 // arg 2 tid + MOVW R19, R0 // arg 1 pid + MOVW sig+0(FP), R2 // arg 3 + MOVD $SYS_tgkill, R8 SVC RET @@ -239,7 +243,7 @@ TEXT runtime·nanotime(SB),NOSPLIT,$24-8 MOVD (g_sched+gobuf_sp)(R3), R1 // Set RSP to g0 stack noswitch: - SUB $16, R1 + SUB $32, R1 BIC $15, R1 MOVD R1, RSP @@ -298,7 +302,9 @@ TEXT runtime·callCgoSigaction(SB),NOSPLIT,$0 MOVD new+8(FP), R1 MOVD old+16(FP), R2 MOVD _cgo_sigaction(SB), R3 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL R3 + ADD $16, RSP MOVW R0, ret+24(FP) RET @@ -361,7 +367,9 @@ TEXT runtime·callCgoMmap(SB),NOSPLIT,$0 MOVW fd+24(FP), R4 MOVW off+28(FP), R5 MOVD _cgo_mmap(SB), R9 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. BL R9 + ADD $16, RSP MOVD R0, ret+32(FP) RET @@ -382,7 +390,9 @@ TEXT runtime·callCgoMunmap(SB),NOSPLIT,$0 MOVD addr+0(FP), R0 MOVD n+8(FP), R1 MOVD _cgo_munmap(SB), R9 + SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved. 
BL R9 + ADD $16, RSP RET TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 @@ -391,7 +401,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVW flags+16(FP), R2 MOVD $SYS_madvise, R8 SVC - // ignore failure - maybe pages are locked + MOVW R0, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s index 8e64f1c562e64..c45703d22801c 100644 --- a/src/runtime/sys_linux_mips64x.s +++ b/src/runtime/sys_linux_mips64x.s @@ -35,12 +35,12 @@ #define SYS_madvise 5027 #define SYS_mincore 5026 #define SYS_gettid 5178 -#define SYS_tkill 5192 #define SYS_futex 5194 #define SYS_sched_getaffinity 5196 #define SYS_exit_group 5205 #define SYS_epoll_create 5207 #define SYS_epoll_ctl 5208 +#define SYS_tgkill 5225 #define SYS_openat 5247 #define SYS_epoll_pwait 5272 #define SYS_clock_gettime 5222 @@ -137,11 +137,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVV $SYS_getpid, R2 + SYSCALL + MOVW R2, R16 MOVV $SYS_gettid, R2 SYSCALL - MOVW R2, R4 // arg 1 tid - MOVW sig+0(FP), R5 // arg 2 - MOVV $SYS_tkill, R2 + MOVW R2, R5 // arg 2 tid + MOVW R16, R4 // arg 1 pid + MOVW sig+0(FP), R6 // arg 3 + MOVV $SYS_tgkill, R2 SYSCALL RET @@ -287,7 +291,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVW flags+16(FP), R6 MOVV $SYS_madvise, R2 SYSCALL - // ignore failure - maybe pages are locked + MOVW R2, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s index a6bca3bebd8b0..f362b0f3f1c95 100644 --- a/src/runtime/sys_linux_mipsx.s +++ b/src/runtime/sys_linux_mipsx.s @@ -35,7 +35,6 @@ #define SYS_madvise 4218 #define SYS_mincore 4217 #define SYS_gettid 4222 -#define SYS_tkill 4236 #define SYS_futex 4238 #define SYS_sched_getaffinity 4240 #define SYS_exit_group 4246 @@ -43,6 +42,7 @@ #define SYS_epoll_ctl 4249 #define SYS_epoll_wait 4250 #define SYS_clock_gettime 4263 +#define 
SYS_tgkill 4266 #define SYS_epoll_create1 4326 TEXT runtime·exit(SB),NOSPLIT,$0-4 @@ -135,11 +135,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT,$0-4 + MOVW $SYS_getpid, R2 + SYSCALL + MOVW R2, R16 MOVW $SYS_gettid, R2 SYSCALL - MOVW R2, R4 // arg 1 tid - MOVW sig+0(FP), R5 // arg 2 - MOVW $SYS_tkill, R2 + MOVW R2, R5 // arg 2 tid + MOVW R16, R4 // arg 1 pid + MOVW sig+0(FP), R6 // arg 3 + MOVW $SYS_tgkill, R2 SYSCALL RET @@ -298,13 +302,13 @@ TEXT runtime·munmap(SB),NOSPLIT,$0-8 UNDEF // crash RET -TEXT runtime·madvise(SB),NOSPLIT,$0-12 +TEXT runtime·madvise(SB),NOSPLIT,$0-16 MOVW addr+0(FP), R4 MOVW n+4(FP), R5 MOVW flags+8(FP), R6 MOVW $SYS_madvise, R2 SYSCALL - // ignore failure - maybe pages are locked + MOVW R2, ret+12(FP) RET // int32 futex(int32 *uaddr, int32 op, int32 val, struct timespec *timeout, int32 *uaddr2, int32 val2); diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s index 483cb8ef9aefd..6835f434de4fc 100644 --- a/src/runtime/sys_linux_ppc64x.s +++ b/src/runtime/sys_linux_ppc64x.s @@ -36,7 +36,6 @@ #define SYS_madvise 205 #define SYS_mincore 206 #define SYS_gettid 207 -#define SYS_tkill 208 #define SYS_futex 221 #define SYS_sched_getaffinity 223 #define SYS_exit_group 234 @@ -44,6 +43,7 @@ #define SYS_epoll_ctl 237 #define SYS_epoll_wait 238 #define SYS_clock_gettime 246 +#define SYS_tgkill 250 #define SYS_epoll_create1 315 TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4 @@ -123,10 +123,13 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + SYSCALL $SYS_getpid + MOVW R3, R14 SYSCALL $SYS_gettid - MOVW R3, R3 // arg 1 tid - MOVW sig+0(FP), R4 // arg 2 - SYSCALL $SYS_tkill + MOVW R3, R4 // arg 2 tid + MOVW R14, R3 // arg 1 pid + MOVW sig+0(FP), R5 // arg 3 + SYSCALL $SYS_tgkill RET TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0 @@ -154,21 +157,87 @@ TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28 // func walltime() (sec int64, nsec int32) TEXT 
runtime·walltime(SB),NOSPLIT,$16 - MOVD $0, R3 // CLOCK_REALTIME - MOVD $0(R1), R4 - SYSCALL $SYS_clock_gettime - MOVD 0(R1), R3 // sec - MOVD 8(R1), R5 // nsec + MOVD R1, R15 // R15 is unchanged by C code + MOVD g_m(g), R21 // R21 = m + + MOVD $0, R3 // CLOCK_REALTIME + + MOVD runtime·vdsoClockgettimeSym(SB), R12 // Check for VDSO availability + CMP R12, R0 + BEQ fallback + + // Set vdsoPC and vdsoSP for SIGPROF traceback. + MOVD LR, R14 + MOVD R14, m_vdsoPC(R21) + MOVD R15, m_vdsoSP(R21) + + MOVD m_curg(R21), R6 + CMP g, R6 + BNE noswitch + + MOVD m_g0(R21), R7 + MOVD (g_sched+gobuf_sp)(R7), R1 // Set SP to g0 stack + +noswitch: + SUB $16, R1 // Space for results + RLDICR $0, R1, $59, R1 // Align for C code + MOVD R12, CTR + MOVD R1, R4 + BL (CTR) // Call from VDSO + MOVD $0, R0 // Restore R0 + MOVD R0, m_vdsoSP(R21) // Clear vdsoSP + MOVD 0(R1), R3 // sec + MOVD 8(R1), R5 // nsec + MOVD R15, R1 // Restore SP + +finish: MOVD R3, sec+0(FP) MOVW R5, nsec+8(FP) RET + // Syscall fallback +fallback: + ADD $32, R1, R4 + SYSCALL $SYS_clock_gettime + MOVD 32(R1), R3 + MOVD 40(R1), R5 + JMP finish + TEXT runtime·nanotime(SB),NOSPLIT,$16 - MOVW $1, R3 // CLOCK_MONOTONIC - MOVD $0(R1), R4 - SYSCALL $SYS_clock_gettime - MOVD 0(R1), R3 // sec - MOVD 8(R1), R5 // nsec + MOVD $1, R3 // CLOCK_MONOTONIC + + MOVD R1, R15 // R15 is unchanged by C code + MOVD g_m(g), R21 // R21 = m + + MOVD runtime·vdsoClockgettimeSym(SB), R12 // Check for VDSO availability + CMP R12, R0 + BEQ fallback + + // Set vdsoPC and vdsoSP for SIGPROF traceback. 
+ MOVD LR, R14 // R14 is unchanged by C code + MOVD R14, m_vdsoPC(R21) + MOVD R15, m_vdsoSP(R21) + + MOVD m_curg(R21), R6 + CMP g, R6 + BNE noswitch + + MOVD m_g0(R21), R7 + MOVD (g_sched+gobuf_sp)(R7), R1 // Set SP to g0 stack + +noswitch: + SUB $16, R1 // Space for results + RLDICR $0, R1, $59, R1 // Align for C code + MOVD R12, CTR + MOVD R1, R4 + BL (CTR) // Call from VDSO + MOVD $0, R0 // Restore R0 + MOVD $0, m_vdsoSP(R21) // Clear vdsoSP + MOVD 0(R1), R3 // sec + MOVD 8(R1), R5 // nsec + MOVD R15, R1 // Restore SP + +finish: // sec is in R3, nsec in R5 // return nsec in R3 MOVD $1000000000, R4 @@ -177,6 +246,14 @@ TEXT runtime·nanotime(SB),NOSPLIT,$16 MOVD R3, ret+0(FP) RET + // Syscall fallback +fallback: + ADD $32, R1, R4 + SYSCALL $SYS_clock_gettime + MOVD 32(R1), R3 + MOVD 48(R1), R5 + JMP finish + TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28 MOVW how+0(FP), R3 MOVD new+8(FP), R4 @@ -224,7 +301,7 @@ TEXT runtime·_sigtramp(SB),NOSPLIT,$64 // this might be called in external code context, // where g is not set. - MOVB runtime·iscgo(SB), R6 + MOVBZ runtime·iscgo(SB), R6 CMP R6, $0 BEQ 2(PC) BL runtime·load_g(SB) @@ -243,7 +320,7 @@ TEXT runtime·_sigtramp(SB),NOSPLIT,$64 TEXT runtime·cgoSigtramp(SB),NOSPLIT|NOFRAME,$0 // The stack unwinder, presumably written in C, may not be able to // handle Go frame correctly. So, this function is NOFRAME, and we - // we save/restore LR manually. + // save/restore LR manually. MOVD LR, R10 // We're coming from C code, initialize essential registers. 
@@ -337,7 +414,7 @@ TEXT runtime·cgoSigtramp(SB),NOSPLIT|NOFRAME,$0 DWORD $0 DWORD $0 TEXT runtime·_cgoSigtramp(SB),NOSPLIT,$0 - JMP runtime·sigtramp(SB) + JMP runtime·_sigtramp(SB) #endif TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$0 @@ -377,7 +454,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVD n+8(FP), R4 MOVW flags+16(FP), R5 SYSCALL $SYS_madvise - // ignore failure - maybe pages are locked + MOVW R3, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s index 1ff110c232b87..c79ceea7512f6 100644 --- a/src/runtime/sys_linux_s390x.s +++ b/src/runtime/sys_linux_s390x.s @@ -31,9 +31,9 @@ #define SYS_madvise 219 #define SYS_mincore 218 #define SYS_gettid 236 -#define SYS_tkill 237 #define SYS_futex 238 #define SYS_sched_getaffinity 240 +#define SYS_tgkill 241 #define SYS_exit_group 248 #define SYS_epoll_create 249 #define SYS_epoll_ctl 250 @@ -129,11 +129,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVW $SYS_getpid, R1 + SYSCALL + MOVW R2, R10 MOVW $SYS_gettid, R1 SYSCALL - MOVW R2, R2 // arg 1 tid - MOVW sig+0(FP), R3 // arg 2 - MOVW $SYS_tkill, R1 + MOVW R2, R3 // arg 2 tid + MOVW R10, R2 // arg 1 pid + MOVW sig+0(FP), R4 // arg 2 + MOVW $SYS_tgkill, R1 SYSCALL RET @@ -286,7 +290,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVW flags+16(FP), R4 MOVW $SYS_madvise, R1 SYSCALL - // ignore failure - maybe pages are locked + MOVW R2, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_nacl_386.s b/src/runtime/sys_nacl_386.s index cdc8ff1a02375..24eaeb238cb6d 100644 --- a/src/runtime/sys_nacl_386.s +++ b/src/runtime/sys_nacl_386.s @@ -266,7 +266,7 @@ TEXT runtime·nacl_clock_gettime(SB),NOSPLIT,$8 NACL_SYSCALL(SYS_clock_gettime) MOVL AX, ret+8(FP) RET - + TEXT runtime·nanotime(SB),NOSPLIT,$20 MOVL $0, 0(SP) // real time clock LEAL 8(SP), AX @@ -308,12 +308,12 @@ TEXT 
runtime·sigtramp(SB),NOSPLIT,$0 // save g MOVL DI, 20(SP) - + // g = m->gsignal MOVL g_m(DI), BX MOVL m_gsignal(BX), BX MOVL BX, g(CX) - + // copy arguments for sighandler MOVL $11, 0(SP) // signal MOVL $0, 4(SP) // siginfo @@ -356,7 +356,7 @@ ret: // Today those registers are just PC and SP, but in case additional registers // are relevant in the future (for example DX is the Go func context register) // we restore as many registers as possible. - // + // // We smash BP, because that's what the linker smashes during RET. // LEAL ctxt+4(FP), BP diff --git a/src/runtime/sys_nacl_amd64p32.s b/src/runtime/sys_nacl_amd64p32.s index 4c4d509576cb0..b4a108346d096 100644 --- a/src/runtime/sys_nacl_amd64p32.s +++ b/src/runtime/sys_nacl_amd64p32.s @@ -334,13 +334,13 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$80 // check that g exists get_tls(CX) MOVL g(CX), DI - + CMPL DI, $0 JEQ nog // save g MOVL DI, 20(SP) - + // g = m->gsignal MOVL g_m(DI), BX MOVL m_gsignal(BX), BX diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s index 4042ab4f8abd8..66f4620cab59a 100644 --- a/src/runtime/sys_netbsd_386.s +++ b/src/runtime/sys_netbsd_386.s @@ -135,7 +135,9 @@ TEXT runtime·munmap(SB),NOSPLIT,$-4 TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // sys_madvise INT $0x80 - // ignore failure - maybe pages are locked + JAE 2(PC) + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$-4 diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s index 11b9c1b417375..531c227a7b5ad 100644 --- a/src/runtime/sys_netbsd_amd64.s +++ b/src/runtime/sys_netbsd_amd64.s @@ -23,7 +23,7 @@ TEXT runtime·lwp_create(SB),NOSPLIT,$0 RET TEXT runtime·lwp_tramp(SB),NOSPLIT,$0 - + // Set FS to point at m->tls. 
LEAQ m_tls(R8), DI CALL runtime·settls(SB) @@ -319,7 +319,9 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX // arg 3 - behav MOVQ $75, AX // sys_madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s index 6b2c5a83572c2..304075f295e31 100644 --- a/src/runtime/sys_netbsd_arm.s +++ b/src/runtime/sys_netbsd_arm.s @@ -284,11 +284,12 @@ TEXT runtime·munmap(SB),NOSPLIT,$0 RET TEXT runtime·madvise(SB),NOSPLIT,$0 - MOVW addr+0(FP), R0 // arg 1 - addr - MOVW n+4(FP), R1 // arg 2 - len - MOVW flags+8(FP), R2 // arg 3 - behav - SWI $0xa0004b // sys_madvise - // ignore failure - maybe pages are locked + MOVW addr+0(FP), R0 // arg 1 - addr + MOVW n+4(FP), R1 // arg 2 - len + MOVW flags+8(FP), R2 // arg 3 - behav + SWI $0xa0004b // sys_madvise + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s index 21f13c806e635..d555edb71f162 100644 --- a/src/runtime/sys_openbsd_386.s +++ b/src/runtime/sys_openbsd_386.s @@ -136,7 +136,8 @@ TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // sys_madvise INT $0x80 JAE 2(PC) - MOVL $0xf1, 0xf1 // crash + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$-4 @@ -294,7 +295,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$12 CALL runtime·settls(SB) POPL AX POPAL - + // Now segment is established. Initialize m, g. 
get_tls(AX) MOVL DX, g(AX) diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s index 38ac38d9bf182..227e81869c0f6 100644 --- a/src/runtime/sys_openbsd_amd64.s +++ b/src/runtime/sys_openbsd_amd64.s @@ -305,7 +305,9 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX // arg 3 - behav MOVQ $75, AX // sys_madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 diff --git a/src/runtime/sys_openbsd_arm.s b/src/runtime/sys_openbsd_arm.s index ff1c1da9b97ee..94ac5d599d2fd 100644 --- a/src/runtime/sys_openbsd_arm.s +++ b/src/runtime/sys_openbsd_arm.s @@ -143,8 +143,8 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVW flags+8(FP), R2 // arg 2 - flags MOVW $75, R12 // sys_madvise SWI $0 - MOVW.CS $0, R8 // crash on syscall failure - MOVW.CS R8, (R8) + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$0 @@ -371,8 +371,9 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0 TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 B runtime·armPublicationBarrier(SB) -// TODO(jsing): Implement. 
TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0 - MOVW $5, R0 - MOVW R0, (R0) + MOVM.WP [R1, R2, R3, R12], (R13) + MOVW $330, R12 // sys___get_tcb + SWI $0 + MOVM.IAW (R13), [R1, R2, R3, R12] RET diff --git a/src/runtime/sys_plan9_386.s b/src/runtime/sys_plan9_386.s index 47dcb8db04e95..a7fb9fe6f7c64 100644 --- a/src/runtime/sys_plan9_386.s +++ b/src/runtime/sys_plan9_386.s @@ -126,7 +126,7 @@ TEXT runtime·noted(SB),NOSPLIT,$0 INT $64 MOVL AX, ret+4(FP) RET - + TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0 MOVL $38, AX INT $64 diff --git a/src/runtime/sys_plan9_amd64.s b/src/runtime/sys_plan9_amd64.s index 8077d6d324eb8..4ef4aab37671e 100644 --- a/src/runtime/sys_plan9_amd64.s +++ b/src/runtime/sys_plan9_amd64.s @@ -123,7 +123,7 @@ TEXT runtime·noted(SB),NOSPLIT,$0 SYSCALL MOVL AX, ret+8(FP) RET - + TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0 MOVQ $38, BP SYSCALL diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s index 2b6dabab99e90..930fc889976d4 100644 --- a/src/runtime/sys_solaris_amd64.s +++ b/src/runtime/sys_solaris_amd64.s @@ -63,9 +63,9 @@ TEXT runtime·pipe1(SB),NOSPLIT,$0 // Call a library function with SysV calling conventions. // The called function can take a maximum of 6 INTEGER class arguments, -// see +// see // Michael Matz, Jan Hubicka, Andreas Jaeger, and Mark Mitchell -// System V Application Binary Interface +// System V Application Binary Interface // AMD64 Architecture Processor Supplement // section 3.2.3. // @@ -119,7 +119,7 @@ skipargs: MOVL 0(AX), AX MOVQ AX, libcall_err(DI) -skiperrno2: +skiperrno2: RET // uint32 tstart_sysvicall(M *newm); @@ -186,7 +186,7 @@ allgood: // Save m->libcall and m->scratch. We need to do this because we // might get interrupted by a signal in runtime·asmcgocall. 
- // save m->libcall + // save m->libcall MOVQ g_m(R10), BP LEAQ m_libcall(BP), R11 MOVQ libcall_fn(R11), R10 diff --git a/src/runtime/sys_wasm.s b/src/runtime/sys_wasm.s index 3ca844a4c73c5..6e28656340d79 100644 --- a/src/runtime/sys_wasm.s +++ b/src/runtime/sys_wasm.s @@ -187,11 +187,11 @@ TEXT ·walltime(SB), NOSPLIT, $0 CallImport RET -TEXT ·scheduleCallback(SB), NOSPLIT, $0 +TEXT ·scheduleTimeoutEvent(SB), NOSPLIT, $0 CallImport RET -TEXT ·clearScheduledCallback(SB), NOSPLIT, $0 +TEXT ·clearTimeoutEvent(SB), NOSPLIT, $0 CallImport RET diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s index 3c091adcb1df6..e6d774e66f475 100644 --- a/src/runtime/sys_windows_386.s +++ b/src/runtime/sys_windows_386.s @@ -455,9 +455,7 @@ loop: MULL CX IMULL $100, DI ADDL DI, DX - // wintime*100 = DX:AX, subtract startNano and return - SUBL runtime·startNano+0(SB), AX - SBBL runtime·startNano+4(SB), DX + // wintime*100 = DX:AX MOVL AX, ret_lo+0(FP) MOVL DX, ret_hi+4(FP) RET @@ -482,9 +480,6 @@ loop: IMULL $100, DI ADDL DI, DX // w*100 = DX:AX - // subtract startNano and save for return - SUBL runtime·startNano+0(SB), AX - SBBL runtime·startNano+4(SB), DX MOVL AX, mono+12(FP) MOVL DX, mono+16(FP) @@ -494,13 +489,13 @@ wall: MOVL (_SYSTEM_TIME+time_hi2), DX CMPL CX, DX JNE wall - + // w = DX:AX // convert to Unix epoch (but still 100ns units) #define delta 116444736000000000 SUBL $(delta & 0xFFFFFFFF), AX SBBL $(delta >> 32), DX - + // nano/100 = DX:AX // split into two decimal halves by div 1e9. // (decimal point is two spots over from correct place, @@ -509,7 +504,7 @@ wall: DIVL CX MOVL AX, DI MOVL DX, SI - + // DI = nano/100/1e9 = nano/1e11 = sec/100, DX = SI = nano/100%1e9 // split DX into seconds and nanoseconds by div 1e7 magic multiply. 
MOVL DX, AX @@ -520,7 +515,7 @@ wall: IMULL $10000000, DX MOVL SI, CX SUBL DX, CX - + // DI = sec/100 (still) // BX = (nano/100%1e9)/1e7 = (nano/1e9)%100 = sec%100 // CX = (nano/100%1e9)%1e7 = (nano%1e9)/100 = nsec/100 diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index c1449dba6006d..612f0a474d18f 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -89,7 +89,7 @@ TEXT runtime·badsignal2(SB),NOSPLIT|NOFRAME,$48 MOVQ $0, 32(SP) // overlapped MOVQ runtime·_WriteFile(SB), AX CALL AX - + RET // faster get/set last error @@ -363,7 +363,7 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT,$0 // Layout new m scheduler stack on os stack. MOVQ SP, AX MOVQ AX, (g_stack+stack_hi)(DX) - SUBQ $(64*1024), AX // inital stack size (adjusted later) + SUBQ $(64*1024), AX // initial stack size (adjusted later) MOVQ AX, (g_stack+stack_lo)(DX) ADDQ $const__StackGuard, AX MOVQ AX, g_stackguard0(DX) @@ -486,7 +486,6 @@ loop: SHLQ $32, CX ORQ BX, CX IMULQ $100, CX - SUBQ runtime·startNano(SB), CX MOVQ CX, ret+0(FP) RET useQPC: @@ -506,7 +505,6 @@ loop: SHLQ $32, AX ORQ BX, AX IMULQ $100, AX - SUBQ runtime·startNano(SB), AX MOVQ AX, mono+16(FP) MOVQ $_SYSTEM_TIME, DI diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s new file mode 100644 index 0000000000000..60be74b95cf02 --- /dev/null +++ b/src/runtime/sys_windows_arm.s @@ -0,0 +1,692 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// void runtime·asmstdcall(void *c); +TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr} + MOVW R0, R4 // put libcall * in r4 + MOVW R13, R5 // save stack pointer in r5 + + // SetLastError(0) + MOVW $0, R0 + MRC 15, 0, R1, C13, C0, 2 + MOVW R0, 0x34(R1) + + MOVW 8(R4), R12 // libcall->args + + // Do we have more than 4 arguments? + MOVW 4(R4), R0 // libcall->n + SUB.S $4, R0, R2 + BLE loadregs + + // Reserve stack space for remaining args + SUB R2<<2, R13 + BIC $0x7, R13 // alignment for ABI + + // R0: count of arguments + // R1: + // R2: loop counter, from 0 to (n-4) + // R3: scratch + // R4: pointer to libcall struct + // R12: libcall->args + MOVW $0, R2 +stackargs: + ADD $4, R2, R3 // r3 = args[4 + i] + MOVW R3<<2(R12), R3 + MOVW R3, R2<<2(R13) // stack[i] = r3 + + ADD $1, R2 // i++ + SUB $4, R0, R3 // while (i < (n - 4)) + CMP R3, R2 + BLT stackargs + +loadregs: + CMP $3, R0 + MOVW.GT 12(R12), R3 + + CMP $2, R0 + MOVW.GT 8(R12), R2 + + CMP $1, R0 + MOVW.GT 4(R12), R1 + + CMP $0, R0 + MOVW.GT 0(R12), R0 + + BIC $0x7, R13 // alignment for ABI + MOVW 0(R4), R12 // branch to libcall->fn + BL (R12) + + MOVW R5, R13 // free stack space + MOVW R0, 12(R4) // save return value to libcall->r1 + MOVW R1, 16(R4) + + // GetLastError + MRC 15, 0, R1, C13, C0, 2 + MOVW 0x34(R1), R0 + MOVW R0, 20(R4) // store in libcall->err + + MOVM.IA.W (R13), [R4, R5, R15] + +TEXT runtime·badsignal2(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R14], (R13) // push {r4, lr} + MOVW R13, R4 // save original stack pointer + SUB $8, R13 // space for 2 variables + BIC $0x7, R13 // alignment for ABI + + // stderr + MOVW runtime·_GetStdHandle(SB), R1 + MOVW $-12, R0 + BL (R1) + + MOVW $runtime·badsignalmsg(SB), R1 // lpBuffer + MOVW $runtime·badsignallen(SB), R2 // lpNumberOfBytesToWrite + MOVW (R2), R2 + ADD $0x4, R13, R3 // lpNumberOfBytesWritten + MOVW $0, R12 // lpOverlapped + MOVW 
R12, (R13) + + MOVW runtime·_WriteFile(SB), R12 + BL (R12) + + MOVW R4, R13 // restore SP + MOVM.IA.W (R13), [R4, R15] // pop {r4, pc} + +TEXT runtime·getlasterror(SB),NOSPLIT,$0 + MRC 15, 0, R0, C13, C0, 2 + MOVW 0x34(R0), R0 + MOVW R0, ret+0(FP) + RET + +TEXT runtime·setlasterror(SB),NOSPLIT|NOFRAME,$0 + MRC 15, 0, R1, C13, C0, 2 + MOVW R0, 0x34(R1) + RET + +// Called by Windows as a Vectored Exception Handler (VEH). +// First argument is pointer to struct containing +// exception record and context pointers. +// Handler function is stored in R1 +// Return 0 for 'not handled', -1 for handled. +// int32_t sigtramp( +// PEXCEPTION_POINTERS ExceptionInfo, +// func *GoExceptionHandler); +TEXT runtime·sigtramp(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R0, R4-R11, R14], (R13) // push {r0, r4-r11, lr} (SP-=40) + SUB $(8+20), R13 // reserve space for g, sp, and + // parameters/retval to go call + + MOVW R0, R6 // Save param0 + MOVW R1, R7 // Save param1 + + BL runtime·load_g(SB) + CMP $0, g // is there a current g? + BL.EQ runtime·badsignal2(SB) + + // save g and SP in case of stack switch + MOVW R13, 24(R13) + MOVW g, 20(R13) + + // do we need to switch to the g0 stack? + MOVW g, R5 // R5 = g + MOVW g_m(R5), R2 // R2 = m + MOVW m_g0(R2), R4 // R4 = g0 + CMP R5, R4 // if curg == g0 + BEQ g0 + + // switch to g0 stack + MOVW R4, g // g = g0 + MOVW (g_sched+gobuf_sp)(g), R3 // R3 = g->gobuf.sp + BL runtime·save_g(SB) + + // traceback will think that we've done PUSH and SUB + // on this stack, so subtract them here to match. + // (we need room for sighandler arguments anyway). + // and re-save old SP for restoring later. 
+ SUB $(40+8+20), R3 + MOVW R13, 24(R3) // save old stack pointer + MOVW R3, R13 // switch stack + +g0: + MOVW 0(R6), R2 // R2 = ExceptionPointers->ExceptionRecord + MOVW 4(R6), R3 // R3 = ExceptionPointers->ContextRecord + + // make it look like mstart called us on g0, to stop traceback + MOVW $runtime·mstart(SB), R4 + + MOVW R4, 0(R13) // Save link register for traceback + MOVW R2, 4(R13) // Move arg0 (ExceptionRecord) into position + MOVW R3, 8(R13) // Move arg1 (ContextRecord) into position + MOVW R5, 12(R13) // Move arg2 (original g) into position + BL (R7) // Call the go routine + MOVW 16(R13), R4 // Fetch return value from stack + + // Compute the value of the g0 stack pointer after deallocating + // this frame, then allocating 8 bytes. We may need to store + // the resume SP and PC on the g0 stack to work around + // control flow guard when we resume from the exception. + ADD $(40+20), R13, R12 + + // switch back to original stack and g + MOVW 24(R13), R13 + MOVW 20(R13), g + BL runtime·save_g(SB) + +done: + MOVW R4, R0 // move retval into position + ADD $(8 + 20), R13 // free locals + MOVM.IA.W (R13), [R3, R4-R11, R14] // pop {r3, r4-r11, lr} + + // if return value is CONTINUE_SEARCH, do not set up control + // flow guard workaround + CMP $0, R0 + BEQ return + + // Check if we need to set up the control flow guard workaround. + // On Windows/ARM, the stack pointer must lie within system + // stack limits when we resume from exception. + // Store the resume SP and PC on the g0 stack, + // and return to returntramp on the g0 stack. returntramp + // pops the saved PC and SP from the g0 stack, resuming execution + // at the desired location. + // If returntramp has already been set up by a previous exception + // handler, don't clobber the stored SP and PC on the stack. 
+ MOVW 4(R3), R3 // PEXCEPTION_POINTERS->Context + MOVW 0x40(R3), R2 // load PC from context record + MOVW $runtime·returntramp(SB), R1 + CMP R1, R2 + B.EQ return // do not clobber saved SP/PC + + // Save resume SP and PC on g0 stack + MOVW 0x38(R3), R2 // load SP from context record + MOVW R2, 0(R12) // Store resume SP on g0 stack + MOVW 0x40(R3), R2 // load PC from context record + MOVW R2, 4(R12) // Store resume PC on g0 stack + + // Set up context record to return to returntramp on g0 stack + MOVW R12, 0x38(R3) // save g0 stack pointer + // in context record + MOVW $runtime·returntramp(SB), R2 // save resume address + MOVW R2, 0x40(R3) // in context record + +return: + B (R14) // return + +// +// Trampoline to resume execution from exception handler. +// This is part of the control flow guard workaround. +// It switches stacks and jumps to the continuation address. +// +TEXT runtime·returntramp(SB),NOSPLIT|NOFRAME,$0 + MOVM.IA (R13), [R13, R15] // ldm sp, [sp, pc] + +TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·exceptionhandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·firstcontinuehandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·lastcontinuehandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·ctrlhandler(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·ctrlhandler1(SB), R1 + B runtime·externalthreadhandler(SB) + +TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·profileloop1(SB), R1 + B runtime·externalthreadhandler(SB) + +// int32 externalthreadhandler(uint32 arg, int (*func)(uint32)) +// stack layout: +// +----------------+ +// | callee-save | +// | registers | +// +----------------+ +// | m | +// +----------------+ +// 20| g | +// +----------------+ +// 16| func ptr (r1) | +// +----------------+ +// 12| argument (r0) | +//---+----------------+ +// 8 | param1 | +// +----------------+ +// 4 | param0 | 
+// +----------------+ +// 0 | retval | +// +----------------+ +// +TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + SUB $(m__size + g__size + 20), R13 // space for locals + MOVW R0, 12(R13) + MOVW R1, 16(R13) + + // zero out m and g structures + ADD $20, R13, R0 // compute pointer to g + MOVW R0, 4(R13) + MOVW $(m__size + g__size), R0 + MOVW R0, 8(R13) + BL runtime·memclrNoHeapPointers(SB) + + // initialize m and g structures + ADD $20, R13, R2 // R2 = g + ADD $(20 + g__size), R13, R3 // R3 = m + MOVW R2, m_g0(R3) // m->g0 = g + MOVW R3, g_m(R2) // g->m = m + MOVW R2, m_curg(R3) // m->curg = g + + MOVW R2, g + BL runtime·save_g(SB) + + // set up stackguard stuff + MOVW R13, R0 + MOVW R0, g_stack+stack_hi(g) + SUB $(32*1024), R0 + MOVW R0, (g_stack+stack_lo)(g) + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + + // move argument into position and call function + MOVW 12(R13), R0 + MOVW R0, 4(R13) + MOVW 16(R13), R1 + BL (R1) + + // clear g + MOVW $0, g + BL runtime·save_g(SB) + + MOVW 0(R13), R0 // load return value + ADD $(m__size + g__size + 20), R13 // free locals + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +GLOBL runtime·cbctxts(SB), NOPTR, $4 + +TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + SUB $36, R13 // space for locals + + // save callback arguments to stack. We currently support up to 4 arguments + ADD $16, R13, R4 + MOVM.IA [R0-R3], (R4) + + // load cbctxts[i]. 
The trampoline in zcallback_windows.s puts the callback + // index in R12 + MOVW runtime·cbctxts(SB), R4 + MOVW R12<<2(R4), R4 // R4 holds pointer to wincallbackcontext structure + + // extract callback context + MOVW wincallbackcontext_argsize(R4), R5 + MOVW wincallbackcontext_gobody(R4), R4 + + // we currently support up to 4 arguments + CMP $(4 * 4), R5 + BL.GT runtime·abort(SB) + + // extend argsize by size of return value + ADD $4, R5 + + // Build 'type args struct' + MOVW R4, 4(R13) // fn + ADD $16, R13, R0 // arg (points to r0-r3, ret on stack) + MOVW R0, 8(R13) + MOVW R5, 12(R13) // argsize + + BL runtime·load_g(SB) + BL runtime·cgocallback_gofunc(SB) + + ADD $16, R13, R0 // load arg + MOVW 12(R13), R1 // load argsize + SUB $4, R1 // offset to return value + MOVW R1<<0(R0), R0 // load return value + + ADD $36, R13 // free locals + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +// uint32 tstart_stdcall(M *newm); +TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + + MOVW m_g0(R0), g + MOVW R0, g_m(g) + BL runtime·save_g(SB) + + // do per-thread TLS initialization + BL runtime·init_thread_tls(SB) + + // Layout new m scheduler stack on os stack. + MOVW R13, R0 + MOVW R0, g_stack+stack_hi(g) + SUB $(64*1024), R0 + MOVW R0, (g_stack+stack_lo)(g) + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + + BL runtime·emptyfunc(SB) // fault if stack check is wrong + BL runtime·mstart(SB) + + // Exit the thread. + MOVW $0, R0 + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +// onosstack calls fn on OS stack. +// adapted from asm_arm.s : systemstack +// func onosstack(fn unsafe.Pointer, arg uint32) +TEXT runtime·onosstack(SB),NOSPLIT,$0 + MOVW fn+0(FP), R5 // R5 = fn + MOVW arg+4(FP), R6 // R6 = arg + + // This function can be called when there is no g, + // for example, when we are handling a callback on a non-go thread. + // In this case we're already on the system stack. 
+ CMP $0, g + BEQ noswitch + + MOVW g_m(g), R1 // R1 = m + + MOVW m_gsignal(R1), R2 // R2 = gsignal + CMP g, R2 + B.EQ noswitch + + MOVW m_g0(R1), R2 // R2 = g0 + CMP g, R2 + B.EQ noswitch + + MOVW m_curg(R1), R3 + CMP g, R3 + B.EQ switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVW $runtime·badsystemstack(SB), R0 + BL (R0) + B runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + MOVW $runtime·systemstack_switch(SB), R3 + ADD $4, R3, R3 // get past push {lr} + MOVW R3, (g_sched+gobuf_pc)(g) + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW LR, (g_sched+gobuf_lr)(g) + MOVW g, (g_sched+gobuf_g)(g) + + // switch to g0 + MOVW R2, g + MOVW (g_sched+gobuf_sp)(R2), R3 + // make it look like mstart called systemstack on g0, to stop traceback + SUB $4, R3, R3 + MOVW $runtime·mstart(SB), R4 + MOVW R4, 0(R3) + MOVW R3, R13 + + // call target function + MOVW R6, R0 // arg + BL (R5) + + // switch back to g + MOVW g_m(g), R1 + MOVW m_curg(R1), g + MOVW (g_sched+gobuf_sp)(g), R13 + MOVW $0, R3 + MOVW R3, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVW.P 4(R13), R14 // restore LR + MOVW R6, R0 // arg + B (R5) + +// Runs on OS stack. Duration (in 100ns units) is in R0. +TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R14], (R13) // push {r4, lr} + MOVW R13, R4 // Save SP + SUB $8, R13 // R13 = R13 - 8 + BIC $0x7, R13 // Align SP for ABI + RSB $0, R0, R3 // R3 = -R0 + MOVW $0, R1 // R1 = FALSE (alertable) + MOVW $-1, R0 // R0 = handle + MOVW R13, R2 // R2 = pTime + MOVW R3, 0(R2) // time_lo + MOVW R0, 4(R2) // time_hi + MOVW runtime·_NtWaitForSingleObject(SB), R3 + BL (R3) + MOVW R4, R13 // Restore SP + MOVM.IA.W (R13), [R4, R15] // pop {R4, pc} + +// Runs on OS stack. 
+TEXT runtime·switchtothread(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R14], (R13) // push {R4, lr} + MOVW R13, R4 + BIC $0x7, R13 // alignment for ABI + MOVW runtime·_SwitchToThread(SB), R0 + BL (R0) + MOVW R4, R13 // restore stack pointer + MOVM.IA.W (R13), [R4, R15] // pop {R4, pc} + +TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0 + B runtime·armPublicationBarrier(SB) + +// never called (cgo not supported) +TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0 + MOVW $0xabcd, R0 + MOVW R0, (R0) + RET + +// See http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/getting-os-information-the-kuser_shared_data-structure/ +// Must read hi1, then lo, then hi2. The snapshot is valid if hi1 == hi2. +#define _INTERRUPT_TIME 0x7ffe0008 +#define _SYSTEM_TIME 0x7ffe0014 +#define time_lo 0 +#define time_hi1 4 +#define time_hi2 8 + +TEXT runtime·nanotime(SB),NOSPLIT,$0-8 + MOVW $0, R0 + MOVB runtime·useQPCTime(SB), R0 + CMP $0, R0 + BNE useQPC + MOVW $_INTERRUPT_TIME, R3 +loop: + MOVW time_hi1(R3), R1 + MOVW time_lo(R3), R0 + MOVW time_hi2(R3), R2 + CMP R1, R2 + BNE loop + + // wintime = R1:R0, multiply by 100 + MOVW $100, R2 + MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2 + MULA R1, R2, R4, R4 + + // wintime*100 = R4:R3 + MOVW R3, ret_lo+0(FP) + MOVW R4, ret_hi+4(FP) + RET +useQPC: + B runtime·nanotimeQPC(SB) // tail call + RET + +TEXT time·now(SB),NOSPLIT,$0-20 + MOVW $0, R0 + MOVB runtime·useQPCTime(SB), R0 + CMP $0, R0 + BNE useQPC + MOVW $_INTERRUPT_TIME, R3 +loop: + MOVW time_hi1(R3), R1 + MOVW time_lo(R3), R0 + MOVW time_hi2(R3), R2 + CMP R1, R2 + BNE loop + + // wintime = R1:R0, multiply by 100 + MOVW $100, R2 + MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2 + MULA R1, R2, R4, R4 + + // wintime*100 = R4:R3 + MOVW R3, mono+12(FP) + MOVW R4, mono+16(FP) + + MOVW $_SYSTEM_TIME, R3 +wall: + MOVW time_hi1(R3), R1 + MOVW time_lo(R3), R0 + MOVW time_hi2(R3), R2 + CMP R1, R2 + BNE wall + + // w = R1:R0 in 100ns untis + // convert to Unix epoch (but still 100ns units) + 
#define delta 116444736000000000 + SUB.S $(delta & 0xFFFFFFFF), R0 + SBC $(delta >> 32), R1 + + // Convert to nSec + MOVW $100, R2 + MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2 + MULA R1, R2, R4, R4 + // w = R2:R1 in nSec + MOVW R3, R1 // R4:R3 -> R2:R1 + MOVW R4, R2 + + // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61) + // to get seconds (96 bit scaled result) + MOVW $0x89705f41, R3 // 2**61 * 10**-9 + MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3 + MOVW $0,R7 + MULALU R2,R3,(R7,R6) + + // unscale by discarding low 32 bits, shifting the rest by 29 + MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61) + ORR R7<<3,R6 + MOVW R7>>29,R7 + + // subtract (10**9 * sec) from nsec to get nanosecond remainder + MOVW $1000000000, R5 // 10**9 + MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5 + MULA R7,R5,R9,R9 + SUB.S R8,R1 // R2:R1 -= R9:R8 + SBC R9,R2 + + // because reciprocal was a truncated repeating fraction, quotient + // may be slightly too small -- adjust to make remainder < 10**9 + CMP R5,R1 // if remainder > 10**9 + SUB.HS R5,R1 // remainder -= 10**9 + ADD.HS $1,R6 // sec += 1 + + MOVW R6,sec_lo+0(FP) + MOVW R7,sec_hi+4(FP) + MOVW R1,nsec+8(FP) + RET +useQPC: + B runtime·nowQPC(SB) // tail call + RET + +// save_g saves the g register (R10) into thread local memory +// so that we can call externally compiled +// ARM code that will overwrite those registers. +// NOTE: runtime.gogo assumes that R1 is preserved by this function. +// runtime.mcall assumes this function only clobbers R0 and R11. +// Returns with g in R0. +// Save the value in the _TEB->TlsSlots array. +// Effectively implements TlsSetValue(). +// tls_g stores the TLS slot allocated TlsAlloc(). 
+TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0 + MRC 15, 0, R0, C13, C0, 2 + ADD $0xe10, R0 + MOVW $runtime·tls_g(SB), R11 + MOVW (R11), R11 + MOVW g, R11<<2(R0) + MOVW g, R0 // preserve R0 across call to setg<> + RET + +// load_g loads the g register from thread-local memory, +// for use after calling externally compiled +// ARM code that overwrote those registers. +// Get the value from the _TEB->TlsSlots array. +// Effectively implements TlsGetValue(). +TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0 + MRC 15, 0, R0, C13, C0, 2 + ADD $0xe10, R0 + MOVW $runtime·tls_g(SB), g + MOVW (g), g + MOVW g<<2(R0), g + RET + +// This is called from rt0_go, which runs on the system stack +// using the initial stack allocated by the OS. +// It calls back into standard C using the BL below. +// To do that, the stack pointer must be 8-byte-aligned. +TEXT runtime·_initcgo(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R14], (R13) // push {r4, lr} + + // Ensure stack is 8-byte aligned before calling C code + MOVW R13, R4 + BIC $0x7, R13 + + // Allocate a TLS slot to hold g across calls to external code + MOVW $runtime·_TlsAlloc(SB), R0 + MOVW (R0), R0 + BL (R0) + + // Assert that slot is less than 64 so we can use _TEB->TlsSlots + CMP $64, R0 + MOVW $runtime·abort(SB), R1 + BL.GE (R1) + + // Save Slot into tls_g + MOVW $runtime·tls_g(SB), R1 + MOVW R0, (R1) + + BL runtime·init_thread_tls(SB) + + MOVW R4, R13 + MOVM.IA.W (R13), [R4, R15] // pop {r4, pc} + +// void init_thread_tls() +// +// Does per-thread TLS initialization. Saves a pointer to the TLS slot +// holding G, in the current m. +// +// g->m->tls[0] = &_TEB->TlsSlots[tls_g] +// +// The purpose of this is to enable the profiling handler to get the +// current g associated with the thread. We cannot use m->curg because curg +// only holds the current user g. If the thread is executing system code or +// external code, m->curg will be NULL. 
The thread's TLS slot always holds +// the current g, so save a reference to this location so the profiling +// handler can get the real g from the thread's m. +// +// Clobbers R0-R3 +TEXT runtime·init_thread_tls(SB),NOSPLIT|NOFRAME,$0 + // compute &_TEB->TlsSlots[tls_g] + MRC 15, 0, R0, C13, C0, 2 + ADD $0xe10, R0 + MOVW $runtime·tls_g(SB), R1 + MOVW (R1), R1 + MOVW R1<<2, R1 + ADD R1, R0 + + // save in g->m->tls[0] + MOVW g_m(g), R1 + MOVW R0, m_tls(R1) + RET + +// Holds the TLS Slot, which was allocated by TlsAlloc() +GLOBL runtime·tls_g+0(SB), NOPTR, $4 diff --git a/src/runtime/syscall_aix.go b/src/runtime/syscall_aix.go new file mode 100644 index 0000000000000..7f2bcbe9d95bb --- /dev/null +++ b/src/runtime/syscall_aix.go @@ -0,0 +1,208 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// This file handles some syscalls from the syscall package +// Especially, syscalls use during forkAndExecInChild which must not split the stack + +//go:cgo_import_dynamic libc_chdir chdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_chroot chroot "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_dup2 dup2 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_execve execve "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_fork fork "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_ioctl ioctl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setgid setgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setgroups setgroups "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setsid setsid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setuid setuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setpgid setpgid "libc.a/shr_64.o" + +//go:linkname libc_chdir libc_chdir +//go:linkname libc_chroot libc_chroot +//go:linkname libc_dup2 libc_dup2 +//go:linkname libc_execve libc_execve 
+//go:linkname libc_fcntl libc_fcntl +//go:linkname libc_fork libc_fork +//go:linkname libc_ioctl libc_ioctl +//go:linkname libc_setgid libc_setgid +//go:linkname libc_setgroups libc_setgroups +//go:linkname libc_setsid libc_setsid +//go:linkname libc_setuid libc_setuid +//go:linkname libc_setpgid libc_setpgid + +var ( + libc_chdir, + libc_chroot, + libc_dup2, + libc_execve, + libc_fcntl, + libc_fork, + libc_ioctl, + libc_setgid, + libc_setgroups, + libc_setsid, + libc_setuid, + libc_setpgid libFunc +) + +// In syscall_syscall6 and syscall_rawsyscall6, r2 is always 0 +// as it's never used on AIX +// TODO: remove r2 from zsyscall_aix_$GOARCH.go + +// Syscall is needed because some packages (like net) need it too. +// The best way is to return EINVAL and let Golang handles its failure +// If the syscall can't fail, this function can redirect it to a real syscall. +//go:nosplit +func syscall_Syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + return 0, 0, _EINVAL +} + +// This is syscall.RawSyscall, it exists to satisfy some build dependency, +// but it doesn't work. 
+func syscall_RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + panic("RawSyscall not available on AIX") +} + +//go:nosplit +func syscall_syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + c := getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = nargs + c.args = uintptr(noescape(unsafe.Pointer(&a1))) + + entersyscallblock() + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(&c)) + exitsyscall() + return c.r1, 0, c.err +} + +//go:nosplit +func syscall_rawSyscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + c := getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = nargs + c.args = uintptr(noescape(unsafe.Pointer(&a1))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(&c)) + + return c.r1, 0, c.err +} + +//go:linkname syscall_chdir syscall.chdir +//go:nosplit +func syscall_chdir(path uintptr) (err uintptr) { + _, err = syscall1(&libc_chdir, path) + return +} + +//go:linkname syscall_chroot1 syscall.chroot1 +//go:nosplit +func syscall_chroot1(path uintptr) (err uintptr) { + _, err = syscall1(&libc_chroot, path) + return +} + +// like close, but must not split stack, for fork. +//go:linkname syscall_close syscall.close +//go:nosplit +func syscall_close(fd int32) int32 { + _, err := syscall1(&libc_close, uintptr(fd)) + return int32(err) +} + +//go:linkname syscall_dup2child syscall.dup2child +//go:nosplit +func syscall_dup2child(old, new uintptr) (val, err uintptr) { + val, err = syscall2(&libc_dup2, old, new) + return +} + +//go:linkname syscall_execve syscall.execve +//go:nosplit +func syscall_execve(path, argv, envp uintptr) (err uintptr) { + _, err = syscall3(&libc_execve, path, argv, envp) + return +} + +// like exit, but must not split stack, for fork. 
+//go:linkname syscall_exit syscall.exit +//go:nosplit +func syscall_exit(code uintptr) { + syscall1(&libc_exit, code) +} + +//go:linkname syscall_fcntl1 syscall.fcntl1 +//go:nosplit +func syscall_fcntl1(fd, cmd, arg uintptr) (val, err uintptr) { + val, err = syscall3(&libc_fcntl, fd, cmd, arg) + return + +} + +//go:linkname syscall_forkx syscall.forkx +//go:nosplit +func syscall_forkx(flags uintptr) (pid uintptr, err uintptr) { + pid, err = syscall1(&libc_fork, flags) + return +} + +//go:linkname syscall_getpid syscall.getpid +//go:nosplit +func syscall_getpid() (pid, err uintptr) { + pid, err = syscall0(&libc_getpid) + return +} + +//go:linkname syscall_ioctl syscall.ioctl +//go:nosplit +func syscall_ioctl(fd, req, arg uintptr) (err uintptr) { + _, err = syscall3(&libc_ioctl, fd, req, arg) + return +} + +//go:linkname syscall_setgid syscall.setgid +//go:nosplit +func syscall_setgid(gid uintptr) (err uintptr) { + _, err = syscall1(&libc_setgid, gid) + return +} + +//go:linkname syscall_setgroups1 syscall.setgroups1 +//go:nosplit +func syscall_setgroups1(ngid, gid uintptr) (err uintptr) { + _, err = syscall2(&libc_setgroups, ngid, gid) + return +} + +//go:linkname syscall_setsid syscall.setsid +//go:nosplit +func syscall_setsid() (pid, err uintptr) { + pid, err = syscall0(&libc_setsid) + return +} + +//go:linkname syscall_setuid syscall.setuid +//go:nosplit +func syscall_setuid(uid uintptr) (err uintptr) { + _, err = syscall1(&libc_setuid, uid) + return +} + +//go:linkname syscall_setpgid syscall.setpgid +//go:nosplit +func syscall_setpgid(pid, pgid uintptr) (err uintptr) { + _, err = syscall2(&libc_setpgid, pid, pgid) + return +} + +//go:linkname syscall_write1 syscall.write1 +//go:nosplit +func syscall_write1(fd, buf, nbyte uintptr) (n, err uintptr) { + n, err = syscall3(&libc_write, fd, buf, nbyte) + return +} diff --git a/src/runtime/syscall_solaris.go b/src/runtime/syscall_solaris.go index 9f05a47892916..94e018d4798d8 100644 --- 
a/src/runtime/syscall_solaris.go +++ b/src/runtime/syscall_solaris.go @@ -83,6 +83,13 @@ func syscall_close(fd int32) int32 { return int32(sysvicall1(&libc_close, uintptr(fd))) } +const _F_DUP2FD = 0x9 + +//go:nosplit +func syscall_dup2(oldfd, newfd uintptr) (val, err uintptr) { + return syscall_fcntl(oldfd, _F_DUP2FD, newfd) +} + //go:nosplit func syscall_execve(path, argv, envp uintptr) (err uintptr) { call := libcall{ diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 8264070569ff3..8cfc71124a5b6 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -25,18 +25,32 @@ func (c *wincallbackcontext) setCleanstack(cleanstack bool) { var ( cbs callbacks cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s - - callbackasm byte // type isn't really byte, it's code in runtime ) +func callbackasm() + // callbackasmAddr returns address of runtime.callbackasm // function adjusted by i. -// runtime.callbackasm is just a series of CALL instructions -// (each is 5 bytes long), and we want callback to arrive at +// On x86 and amd64, runtime.callbackasm is a series of CALL instructions, +// and we want callback to arrive at // correspondent call instruction instead of start of // runtime.callbackasm. +// On ARM, runtime.callbackasm is a series of mov and branch instructions. +// R12 is loaded with the callback index. Each entry is two instructions, +// hence 8 bytes. 
func callbackasmAddr(i int) uintptr { - return uintptr(add(unsafe.Pointer(&callbackasm), uintptr(i*5))) + var entrySize int + switch GOARCH { + default: + panic("unsupported architecture") + case "386", "amd64": + entrySize = 5 + case "arm": + // On ARM, each entry is a MOV instruction + // followed by a branch instruction + entrySize = 8 + } + return funcPC(callbackasm) + uintptr(i*entrySize) } //go:linkname compileCallback syscall.compileCallback @@ -224,3 +238,16 @@ func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, cgocall(asmstdcallAddr, unsafe.Pointer(c)) return c.r1, c.r2, c.err } + +//go:linkname syscall_Syscall18 syscall.Syscall18 +//go:nosplit +func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { + lockOSThread() + defer unlockOSThread() + c := &getg().m.syscall + c.fn = fn + c.n = nargs + c.args = uintptr(noescape(unsafe.Pointer(&a1))) + cgocall(asmstdcallAddr, unsafe.Pointer(c)) + return c.r1, c.r2, c.err +} diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go index 0882e9cb73b6d..3ad6512976774 100644 --- a/src/runtime/syscall_windows_test.go +++ b/src/runtime/syscall_windows_test.go @@ -157,7 +157,7 @@ func TestEnumWindows(t *testing.T) { } } -func callback(hwnd syscall.Handle, lparam uintptr) uintptr { +func callback(timeFormatString unsafe.Pointer, lparam uintptr) uintptr { (*(*func())(unsafe.Pointer(&lparam)))() return 0 // stop enumeration } @@ -165,9 +165,10 @@ func callback(hwnd syscall.Handle, lparam uintptr) uintptr { // nestedCall calls into Windows, back into Go, and finally to f. 
func nestedCall(t *testing.T, f func()) { c := syscall.NewCallback(callback) - d := GetDLL(t, "user32.dll") + d := GetDLL(t, "kernel32.dll") defer d.Release() - d.Proc("EnumWindows").Call(c, uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&f)))) + const LOCALE_NAME_USER_DEFAULT = 0 + d.Proc("EnumTimeFormatsEx").Call(c, LOCALE_NAME_USER_DEFAULT, 0, uintptr(*(*unsafe.Pointer)(unsafe.Pointer(&f)))) } func TestCallback(t *testing.T) { diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go index 744b6108e2bc3..fdf08be7e9171 100644 --- a/src/runtime/testdata/testprog/gc.go +++ b/src/runtime/testdata/testprog/gc.go @@ -17,6 +17,7 @@ func init() { register("GCFairness", GCFairness) register("GCFairness2", GCFairness2) register("GCSys", GCSys) + register("GCPhys", GCPhys) } func GCSys() { @@ -48,8 +49,11 @@ func GCSys() { fmt.Printf("OK\n") } +var sink []byte + func workthegc() []byte { - return make([]byte, 1029) + sink = make([]byte, 1029) + return sink } func GCFairness() { @@ -121,3 +125,85 @@ func GCFairness2() { } fmt.Println("OK") } + +var maybeSaved []byte + +func GCPhys() { + // In this test, we construct a very specific scenario. We first + // allocate N objects and drop half of their pointers on the floor, + // effectively creating N/2 'holes' in our allocated arenas. We then + // try to allocate objects twice as big. At the end, we measure the + // physical memory overhead of large objects. + // + // The purpose of this test is to ensure that the GC scavenges free + // spans eagerly to ensure high physical memory utilization even + // during fragmentation. + const ( + // Unfortunately, measuring actual used physical pages is + // difficult because HeapReleased doesn't include the parts + // of an arena that haven't yet been touched. So, we just + // make objects and size sufficiently large such that even + // 64 MB overhead is relatively small in the final + // calculation. 
+ // + // Currently, we target 480MiB worth of memory for our test, + // computed as size * objects + (size*2) * (objects/2) + // = 2 * size * objects + // + // Size must be also large enough to be considered a large + // object (not in any size-segregated span). + size = 1 << 20 + objects = 240 + ) + // Save objects which we want to survive, and condemn objects which we don't. + // Note that we condemn objects in this way and release them all at once in + // order to avoid having the GC start freeing up these objects while the loop + // is still running and filling in the holes we intend to make. + saved := make([][]byte, 0, objects) + condemned := make([][]byte, 0, objects/2+1) + for i := 0; i < objects; i++ { + // Write into a global, to prevent this from being optimized away by + // the compiler in the future. + maybeSaved = make([]byte, size) + if i%2 == 0 { + saved = append(saved, maybeSaved) + } else { + condemned = append(condemned, maybeSaved) + } + } + condemned = nil + // Clean up the heap. This will free up every other object created above + // (i.e. everything in condemned) creating holes in the heap. + runtime.GC() + // Allocate many new objects of 2x size. + for i := 0; i < objects/2; i++ { + saved = append(saved, make([]byte, size*2)) + } + // Clean up the heap again just to put it in a known state. + runtime.GC() + // heapBacked is an estimate of the amount of physical memory used by + // this test. HeapSys is an estimate of the size of the mapped virtual + // address space (which may or may not be backed by physical pages) + // whereas HeapReleased is an estimate of the amount of bytes returned + // to the OS. Their difference then roughly corresponds to the amount + // of virtual address space that is backed by physical pages. 
+ var stats runtime.MemStats + runtime.ReadMemStats(&stats) + heapBacked := stats.HeapSys - stats.HeapReleased + // If heapBacked exceeds the amount of memory actually used for heap + // allocated objects by 10% (post-GC HeapAlloc should be quite close to + // the size of the working set), then fail. + // + // In the context of this test, that indicates a large amount of + // fragmentation with physical pages that are otherwise unused but not + // returned to the OS. + overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc) + if overuse > 0.1 { + fmt.Printf("exceeded physical memory overuse threshold of 10%%: %3.2f%%\n"+ + "(alloc: %d, sys: %d, rel: %d, objs: %d)\n", overuse*100, stats.HeapAlloc, + stats.HeapSys, stats.HeapReleased, len(saved)) + return + } + fmt.Println("OK") + runtime.KeepAlive(saved) +} diff --git a/src/runtime/testdata/testprog/gettid.go b/src/runtime/testdata/testprog/gettid.go deleted file mode 100644 index 1b3e29ab08e5f..0000000000000 --- a/src/runtime/testdata/testprog/gettid.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "syscall" -) - -func gettid() int { - return syscall.Gettid() -} - -func tidExists(tid int) (exists, supported bool) { - stat, err := ioutil.ReadFile(fmt.Sprintf("/proc/self/task/%d/stat", tid)) - if os.IsNotExist(err) { - return false, true - } - // Check if it's a zombie thread. 
- state := bytes.Fields(stat)[2] - return !(len(state) == 1 && state[0] == 'Z'), true -} diff --git a/src/runtime/testdata/testprog/lockosthread.go b/src/runtime/testdata/testprog/lockosthread.go index 88c0d12e4c11b..fd3123e64743f 100644 --- a/src/runtime/testdata/testprog/lockosthread.go +++ b/src/runtime/testdata/testprog/lockosthread.go @@ -24,6 +24,12 @@ func init() { runtime.LockOSThread() }) register("LockOSThreadAlt", LockOSThreadAlt) + + registerInit("LockOSThreadAvoidsStatePropagation", func() { + // Lock the OS thread now so main runs on the main thread. + runtime.LockOSThread() + }) + register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation) } func LockOSThreadMain() { @@ -92,3 +98,100 @@ func LockOSThreadAlt() { ok: println("OK") } + +func LockOSThreadAvoidsStatePropagation() { + // This test is similar to LockOSThreadAlt in that it will detect if a thread + // which should have died is still running. However, rather than do this with + // thread IDs, it does this by unsharing state on that thread. This way, it + // also detects whether new threads were cloned from the dead thread, and not + // from a clean thread. Cloning from a locked thread is undesirable since + // cloned threads will inherit potentially unwanted OS state. + // + // unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on + // Linux, so on other platforms this just checks that the runtime doesn't + // do anything terrible. + // + // This is running locked to the main OS thread. + + // GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is + // cloned from. + if runtime.GOMAXPROCS(-1) != 1 { + println("requires GOMAXPROCS=1") + os.Exit(1) + } + + if err := chdir("/"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + // On systems other than Linux, cwd == "". 
+ cwd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if cwd != "" && cwd != "/" { + println("unexpected cwd", cwd, " wanted /") + os.Exit(1) + } + + ready := make(chan bool, 1) + go func() { + // This goroutine must be running on a new thread. + runtime.LockOSThread() + + // Unshare details about the FS, like the CWD, with + // the rest of the process on this thread. + // On systems other than Linux, this is a no-op. + if err := unshareFs(); err != nil { + if err == errNotPermitted { + println("unshare not permitted") + os.Exit(0) + } + println("failed to unshare fs:", err.Error()) + os.Exit(1) + } + // Chdir to somewhere else on this thread. + // On systems other than Linux, this is a no-op. + if err := chdir("/tmp"); err != nil { + println("failed to chdir:", err.Error()) + os.Exit(1) + } + + // The state on this thread is now considered "tainted", but it + // should no longer be observable in any other context. + + ready <- true + // Exit with the thread locked. + }() + <-ready + + // Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if + // for some reason state from the (hopefully dead) locked thread above + // propagated into a newly created thread (via clone), or that thread + // is actually being re-used, then we should get scheduled on such a + // thread with high likelihood. + done := make(chan bool) + go func() { + runtime.LockOSThread() + + // Get the CWD and check if this is the same as the main thread's + // CWD. Every thread should share the same CWD. + // On systems other than Linux, wd == "". 
+ wd, err := getcwd() + if err != nil { + println("failed to get cwd:", err.Error()) + os.Exit(1) + } + if wd != cwd { + println("bad state from old thread propagated after it should have died") + os.Exit(1) + } + <-done + + runtime.UnlockOSThread() + }() + done <- true + runtime.UnlockOSThread() + println("OK") +} diff --git a/src/runtime/testdata/testprog/memprof.go b/src/runtime/testdata/testprog/memprof.go index a22fee61d7825..7b134bc078403 100644 --- a/src/runtime/testdata/testprog/memprof.go +++ b/src/runtime/testdata/testprog/memprof.go @@ -21,7 +21,10 @@ var memProfBuf bytes.Buffer var memProfStr string func MemProf() { - for i := 0; i < 1000; i++ { + // Force heap sampling for determinism. + runtime.MemProfileRate = 1 + + for i := 0; i < 10; i++ { fmt.Fprintf(&memProfBuf, "%*d\n", i, i) } memProfStr = memProfBuf.String() diff --git a/src/cmd/vet/testdata/cgo/cgo3.go b/src/runtime/testdata/testprog/syscalls.go similarity index 55% rename from src/cmd/vet/testdata/cgo/cgo3.go rename to src/runtime/testdata/testprog/syscalls.go index 0b1518e1f930a..098d5cadf8a48 100644 --- a/src/cmd/vet/testdata/cgo/cgo3.go +++ b/src/runtime/testdata/testprog/syscalls.go @@ -2,12 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Used by TestVetVerbose to test that vet -v doesn't fail because it -// can't find "C". +package main -package testdata +import ( + "errors" +) -import "C" - -func F() { -} +var errNotPermitted = errors.New("operation not permitted") diff --git a/src/runtime/testdata/testprog/syscalls_linux.go b/src/runtime/testdata/testprog/syscalls_linux.go new file mode 100644 index 0000000000000..b8ac0876269d3 --- /dev/null +++ b/src/runtime/testdata/testprog/syscalls_linux.go @@ -0,0 +1,59 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "syscall" +) + +func gettid() int { + return syscall.Gettid() +} + +func tidExists(tid int) (exists, supported bool) { + stat, err := ioutil.ReadFile(fmt.Sprintf("/proc/self/task/%d/stat", tid)) + if os.IsNotExist(err) { + return false, true + } + // Check if it's a zombie thread. + state := bytes.Fields(stat)[2] + return !(len(state) == 1 && state[0] == 'Z'), true +} + +func getcwd() (string, error) { + if !syscall.ImplementsGetwd { + return "", nil + } + // Use the syscall to get the current working directory. + // This is imperative for checking for OS thread state + // after an unshare since os.Getwd might just check the + // environment, or use some other mechanism. + var buf [4096]byte + n, err := syscall.Getcwd(buf[:]) + if err != nil { + return "", err + } + // Subtract one for null terminator. + return string(buf[:n-1]), nil +} + +func unshareFs() error { + err := syscall.Unshare(syscall.CLONE_FS) + if err != nil { + errno, ok := err.(syscall.Errno) + if ok && errno == syscall.EPERM { + return errNotPermitted + } + } + return err +} + +func chdir(path string) error { + return syscall.Chdir(path) +} diff --git a/src/runtime/testdata/testprog/gettid_none.go b/src/runtime/testdata/testprog/syscalls_none.go similarity index 68% rename from src/runtime/testdata/testprog/gettid_none.go rename to src/runtime/testdata/testprog/syscalls_none.go index 036db87e10ea6..7f8ded3994f18 100644 --- a/src/runtime/testdata/testprog/gettid_none.go +++ b/src/runtime/testdata/testprog/syscalls_none.go @@ -13,3 +13,15 @@ func gettid() int { func tidExists(tid int) (exists, supported bool) { return false, false } + +func getcwd() (string, error) { + return "", nil +} + +func unshareFs() error { + return nil +} + +func chdir(path string) error { + return nil +} diff --git a/src/runtime/testdata/testprog/traceback_ancestors.go b/src/runtime/testdata/testprog/traceback_ancestors.go index fe57c1c157e68..0ee402c4bdc60 
100644 --- a/src/runtime/testdata/testprog/traceback_ancestors.go +++ b/src/runtime/testdata/testprog/traceback_ancestors.go @@ -5,8 +5,10 @@ package main import ( + "bytes" "fmt" "runtime" + "strings" ) func init() { @@ -18,25 +20,50 @@ const numFrames = 2 func TracebackAncestors() { w := make(chan struct{}) - recurseThenCallGo(w, numGoroutines, numFrames) + recurseThenCallGo(w, numGoroutines, numFrames, true) <-w printStack() close(w) } +var ignoreGoroutines = make(map[string]bool) + func printStack() { buf := make([]byte, 1024) for { n := runtime.Stack(buf, true) if n < len(buf) { - fmt.Print(string(buf[:n])) + tb := string(buf[:n]) + + // Delete any ignored goroutines, if present. + pos := 0 + for pos < len(tb) { + next := pos + strings.Index(tb[pos:], "\n\n") + if next < pos { + next = len(tb) + } else { + next += len("\n\n") + } + + if strings.HasPrefix(tb[pos:], "goroutine ") { + id := tb[pos+len("goroutine "):] + id = id[:strings.IndexByte(id, ' ')] + if ignoreGoroutines[id] { + tb = tb[:pos] + tb[next:] + next = pos + } + } + pos = next + } + + fmt.Print(tb) return } buf = make([]byte, 2*len(buf)) } } -func recurseThenCallGo(w chan struct{}, frames int, goroutines int) { +func recurseThenCallGo(w chan struct{}, frames int, goroutines int, main bool) { if frames == 0 { // Signal to TracebackAncestors that we are done recursing and starting goroutines. w <- struct{}{} @@ -44,10 +71,29 @@ func recurseThenCallGo(w chan struct{}, frames int, goroutines int) { return } if goroutines == 0 { + // Record which goroutine this is so we can ignore it + // in the traceback if it hasn't finished exiting by + // the time we printStack. + if !main { + ignoreGoroutines[goroutineID()] = true + } + // Start the next goroutine now that there are no more recursions left // for this current goroutine. 
- go recurseThenCallGo(w, frames-1, numFrames) + go recurseThenCallGo(w, frames-1, numFrames, false) return } - recurseThenCallGo(w, frames, goroutines-1) + recurseThenCallGo(w, frames, goroutines-1, main) +} + +func goroutineID() string { + buf := make([]byte, 128) + runtime.Stack(buf, false) + const prefix = "goroutine " + if !bytes.HasPrefix(buf, []byte(prefix)) { + panic(fmt.Sprintf("expected %q at beginning of traceback:\n%s", prefix, buf)) + } + buf = buf[len(prefix):] + n := bytes.IndexByte(buf, ' ') + return string(buf[:n]) } diff --git a/src/runtime/testdata/testprogcgo/exec.go b/src/runtime/testdata/testprogcgo/exec.go index 2e948401c87a5..94da5dc526bc2 100644 --- a/src/runtime/testdata/testprogcgo/exec.go +++ b/src/runtime/testdata/testprogcgo/exec.go @@ -75,6 +75,14 @@ func CgoExecSignalMask() { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { + // An overloaded system + // may fail with EAGAIN. + // This doesn't tell us + // anything useful; ignore it. + // Issue #27731. + if isEAGAIN(err) { + return + } fmt.Printf("iteration %d: %v\n", j, err) os.Exit(1) } @@ -87,3 +95,11 @@ func CgoExecSignalMask() { fmt.Println("OK") } + +// isEAGAIN reports whether err is an EAGAIN error from a process execution. +func isEAGAIN(err error) bool { + if p, ok := err.(*os.PathError); ok { + err = p.Err + } + return err == syscall.EAGAIN +} diff --git a/src/runtime/testdata/testprogcgo/pprof.go b/src/runtime/testdata/testprogcgo/pprof.go index 4460b9304e5cf..00f2c42e93cae 100644 --- a/src/runtime/testdata/testprogcgo/pprof.go +++ b/src/runtime/testdata/testprogcgo/pprof.go @@ -26,6 +26,9 @@ void cpuHog() { salt2 = foo; } +void cpuHog2() { +} + static int cpuHogCount; struct cgoTracebackArg { @@ -37,10 +40,13 @@ struct cgoTracebackArg { // pprofCgoTraceback is passed to runtime.SetCgoTraceback. // For testing purposes it pretends that all CPU hits in C code are in cpuHog. 
+// Issue #29034: At least 2 frames are required to verify all frames are captured +// since runtime/pprof ignores the runtime.goexit base frame if it exists. void pprofCgoTraceback(void* parg) { struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg); arg->buf[0] = (uintptr_t)(cpuHog) + 0x10; - arg->buf[1] = 0; + arg->buf[1] = (uintptr_t)(cpuHog2) + 0x4; + arg->buf[2] = 0; ++cpuHogCount; } diff --git a/src/runtime/testdata/testprogcgo/threadpprof.go b/src/runtime/testdata/testprogcgo/threadpprof.go index 3da82961b9b74..37a2a1ab6590a 100644 --- a/src/runtime/testdata/testprogcgo/threadpprof.go +++ b/src/runtime/testdata/testprogcgo/threadpprof.go @@ -30,6 +30,9 @@ void cpuHogThread() { threadSalt2 = foo; } +void cpuHogThread2() { +} + static int cpuHogThreadCount; struct cgoTracebackArg { @@ -44,7 +47,8 @@ struct cgoTracebackArg { void pprofCgoThreadTraceback(void* parg) { struct cgoTracebackArg* arg = (struct cgoTracebackArg*)(parg); arg->buf[0] = (uintptr_t)(cpuHogThread) + 0x10; - arg->buf[1] = 0; + arg->buf[1] = (uintptr_t)(cpuHogThread2) + 0x4; + arg->buf[2] = 0; __sync_add_and_fetch(&cpuHogThreadCount, 1); } diff --git a/src/runtime/textflag.h b/src/runtime/textflag.h index 929e9b36a909b..d1bb52cc004ac 100644 --- a/src/runtime/textflag.h +++ b/src/runtime/textflag.h @@ -31,4 +31,4 @@ // TODO(mwhudson): only implemented for ppc64x at present. #define NOFRAME 512 // Function can call reflect.Type.Method or reflect.Type.MethodByName. -#define REFLECTMETHOD = 1024 +#define REFLECTMETHOD 1024 diff --git a/src/runtime/time.go b/src/runtime/time.go index 9de45f5e08e59..28a4722866498 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -7,7 +7,7 @@ package runtime import ( - "runtime/internal/sys" + "internal/cpu" "unsafe" ) @@ -50,7 +50,7 @@ var timers [timersLen]struct { // The padding should eliminate false sharing // between timersBucket values. 
- pad [sys.CacheLineSize - unsafe.Sizeof(timersBucket{})%sys.CacheLineSize]byte + pad [cpu.CacheLinePadSize - unsafe.Sizeof(timersBucket{})%cpu.CacheLinePadSize]byte } func (t *timer) assignBucket() *timersBucket { @@ -156,7 +156,7 @@ func (tb *timersBucket) addtimerLocked(t *timer) bool { } if t.i == 0 { // siftup moved to top: new earliest deadline. - if tb.sleeping { + if tb.sleeping && tb.sleepUntil > t.when { tb.sleeping = false notewakeup(&tb.waitnote) } @@ -164,10 +164,10 @@ func (tb *timersBucket) addtimerLocked(t *timer) bool { tb.rescheduling = false goready(tb.gp, 0) } - } - if !tb.created { - tb.created = true - go timerproc(tb) + if !tb.created { + tb.created = true + go timerproc(tb) + } } return true } @@ -187,14 +187,22 @@ func deltimer(t *timer) bool { tb := t.tb lock(&tb.lock) + removed, ok := tb.deltimerLocked(t) + unlock(&tb.lock) + if !ok { + badTimer() + } + return removed +} + +func (tb *timersBucket) deltimerLocked(t *timer) (removed, ok bool) { // t may not be registered anymore and may have // a bogus i (typically 0, if generated by Go). // Verify it before proceeding. i := t.i last := len(tb.t) - 1 if i < 0 || i > last || tb.t[i] != t { - unlock(&tb.lock) - return false + return false, true } if i != last { tb.t[i] = tb.t[last] @@ -202,7 +210,7 @@ func deltimer(t *timer) bool { } tb.t[last] = nil tb.t = tb.t[:last] - ok := true + ok = true if i != last { if !siftupTimer(tb.t, i) { ok = false @@ -211,11 +219,26 @@ func deltimer(t *timer) bool { ok = false } } + return true, ok +} + +func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg interface{}, seq uintptr) { + tb := t.tb + + lock(&tb.lock) + _, ok := tb.deltimerLocked(t) + if ok { + t.when = when + t.period = period + t.f = f + t.arg = arg + t.seq = seq + ok = tb.addtimerLocked(t) + } unlock(&tb.lock) if !ok { badTimer() } - return true } // Timerproc runs the time-driven events. 
@@ -435,23 +458,3 @@ func siftdownTimer(t []*timer, i int) bool { func badTimer() { panic(errorString("racy use of timers")) } - -// Entry points for net, time to call nanotime. - -//go:linkname poll_runtimeNano internal/poll.runtimeNano -func poll_runtimeNano() int64 { - return nanotime() -} - -//go:linkname time_runtimeNano time.runtimeNano -func time_runtimeNano() int64 { - return nanotime() -} - -// Monotonic times are reported as offsets from startNano. -// We initialize startNano to nanotime() - 1 so that on systems where -// monotonic time resolution is fairly low (e.g. Windows 2008 -// which appears to have a default resolution of 15ms), -// we avoid ever reporting a nanotime of 0. -// (Callers may want to use 0 as "time not set".) -var startNano int64 = nanotime() - 1 diff --git a/src/runtime/timeasm.go b/src/runtime/timeasm.go index 5af920c18c048..82cf63edffd46 100644 --- a/src/runtime/timeasm.go +++ b/src/runtime/timeasm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. // Declarations for operating systems implementing time.now directly in assembly. -// Those systems are also expected to have nanotime subtract startNano, -// so that time.now and nanotime return the same monotonic clock readings. 
// +build windows diff --git a/src/runtime/timestub.go b/src/runtime/timestub.go index f9230da69f92a..459bf8e5435b0 100644 --- a/src/runtime/timestub.go +++ b/src/runtime/timestub.go @@ -14,5 +14,5 @@ import _ "unsafe" // for go:linkname //go:linkname time_now time.now func time_now() (sec int64, nsec int32, mono int64) { sec, nsec = walltime() - return sec, nsec, nanotime() - startNano + return sec, nsec, nanotime() } diff --git a/src/runtime/timestub2.go b/src/runtime/timestub2.go index 9ddc6fed9182d..00c2c55f461d6 100644 --- a/src/runtime/timestub2.go +++ b/src/runtime/timestub2.go @@ -5,6 +5,7 @@ // +build !darwin // +build !windows // +build !freebsd +// +build !aix package runtime diff --git a/src/runtime/tls_arm.s b/src/runtime/tls_arm.s index cc547a5db1303..400c16a177582 100644 --- a/src/runtime/tls_arm.s +++ b/src/runtime/tls_arm.s @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build !windows + #include "go_asm.h" #include "go_tls.h" #include "funcdata.h" diff --git a/src/runtime/tls_ppc64x.s b/src/runtime/tls_ppc64x.s index ed94989b69146..c6974492828ea 100644 --- a/src/runtime/tls_ppc64x.s +++ b/src/runtime/tls_ppc64x.s @@ -23,9 +23,11 @@ // // NOTE: setg_gcc<> assume this clobbers only R31. TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0 - MOVB runtime·iscgo(SB), R31 +#ifndef GOOS_aix + MOVBZ runtime·iscgo(SB), R31 CMP R31, $0 BEQ nocgo +#endif MOVD runtime·tls_g(SB), R31 MOVD g, 0(R13)(R31*1) diff --git a/src/runtime/trace.go b/src/runtime/trace.go index 61f7513ee0e5a..08e92d2efe6b9 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -532,12 +532,12 @@ func traceEvent(ev byte, skip int, args ...uint64) { } func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) { - buf := (*bufp).ptr() + buf := bufp.ptr() // TODO: test on non-zero extraBytes param. 
maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params if buf == nil || len(buf.arr)-buf.pos < maxSize { buf = traceFlush(traceBufPtrOf(buf), pid).ptr() - (*bufp).set(buf) + bufp.set(buf) } ticks := uint64(cputicks()) / traceTickDiv @@ -584,10 +584,10 @@ func traceStackID(mp *m, buf []uintptr, skip int) uint64 { gp := mp.curg var nstk int if gp == _g_ { - nstk = callers(skip+1, buf[:]) + nstk = callers(skip+1, buf) } else if gp != nil { gp = mp.curg - nstk = gcallers(gp, skip, buf[:]) + nstk = gcallers(gp, skip, buf) } if nstk > 0 { nstk-- // skip runtime.goexit @@ -689,11 +689,11 @@ func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) // so there must be no memory allocation or any activities // that causes tracing after this point. - buf := (*bufp).ptr() + buf := bufp.ptr() size := 1 + 2*traceBytesPerNumber + len(s) if buf == nil || len(buf.arr)-buf.pos < size { buf = traceFlush(traceBufPtrOf(buf), pid).ptr() - (*bufp).set(buf) + bufp.set(buf) } buf.byte(traceEvString) buf.varint(id) @@ -708,7 +708,7 @@ func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) buf.varint(uint64(slen)) buf.pos += copy(buf.arr[buf.pos:], s[:slen]) - (*bufp).set(buf) + bufp.set(buf) return id, bufp } @@ -1206,7 +1206,7 @@ func trace_userLog(id uint64, category, message string) { traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID) // traceEventLocked reserved extra space for val and len(val) // in buf, so buf now has room for the following. - buf := (*bufp).ptr() + buf := bufp.ptr() // double-check the message and its length can fit. // Otherwise, truncate the message. 
diff --git a/src/runtime/trace/annotation.go b/src/runtime/trace/annotation.go index 3545ef3bba571..82cb232dba95e 100644 --- a/src/runtime/trace/annotation.go +++ b/src/runtime/trace/annotation.go @@ -24,13 +24,13 @@ type traceContextKey struct{} // If the end function is called multiple times, only the first // call is used in the latency measurement. // -// ctx, task := trace.NewTask(ctx, "awesome task") -// trace.WithRegion(ctx, prepWork) +// ctx, task := trace.NewTask(ctx, "awesomeTask") +// trace.WithRegion(ctx, "preparation", prepWork) // // preparation of the task // go func() { // continue processing the task in a separate goroutine. // defer task.End() -// trace.WithRegion(ctx, remainingWork) -// } +// trace.WithRegion(ctx, "remainingWork", remainingWork) +// }() func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { pid := fromContext(pctx).id id := newID() @@ -171,7 +171,7 @@ func (r *Region) End() { userRegion(r.id, regionEndCode, r.regionType) } -// IsEnabled returns whether tracing is enabled. +// IsEnabled reports whether tracing is enabled. // The information is advisory only. The tracing status // may have changed by the time this function returns. func IsEnabled() bool { diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index d8c225d975fa2..a536fb2a7166c 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -99,8 +99,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in if skip > 0 && callback != nil { throw("gentraceback callback cannot be used with non-zero skip") } - g := getg() - if g == gp && g == g.m.curg { + + // Don't call this "g"; it's too easy get "g" and "gp" confused. + if ourg := getg(); ourg == gp && ourg == ourg.m.curg { // The starting sp has been passed in as a uintptr, and the caller may // have other uintptr-typed stack references as well. 
// If during one of the calls that got us here or during one of the @@ -145,7 +146,6 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in cgoCtxt := gp.cgoCtxt printing := pcbuf == nil && callback == nil _defer := gp._defer - elideWrapper := false for _defer != nil && _defer.sp == _NoArgs { _defer = _defer.link @@ -178,6 +178,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in var cache pcvalueCache + lastFuncID := funcID_normal n := 0 for n < max { // Typically: @@ -196,16 +197,29 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // Found an actual function. // Derive frame pointer and link register. if frame.fp == 0 { - // We want to jump over the systemstack switch. If we're running on the - // g0, this systemstack is at the top of the stack. - // if we're not on g0 or there's a no curg, then this is a regular call. - sp := frame.sp - if flags&_TraceJumpStack != 0 && f.funcID == funcID_systemstack && gp == g.m.g0 && gp.m.curg != nil { - sp = gp.m.curg.sched.sp - frame.sp = sp - cgoCtxt = gp.m.curg.cgoCtxt + // Jump over system stack transitions. If we're on g0 and there's a user + // goroutine, try to jump. Otherwise this is a regular call. + if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil { + switch f.funcID { + case funcID_morestack: + // morestack does not return normally -- newstack() + // gogo's to curg.sched. Match that. + // This keeps morestack() from showing up in the backtrace, + // but that makes some sense since it'll never be returned + // to. + frame.pc = gp.m.curg.sched.pc + frame.fn = findfunc(frame.pc) + f = frame.fn + frame.sp = gp.m.curg.sched.sp + cgoCtxt = gp.m.curg.cgoCtxt + case funcID_systemstack: + // systemstack returns normally, so just follow the + // stack transition. 
+ frame.sp = gp.m.curg.sched.sp + cgoCtxt = gp.m.curg.cgoCtxt + } } - frame.fp = sp + uintptr(funcspdelta(f, frame.pc, &cache)) + frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache)) if !usesLR { // On x86, call instruction pushes return PC before entering new function. frame.fp += sys.RegSize @@ -271,7 +285,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // If framepointer_enabled and there's a frame, then // there's a saved bp here. - if framepointer_enabled && GOARCH == "amd64" && frame.varp > frame.sp { + if frame.varp > frame.sp && (framepointer_enabled && GOARCH == "amd64" || GOARCH == "arm64") { frame.varp -= sys.RegSize } @@ -298,8 +312,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // the function either doesn't return at all (if it has no defers or if the // defers do not recover) or it returns from one of the calls to // deferproc a second time (if the corresponding deferred func recovers). - // It suffices to assume that the most recent deferproc is the one that - // returns; everything live at earlier deferprocs is still live at that one. + // In the latter case, use a deferreturn call site as the continuation pc. frame.continpc = frame.pc if waspanic { // We match up defers with frames using the SP. @@ -310,7 +323,10 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // can't push a defer, the defer can't belong // to that frame. if _defer != nil && _defer.sp == frame.sp && frame.sp != frame.fp { - frame.continpc = _defer.pc + frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1 + // Note: the +1 is to offset the -1 that + // stack.go:getStackMap does to back up a return + // address make sure the pc is in the CALL instruction. 
} else { frame.continpc = 0 } @@ -328,48 +344,46 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in } if pcbuf != nil { - if skip == 0 { - (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc - } else { - // backup to CALL instruction to read inlining info (same logic as below) - tracepc := frame.pc - if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic { - tracepc-- - } - inldata := funcdata(f, _FUNCDATA_InlTree) - - // no inlining info, skip the physical frame - if inldata == nil { - skip-- - goto skipped - } + pc := frame.pc + // backup to CALL instruction to read inlining info (same logic as below) + tracepc := pc + if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic { + tracepc-- + } - ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache) + // If there is inlining info, record the inner frames. + if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil { inltree := (*[1 << 20]inlinedCall)(inldata) - // skip the logical (inlined) frames - logicalSkipped := 0 - for ix >= 0 && skip > 0 { - skip-- - logicalSkipped++ - ix = inltree[ix].parent - } - - // skip the physical frame if there's more to skip - if skip > 0 { - skip-- - goto skipped - } - - // now we have a partially skipped frame - (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc - - // if there's room, pcbuf[1] is a skip PC that encodes the number of skipped frames in pcbuf[0] - if n+1 < max { - n++ - pc := skipPC + uintptr(logicalSkipped) - (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc + for { + ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache) + if ix < 0 { + break + } + if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) { + // ignore wrappers + } else if skip > 0 { + skip-- + } else if n < max { + (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc + n++ + } + lastFuncID = inltree[ix].funcID + // Back up to an instruction in the "caller". 
+ tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc) + pc = tracepc + 1 } } + // Record the main frame. + if f.funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) { + // Ignore wrapper functions (except when they trigger panics). + } else if skip > 0 { + skip-- + } else if n < max { + (*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc + n++ + } + lastFuncID = f.funcID + n-- // offset n++ below } if printing { @@ -379,32 +393,39 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // any frames. And don't elide wrappers that // called panic rather than the wrapped // function. Otherwise, leave them out. - name := funcname(f) - nextElideWrapper := elideWrapperCalling(name) - if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, elideWrapper && nprint != 0) { - // Print during crash. - // main(0x1, 0x2, 0x3) - // /home/rsc/go/src/runtime/x.go:23 +0xf - // - tracepc := frame.pc // back up to CALL instruction for funcline. - if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic { - tracepc-- - } - file, line := funcline(f, tracepc) - inldata := funcdata(f, _FUNCDATA_InlTree) - if inldata != nil { - inltree := (*[1 << 20]inlinedCall)(inldata) + + // backup to CALL instruction to read inlining info (same logic as below) + tracepc := frame.pc + if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic { + tracepc-- + } + // If there is inlining info, print the inner frames. 
+ if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil { + inltree := (*[1 << 20]inlinedCall)(inldata) + for { ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil) - for ix != -1 { + if ix < 0 { + break + } + if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, inltree[ix].funcID, lastFuncID) { name := funcnameFromNameoff(f, inltree[ix].func_) + file, line := funcline(f, tracepc) print(name, "(...)\n") print("\t", file, ":", line, "\n") - - file = funcfile(f, inltree[ix].file) - line = inltree[ix].line - ix = inltree[ix].parent + nprint++ } + lastFuncID = inltree[ix].funcID + // Back up to an instruction in the "caller". + tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc) } + } + if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, f.funcID, lastFuncID) { + // Print during crash. + // main(0x1, 0x2, 0x3) + // /home/rsc/go/src/runtime/x.go:23 +0xf + // + name := funcname(f) + file, line := funcline(f, tracepc) if name == "runtime.gopanic" { name = "panic" } @@ -425,17 +446,16 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in if frame.pc > f.entry { print(" +", hex(frame.pc-f.entry)) } - if g.m.throwing > 0 && gp == g.m.curg || level >= 2 { + if gp.m != nil && gp.m.throwing > 0 && gp == gp.m.curg || level >= 2 { print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc)) } print("\n") nprint++ } - elideWrapper = nextElideWrapper + lastFuncID = f.funcID } n++ - skipped: if f.funcID == funcID_cgocallback_gofunc && len(cgoCtxt) > 0 { ctxt := cgoCtxt[len(cgoCtxt)-1] cgoCtxt = cgoCtxt[:len(cgoCtxt)-1] @@ -536,9 +556,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // It's okay in those situations not to use up the entire defer stack: // incomplete information then is still better than nothing. 
if callback != nil && n < max && _defer != nil { - if _defer != nil { - print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n") - } + print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n") for _defer = gp._defer; _defer != nil; _defer = _defer.link { print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n") } @@ -557,8 +575,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in // reflectMethodValue is a partial duplicate of reflect.makeFuncImpl // and reflect.methodValue. type reflectMethodValue struct { - fn uintptr - stack *bitvector // args bitmap + fn uintptr + stack *bitvector // ptrmap for both args and results + argLen uintptr // just args } // getArgInfoFast returns the argument frame information for a call to f. @@ -587,6 +606,7 @@ func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (ar // These take a *reflect.methodValue as their // context register. var mv *reflectMethodValue + var retValid bool if ctxt != nil { // This is not an actual call, but a // deferred call. The function value @@ -600,6 +620,10 @@ func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (ar // 0(SP). arg0 := frame.sp + sys.MinFrameSize mv = *(**reflectMethodValue)(unsafe.Pointer(arg0)) + // Figure out whether the return values are valid. + // Reflect will update this value after it copies + // in the return values. + retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize)) } if mv.fn != f.entry { print("runtime: confused by ", funcname(f), "\n") @@ -607,6 +631,9 @@ func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (ar } bv := mv.stack arglen = uintptr(bv.n * sys.PtrSize) + if !retValid { + arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1) + } argmap = bv } } @@ -650,7 +677,7 @@ func printcreatedby(gp *g) { // Show what created goroutine, except main goroutine (goid 1). 
pc := gp.gopc f := findfunc(pc) - if f.valid() && showframe(f, gp, false, false) && gp.goid != 1 { + if f.valid() && showframe(f, gp, false, funcID_normal, funcID_normal) && gp.goid != 1 { printcreatedby1(f, pc) } } @@ -737,11 +764,10 @@ func traceback1(pc, sp, lr uintptr, gp *g, flags uint) { // TODO: Unify this with gentraceback and CallersFrames. func printAncestorTraceback(ancestor ancestorInfo) { print("[originating from goroutine ", ancestor.goid, "]:\n") - elideWrapper := false for fidx, pc := range ancestor.pcs { f := findfunc(pc) // f previously validated - if showfuncinfo(f, fidx == 0, elideWrapper && fidx != 0) { - elideWrapper = printAncestorTracebackFuncInfo(f, pc) + if showfuncinfo(f, fidx == 0, funcID_normal, funcID_normal) { + printAncestorTracebackFuncInfo(f, pc) } } if len(ancestor.pcs) == _TracebackMaxFrames { @@ -749,7 +775,7 @@ func printAncestorTraceback(ancestor ancestorInfo) { } // Show what created goroutine, except main goroutine (goid 1). f := findfunc(ancestor.gopc) - if f.valid() && showfuncinfo(f, false, false) && ancestor.goid != 1 { + if f.valid() && showfuncinfo(f, false, funcID_normal, funcID_normal) && ancestor.goid != 1 { printcreatedby1(f, ancestor.gopc) } } @@ -758,27 +784,16 @@ func printAncestorTraceback(ancestor ancestorInfo) { // within an ancestor traceback. The precision of this info is reduced // due to only have access to the pcs at the time of the caller // goroutine being created. -func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) bool { - tracepc := pc // back up to CALL instruction for funcline. 
- if pc > f.entry { - tracepc -= sys.PCQuantum - } - file, line := funcline(f, tracepc) - inldata := funcdata(f, _FUNCDATA_InlTree) - if inldata != nil { +func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) { + name := funcname(f) + if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil { inltree := (*[1 << 20]inlinedCall)(inldata) - ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil) - for ix != -1 { - name := funcnameFromNameoff(f, inltree[ix].func_) - print(name, "(...)\n") - print("\t", file, ":", line, "\n") - - file = funcfile(f, inltree[ix].file) - line = inltree[ix].line - ix = inltree[ix].parent + ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil) + if ix >= 0 { + name = funcnameFromNameoff(f, inltree[ix].func_) } } - name := funcname(f) + file, line := funcline(f, pc) if name == "runtime.gopanic" { name = "panic" } @@ -788,7 +803,6 @@ func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) bool { print(" +", hex(pc-f.entry)) } print("\n") - return elideWrapperCalling(name) } func callers(skip int, pcbuf []uintptr) int { @@ -806,15 +820,19 @@ func gcallers(gp *g, skip int, pcbuf []uintptr) int { return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0) } -func showframe(f funcInfo, gp *g, firstFrame, elideWrapper bool) bool { +// showframe reports whether the frame with the given characteristics should +// be printed during a traceback. +func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool { g := getg() if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) { return true } - return showfuncinfo(f, firstFrame, elideWrapper) + return showfuncinfo(f, firstFrame, funcID, childID) } -func showfuncinfo(f funcInfo, firstFrame, elideWrapper bool) bool { +// showfuncinfo reports whether a function with the given characteristics should +// be printed during a traceback. 
+func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool { level, _, _ := gotraceback() if level > 1 { // Show all frames. @@ -825,11 +843,8 @@ func showfuncinfo(f funcInfo, firstFrame, elideWrapper bool) bool { return false } - if elideWrapper { - file, _ := funcline(f, f.entry) - if file == "" { - return false - } + if funcID == funcID_wrapper && elideWrapperCalling(childID) { + return false } name := funcname(f) @@ -843,7 +858,7 @@ func showfuncinfo(f funcInfo, firstFrame, elideWrapper bool) bool { return true } - return contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name)) + return contains(name, ".") && (!hasPrefix(name, "runtime.") || isExportedRuntime(name)) } // isExportedRuntime reports whether name is an exported runtime function. @@ -853,12 +868,12 @@ func isExportedRuntime(name string) bool { return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z' } -// elideWrapperCalling returns whether a wrapper function that called -// function "name" should be elided from stack traces. -func elideWrapperCalling(name string) bool { +// elideWrapperCalling reports whether a wrapper function that called +// function id should be elided from stack traces. +func elideWrapperCalling(id funcID) bool { // If the wrapper called a panic function instead of the // wrapped function, we want to include it in stacks. 
- return !(name == "runtime.gopanic" || name == "runtime.sigpanic" || name == "runtime.panicwrap") + return !(id == funcID_gopanic || id == funcID_sigpanic || id == funcID_panicwrap) } var gStatusStrings = [...]string{ @@ -922,7 +937,7 @@ func tracebackothers(me *g) { lock(&allglock) for _, gp := range allgs { - if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp) && level < 2 { + if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 { continue } print("\n") @@ -1008,7 +1023,11 @@ func topofstack(f funcInfo, g0 bool) bool { // in stack dumps and deadlock detector. This is any goroutine that // starts at a runtime.* entry point, except for runtime.main and // sometimes runtime.runfinq. -func isSystemGoroutine(gp *g) bool { +// +// If fixed is true, any goroutine that can vary between user and +// system (that is, the finalizer goroutine) is considered a user +// goroutine. +func isSystemGoroutine(gp *g, fixed bool) bool { // Keep this in sync with cmd/trace/trace.go:isSystemGoroutine. f := findfunc(gp.startpc) if !f.valid() { @@ -1020,9 +1039,14 @@ func isSystemGoroutine(gp *g) bool { if f.funcID == funcID_runfinq { // We include the finalizer goroutine if it's calling // back into user code. + if fixed { + // This goroutine can vary. In fixed mode, + // always consider it a user goroutine. 
+ return false + } return !fingRunning } - return hasprefix(funcname(f), "runtime.") + return hasPrefix(funcname(f), "runtime.") } // SetCgoTraceback records three C functions to use to gather diff --git a/src/runtime/type.go b/src/runtime/type.go index 4b38c351c7ee7..f7f99924eaf75 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -112,10 +112,6 @@ func (t *_type) uncommon() *uncommontype { } } -func hasPrefix(s, prefix string) bool { - return len(s) >= len(prefix) && s[:len(prefix)] == prefix -} - func (t *_type) name() string { if t.tflag&tflagNamed == 0 { return "" @@ -365,17 +361,32 @@ type interfacetype struct { } type maptype struct { - typ _type - key *_type - elem *_type - bucket *_type // internal type representing a hash bucket - keysize uint8 // size of key slot - indirectkey bool // store ptr to key instead of key itself - valuesize uint8 // size of value slot - indirectvalue bool // store ptr to value instead of value itself - bucketsize uint16 // size of bucket - reflexivekey bool // true if k==k for all keys - needkeyupdate bool // true if we need to update key on an overwrite + typ _type + key *_type + elem *_type + bucket *_type // internal type representing a hash bucket + keysize uint8 // size of key slot + valuesize uint8 // size of value slot + bucketsize uint16 // size of bucket + flags uint32 +} + +// Note: flag values must match those used in the TMAP case +// in ../cmd/compile/internal/gc/reflect.go:dtypesym. 
+func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself + return mt.flags&1 != 0 +} +func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself + return mt.flags&2 != 0 +} +func (mt *maptype) reflexivekey() bool { // true if k==k for all keys + return mt.flags&4 != 0 +} +func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite + return mt.flags&8 != 0 +} +func (mt *maptype) hashMightPanic() bool { // true if hash function might panic + return mt.flags&16 != 0 } type arraytype struct { diff --git a/src/runtime/vdso_elf64.go b/src/runtime/vdso_elf64.go index 851025006586c..7c9bd96277907 100644 --- a/src/runtime/vdso_elf64.go +++ b/src/runtime/vdso_elf64.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build linux -// +build amd64 arm64 +// +build amd64 arm64 ppc64 ppc64le package runtime diff --git a/src/runtime/vdso_in_none.go b/src/runtime/vdso_in_none.go index 34cfac56d1245..f2d6bb55d9cba 100644 --- a/src/runtime/vdso_in_none.go +++ b/src/runtime/vdso_in_none.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux,!386,!amd64,!arm,!arm64 !linux +// +build linux,!386,!amd64,!arm,!arm64,!ppc64,!ppc64le !linux package runtime diff --git a/src/runtime/vdso_linux.go b/src/runtime/vdso_linux.go index f6a285efd73f2..71ba4ce4161b6 100644 --- a/src/runtime/vdso_linux.go +++ b/src/runtime/vdso_linux.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. 
// +build linux -// +build 386 amd64 arm arm64 +// +build 386 amd64 arm arm64 ppc64 ppc64le package runtime @@ -42,6 +42,8 @@ const ( _STT_FUNC = 2 /* Symbol is a code object */ + _STT_NOTYPE = 0 /* Symbol type is not specified */ + _STB_GLOBAL = 1 /* Global symbol */ _STB_WEAK = 2 /* Weak symbol */ @@ -212,7 +214,8 @@ func vdsoParseSymbols(info *vdsoInfo, version int32) { sym := &info.symtab[symIndex] typ := _ELF_ST_TYPE(sym.st_info) bind := _ELF_ST_BIND(sym.st_info) - if typ != _STT_FUNC || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF { + // On ppc64x, VDSO functions are of type _STT_NOTYPE. + if typ != _STT_FUNC && typ != _STT_NOTYPE || bind != _STB_GLOBAL && bind != _STB_WEAK || sym.st_shndx == _SHN_UNDEF { return false } if k.name != gostringnocopy(&info.symstrings[sym.st_name]) { @@ -277,7 +280,7 @@ func vdsoauxv(tag, val uintptr) { } } -// vdsoMarker returns whether PC is on the VDSO page. +// vdsoMarker reports whether PC is on the VDSO page. func inVDSOPage(pc uintptr) bool { for _, k := range vdsoSymbolKeys { if *k.ptr != 0 { diff --git a/src/runtime/vdso_linux_ppc64x.go b/src/runtime/vdso_linux_ppc64x.go new file mode 100644 index 0000000000000..f30946e4c5b31 --- /dev/null +++ b/src/runtime/vdso_linux_ppc64x.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package runtime + +const ( + // vdsoArrayMax is the byte-size of a maximally sized array on this architecture. + // See cmd/compile/internal/ppc64/galign.go arch.MAXWIDTH initialization. 
+ vdsoArrayMax = 1<<50 - 1 +) + +var vdsoLinuxVersion = vdsoVersionKey{"LINUX_2.6.15", 0x75fcba5} + +var vdsoSymbolKeys = []vdsoSymbolKey{ + {"__kernel_clock_gettime", 0xb0cd725, 0xdfa941fd, &vdsoClockgettimeSym}, +} + +// initialize with vsyscall fallbacks +var ( + vdsoClockgettimeSym uintptr = 0 +) diff --git a/src/runtime/vdso_linux_test.go b/src/runtime/vdso_linux_test.go index b5221f90b71e4..ad083c61b443f 100644 --- a/src/runtime/vdso_linux_test.go +++ b/src/runtime/vdso_linux_test.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // +build linux -// +build 386 amd64 arm arm64 +// +build 386 amd64 arm arm64 ppc64 ppc64le package runtime_test diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s index d48e515d32cb3..41d285874dda7 100644 --- a/src/runtime/vlop_arm.s +++ b/src/runtime/vlop_arm.s @@ -30,7 +30,7 @@ // func runtime·udiv(n, d uint32) (q, r uint32) // compiler knowns the register usage of this function -// Reference: +// Reference: // Sloss, Andrew et. al; ARM System Developer's Guide: Designing and Optimizing System Software // Morgan Kaufmann; 1 edition (April 8, 2004), ISBN 978-1558608740 #define Rq R0 // input d, output q @@ -44,7 +44,7 @@ // the RET instruction will clobber R12 on nacl, and the compiler's register // allocator needs to know. TEXT runtime·udiv(SB),NOSPLIT|NOFRAME,$0 - MOVBU runtime·hardDiv(SB), Ra + MOVBU internal∕cpu·ARM+const_offsetARMHasIDIVA(SB), Ra CMP $0, Ra BNE udiv_hardware diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go index 9f003aed051f9..c022916422de3 100644 --- a/src/runtime/wincallback.go +++ b/src/runtime/wincallback.go @@ -17,11 +17,12 @@ import ( const maxCallback = 2000 -func genasm() { +func genasm386Amd64() { var buf bytes.Buffer - buf.WriteString(`// generated by wincallback.go; run go generate + buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. 
+// +build 386 amd64 // runtime·callbackasm is called by external code to // execute Go implemented callback function. It is not // called from the start, instead runtime·compilecallback @@ -29,13 +30,43 @@ func genasm() { // appropriately so different callbacks start with different // CALL instruction in runtime·callbackasm. This determines // which Go callback function is executed later on. + TEXT runtime·callbackasm(SB),7,$0 `) for i := 0; i < maxCallback; i++ { buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n") } - err := ioutil.WriteFile("zcallback_windows.s", buf.Bytes(), 0666) + filename := fmt.Sprintf("zcallback_windows.s") + err := ioutil.WriteFile(filename, buf.Bytes(), 0666) + if err != nil { + fmt.Fprintf(os.Stderr, "wincallback: %s\n", err) + os.Exit(2) + } +} + +func genasmArm() { + var buf bytes.Buffer + + buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +// External code calls into callbackasm at an offset corresponding +// to the callback index. Callbackasm is a table of MOV and B instructions. +// The MOV instruction loads R12 with the callback index, and the +// B instruction branches to callbackasm1. +// callbackasm1 takes the callback index from R12 and +// indexes into an array that stores information about each callback. +// It then calls the Go implementation for that callback. +#include "textflag.h" + +TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0 +`) + for i := 0; i < maxCallback; i++ { + buf.WriteString(fmt.Sprintf("\tMOVW\t$%d, R12\n", i)) + buf.WriteString("\tB\truntime·callbackasm1(SB)\n") + } + + err := ioutil.WriteFile("zcallback_windows_arm.s", buf.Bytes(), 0666) if err != nil { fmt.Fprintf(os.Stderr, "wincallback: %s\n", err) os.Exit(2) @@ -45,7 +76,7 @@ TEXT runtime·callbackasm(SB),7,$0 func gengo() { var buf bytes.Buffer - buf.WriteString(fmt.Sprintf(`// generated by wincallback.go; run go generate + buf.WriteString(fmt.Sprintf(`// Code generated by wincallback.go using 'go generate'. 
DO NOT EDIT. package runtime @@ -59,6 +90,7 @@ const cb_max = %d // maximum number of windows callbacks allowed } func main() { - genasm() + genasm386Amd64() + genasmArm() gengo() } diff --git a/src/runtime/zcallback_windows.go b/src/runtime/zcallback_windows.go index 9908d4ec2370f..2c3cb28518f5d 100644 --- a/src/runtime/zcallback_windows.go +++ b/src/runtime/zcallback_windows.go @@ -1,4 +1,4 @@ -// generated by wincallback.go; run go generate +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. package runtime diff --git a/src/runtime/zcallback_windows.s b/src/runtime/zcallback_windows.s index b9a3a3081190e..7772eef329f58 100644 --- a/src/runtime/zcallback_windows.s +++ b/src/runtime/zcallback_windows.s @@ -1,5 +1,6 @@ -// generated by wincallback.go; run go generate +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. +// +build 386 amd64 // runtime·callbackasm is called by external code to // execute Go implemented callback function. It is not // called from the start, instead runtime·compilecallback @@ -7,6 +8,7 @@ // appropriately so different callbacks start with different // CALL instruction in runtime·callbackasm. This determines // which Go callback function is executed later on. + TEXT runtime·callbackasm(SB),7,$0 CALL runtime·callbackasm1(SB) CALL runtime·callbackasm1(SB) diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s new file mode 100644 index 0000000000000..f943d84cbfe5f --- /dev/null +++ b/src/runtime/zcallback_windows_arm.s @@ -0,0 +1,4012 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +// External code calls into callbackasm at an offset corresponding +// to the callback index. Callbackasm is a table of MOV and B instructions. +// The MOV instruction loads R12 with the callback index, and the +// B instruction branches to callbackasm1. 
+// callbackasm1 takes the callback index from R12 and +// indexes into an array that stores information about each callback. +// It then calls the Go implementation for that callback. +#include "textflag.h" + +TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0 + MOVW $0, R12 + B runtime·callbackasm1(SB) + MOVW $1, R12 + B runtime·callbackasm1(SB) + MOVW $2, R12 + B runtime·callbackasm1(SB) + MOVW $3, R12 + B runtime·callbackasm1(SB) + MOVW $4, R12 + B runtime·callbackasm1(SB) + MOVW $5, R12 + B runtime·callbackasm1(SB) + MOVW $6, R12 + B runtime·callbackasm1(SB) + MOVW $7, R12 + B runtime·callbackasm1(SB) + MOVW $8, R12 + B runtime·callbackasm1(SB) + MOVW $9, R12 + B runtime·callbackasm1(SB) + MOVW $10, R12 + B runtime·callbackasm1(SB) + MOVW $11, R12 + B runtime·callbackasm1(SB) + MOVW $12, R12 + B runtime·callbackasm1(SB) + MOVW $13, R12 + B runtime·callbackasm1(SB) + MOVW $14, R12 + B runtime·callbackasm1(SB) + MOVW $15, R12 + B runtime·callbackasm1(SB) + MOVW $16, R12 + B runtime·callbackasm1(SB) + MOVW $17, R12 + B runtime·callbackasm1(SB) + MOVW $18, R12 + B runtime·callbackasm1(SB) + MOVW $19, R12 + B runtime·callbackasm1(SB) + MOVW $20, R12 + B runtime·callbackasm1(SB) + MOVW $21, R12 + B runtime·callbackasm1(SB) + MOVW $22, R12 + B runtime·callbackasm1(SB) + MOVW $23, R12 + B runtime·callbackasm1(SB) + MOVW $24, R12 + B runtime·callbackasm1(SB) + MOVW $25, R12 + B runtime·callbackasm1(SB) + MOVW $26, R12 + B runtime·callbackasm1(SB) + MOVW $27, R12 + B runtime·callbackasm1(SB) + MOVW $28, R12 + B runtime·callbackasm1(SB) + MOVW $29, R12 + B runtime·callbackasm1(SB) + MOVW $30, R12 + B runtime·callbackasm1(SB) + MOVW $31, R12 + B runtime·callbackasm1(SB) + MOVW $32, R12 + B runtime·callbackasm1(SB) + MOVW $33, R12 + B runtime·callbackasm1(SB) + MOVW $34, R12 + B runtime·callbackasm1(SB) + MOVW $35, R12 + B runtime·callbackasm1(SB) + MOVW $36, R12 + B runtime·callbackasm1(SB) + MOVW $37, R12 + B runtime·callbackasm1(SB) + MOVW $38, R12 + B 
runtime·callbackasm1(SB) + MOVW $39, R12 + B runtime·callbackasm1(SB) + MOVW $40, R12 + B runtime·callbackasm1(SB) + MOVW $41, R12 + B runtime·callbackasm1(SB) + MOVW $42, R12 + B runtime·callbackasm1(SB) + MOVW $43, R12 + B runtime·callbackasm1(SB) + MOVW $44, R12 + B runtime·callbackasm1(SB) + MOVW $45, R12 + B runtime·callbackasm1(SB) + MOVW $46, R12 + B runtime·callbackasm1(SB) + MOVW $47, R12 + B runtime·callbackasm1(SB) + MOVW $48, R12 + B runtime·callbackasm1(SB) + MOVW $49, R12 + B runtime·callbackasm1(SB) + MOVW $50, R12 + B runtime·callbackasm1(SB) + MOVW $51, R12 + B runtime·callbackasm1(SB) + MOVW $52, R12 + B runtime·callbackasm1(SB) + MOVW $53, R12 + B runtime·callbackasm1(SB) + MOVW $54, R12 + B runtime·callbackasm1(SB) + MOVW $55, R12 + B runtime·callbackasm1(SB) + MOVW $56, R12 + B runtime·callbackasm1(SB) + MOVW $57, R12 + B runtime·callbackasm1(SB) + MOVW $58, R12 + B runtime·callbackasm1(SB) + MOVW $59, R12 + B runtime·callbackasm1(SB) + MOVW $60, R12 + B runtime·callbackasm1(SB) + MOVW $61, R12 + B runtime·callbackasm1(SB) + MOVW $62, R12 + B runtime·callbackasm1(SB) + MOVW $63, R12 + B runtime·callbackasm1(SB) + MOVW $64, R12 + B runtime·callbackasm1(SB) + MOVW $65, R12 + B runtime·callbackasm1(SB) + MOVW $66, R12 + B runtime·callbackasm1(SB) + MOVW $67, R12 + B runtime·callbackasm1(SB) + MOVW $68, R12 + B runtime·callbackasm1(SB) + MOVW $69, R12 + B runtime·callbackasm1(SB) + MOVW $70, R12 + B runtime·callbackasm1(SB) + MOVW $71, R12 + B runtime·callbackasm1(SB) + MOVW $72, R12 + B runtime·callbackasm1(SB) + MOVW $73, R12 + B runtime·callbackasm1(SB) + MOVW $74, R12 + B runtime·callbackasm1(SB) + MOVW $75, R12 + B runtime·callbackasm1(SB) + MOVW $76, R12 + B runtime·callbackasm1(SB) + MOVW $77, R12 + B runtime·callbackasm1(SB) + MOVW $78, R12 + B runtime·callbackasm1(SB) + MOVW $79, R12 + B runtime·callbackasm1(SB) + MOVW $80, R12 + B runtime·callbackasm1(SB) + MOVW $81, R12 + B runtime·callbackasm1(SB) + MOVW $82, R12 + B 
runtime·callbackasm1(SB) + MOVW $83, R12 + B runtime·callbackasm1(SB) + MOVW $84, R12 + B runtime·callbackasm1(SB) + MOVW $85, R12 + B runtime·callbackasm1(SB) + MOVW $86, R12 + B runtime·callbackasm1(SB) + MOVW $87, R12 + B runtime·callbackasm1(SB) + MOVW $88, R12 + B runtime·callbackasm1(SB) + MOVW $89, R12 + B runtime·callbackasm1(SB) + MOVW $90, R12 + B runtime·callbackasm1(SB) + MOVW $91, R12 + B runtime·callbackasm1(SB) + MOVW $92, R12 + B runtime·callbackasm1(SB) + MOVW $93, R12 + B runtime·callbackasm1(SB) + MOVW $94, R12 + B runtime·callbackasm1(SB) + MOVW $95, R12 + B runtime·callbackasm1(SB) + MOVW $96, R12 + B runtime·callbackasm1(SB) + MOVW $97, R12 + B runtime·callbackasm1(SB) + MOVW $98, R12 + B runtime·callbackasm1(SB) + MOVW $99, R12 + B runtime·callbackasm1(SB) + MOVW $100, R12 + B runtime·callbackasm1(SB) + MOVW $101, R12 + B runtime·callbackasm1(SB) + MOVW $102, R12 + B runtime·callbackasm1(SB) + MOVW $103, R12 + B runtime·callbackasm1(SB) + MOVW $104, R12 + B runtime·callbackasm1(SB) + MOVW $105, R12 + B runtime·callbackasm1(SB) + MOVW $106, R12 + B runtime·callbackasm1(SB) + MOVW $107, R12 + B runtime·callbackasm1(SB) + MOVW $108, R12 + B runtime·callbackasm1(SB) + MOVW $109, R12 + B runtime·callbackasm1(SB) + MOVW $110, R12 + B runtime·callbackasm1(SB) + MOVW $111, R12 + B runtime·callbackasm1(SB) + MOVW $112, R12 + B runtime·callbackasm1(SB) + MOVW $113, R12 + B runtime·callbackasm1(SB) + MOVW $114, R12 + B runtime·callbackasm1(SB) + MOVW $115, R12 + B runtime·callbackasm1(SB) + MOVW $116, R12 + B runtime·callbackasm1(SB) + MOVW $117, R12 + B runtime·callbackasm1(SB) + MOVW $118, R12 + B runtime·callbackasm1(SB) + MOVW $119, R12 + B runtime·callbackasm1(SB) + MOVW $120, R12 + B runtime·callbackasm1(SB) + MOVW $121, R12 + B runtime·callbackasm1(SB) + MOVW $122, R12 + B runtime·callbackasm1(SB) + MOVW $123, R12 + B runtime·callbackasm1(SB) + MOVW $124, R12 + B runtime·callbackasm1(SB) + MOVW $125, R12 + B runtime·callbackasm1(SB) + MOVW $126, 
R12 + B runtime·callbackasm1(SB) + MOVW $127, R12 + B runtime·callbackasm1(SB) + MOVW $128, R12 + B runtime·callbackasm1(SB) + MOVW $129, R12 + B runtime·callbackasm1(SB) + MOVW $130, R12 + B runtime·callbackasm1(SB) + MOVW $131, R12 + B runtime·callbackasm1(SB) + MOVW $132, R12 + B runtime·callbackasm1(SB) + MOVW $133, R12 + B runtime·callbackasm1(SB) + MOVW $134, R12 + B runtime·callbackasm1(SB) + MOVW $135, R12 + B runtime·callbackasm1(SB) + MOVW $136, R12 + B runtime·callbackasm1(SB) + MOVW $137, R12 + B runtime·callbackasm1(SB) + MOVW $138, R12 + B runtime·callbackasm1(SB) + MOVW $139, R12 + B runtime·callbackasm1(SB) + MOVW $140, R12 + B runtime·callbackasm1(SB) + MOVW $141, R12 + B runtime·callbackasm1(SB) + MOVW $142, R12 + B runtime·callbackasm1(SB) + MOVW $143, R12 + B runtime·callbackasm1(SB) + MOVW $144, R12 + B runtime·callbackasm1(SB) + MOVW $145, R12 + B runtime·callbackasm1(SB) + MOVW $146, R12 + B runtime·callbackasm1(SB) + MOVW $147, R12 + B runtime·callbackasm1(SB) + MOVW $148, R12 + B runtime·callbackasm1(SB) + MOVW $149, R12 + B runtime·callbackasm1(SB) + MOVW $150, R12 + B runtime·callbackasm1(SB) + MOVW $151, R12 + B runtime·callbackasm1(SB) + MOVW $152, R12 + B runtime·callbackasm1(SB) + MOVW $153, R12 + B runtime·callbackasm1(SB) + MOVW $154, R12 + B runtime·callbackasm1(SB) + MOVW $155, R12 + B runtime·callbackasm1(SB) + MOVW $156, R12 + B runtime·callbackasm1(SB) + MOVW $157, R12 + B runtime·callbackasm1(SB) + MOVW $158, R12 + B runtime·callbackasm1(SB) + MOVW $159, R12 + B runtime·callbackasm1(SB) + MOVW $160, R12 + B runtime·callbackasm1(SB) + MOVW $161, R12 + B runtime·callbackasm1(SB) + MOVW $162, R12 + B runtime·callbackasm1(SB) + MOVW $163, R12 + B runtime·callbackasm1(SB) + MOVW $164, R12 + B runtime·callbackasm1(SB) + MOVW $165, R12 + B runtime·callbackasm1(SB) + MOVW $166, R12 + B runtime·callbackasm1(SB) + MOVW $167, R12 + B runtime·callbackasm1(SB) + MOVW $168, R12 + B runtime·callbackasm1(SB) + MOVW $169, R12 + B 
runtime·callbackasm1(SB) + MOVW $170, R12 + B runtime·callbackasm1(SB) + MOVW $171, R12 + B runtime·callbackasm1(SB) + MOVW $172, R12 + B runtime·callbackasm1(SB) + MOVW $173, R12 + B runtime·callbackasm1(SB) + MOVW $174, R12 + B runtime·callbackasm1(SB) + MOVW $175, R12 + B runtime·callbackasm1(SB) + MOVW $176, R12 + B runtime·callbackasm1(SB) + MOVW $177, R12 + B runtime·callbackasm1(SB) + MOVW $178, R12 + B runtime·callbackasm1(SB) + MOVW $179, R12 + B runtime·callbackasm1(SB) + MOVW $180, R12 + B runtime·callbackasm1(SB) + MOVW $181, R12 + B runtime·callbackasm1(SB) + MOVW $182, R12 + B runtime·callbackasm1(SB) + MOVW $183, R12 + B runtime·callbackasm1(SB) + MOVW $184, R12 + B runtime·callbackasm1(SB) + MOVW $185, R12 + B runtime·callbackasm1(SB) + MOVW $186, R12 + B runtime·callbackasm1(SB) + MOVW $187, R12 + B runtime·callbackasm1(SB) + MOVW $188, R12 + B runtime·callbackasm1(SB) + MOVW $189, R12 + B runtime·callbackasm1(SB) + MOVW $190, R12 + B runtime·callbackasm1(SB) + MOVW $191, R12 + B runtime·callbackasm1(SB) + MOVW $192, R12 + B runtime·callbackasm1(SB) + MOVW $193, R12 + B runtime·callbackasm1(SB) + MOVW $194, R12 + B runtime·callbackasm1(SB) + MOVW $195, R12 + B runtime·callbackasm1(SB) + MOVW $196, R12 + B runtime·callbackasm1(SB) + MOVW $197, R12 + B runtime·callbackasm1(SB) + MOVW $198, R12 + B runtime·callbackasm1(SB) + MOVW $199, R12 + B runtime·callbackasm1(SB) + MOVW $200, R12 + B runtime·callbackasm1(SB) + MOVW $201, R12 + B runtime·callbackasm1(SB) + MOVW $202, R12 + B runtime·callbackasm1(SB) + MOVW $203, R12 + B runtime·callbackasm1(SB) + MOVW $204, R12 + B runtime·callbackasm1(SB) + MOVW $205, R12 + B runtime·callbackasm1(SB) + MOVW $206, R12 + B runtime·callbackasm1(SB) + MOVW $207, R12 + B runtime·callbackasm1(SB) + MOVW $208, R12 + B runtime·callbackasm1(SB) + MOVW $209, R12 + B runtime·callbackasm1(SB) + MOVW $210, R12 + B runtime·callbackasm1(SB) + MOVW $211, R12 + B runtime·callbackasm1(SB) + MOVW $212, R12 + B 
runtime·callbackasm1(SB) + MOVW $213, R12 + B runtime·callbackasm1(SB) + MOVW $214, R12 + B runtime·callbackasm1(SB) + MOVW $215, R12 + B runtime·callbackasm1(SB) + MOVW $216, R12 + B runtime·callbackasm1(SB) + MOVW $217, R12 + B runtime·callbackasm1(SB) + MOVW $218, R12 + B runtime·callbackasm1(SB) + MOVW $219, R12 + B runtime·callbackasm1(SB) + MOVW $220, R12 + B runtime·callbackasm1(SB) + MOVW $221, R12 + B runtime·callbackasm1(SB) + MOVW $222, R12 + B runtime·callbackasm1(SB) + MOVW $223, R12 + B runtime·callbackasm1(SB) + MOVW $224, R12 + B runtime·callbackasm1(SB) + MOVW $225, R12 + B runtime·callbackasm1(SB) + MOVW $226, R12 + B runtime·callbackasm1(SB) + MOVW $227, R12 + B runtime·callbackasm1(SB) + MOVW $228, R12 + B runtime·callbackasm1(SB) + MOVW $229, R12 + B runtime·callbackasm1(SB) + MOVW $230, R12 + B runtime·callbackasm1(SB) + MOVW $231, R12 + B runtime·callbackasm1(SB) + MOVW $232, R12 + B runtime·callbackasm1(SB) + MOVW $233, R12 + B runtime·callbackasm1(SB) + MOVW $234, R12 + B runtime·callbackasm1(SB) + MOVW $235, R12 + B runtime·callbackasm1(SB) + MOVW $236, R12 + B runtime·callbackasm1(SB) + MOVW $237, R12 + B runtime·callbackasm1(SB) + MOVW $238, R12 + B runtime·callbackasm1(SB) + MOVW $239, R12 + B runtime·callbackasm1(SB) + MOVW $240, R12 + B runtime·callbackasm1(SB) + MOVW $241, R12 + B runtime·callbackasm1(SB) + MOVW $242, R12 + B runtime·callbackasm1(SB) + MOVW $243, R12 + B runtime·callbackasm1(SB) + MOVW $244, R12 + B runtime·callbackasm1(SB) + MOVW $245, R12 + B runtime·callbackasm1(SB) + MOVW $246, R12 + B runtime·callbackasm1(SB) + MOVW $247, R12 + B runtime·callbackasm1(SB) + MOVW $248, R12 + B runtime·callbackasm1(SB) + MOVW $249, R12 + B runtime·callbackasm1(SB) + MOVW $250, R12 + B runtime·callbackasm1(SB) + MOVW $251, R12 + B runtime·callbackasm1(SB) + MOVW $252, R12 + B runtime·callbackasm1(SB) + MOVW $253, R12 + B runtime·callbackasm1(SB) + MOVW $254, R12 + B runtime·callbackasm1(SB) + MOVW $255, R12 + B 
runtime·callbackasm1(SB) + MOVW $256, R12 + B runtime·callbackasm1(SB) + MOVW $257, R12 + B runtime·callbackasm1(SB) + MOVW $258, R12 + B runtime·callbackasm1(SB) + MOVW $259, R12 + B runtime·callbackasm1(SB) + MOVW $260, R12 + B runtime·callbackasm1(SB) + MOVW $261, R12 + B runtime·callbackasm1(SB) + MOVW $262, R12 + B runtime·callbackasm1(SB) + MOVW $263, R12 + B runtime·callbackasm1(SB) + MOVW $264, R12 + B runtime·callbackasm1(SB) + MOVW $265, R12 + B runtime·callbackasm1(SB) + MOVW $266, R12 + B runtime·callbackasm1(SB) + MOVW $267, R12 + B runtime·callbackasm1(SB) + MOVW $268, R12 + B runtime·callbackasm1(SB) + MOVW $269, R12 + B runtime·callbackasm1(SB) + MOVW $270, R12 + B runtime·callbackasm1(SB) + MOVW $271, R12 + B runtime·callbackasm1(SB) + MOVW $272, R12 + B runtime·callbackasm1(SB) + MOVW $273, R12 + B runtime·callbackasm1(SB) + MOVW $274, R12 + B runtime·callbackasm1(SB) + MOVW $275, R12 + B runtime·callbackasm1(SB) + MOVW $276, R12 + B runtime·callbackasm1(SB) + MOVW $277, R12 + B runtime·callbackasm1(SB) + MOVW $278, R12 + B runtime·callbackasm1(SB) + MOVW $279, R12 + B runtime·callbackasm1(SB) + MOVW $280, R12 + B runtime·callbackasm1(SB) + MOVW $281, R12 + B runtime·callbackasm1(SB) + MOVW $282, R12 + B runtime·callbackasm1(SB) + MOVW $283, R12 + B runtime·callbackasm1(SB) + MOVW $284, R12 + B runtime·callbackasm1(SB) + MOVW $285, R12 + B runtime·callbackasm1(SB) + MOVW $286, R12 + B runtime·callbackasm1(SB) + MOVW $287, R12 + B runtime·callbackasm1(SB) + MOVW $288, R12 + B runtime·callbackasm1(SB) + MOVW $289, R12 + B runtime·callbackasm1(SB) + MOVW $290, R12 + B runtime·callbackasm1(SB) + MOVW $291, R12 + B runtime·callbackasm1(SB) + MOVW $292, R12 + B runtime·callbackasm1(SB) + MOVW $293, R12 + B runtime·callbackasm1(SB) + MOVW $294, R12 + B runtime·callbackasm1(SB) + MOVW $295, R12 + B runtime·callbackasm1(SB) + MOVW $296, R12 + B runtime·callbackasm1(SB) + MOVW $297, R12 + B runtime·callbackasm1(SB) + MOVW $298, R12 + B 
runtime·callbackasm1(SB) + MOVW $299, R12 + B runtime·callbackasm1(SB) + MOVW $300, R12 + B runtime·callbackasm1(SB) + MOVW $301, R12 + B runtime·callbackasm1(SB) + MOVW $302, R12 + B runtime·callbackasm1(SB) + MOVW $303, R12 + B runtime·callbackasm1(SB) + MOVW $304, R12 + B runtime·callbackasm1(SB) + MOVW $305, R12 + B runtime·callbackasm1(SB) + MOVW $306, R12 + B runtime·callbackasm1(SB) + MOVW $307, R12 + B runtime·callbackasm1(SB) + MOVW $308, R12 + B runtime·callbackasm1(SB) + MOVW $309, R12 + B runtime·callbackasm1(SB) + MOVW $310, R12 + B runtime·callbackasm1(SB) + MOVW $311, R12 + B runtime·callbackasm1(SB) + MOVW $312, R12 + B runtime·callbackasm1(SB) + MOVW $313, R12 + B runtime·callbackasm1(SB) + MOVW $314, R12 + B runtime·callbackasm1(SB) + MOVW $315, R12 + B runtime·callbackasm1(SB) + MOVW $316, R12 + B runtime·callbackasm1(SB) + MOVW $317, R12 + B runtime·callbackasm1(SB) + MOVW $318, R12 + B runtime·callbackasm1(SB) + MOVW $319, R12 + B runtime·callbackasm1(SB) + MOVW $320, R12 + B runtime·callbackasm1(SB) + MOVW $321, R12 + B runtime·callbackasm1(SB) + MOVW $322, R12 + B runtime·callbackasm1(SB) + MOVW $323, R12 + B runtime·callbackasm1(SB) + MOVW $324, R12 + B runtime·callbackasm1(SB) + MOVW $325, R12 + B runtime·callbackasm1(SB) + MOVW $326, R12 + B runtime·callbackasm1(SB) + MOVW $327, R12 + B runtime·callbackasm1(SB) + MOVW $328, R12 + B runtime·callbackasm1(SB) + MOVW $329, R12 + B runtime·callbackasm1(SB) + MOVW $330, R12 + B runtime·callbackasm1(SB) + MOVW $331, R12 + B runtime·callbackasm1(SB) + MOVW $332, R12 + B runtime·callbackasm1(SB) + MOVW $333, R12 + B runtime·callbackasm1(SB) + MOVW $334, R12 + B runtime·callbackasm1(SB) + MOVW $335, R12 + B runtime·callbackasm1(SB) + MOVW $336, R12 + B runtime·callbackasm1(SB) + MOVW $337, R12 + B runtime·callbackasm1(SB) + MOVW $338, R12 + B runtime·callbackasm1(SB) + MOVW $339, R12 + B runtime·callbackasm1(SB) + MOVW $340, R12 + B runtime·callbackasm1(SB) + MOVW $341, R12 + B 
runtime·callbackasm1(SB) + MOVW $342, R12 + B runtime·callbackasm1(SB) + MOVW $343, R12 + B runtime·callbackasm1(SB) + MOVW $344, R12 + B runtime·callbackasm1(SB) + MOVW $345, R12 + B runtime·callbackasm1(SB) + MOVW $346, R12 + B runtime·callbackasm1(SB) + MOVW $347, R12 + B runtime·callbackasm1(SB) + MOVW $348, R12 + B runtime·callbackasm1(SB) + MOVW $349, R12 + B runtime·callbackasm1(SB) + MOVW $350, R12 + B runtime·callbackasm1(SB) + MOVW $351, R12 + B runtime·callbackasm1(SB) + MOVW $352, R12 + B runtime·callbackasm1(SB) + MOVW $353, R12 + B runtime·callbackasm1(SB) + MOVW $354, R12 + B runtime·callbackasm1(SB) + MOVW $355, R12 + B runtime·callbackasm1(SB) + MOVW $356, R12 + B runtime·callbackasm1(SB) + MOVW $357, R12 + B runtime·callbackasm1(SB) + MOVW $358, R12 + B runtime·callbackasm1(SB) + MOVW $359, R12 + B runtime·callbackasm1(SB) + MOVW $360, R12 + B runtime·callbackasm1(SB) + MOVW $361, R12 + B runtime·callbackasm1(SB) + MOVW $362, R12 + B runtime·callbackasm1(SB) + MOVW $363, R12 + B runtime·callbackasm1(SB) + MOVW $364, R12 + B runtime·callbackasm1(SB) + MOVW $365, R12 + B runtime·callbackasm1(SB) + MOVW $366, R12 + B runtime·callbackasm1(SB) + MOVW $367, R12 + B runtime·callbackasm1(SB) + MOVW $368, R12 + B runtime·callbackasm1(SB) + MOVW $369, R12 + B runtime·callbackasm1(SB) + MOVW $370, R12 + B runtime·callbackasm1(SB) + MOVW $371, R12 + B runtime·callbackasm1(SB) + MOVW $372, R12 + B runtime·callbackasm1(SB) + MOVW $373, R12 + B runtime·callbackasm1(SB) + MOVW $374, R12 + B runtime·callbackasm1(SB) + MOVW $375, R12 + B runtime·callbackasm1(SB) + MOVW $376, R12 + B runtime·callbackasm1(SB) + MOVW $377, R12 + B runtime·callbackasm1(SB) + MOVW $378, R12 + B runtime·callbackasm1(SB) + MOVW $379, R12 + B runtime·callbackasm1(SB) + MOVW $380, R12 + B runtime·callbackasm1(SB) + MOVW $381, R12 + B runtime·callbackasm1(SB) + MOVW $382, R12 + B runtime·callbackasm1(SB) + MOVW $383, R12 + B runtime·callbackasm1(SB) + MOVW $384, R12 + B 
runtime·callbackasm1(SB) + MOVW $385, R12 + B runtime·callbackasm1(SB) + MOVW $386, R12 + B runtime·callbackasm1(SB) + MOVW $387, R12 + B runtime·callbackasm1(SB) + MOVW $388, R12 + B runtime·callbackasm1(SB) + MOVW $389, R12 + B runtime·callbackasm1(SB) + MOVW $390, R12 + B runtime·callbackasm1(SB) + MOVW $391, R12 + B runtime·callbackasm1(SB) + MOVW $392, R12 + B runtime·callbackasm1(SB) + MOVW $393, R12 + B runtime·callbackasm1(SB) + MOVW $394, R12 + B runtime·callbackasm1(SB) + MOVW $395, R12 + B runtime·callbackasm1(SB) + MOVW $396, R12 + B runtime·callbackasm1(SB) + MOVW $397, R12 + B runtime·callbackasm1(SB) + MOVW $398, R12 + B runtime·callbackasm1(SB) + MOVW $399, R12 + B runtime·callbackasm1(SB) + MOVW $400, R12 + B runtime·callbackasm1(SB) + MOVW $401, R12 + B runtime·callbackasm1(SB) + MOVW $402, R12 + B runtime·callbackasm1(SB) + MOVW $403, R12 + B runtime·callbackasm1(SB) + MOVW $404, R12 + B runtime·callbackasm1(SB) + MOVW $405, R12 + B runtime·callbackasm1(SB) + MOVW $406, R12 + B runtime·callbackasm1(SB) + MOVW $407, R12 + B runtime·callbackasm1(SB) + MOVW $408, R12 + B runtime·callbackasm1(SB) + MOVW $409, R12 + B runtime·callbackasm1(SB) + MOVW $410, R12 + B runtime·callbackasm1(SB) + MOVW $411, R12 + B runtime·callbackasm1(SB) + MOVW $412, R12 + B runtime·callbackasm1(SB) + MOVW $413, R12 + B runtime·callbackasm1(SB) + MOVW $414, R12 + B runtime·callbackasm1(SB) + MOVW $415, R12 + B runtime·callbackasm1(SB) + MOVW $416, R12 + B runtime·callbackasm1(SB) + MOVW $417, R12 + B runtime·callbackasm1(SB) + MOVW $418, R12 + B runtime·callbackasm1(SB) + MOVW $419, R12 + B runtime·callbackasm1(SB) + MOVW $420, R12 + B runtime·callbackasm1(SB) + MOVW $421, R12 + B runtime·callbackasm1(SB) + MOVW $422, R12 + B runtime·callbackasm1(SB) + MOVW $423, R12 + B runtime·callbackasm1(SB) + MOVW $424, R12 + B runtime·callbackasm1(SB) + MOVW $425, R12 + B runtime·callbackasm1(SB) + MOVW $426, R12 + B runtime·callbackasm1(SB) + MOVW $427, R12 + B 
runtime·callbackasm1(SB) + MOVW $428, R12 + B runtime·callbackasm1(SB) + MOVW $429, R12 + B runtime·callbackasm1(SB) + MOVW $430, R12 + B runtime·callbackasm1(SB) + MOVW $431, R12 + B runtime·callbackasm1(SB) + MOVW $432, R12 + B runtime·callbackasm1(SB) + MOVW $433, R12 + B runtime·callbackasm1(SB) + MOVW $434, R12 + B runtime·callbackasm1(SB) + MOVW $435, R12 + B runtime·callbackasm1(SB) + MOVW $436, R12 + B runtime·callbackasm1(SB) + MOVW $437, R12 + B runtime·callbackasm1(SB) + MOVW $438, R12 + B runtime·callbackasm1(SB) + MOVW $439, R12 + B runtime·callbackasm1(SB) + MOVW $440, R12 + B runtime·callbackasm1(SB) + MOVW $441, R12 + B runtime·callbackasm1(SB) + MOVW $442, R12 + B runtime·callbackasm1(SB) + MOVW $443, R12 + B runtime·callbackasm1(SB) + MOVW $444, R12 + B runtime·callbackasm1(SB) + MOVW $445, R12 + B runtime·callbackasm1(SB) + MOVW $446, R12 + B runtime·callbackasm1(SB) + MOVW $447, R12 + B runtime·callbackasm1(SB) + MOVW $448, R12 + B runtime·callbackasm1(SB) + MOVW $449, R12 + B runtime·callbackasm1(SB) + MOVW $450, R12 + B runtime·callbackasm1(SB) + MOVW $451, R12 + B runtime·callbackasm1(SB) + MOVW $452, R12 + B runtime·callbackasm1(SB) + MOVW $453, R12 + B runtime·callbackasm1(SB) + MOVW $454, R12 + B runtime·callbackasm1(SB) + MOVW $455, R12 + B runtime·callbackasm1(SB) + MOVW $456, R12 + B runtime·callbackasm1(SB) + MOVW $457, R12 + B runtime·callbackasm1(SB) + MOVW $458, R12 + B runtime·callbackasm1(SB) + MOVW $459, R12 + B runtime·callbackasm1(SB) + MOVW $460, R12 + B runtime·callbackasm1(SB) + MOVW $461, R12 + B runtime·callbackasm1(SB) + MOVW $462, R12 + B runtime·callbackasm1(SB) + MOVW $463, R12 + B runtime·callbackasm1(SB) + MOVW $464, R12 + B runtime·callbackasm1(SB) + MOVW $465, R12 + B runtime·callbackasm1(SB) + MOVW $466, R12 + B runtime·callbackasm1(SB) + MOVW $467, R12 + B runtime·callbackasm1(SB) + MOVW $468, R12 + B runtime·callbackasm1(SB) + MOVW $469, R12 + B runtime·callbackasm1(SB) + MOVW $470, R12 + B 
runtime·callbackasm1(SB) + MOVW $471, R12 + B runtime·callbackasm1(SB) + MOVW $472, R12 + B runtime·callbackasm1(SB) + MOVW $473, R12 + B runtime·callbackasm1(SB) + MOVW $474, R12 + B runtime·callbackasm1(SB) + MOVW $475, R12 + B runtime·callbackasm1(SB) + MOVW $476, R12 + B runtime·callbackasm1(SB) + MOVW $477, R12 + B runtime·callbackasm1(SB) + MOVW $478, R12 + B runtime·callbackasm1(SB) + MOVW $479, R12 + B runtime·callbackasm1(SB) + MOVW $480, R12 + B runtime·callbackasm1(SB) + MOVW $481, R12 + B runtime·callbackasm1(SB) + MOVW $482, R12 + B runtime·callbackasm1(SB) + MOVW $483, R12 + B runtime·callbackasm1(SB) + MOVW $484, R12 + B runtime·callbackasm1(SB) + MOVW $485, R12 + B runtime·callbackasm1(SB) + MOVW $486, R12 + B runtime·callbackasm1(SB) + MOVW $487, R12 + B runtime·callbackasm1(SB) + MOVW $488, R12 + B runtime·callbackasm1(SB) + MOVW $489, R12 + B runtime·callbackasm1(SB) + MOVW $490, R12 + B runtime·callbackasm1(SB) + MOVW $491, R12 + B runtime·callbackasm1(SB) + MOVW $492, R12 + B runtime·callbackasm1(SB) + MOVW $493, R12 + B runtime·callbackasm1(SB) + MOVW $494, R12 + B runtime·callbackasm1(SB) + MOVW $495, R12 + B runtime·callbackasm1(SB) + MOVW $496, R12 + B runtime·callbackasm1(SB) + MOVW $497, R12 + B runtime·callbackasm1(SB) + MOVW $498, R12 + B runtime·callbackasm1(SB) + MOVW $499, R12 + B runtime·callbackasm1(SB) + MOVW $500, R12 + B runtime·callbackasm1(SB) + MOVW $501, R12 + B runtime·callbackasm1(SB) + MOVW $502, R12 + B runtime·callbackasm1(SB) + MOVW $503, R12 + B runtime·callbackasm1(SB) + MOVW $504, R12 + B runtime·callbackasm1(SB) + MOVW $505, R12 + B runtime·callbackasm1(SB) + MOVW $506, R12 + B runtime·callbackasm1(SB) + MOVW $507, R12 + B runtime·callbackasm1(SB) + MOVW $508, R12 + B runtime·callbackasm1(SB) + MOVW $509, R12 + B runtime·callbackasm1(SB) + MOVW $510, R12 + B runtime·callbackasm1(SB) + MOVW $511, R12 + B runtime·callbackasm1(SB) + MOVW $512, R12 + B runtime·callbackasm1(SB) + MOVW $513, R12 + B 
runtime·callbackasm1(SB) + MOVW $514, R12 + B runtime·callbackasm1(SB) + MOVW $515, R12 + B runtime·callbackasm1(SB) + MOVW $516, R12 + B runtime·callbackasm1(SB) + MOVW $517, R12 + B runtime·callbackasm1(SB) + MOVW $518, R12 + B runtime·callbackasm1(SB) + MOVW $519, R12 + B runtime·callbackasm1(SB) + MOVW $520, R12 + B runtime·callbackasm1(SB) + MOVW $521, R12 + B runtime·callbackasm1(SB) + MOVW $522, R12 + B runtime·callbackasm1(SB) + MOVW $523, R12 + B runtime·callbackasm1(SB) + MOVW $524, R12 + B runtime·callbackasm1(SB) + MOVW $525, R12 + B runtime·callbackasm1(SB) + MOVW $526, R12 + B runtime·callbackasm1(SB) + MOVW $527, R12 + B runtime·callbackasm1(SB) + MOVW $528, R12 + B runtime·callbackasm1(SB) + MOVW $529, R12 + B runtime·callbackasm1(SB) + MOVW $530, R12 + B runtime·callbackasm1(SB) + MOVW $531, R12 + B runtime·callbackasm1(SB) + MOVW $532, R12 + B runtime·callbackasm1(SB) + MOVW $533, R12 + B runtime·callbackasm1(SB) + MOVW $534, R12 + B runtime·callbackasm1(SB) + MOVW $535, R12 + B runtime·callbackasm1(SB) + MOVW $536, R12 + B runtime·callbackasm1(SB) + MOVW $537, R12 + B runtime·callbackasm1(SB) + MOVW $538, R12 + B runtime·callbackasm1(SB) + MOVW $539, R12 + B runtime·callbackasm1(SB) + MOVW $540, R12 + B runtime·callbackasm1(SB) + MOVW $541, R12 + B runtime·callbackasm1(SB) + MOVW $542, R12 + B runtime·callbackasm1(SB) + MOVW $543, R12 + B runtime·callbackasm1(SB) + MOVW $544, R12 + B runtime·callbackasm1(SB) + MOVW $545, R12 + B runtime·callbackasm1(SB) + MOVW $546, R12 + B runtime·callbackasm1(SB) + MOVW $547, R12 + B runtime·callbackasm1(SB) + MOVW $548, R12 + B runtime·callbackasm1(SB) + MOVW $549, R12 + B runtime·callbackasm1(SB) + MOVW $550, R12 + B runtime·callbackasm1(SB) + MOVW $551, R12 + B runtime·callbackasm1(SB) + MOVW $552, R12 + B runtime·callbackasm1(SB) + MOVW $553, R12 + B runtime·callbackasm1(SB) + MOVW $554, R12 + B runtime·callbackasm1(SB) + MOVW $555, R12 + B runtime·callbackasm1(SB) + MOVW $556, R12 + B 
runtime·callbackasm1(SB) + MOVW $557, R12 + B runtime·callbackasm1(SB) + MOVW $558, R12 + B runtime·callbackasm1(SB) + MOVW $559, R12 + B runtime·callbackasm1(SB) + MOVW $560, R12 + B runtime·callbackasm1(SB) + MOVW $561, R12 + B runtime·callbackasm1(SB) + MOVW $562, R12 + B runtime·callbackasm1(SB) + MOVW $563, R12 + B runtime·callbackasm1(SB) + MOVW $564, R12 + B runtime·callbackasm1(SB) + MOVW $565, R12 + B runtime·callbackasm1(SB) + MOVW $566, R12 + B runtime·callbackasm1(SB) + MOVW $567, R12 + B runtime·callbackasm1(SB) + MOVW $568, R12 + B runtime·callbackasm1(SB) + MOVW $569, R12 + B runtime·callbackasm1(SB) + MOVW $570, R12 + B runtime·callbackasm1(SB) + MOVW $571, R12 + B runtime·callbackasm1(SB) + MOVW $572, R12 + B runtime·callbackasm1(SB) + MOVW $573, R12 + B runtime·callbackasm1(SB) + MOVW $574, R12 + B runtime·callbackasm1(SB) + MOVW $575, R12 + B runtime·callbackasm1(SB) + MOVW $576, R12 + B runtime·callbackasm1(SB) + MOVW $577, R12 + B runtime·callbackasm1(SB) + MOVW $578, R12 + B runtime·callbackasm1(SB) + MOVW $579, R12 + B runtime·callbackasm1(SB) + MOVW $580, R12 + B runtime·callbackasm1(SB) + MOVW $581, R12 + B runtime·callbackasm1(SB) + MOVW $582, R12 + B runtime·callbackasm1(SB) + MOVW $583, R12 + B runtime·callbackasm1(SB) + MOVW $584, R12 + B runtime·callbackasm1(SB) + MOVW $585, R12 + B runtime·callbackasm1(SB) + MOVW $586, R12 + B runtime·callbackasm1(SB) + MOVW $587, R12 + B runtime·callbackasm1(SB) + MOVW $588, R12 + B runtime·callbackasm1(SB) + MOVW $589, R12 + B runtime·callbackasm1(SB) + MOVW $590, R12 + B runtime·callbackasm1(SB) + MOVW $591, R12 + B runtime·callbackasm1(SB) + MOVW $592, R12 + B runtime·callbackasm1(SB) + MOVW $593, R12 + B runtime·callbackasm1(SB) + MOVW $594, R12 + B runtime·callbackasm1(SB) + MOVW $595, R12 + B runtime·callbackasm1(SB) + MOVW $596, R12 + B runtime·callbackasm1(SB) + MOVW $597, R12 + B runtime·callbackasm1(SB) + MOVW $598, R12 + B runtime·callbackasm1(SB) + MOVW $599, R12 + B 
runtime·callbackasm1(SB) + MOVW $600, R12 + B runtime·callbackasm1(SB) + MOVW $601, R12 + B runtime·callbackasm1(SB) + MOVW $602, R12 + B runtime·callbackasm1(SB) + MOVW $603, R12 + B runtime·callbackasm1(SB) + MOVW $604, R12 + B runtime·callbackasm1(SB) + MOVW $605, R12 + B runtime·callbackasm1(SB) + MOVW $606, R12 + B runtime·callbackasm1(SB) + MOVW $607, R12 + B runtime·callbackasm1(SB) + MOVW $608, R12 + B runtime·callbackasm1(SB) + MOVW $609, R12 + B runtime·callbackasm1(SB) + MOVW $610, R12 + B runtime·callbackasm1(SB) + MOVW $611, R12 + B runtime·callbackasm1(SB) + MOVW $612, R12 + B runtime·callbackasm1(SB) + MOVW $613, R12 + B runtime·callbackasm1(SB) + MOVW $614, R12 + B runtime·callbackasm1(SB) + MOVW $615, R12 + B runtime·callbackasm1(SB) + MOVW $616, R12 + B runtime·callbackasm1(SB) + MOVW $617, R12 + B runtime·callbackasm1(SB) + MOVW $618, R12 + B runtime·callbackasm1(SB) + MOVW $619, R12 + B runtime·callbackasm1(SB) + MOVW $620, R12 + B runtime·callbackasm1(SB) + MOVW $621, R12 + B runtime·callbackasm1(SB) + MOVW $622, R12 + B runtime·callbackasm1(SB) + MOVW $623, R12 + B runtime·callbackasm1(SB) + MOVW $624, R12 + B runtime·callbackasm1(SB) + MOVW $625, R12 + B runtime·callbackasm1(SB) + MOVW $626, R12 + B runtime·callbackasm1(SB) + MOVW $627, R12 + B runtime·callbackasm1(SB) + MOVW $628, R12 + B runtime·callbackasm1(SB) + MOVW $629, R12 + B runtime·callbackasm1(SB) + MOVW $630, R12 + B runtime·callbackasm1(SB) + MOVW $631, R12 + B runtime·callbackasm1(SB) + MOVW $632, R12 + B runtime·callbackasm1(SB) + MOVW $633, R12 + B runtime·callbackasm1(SB) + MOVW $634, R12 + B runtime·callbackasm1(SB) + MOVW $635, R12 + B runtime·callbackasm1(SB) + MOVW $636, R12 + B runtime·callbackasm1(SB) + MOVW $637, R12 + B runtime·callbackasm1(SB) + MOVW $638, R12 + B runtime·callbackasm1(SB) + MOVW $639, R12 + B runtime·callbackasm1(SB) + MOVW $640, R12 + B runtime·callbackasm1(SB) + MOVW $641, R12 + B runtime·callbackasm1(SB) + MOVW $642, R12 + B 
runtime·callbackasm1(SB) + MOVW $643, R12 + B runtime·callbackasm1(SB) + MOVW $644, R12 + B runtime·callbackasm1(SB) + MOVW $645, R12 + B runtime·callbackasm1(SB) + MOVW $646, R12 + B runtime·callbackasm1(SB) + MOVW $647, R12 + B runtime·callbackasm1(SB) + MOVW $648, R12 + B runtime·callbackasm1(SB) + MOVW $649, R12 + B runtime·callbackasm1(SB) + MOVW $650, R12 + B runtime·callbackasm1(SB) + MOVW $651, R12 + B runtime·callbackasm1(SB) + MOVW $652, R12 + B runtime·callbackasm1(SB) + MOVW $653, R12 + B runtime·callbackasm1(SB) + MOVW $654, R12 + B runtime·callbackasm1(SB) + MOVW $655, R12 + B runtime·callbackasm1(SB) + MOVW $656, R12 + B runtime·callbackasm1(SB) + MOVW $657, R12 + B runtime·callbackasm1(SB) + MOVW $658, R12 + B runtime·callbackasm1(SB) + MOVW $659, R12 + B runtime·callbackasm1(SB) + MOVW $660, R12 + B runtime·callbackasm1(SB) + MOVW $661, R12 + B runtime·callbackasm1(SB) + MOVW $662, R12 + B runtime·callbackasm1(SB) + MOVW $663, R12 + B runtime·callbackasm1(SB) + MOVW $664, R12 + B runtime·callbackasm1(SB) + MOVW $665, R12 + B runtime·callbackasm1(SB) + MOVW $666, R12 + B runtime·callbackasm1(SB) + MOVW $667, R12 + B runtime·callbackasm1(SB) + MOVW $668, R12 + B runtime·callbackasm1(SB) + MOVW $669, R12 + B runtime·callbackasm1(SB) + MOVW $670, R12 + B runtime·callbackasm1(SB) + MOVW $671, R12 + B runtime·callbackasm1(SB) + MOVW $672, R12 + B runtime·callbackasm1(SB) + MOVW $673, R12 + B runtime·callbackasm1(SB) + MOVW $674, R12 + B runtime·callbackasm1(SB) + MOVW $675, R12 + B runtime·callbackasm1(SB) + MOVW $676, R12 + B runtime·callbackasm1(SB) + MOVW $677, R12 + B runtime·callbackasm1(SB) + MOVW $678, R12 + B runtime·callbackasm1(SB) + MOVW $679, R12 + B runtime·callbackasm1(SB) + MOVW $680, R12 + B runtime·callbackasm1(SB) + MOVW $681, R12 + B runtime·callbackasm1(SB) + MOVW $682, R12 + B runtime·callbackasm1(SB) + MOVW $683, R12 + B runtime·callbackasm1(SB) + MOVW $684, R12 + B runtime·callbackasm1(SB) + MOVW $685, R12 + B 
runtime·callbackasm1(SB) + MOVW $686, R12 + B runtime·callbackasm1(SB) + MOVW $687, R12 + B runtime·callbackasm1(SB) + MOVW $688, R12 + B runtime·callbackasm1(SB) + MOVW $689, R12 + B runtime·callbackasm1(SB) + MOVW $690, R12 + B runtime·callbackasm1(SB) + MOVW $691, R12 + B runtime·callbackasm1(SB) + MOVW $692, R12 + B runtime·callbackasm1(SB) + MOVW $693, R12 + B runtime·callbackasm1(SB) + MOVW $694, R12 + B runtime·callbackasm1(SB) + MOVW $695, R12 + B runtime·callbackasm1(SB) + MOVW $696, R12 + B runtime·callbackasm1(SB) + MOVW $697, R12 + B runtime·callbackasm1(SB) + MOVW $698, R12 + B runtime·callbackasm1(SB) + MOVW $699, R12 + B runtime·callbackasm1(SB) + MOVW $700, R12 + B runtime·callbackasm1(SB) + MOVW $701, R12 + B runtime·callbackasm1(SB) + MOVW $702, R12 + B runtime·callbackasm1(SB) + MOVW $703, R12 + B runtime·callbackasm1(SB) + MOVW $704, R12 + B runtime·callbackasm1(SB) + MOVW $705, R12 + B runtime·callbackasm1(SB) + MOVW $706, R12 + B runtime·callbackasm1(SB) + MOVW $707, R12 + B runtime·callbackasm1(SB) + MOVW $708, R12 + B runtime·callbackasm1(SB) + MOVW $709, R12 + B runtime·callbackasm1(SB) + MOVW $710, R12 + B runtime·callbackasm1(SB) + MOVW $711, R12 + B runtime·callbackasm1(SB) + MOVW $712, R12 + B runtime·callbackasm1(SB) + MOVW $713, R12 + B runtime·callbackasm1(SB) + MOVW $714, R12 + B runtime·callbackasm1(SB) + MOVW $715, R12 + B runtime·callbackasm1(SB) + MOVW $716, R12 + B runtime·callbackasm1(SB) + MOVW $717, R12 + B runtime·callbackasm1(SB) + MOVW $718, R12 + B runtime·callbackasm1(SB) + MOVW $719, R12 + B runtime·callbackasm1(SB) + MOVW $720, R12 + B runtime·callbackasm1(SB) + MOVW $721, R12 + B runtime·callbackasm1(SB) + MOVW $722, R12 + B runtime·callbackasm1(SB) + MOVW $723, R12 + B runtime·callbackasm1(SB) + MOVW $724, R12 + B runtime·callbackasm1(SB) + MOVW $725, R12 + B runtime·callbackasm1(SB) + MOVW $726, R12 + B runtime·callbackasm1(SB) + MOVW $727, R12 + B runtime·callbackasm1(SB) + MOVW $728, R12 + B 
runtime·callbackasm1(SB) + MOVW $729, R12 + B runtime·callbackasm1(SB) + MOVW $730, R12 + B runtime·callbackasm1(SB) + MOVW $731, R12 + B runtime·callbackasm1(SB) + MOVW $732, R12 + B runtime·callbackasm1(SB) + MOVW $733, R12 + B runtime·callbackasm1(SB) + MOVW $734, R12 + B runtime·callbackasm1(SB) + MOVW $735, R12 + B runtime·callbackasm1(SB) + MOVW $736, R12 + B runtime·callbackasm1(SB) + MOVW $737, R12 + B runtime·callbackasm1(SB) + MOVW $738, R12 + B runtime·callbackasm1(SB) + MOVW $739, R12 + B runtime·callbackasm1(SB) + MOVW $740, R12 + B runtime·callbackasm1(SB) + MOVW $741, R12 + B runtime·callbackasm1(SB) + MOVW $742, R12 + B runtime·callbackasm1(SB) + MOVW $743, R12 + B runtime·callbackasm1(SB) + MOVW $744, R12 + B runtime·callbackasm1(SB) + MOVW $745, R12 + B runtime·callbackasm1(SB) + MOVW $746, R12 + B runtime·callbackasm1(SB) + MOVW $747, R12 + B runtime·callbackasm1(SB) + MOVW $748, R12 + B runtime·callbackasm1(SB) + MOVW $749, R12 + B runtime·callbackasm1(SB) + MOVW $750, R12 + B runtime·callbackasm1(SB) + MOVW $751, R12 + B runtime·callbackasm1(SB) + MOVW $752, R12 + B runtime·callbackasm1(SB) + MOVW $753, R12 + B runtime·callbackasm1(SB) + MOVW $754, R12 + B runtime·callbackasm1(SB) + MOVW $755, R12 + B runtime·callbackasm1(SB) + MOVW $756, R12 + B runtime·callbackasm1(SB) + MOVW $757, R12 + B runtime·callbackasm1(SB) + MOVW $758, R12 + B runtime·callbackasm1(SB) + MOVW $759, R12 + B runtime·callbackasm1(SB) + MOVW $760, R12 + B runtime·callbackasm1(SB) + MOVW $761, R12 + B runtime·callbackasm1(SB) + MOVW $762, R12 + B runtime·callbackasm1(SB) + MOVW $763, R12 + B runtime·callbackasm1(SB) + MOVW $764, R12 + B runtime·callbackasm1(SB) + MOVW $765, R12 + B runtime·callbackasm1(SB) + MOVW $766, R12 + B runtime·callbackasm1(SB) + MOVW $767, R12 + B runtime·callbackasm1(SB) + MOVW $768, R12 + B runtime·callbackasm1(SB) + MOVW $769, R12 + B runtime·callbackasm1(SB) + MOVW $770, R12 + B runtime·callbackasm1(SB) + MOVW $771, R12 + B 
runtime·callbackasm1(SB) + MOVW $772, R12 + B runtime·callbackasm1(SB) + MOVW $773, R12 + B runtime·callbackasm1(SB) + MOVW $774, R12 + B runtime·callbackasm1(SB) + MOVW $775, R12 + B runtime·callbackasm1(SB) + MOVW $776, R12 + B runtime·callbackasm1(SB) + MOVW $777, R12 + B runtime·callbackasm1(SB) + MOVW $778, R12 + B runtime·callbackasm1(SB) + MOVW $779, R12 + B runtime·callbackasm1(SB) + MOVW $780, R12 + B runtime·callbackasm1(SB) + MOVW $781, R12 + B runtime·callbackasm1(SB) + MOVW $782, R12 + B runtime·callbackasm1(SB) + MOVW $783, R12 + B runtime·callbackasm1(SB) + MOVW $784, R12 + B runtime·callbackasm1(SB) + MOVW $785, R12 + B runtime·callbackasm1(SB) + MOVW $786, R12 + B runtime·callbackasm1(SB) + MOVW $787, R12 + B runtime·callbackasm1(SB) + MOVW $788, R12 + B runtime·callbackasm1(SB) + MOVW $789, R12 + B runtime·callbackasm1(SB) + MOVW $790, R12 + B runtime·callbackasm1(SB) + MOVW $791, R12 + B runtime·callbackasm1(SB) + MOVW $792, R12 + B runtime·callbackasm1(SB) + MOVW $793, R12 + B runtime·callbackasm1(SB) + MOVW $794, R12 + B runtime·callbackasm1(SB) + MOVW $795, R12 + B runtime·callbackasm1(SB) + MOVW $796, R12 + B runtime·callbackasm1(SB) + MOVW $797, R12 + B runtime·callbackasm1(SB) + MOVW $798, R12 + B runtime·callbackasm1(SB) + MOVW $799, R12 + B runtime·callbackasm1(SB) + MOVW $800, R12 + B runtime·callbackasm1(SB) + MOVW $801, R12 + B runtime·callbackasm1(SB) + MOVW $802, R12 + B runtime·callbackasm1(SB) + MOVW $803, R12 + B runtime·callbackasm1(SB) + MOVW $804, R12 + B runtime·callbackasm1(SB) + MOVW $805, R12 + B runtime·callbackasm1(SB) + MOVW $806, R12 + B runtime·callbackasm1(SB) + MOVW $807, R12 + B runtime·callbackasm1(SB) + MOVW $808, R12 + B runtime·callbackasm1(SB) + MOVW $809, R12 + B runtime·callbackasm1(SB) + MOVW $810, R12 + B runtime·callbackasm1(SB) + MOVW $811, R12 + B runtime·callbackasm1(SB) + MOVW $812, R12 + B runtime·callbackasm1(SB) + MOVW $813, R12 + B runtime·callbackasm1(SB) + MOVW $814, R12 + B 
runtime·callbackasm1(SB) + MOVW $815, R12 + B runtime·callbackasm1(SB) + MOVW $816, R12 + B runtime·callbackasm1(SB) + MOVW $817, R12 + B runtime·callbackasm1(SB) + MOVW $818, R12 + B runtime·callbackasm1(SB) + MOVW $819, R12 + B runtime·callbackasm1(SB) + MOVW $820, R12 + B runtime·callbackasm1(SB) + MOVW $821, R12 + B runtime·callbackasm1(SB) + MOVW $822, R12 + B runtime·callbackasm1(SB) + MOVW $823, R12 + B runtime·callbackasm1(SB) + MOVW $824, R12 + B runtime·callbackasm1(SB) + MOVW $825, R12 + B runtime·callbackasm1(SB) + MOVW $826, R12 + B runtime·callbackasm1(SB) + MOVW $827, R12 + B runtime·callbackasm1(SB) + MOVW $828, R12 + B runtime·callbackasm1(SB) + MOVW $829, R12 + B runtime·callbackasm1(SB) + MOVW $830, R12 + B runtime·callbackasm1(SB) + MOVW $831, R12 + B runtime·callbackasm1(SB) + MOVW $832, R12 + B runtime·callbackasm1(SB) + MOVW $833, R12 + B runtime·callbackasm1(SB) + MOVW $834, R12 + B runtime·callbackasm1(SB) + MOVW $835, R12 + B runtime·callbackasm1(SB) + MOVW $836, R12 + B runtime·callbackasm1(SB) + MOVW $837, R12 + B runtime·callbackasm1(SB) + MOVW $838, R12 + B runtime·callbackasm1(SB) + MOVW $839, R12 + B runtime·callbackasm1(SB) + MOVW $840, R12 + B runtime·callbackasm1(SB) + MOVW $841, R12 + B runtime·callbackasm1(SB) + MOVW $842, R12 + B runtime·callbackasm1(SB) + MOVW $843, R12 + B runtime·callbackasm1(SB) + MOVW $844, R12 + B runtime·callbackasm1(SB) + MOVW $845, R12 + B runtime·callbackasm1(SB) + MOVW $846, R12 + B runtime·callbackasm1(SB) + MOVW $847, R12 + B runtime·callbackasm1(SB) + MOVW $848, R12 + B runtime·callbackasm1(SB) + MOVW $849, R12 + B runtime·callbackasm1(SB) + MOVW $850, R12 + B runtime·callbackasm1(SB) + MOVW $851, R12 + B runtime·callbackasm1(SB) + MOVW $852, R12 + B runtime·callbackasm1(SB) + MOVW $853, R12 + B runtime·callbackasm1(SB) + MOVW $854, R12 + B runtime·callbackasm1(SB) + MOVW $855, R12 + B runtime·callbackasm1(SB) + MOVW $856, R12 + B runtime·callbackasm1(SB) + MOVW $857, R12 + B 
runtime·callbackasm1(SB) + MOVW $858, R12 + B runtime·callbackasm1(SB) + MOVW $859, R12 + B runtime·callbackasm1(SB) + MOVW $860, R12 + B runtime·callbackasm1(SB) + MOVW $861, R12 + B runtime·callbackasm1(SB) + MOVW $862, R12 + B runtime·callbackasm1(SB) + MOVW $863, R12 + B runtime·callbackasm1(SB) + MOVW $864, R12 + B runtime·callbackasm1(SB) + MOVW $865, R12 + B runtime·callbackasm1(SB) + MOVW $866, R12 + B runtime·callbackasm1(SB) + MOVW $867, R12 + B runtime·callbackasm1(SB) + MOVW $868, R12 + B runtime·callbackasm1(SB) + MOVW $869, R12 + B runtime·callbackasm1(SB) + MOVW $870, R12 + B runtime·callbackasm1(SB) + MOVW $871, R12 + B runtime·callbackasm1(SB) + MOVW $872, R12 + B runtime·callbackasm1(SB) + MOVW $873, R12 + B runtime·callbackasm1(SB) + MOVW $874, R12 + B runtime·callbackasm1(SB) + MOVW $875, R12 + B runtime·callbackasm1(SB) + MOVW $876, R12 + B runtime·callbackasm1(SB) + MOVW $877, R12 + B runtime·callbackasm1(SB) + MOVW $878, R12 + B runtime·callbackasm1(SB) + MOVW $879, R12 + B runtime·callbackasm1(SB) + MOVW $880, R12 + B runtime·callbackasm1(SB) + MOVW $881, R12 + B runtime·callbackasm1(SB) + MOVW $882, R12 + B runtime·callbackasm1(SB) + MOVW $883, R12 + B runtime·callbackasm1(SB) + MOVW $884, R12 + B runtime·callbackasm1(SB) + MOVW $885, R12 + B runtime·callbackasm1(SB) + MOVW $886, R12 + B runtime·callbackasm1(SB) + MOVW $887, R12 + B runtime·callbackasm1(SB) + MOVW $888, R12 + B runtime·callbackasm1(SB) + MOVW $889, R12 + B runtime·callbackasm1(SB) + MOVW $890, R12 + B runtime·callbackasm1(SB) + MOVW $891, R12 + B runtime·callbackasm1(SB) + MOVW $892, R12 + B runtime·callbackasm1(SB) + MOVW $893, R12 + B runtime·callbackasm1(SB) + MOVW $894, R12 + B runtime·callbackasm1(SB) + MOVW $895, R12 + B runtime·callbackasm1(SB) + MOVW $896, R12 + B runtime·callbackasm1(SB) + MOVW $897, R12 + B runtime·callbackasm1(SB) + MOVW $898, R12 + B runtime·callbackasm1(SB) + MOVW $899, R12 + B runtime·callbackasm1(SB) + MOVW $900, R12 + B 
runtime·callbackasm1(SB) + MOVW $901, R12 + B runtime·callbackasm1(SB) + MOVW $902, R12 + B runtime·callbackasm1(SB) + MOVW $903, R12 + B runtime·callbackasm1(SB) + MOVW $904, R12 + B runtime·callbackasm1(SB) + MOVW $905, R12 + B runtime·callbackasm1(SB) + MOVW $906, R12 + B runtime·callbackasm1(SB) + MOVW $907, R12 + B runtime·callbackasm1(SB) + MOVW $908, R12 + B runtime·callbackasm1(SB) + MOVW $909, R12 + B runtime·callbackasm1(SB) + MOVW $910, R12 + B runtime·callbackasm1(SB) + MOVW $911, R12 + B runtime·callbackasm1(SB) + MOVW $912, R12 + B runtime·callbackasm1(SB) + MOVW $913, R12 + B runtime·callbackasm1(SB) + MOVW $914, R12 + B runtime·callbackasm1(SB) + MOVW $915, R12 + B runtime·callbackasm1(SB) + MOVW $916, R12 + B runtime·callbackasm1(SB) + MOVW $917, R12 + B runtime·callbackasm1(SB) + MOVW $918, R12 + B runtime·callbackasm1(SB) + MOVW $919, R12 + B runtime·callbackasm1(SB) + MOVW $920, R12 + B runtime·callbackasm1(SB) + MOVW $921, R12 + B runtime·callbackasm1(SB) + MOVW $922, R12 + B runtime·callbackasm1(SB) + MOVW $923, R12 + B runtime·callbackasm1(SB) + MOVW $924, R12 + B runtime·callbackasm1(SB) + MOVW $925, R12 + B runtime·callbackasm1(SB) + MOVW $926, R12 + B runtime·callbackasm1(SB) + MOVW $927, R12 + B runtime·callbackasm1(SB) + MOVW $928, R12 + B runtime·callbackasm1(SB) + MOVW $929, R12 + B runtime·callbackasm1(SB) + MOVW $930, R12 + B runtime·callbackasm1(SB) + MOVW $931, R12 + B runtime·callbackasm1(SB) + MOVW $932, R12 + B runtime·callbackasm1(SB) + MOVW $933, R12 + B runtime·callbackasm1(SB) + MOVW $934, R12 + B runtime·callbackasm1(SB) + MOVW $935, R12 + B runtime·callbackasm1(SB) + MOVW $936, R12 + B runtime·callbackasm1(SB) + MOVW $937, R12 + B runtime·callbackasm1(SB) + MOVW $938, R12 + B runtime·callbackasm1(SB) + MOVW $939, R12 + B runtime·callbackasm1(SB) + MOVW $940, R12 + B runtime·callbackasm1(SB) + MOVW $941, R12 + B runtime·callbackasm1(SB) + MOVW $942, R12 + B runtime·callbackasm1(SB) + MOVW $943, R12 + B 
runtime·callbackasm1(SB) + MOVW $944, R12 + B runtime·callbackasm1(SB) + MOVW $945, R12 + B runtime·callbackasm1(SB) + MOVW $946, R12 + B runtime·callbackasm1(SB) + MOVW $947, R12 + B runtime·callbackasm1(SB) + MOVW $948, R12 + B runtime·callbackasm1(SB) + MOVW $949, R12 + B runtime·callbackasm1(SB) + MOVW $950, R12 + B runtime·callbackasm1(SB) + MOVW $951, R12 + B runtime·callbackasm1(SB) + MOVW $952, R12 + B runtime·callbackasm1(SB) + MOVW $953, R12 + B runtime·callbackasm1(SB) + MOVW $954, R12 + B runtime·callbackasm1(SB) + MOVW $955, R12 + B runtime·callbackasm1(SB) + MOVW $956, R12 + B runtime·callbackasm1(SB) + MOVW $957, R12 + B runtime·callbackasm1(SB) + MOVW $958, R12 + B runtime·callbackasm1(SB) + MOVW $959, R12 + B runtime·callbackasm1(SB) + MOVW $960, R12 + B runtime·callbackasm1(SB) + MOVW $961, R12 + B runtime·callbackasm1(SB) + MOVW $962, R12 + B runtime·callbackasm1(SB) + MOVW $963, R12 + B runtime·callbackasm1(SB) + MOVW $964, R12 + B runtime·callbackasm1(SB) + MOVW $965, R12 + B runtime·callbackasm1(SB) + MOVW $966, R12 + B runtime·callbackasm1(SB) + MOVW $967, R12 + B runtime·callbackasm1(SB) + MOVW $968, R12 + B runtime·callbackasm1(SB) + MOVW $969, R12 + B runtime·callbackasm1(SB) + MOVW $970, R12 + B runtime·callbackasm1(SB) + MOVW $971, R12 + B runtime·callbackasm1(SB) + MOVW $972, R12 + B runtime·callbackasm1(SB) + MOVW $973, R12 + B runtime·callbackasm1(SB) + MOVW $974, R12 + B runtime·callbackasm1(SB) + MOVW $975, R12 + B runtime·callbackasm1(SB) + MOVW $976, R12 + B runtime·callbackasm1(SB) + MOVW $977, R12 + B runtime·callbackasm1(SB) + MOVW $978, R12 + B runtime·callbackasm1(SB) + MOVW $979, R12 + B runtime·callbackasm1(SB) + MOVW $980, R12 + B runtime·callbackasm1(SB) + MOVW $981, R12 + B runtime·callbackasm1(SB) + MOVW $982, R12 + B runtime·callbackasm1(SB) + MOVW $983, R12 + B runtime·callbackasm1(SB) + MOVW $984, R12 + B runtime·callbackasm1(SB) + MOVW $985, R12 + B runtime·callbackasm1(SB) + MOVW $986, R12 + B 
runtime·callbackasm1(SB) + MOVW $987, R12 + B runtime·callbackasm1(SB) + MOVW $988, R12 + B runtime·callbackasm1(SB) + MOVW $989, R12 + B runtime·callbackasm1(SB) + MOVW $990, R12 + B runtime·callbackasm1(SB) + MOVW $991, R12 + B runtime·callbackasm1(SB) + MOVW $992, R12 + B runtime·callbackasm1(SB) + MOVW $993, R12 + B runtime·callbackasm1(SB) + MOVW $994, R12 + B runtime·callbackasm1(SB) + MOVW $995, R12 + B runtime·callbackasm1(SB) + MOVW $996, R12 + B runtime·callbackasm1(SB) + MOVW $997, R12 + B runtime·callbackasm1(SB) + MOVW $998, R12 + B runtime·callbackasm1(SB) + MOVW $999, R12 + B runtime·callbackasm1(SB) + MOVW $1000, R12 + B runtime·callbackasm1(SB) + MOVW $1001, R12 + B runtime·callbackasm1(SB) + MOVW $1002, R12 + B runtime·callbackasm1(SB) + MOVW $1003, R12 + B runtime·callbackasm1(SB) + MOVW $1004, R12 + B runtime·callbackasm1(SB) + MOVW $1005, R12 + B runtime·callbackasm1(SB) + MOVW $1006, R12 + B runtime·callbackasm1(SB) + MOVW $1007, R12 + B runtime·callbackasm1(SB) + MOVW $1008, R12 + B runtime·callbackasm1(SB) + MOVW $1009, R12 + B runtime·callbackasm1(SB) + MOVW $1010, R12 + B runtime·callbackasm1(SB) + MOVW $1011, R12 + B runtime·callbackasm1(SB) + MOVW $1012, R12 + B runtime·callbackasm1(SB) + MOVW $1013, R12 + B runtime·callbackasm1(SB) + MOVW $1014, R12 + B runtime·callbackasm1(SB) + MOVW $1015, R12 + B runtime·callbackasm1(SB) + MOVW $1016, R12 + B runtime·callbackasm1(SB) + MOVW $1017, R12 + B runtime·callbackasm1(SB) + MOVW $1018, R12 + B runtime·callbackasm1(SB) + MOVW $1019, R12 + B runtime·callbackasm1(SB) + MOVW $1020, R12 + B runtime·callbackasm1(SB) + MOVW $1021, R12 + B runtime·callbackasm1(SB) + MOVW $1022, R12 + B runtime·callbackasm1(SB) + MOVW $1023, R12 + B runtime·callbackasm1(SB) + MOVW $1024, R12 + B runtime·callbackasm1(SB) + MOVW $1025, R12 + B runtime·callbackasm1(SB) + MOVW $1026, R12 + B runtime·callbackasm1(SB) + MOVW $1027, R12 + B runtime·callbackasm1(SB) + MOVW $1028, R12 + B runtime·callbackasm1(SB) + MOVW $1029, 
R12 + B runtime·callbackasm1(SB) + MOVW $1030, R12 + B runtime·callbackasm1(SB) + MOVW $1031, R12 + B runtime·callbackasm1(SB) + MOVW $1032, R12 + B runtime·callbackasm1(SB) + MOVW $1033, R12 + B runtime·callbackasm1(SB) + MOVW $1034, R12 + B runtime·callbackasm1(SB) + MOVW $1035, R12 + B runtime·callbackasm1(SB) + MOVW $1036, R12 + B runtime·callbackasm1(SB) + MOVW $1037, R12 + B runtime·callbackasm1(SB) + MOVW $1038, R12 + B runtime·callbackasm1(SB) + MOVW $1039, R12 + B runtime·callbackasm1(SB) + MOVW $1040, R12 + B runtime·callbackasm1(SB) + MOVW $1041, R12 + B runtime·callbackasm1(SB) + MOVW $1042, R12 + B runtime·callbackasm1(SB) + MOVW $1043, R12 + B runtime·callbackasm1(SB) + MOVW $1044, R12 + B runtime·callbackasm1(SB) + MOVW $1045, R12 + B runtime·callbackasm1(SB) + MOVW $1046, R12 + B runtime·callbackasm1(SB) + MOVW $1047, R12 + B runtime·callbackasm1(SB) + MOVW $1048, R12 + B runtime·callbackasm1(SB) + MOVW $1049, R12 + B runtime·callbackasm1(SB) + MOVW $1050, R12 + B runtime·callbackasm1(SB) + MOVW $1051, R12 + B runtime·callbackasm1(SB) + MOVW $1052, R12 + B runtime·callbackasm1(SB) + MOVW $1053, R12 + B runtime·callbackasm1(SB) + MOVW $1054, R12 + B runtime·callbackasm1(SB) + MOVW $1055, R12 + B runtime·callbackasm1(SB) + MOVW $1056, R12 + B runtime·callbackasm1(SB) + MOVW $1057, R12 + B runtime·callbackasm1(SB) + MOVW $1058, R12 + B runtime·callbackasm1(SB) + MOVW $1059, R12 + B runtime·callbackasm1(SB) + MOVW $1060, R12 + B runtime·callbackasm1(SB) + MOVW $1061, R12 + B runtime·callbackasm1(SB) + MOVW $1062, R12 + B runtime·callbackasm1(SB) + MOVW $1063, R12 + B runtime·callbackasm1(SB) + MOVW $1064, R12 + B runtime·callbackasm1(SB) + MOVW $1065, R12 + B runtime·callbackasm1(SB) + MOVW $1066, R12 + B runtime·callbackasm1(SB) + MOVW $1067, R12 + B runtime·callbackasm1(SB) + MOVW $1068, R12 + B runtime·callbackasm1(SB) + MOVW $1069, R12 + B runtime·callbackasm1(SB) + MOVW $1070, R12 + B runtime·callbackasm1(SB) + MOVW $1071, R12 + B 
runtime·callbackasm1(SB) + MOVW $1072, R12 + B runtime·callbackasm1(SB) + MOVW $1073, R12 + B runtime·callbackasm1(SB) + MOVW $1074, R12 + B runtime·callbackasm1(SB) + MOVW $1075, R12 + B runtime·callbackasm1(SB) + MOVW $1076, R12 + B runtime·callbackasm1(SB) + MOVW $1077, R12 + B runtime·callbackasm1(SB) + MOVW $1078, R12 + B runtime·callbackasm1(SB) + MOVW $1079, R12 + B runtime·callbackasm1(SB) + MOVW $1080, R12 + B runtime·callbackasm1(SB) + MOVW $1081, R12 + B runtime·callbackasm1(SB) + MOVW $1082, R12 + B runtime·callbackasm1(SB) + MOVW $1083, R12 + B runtime·callbackasm1(SB) + MOVW $1084, R12 + B runtime·callbackasm1(SB) + MOVW $1085, R12 + B runtime·callbackasm1(SB) + MOVW $1086, R12 + B runtime·callbackasm1(SB) + MOVW $1087, R12 + B runtime·callbackasm1(SB) + MOVW $1088, R12 + B runtime·callbackasm1(SB) + MOVW $1089, R12 + B runtime·callbackasm1(SB) + MOVW $1090, R12 + B runtime·callbackasm1(SB) + MOVW $1091, R12 + B runtime·callbackasm1(SB) + MOVW $1092, R12 + B runtime·callbackasm1(SB) + MOVW $1093, R12 + B runtime·callbackasm1(SB) + MOVW $1094, R12 + B runtime·callbackasm1(SB) + MOVW $1095, R12 + B runtime·callbackasm1(SB) + MOVW $1096, R12 + B runtime·callbackasm1(SB) + MOVW $1097, R12 + B runtime·callbackasm1(SB) + MOVW $1098, R12 + B runtime·callbackasm1(SB) + MOVW $1099, R12 + B runtime·callbackasm1(SB) + MOVW $1100, R12 + B runtime·callbackasm1(SB) + MOVW $1101, R12 + B runtime·callbackasm1(SB) + MOVW $1102, R12 + B runtime·callbackasm1(SB) + MOVW $1103, R12 + B runtime·callbackasm1(SB) + MOVW $1104, R12 + B runtime·callbackasm1(SB) + MOVW $1105, R12 + B runtime·callbackasm1(SB) + MOVW $1106, R12 + B runtime·callbackasm1(SB) + MOVW $1107, R12 + B runtime·callbackasm1(SB) + MOVW $1108, R12 + B runtime·callbackasm1(SB) + MOVW $1109, R12 + B runtime·callbackasm1(SB) + MOVW $1110, R12 + B runtime·callbackasm1(SB) + MOVW $1111, R12 + B runtime·callbackasm1(SB) + MOVW $1112, R12 + B runtime·callbackasm1(SB) + MOVW $1113, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1114, R12 + B runtime·callbackasm1(SB) + MOVW $1115, R12 + B runtime·callbackasm1(SB) + MOVW $1116, R12 + B runtime·callbackasm1(SB) + MOVW $1117, R12 + B runtime·callbackasm1(SB) + MOVW $1118, R12 + B runtime·callbackasm1(SB) + MOVW $1119, R12 + B runtime·callbackasm1(SB) + MOVW $1120, R12 + B runtime·callbackasm1(SB) + MOVW $1121, R12 + B runtime·callbackasm1(SB) + MOVW $1122, R12 + B runtime·callbackasm1(SB) + MOVW $1123, R12 + B runtime·callbackasm1(SB) + MOVW $1124, R12 + B runtime·callbackasm1(SB) + MOVW $1125, R12 + B runtime·callbackasm1(SB) + MOVW $1126, R12 + B runtime·callbackasm1(SB) + MOVW $1127, R12 + B runtime·callbackasm1(SB) + MOVW $1128, R12 + B runtime·callbackasm1(SB) + MOVW $1129, R12 + B runtime·callbackasm1(SB) + MOVW $1130, R12 + B runtime·callbackasm1(SB) + MOVW $1131, R12 + B runtime·callbackasm1(SB) + MOVW $1132, R12 + B runtime·callbackasm1(SB) + MOVW $1133, R12 + B runtime·callbackasm1(SB) + MOVW $1134, R12 + B runtime·callbackasm1(SB) + MOVW $1135, R12 + B runtime·callbackasm1(SB) + MOVW $1136, R12 + B runtime·callbackasm1(SB) + MOVW $1137, R12 + B runtime·callbackasm1(SB) + MOVW $1138, R12 + B runtime·callbackasm1(SB) + MOVW $1139, R12 + B runtime·callbackasm1(SB) + MOVW $1140, R12 + B runtime·callbackasm1(SB) + MOVW $1141, R12 + B runtime·callbackasm1(SB) + MOVW $1142, R12 + B runtime·callbackasm1(SB) + MOVW $1143, R12 + B runtime·callbackasm1(SB) + MOVW $1144, R12 + B runtime·callbackasm1(SB) + MOVW $1145, R12 + B runtime·callbackasm1(SB) + MOVW $1146, R12 + B runtime·callbackasm1(SB) + MOVW $1147, R12 + B runtime·callbackasm1(SB) + MOVW $1148, R12 + B runtime·callbackasm1(SB) + MOVW $1149, R12 + B runtime·callbackasm1(SB) + MOVW $1150, R12 + B runtime·callbackasm1(SB) + MOVW $1151, R12 + B runtime·callbackasm1(SB) + MOVW $1152, R12 + B runtime·callbackasm1(SB) + MOVW $1153, R12 + B runtime·callbackasm1(SB) + MOVW $1154, R12 + B runtime·callbackasm1(SB) + MOVW $1155, R12 + B runtime·callbackasm1(SB) + MOVW $1156, R12 + B 
runtime·callbackasm1(SB) + MOVW $1157, R12 + B runtime·callbackasm1(SB) + MOVW $1158, R12 + B runtime·callbackasm1(SB) + MOVW $1159, R12 + B runtime·callbackasm1(SB) + MOVW $1160, R12 + B runtime·callbackasm1(SB) + MOVW $1161, R12 + B runtime·callbackasm1(SB) + MOVW $1162, R12 + B runtime·callbackasm1(SB) + MOVW $1163, R12 + B runtime·callbackasm1(SB) + MOVW $1164, R12 + B runtime·callbackasm1(SB) + MOVW $1165, R12 + B runtime·callbackasm1(SB) + MOVW $1166, R12 + B runtime·callbackasm1(SB) + MOVW $1167, R12 + B runtime·callbackasm1(SB) + MOVW $1168, R12 + B runtime·callbackasm1(SB) + MOVW $1169, R12 + B runtime·callbackasm1(SB) + MOVW $1170, R12 + B runtime·callbackasm1(SB) + MOVW $1171, R12 + B runtime·callbackasm1(SB) + MOVW $1172, R12 + B runtime·callbackasm1(SB) + MOVW $1173, R12 + B runtime·callbackasm1(SB) + MOVW $1174, R12 + B runtime·callbackasm1(SB) + MOVW $1175, R12 + B runtime·callbackasm1(SB) + MOVW $1176, R12 + B runtime·callbackasm1(SB) + MOVW $1177, R12 + B runtime·callbackasm1(SB) + MOVW $1178, R12 + B runtime·callbackasm1(SB) + MOVW $1179, R12 + B runtime·callbackasm1(SB) + MOVW $1180, R12 + B runtime·callbackasm1(SB) + MOVW $1181, R12 + B runtime·callbackasm1(SB) + MOVW $1182, R12 + B runtime·callbackasm1(SB) + MOVW $1183, R12 + B runtime·callbackasm1(SB) + MOVW $1184, R12 + B runtime·callbackasm1(SB) + MOVW $1185, R12 + B runtime·callbackasm1(SB) + MOVW $1186, R12 + B runtime·callbackasm1(SB) + MOVW $1187, R12 + B runtime·callbackasm1(SB) + MOVW $1188, R12 + B runtime·callbackasm1(SB) + MOVW $1189, R12 + B runtime·callbackasm1(SB) + MOVW $1190, R12 + B runtime·callbackasm1(SB) + MOVW $1191, R12 + B runtime·callbackasm1(SB) + MOVW $1192, R12 + B runtime·callbackasm1(SB) + MOVW $1193, R12 + B runtime·callbackasm1(SB) + MOVW $1194, R12 + B runtime·callbackasm1(SB) + MOVW $1195, R12 + B runtime·callbackasm1(SB) + MOVW $1196, R12 + B runtime·callbackasm1(SB) + MOVW $1197, R12 + B runtime·callbackasm1(SB) + MOVW $1198, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1199, R12 + B runtime·callbackasm1(SB) + MOVW $1200, R12 + B runtime·callbackasm1(SB) + MOVW $1201, R12 + B runtime·callbackasm1(SB) + MOVW $1202, R12 + B runtime·callbackasm1(SB) + MOVW $1203, R12 + B runtime·callbackasm1(SB) + MOVW $1204, R12 + B runtime·callbackasm1(SB) + MOVW $1205, R12 + B runtime·callbackasm1(SB) + MOVW $1206, R12 + B runtime·callbackasm1(SB) + MOVW $1207, R12 + B runtime·callbackasm1(SB) + MOVW $1208, R12 + B runtime·callbackasm1(SB) + MOVW $1209, R12 + B runtime·callbackasm1(SB) + MOVW $1210, R12 + B runtime·callbackasm1(SB) + MOVW $1211, R12 + B runtime·callbackasm1(SB) + MOVW $1212, R12 + B runtime·callbackasm1(SB) + MOVW $1213, R12 + B runtime·callbackasm1(SB) + MOVW $1214, R12 + B runtime·callbackasm1(SB) + MOVW $1215, R12 + B runtime·callbackasm1(SB) + MOVW $1216, R12 + B runtime·callbackasm1(SB) + MOVW $1217, R12 + B runtime·callbackasm1(SB) + MOVW $1218, R12 + B runtime·callbackasm1(SB) + MOVW $1219, R12 + B runtime·callbackasm1(SB) + MOVW $1220, R12 + B runtime·callbackasm1(SB) + MOVW $1221, R12 + B runtime·callbackasm1(SB) + MOVW $1222, R12 + B runtime·callbackasm1(SB) + MOVW $1223, R12 + B runtime·callbackasm1(SB) + MOVW $1224, R12 + B runtime·callbackasm1(SB) + MOVW $1225, R12 + B runtime·callbackasm1(SB) + MOVW $1226, R12 + B runtime·callbackasm1(SB) + MOVW $1227, R12 + B runtime·callbackasm1(SB) + MOVW $1228, R12 + B runtime·callbackasm1(SB) + MOVW $1229, R12 + B runtime·callbackasm1(SB) + MOVW $1230, R12 + B runtime·callbackasm1(SB) + MOVW $1231, R12 + B runtime·callbackasm1(SB) + MOVW $1232, R12 + B runtime·callbackasm1(SB) + MOVW $1233, R12 + B runtime·callbackasm1(SB) + MOVW $1234, R12 + B runtime·callbackasm1(SB) + MOVW $1235, R12 + B runtime·callbackasm1(SB) + MOVW $1236, R12 + B runtime·callbackasm1(SB) + MOVW $1237, R12 + B runtime·callbackasm1(SB) + MOVW $1238, R12 + B runtime·callbackasm1(SB) + MOVW $1239, R12 + B runtime·callbackasm1(SB) + MOVW $1240, R12 + B runtime·callbackasm1(SB) + MOVW $1241, R12 + B 
runtime·callbackasm1(SB) + MOVW $1242, R12 + B runtime·callbackasm1(SB) + MOVW $1243, R12 + B runtime·callbackasm1(SB) + MOVW $1244, R12 + B runtime·callbackasm1(SB) + MOVW $1245, R12 + B runtime·callbackasm1(SB) + MOVW $1246, R12 + B runtime·callbackasm1(SB) + MOVW $1247, R12 + B runtime·callbackasm1(SB) + MOVW $1248, R12 + B runtime·callbackasm1(SB) + MOVW $1249, R12 + B runtime·callbackasm1(SB) + MOVW $1250, R12 + B runtime·callbackasm1(SB) + MOVW $1251, R12 + B runtime·callbackasm1(SB) + MOVW $1252, R12 + B runtime·callbackasm1(SB) + MOVW $1253, R12 + B runtime·callbackasm1(SB) + MOVW $1254, R12 + B runtime·callbackasm1(SB) + MOVW $1255, R12 + B runtime·callbackasm1(SB) + MOVW $1256, R12 + B runtime·callbackasm1(SB) + MOVW $1257, R12 + B runtime·callbackasm1(SB) + MOVW $1258, R12 + B runtime·callbackasm1(SB) + MOVW $1259, R12 + B runtime·callbackasm1(SB) + MOVW $1260, R12 + B runtime·callbackasm1(SB) + MOVW $1261, R12 + B runtime·callbackasm1(SB) + MOVW $1262, R12 + B runtime·callbackasm1(SB) + MOVW $1263, R12 + B runtime·callbackasm1(SB) + MOVW $1264, R12 + B runtime·callbackasm1(SB) + MOVW $1265, R12 + B runtime·callbackasm1(SB) + MOVW $1266, R12 + B runtime·callbackasm1(SB) + MOVW $1267, R12 + B runtime·callbackasm1(SB) + MOVW $1268, R12 + B runtime·callbackasm1(SB) + MOVW $1269, R12 + B runtime·callbackasm1(SB) + MOVW $1270, R12 + B runtime·callbackasm1(SB) + MOVW $1271, R12 + B runtime·callbackasm1(SB) + MOVW $1272, R12 + B runtime·callbackasm1(SB) + MOVW $1273, R12 + B runtime·callbackasm1(SB) + MOVW $1274, R12 + B runtime·callbackasm1(SB) + MOVW $1275, R12 + B runtime·callbackasm1(SB) + MOVW $1276, R12 + B runtime·callbackasm1(SB) + MOVW $1277, R12 + B runtime·callbackasm1(SB) + MOVW $1278, R12 + B runtime·callbackasm1(SB) + MOVW $1279, R12 + B runtime·callbackasm1(SB) + MOVW $1280, R12 + B runtime·callbackasm1(SB) + MOVW $1281, R12 + B runtime·callbackasm1(SB) + MOVW $1282, R12 + B runtime·callbackasm1(SB) + MOVW $1283, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1284, R12 + B runtime·callbackasm1(SB) + MOVW $1285, R12 + B runtime·callbackasm1(SB) + MOVW $1286, R12 + B runtime·callbackasm1(SB) + MOVW $1287, R12 + B runtime·callbackasm1(SB) + MOVW $1288, R12 + B runtime·callbackasm1(SB) + MOVW $1289, R12 + B runtime·callbackasm1(SB) + MOVW $1290, R12 + B runtime·callbackasm1(SB) + MOVW $1291, R12 + B runtime·callbackasm1(SB) + MOVW $1292, R12 + B runtime·callbackasm1(SB) + MOVW $1293, R12 + B runtime·callbackasm1(SB) + MOVW $1294, R12 + B runtime·callbackasm1(SB) + MOVW $1295, R12 + B runtime·callbackasm1(SB) + MOVW $1296, R12 + B runtime·callbackasm1(SB) + MOVW $1297, R12 + B runtime·callbackasm1(SB) + MOVW $1298, R12 + B runtime·callbackasm1(SB) + MOVW $1299, R12 + B runtime·callbackasm1(SB) + MOVW $1300, R12 + B runtime·callbackasm1(SB) + MOVW $1301, R12 + B runtime·callbackasm1(SB) + MOVW $1302, R12 + B runtime·callbackasm1(SB) + MOVW $1303, R12 + B runtime·callbackasm1(SB) + MOVW $1304, R12 + B runtime·callbackasm1(SB) + MOVW $1305, R12 + B runtime·callbackasm1(SB) + MOVW $1306, R12 + B runtime·callbackasm1(SB) + MOVW $1307, R12 + B runtime·callbackasm1(SB) + MOVW $1308, R12 + B runtime·callbackasm1(SB) + MOVW $1309, R12 + B runtime·callbackasm1(SB) + MOVW $1310, R12 + B runtime·callbackasm1(SB) + MOVW $1311, R12 + B runtime·callbackasm1(SB) + MOVW $1312, R12 + B runtime·callbackasm1(SB) + MOVW $1313, R12 + B runtime·callbackasm1(SB) + MOVW $1314, R12 + B runtime·callbackasm1(SB) + MOVW $1315, R12 + B runtime·callbackasm1(SB) + MOVW $1316, R12 + B runtime·callbackasm1(SB) + MOVW $1317, R12 + B runtime·callbackasm1(SB) + MOVW $1318, R12 + B runtime·callbackasm1(SB) + MOVW $1319, R12 + B runtime·callbackasm1(SB) + MOVW $1320, R12 + B runtime·callbackasm1(SB) + MOVW $1321, R12 + B runtime·callbackasm1(SB) + MOVW $1322, R12 + B runtime·callbackasm1(SB) + MOVW $1323, R12 + B runtime·callbackasm1(SB) + MOVW $1324, R12 + B runtime·callbackasm1(SB) + MOVW $1325, R12 + B runtime·callbackasm1(SB) + MOVW $1326, R12 + B 
runtime·callbackasm1(SB) + MOVW $1327, R12 + B runtime·callbackasm1(SB) + MOVW $1328, R12 + B runtime·callbackasm1(SB) + MOVW $1329, R12 + B runtime·callbackasm1(SB) + MOVW $1330, R12 + B runtime·callbackasm1(SB) + MOVW $1331, R12 + B runtime·callbackasm1(SB) + MOVW $1332, R12 + B runtime·callbackasm1(SB) + MOVW $1333, R12 + B runtime·callbackasm1(SB) + MOVW $1334, R12 + B runtime·callbackasm1(SB) + MOVW $1335, R12 + B runtime·callbackasm1(SB) + MOVW $1336, R12 + B runtime·callbackasm1(SB) + MOVW $1337, R12 + B runtime·callbackasm1(SB) + MOVW $1338, R12 + B runtime·callbackasm1(SB) + MOVW $1339, R12 + B runtime·callbackasm1(SB) + MOVW $1340, R12 + B runtime·callbackasm1(SB) + MOVW $1341, R12 + B runtime·callbackasm1(SB) + MOVW $1342, R12 + B runtime·callbackasm1(SB) + MOVW $1343, R12 + B runtime·callbackasm1(SB) + MOVW $1344, R12 + B runtime·callbackasm1(SB) + MOVW $1345, R12 + B runtime·callbackasm1(SB) + MOVW $1346, R12 + B runtime·callbackasm1(SB) + MOVW $1347, R12 + B runtime·callbackasm1(SB) + MOVW $1348, R12 + B runtime·callbackasm1(SB) + MOVW $1349, R12 + B runtime·callbackasm1(SB) + MOVW $1350, R12 + B runtime·callbackasm1(SB) + MOVW $1351, R12 + B runtime·callbackasm1(SB) + MOVW $1352, R12 + B runtime·callbackasm1(SB) + MOVW $1353, R12 + B runtime·callbackasm1(SB) + MOVW $1354, R12 + B runtime·callbackasm1(SB) + MOVW $1355, R12 + B runtime·callbackasm1(SB) + MOVW $1356, R12 + B runtime·callbackasm1(SB) + MOVW $1357, R12 + B runtime·callbackasm1(SB) + MOVW $1358, R12 + B runtime·callbackasm1(SB) + MOVW $1359, R12 + B runtime·callbackasm1(SB) + MOVW $1360, R12 + B runtime·callbackasm1(SB) + MOVW $1361, R12 + B runtime·callbackasm1(SB) + MOVW $1362, R12 + B runtime·callbackasm1(SB) + MOVW $1363, R12 + B runtime·callbackasm1(SB) + MOVW $1364, R12 + B runtime·callbackasm1(SB) + MOVW $1365, R12 + B runtime·callbackasm1(SB) + MOVW $1366, R12 + B runtime·callbackasm1(SB) + MOVW $1367, R12 + B runtime·callbackasm1(SB) + MOVW $1368, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1369, R12 + B runtime·callbackasm1(SB) + MOVW $1370, R12 + B runtime·callbackasm1(SB) + MOVW $1371, R12 + B runtime·callbackasm1(SB) + MOVW $1372, R12 + B runtime·callbackasm1(SB) + MOVW $1373, R12 + B runtime·callbackasm1(SB) + MOVW $1374, R12 + B runtime·callbackasm1(SB) + MOVW $1375, R12 + B runtime·callbackasm1(SB) + MOVW $1376, R12 + B runtime·callbackasm1(SB) + MOVW $1377, R12 + B runtime·callbackasm1(SB) + MOVW $1378, R12 + B runtime·callbackasm1(SB) + MOVW $1379, R12 + B runtime·callbackasm1(SB) + MOVW $1380, R12 + B runtime·callbackasm1(SB) + MOVW $1381, R12 + B runtime·callbackasm1(SB) + MOVW $1382, R12 + B runtime·callbackasm1(SB) + MOVW $1383, R12 + B runtime·callbackasm1(SB) + MOVW $1384, R12 + B runtime·callbackasm1(SB) + MOVW $1385, R12 + B runtime·callbackasm1(SB) + MOVW $1386, R12 + B runtime·callbackasm1(SB) + MOVW $1387, R12 + B runtime·callbackasm1(SB) + MOVW $1388, R12 + B runtime·callbackasm1(SB) + MOVW $1389, R12 + B runtime·callbackasm1(SB) + MOVW $1390, R12 + B runtime·callbackasm1(SB) + MOVW $1391, R12 + B runtime·callbackasm1(SB) + MOVW $1392, R12 + B runtime·callbackasm1(SB) + MOVW $1393, R12 + B runtime·callbackasm1(SB) + MOVW $1394, R12 + B runtime·callbackasm1(SB) + MOVW $1395, R12 + B runtime·callbackasm1(SB) + MOVW $1396, R12 + B runtime·callbackasm1(SB) + MOVW $1397, R12 + B runtime·callbackasm1(SB) + MOVW $1398, R12 + B runtime·callbackasm1(SB) + MOVW $1399, R12 + B runtime·callbackasm1(SB) + MOVW $1400, R12 + B runtime·callbackasm1(SB) + MOVW $1401, R12 + B runtime·callbackasm1(SB) + MOVW $1402, R12 + B runtime·callbackasm1(SB) + MOVW $1403, R12 + B runtime·callbackasm1(SB) + MOVW $1404, R12 + B runtime·callbackasm1(SB) + MOVW $1405, R12 + B runtime·callbackasm1(SB) + MOVW $1406, R12 + B runtime·callbackasm1(SB) + MOVW $1407, R12 + B runtime·callbackasm1(SB) + MOVW $1408, R12 + B runtime·callbackasm1(SB) + MOVW $1409, R12 + B runtime·callbackasm1(SB) + MOVW $1410, R12 + B runtime·callbackasm1(SB) + MOVW $1411, R12 + B 
runtime·callbackasm1(SB) + MOVW $1412, R12 + B runtime·callbackasm1(SB) + MOVW $1413, R12 + B runtime·callbackasm1(SB) + MOVW $1414, R12 + B runtime·callbackasm1(SB) + MOVW $1415, R12 + B runtime·callbackasm1(SB) + MOVW $1416, R12 + B runtime·callbackasm1(SB) + MOVW $1417, R12 + B runtime·callbackasm1(SB) + MOVW $1418, R12 + B runtime·callbackasm1(SB) + MOVW $1419, R12 + B runtime·callbackasm1(SB) + MOVW $1420, R12 + B runtime·callbackasm1(SB) + MOVW $1421, R12 + B runtime·callbackasm1(SB) + MOVW $1422, R12 + B runtime·callbackasm1(SB) + MOVW $1423, R12 + B runtime·callbackasm1(SB) + MOVW $1424, R12 + B runtime·callbackasm1(SB) + MOVW $1425, R12 + B runtime·callbackasm1(SB) + MOVW $1426, R12 + B runtime·callbackasm1(SB) + MOVW $1427, R12 + B runtime·callbackasm1(SB) + MOVW $1428, R12 + B runtime·callbackasm1(SB) + MOVW $1429, R12 + B runtime·callbackasm1(SB) + MOVW $1430, R12 + B runtime·callbackasm1(SB) + MOVW $1431, R12 + B runtime·callbackasm1(SB) + MOVW $1432, R12 + B runtime·callbackasm1(SB) + MOVW $1433, R12 + B runtime·callbackasm1(SB) + MOVW $1434, R12 + B runtime·callbackasm1(SB) + MOVW $1435, R12 + B runtime·callbackasm1(SB) + MOVW $1436, R12 + B runtime·callbackasm1(SB) + MOVW $1437, R12 + B runtime·callbackasm1(SB) + MOVW $1438, R12 + B runtime·callbackasm1(SB) + MOVW $1439, R12 + B runtime·callbackasm1(SB) + MOVW $1440, R12 + B runtime·callbackasm1(SB) + MOVW $1441, R12 + B runtime·callbackasm1(SB) + MOVW $1442, R12 + B runtime·callbackasm1(SB) + MOVW $1443, R12 + B runtime·callbackasm1(SB) + MOVW $1444, R12 + B runtime·callbackasm1(SB) + MOVW $1445, R12 + B runtime·callbackasm1(SB) + MOVW $1446, R12 + B runtime·callbackasm1(SB) + MOVW $1447, R12 + B runtime·callbackasm1(SB) + MOVW $1448, R12 + B runtime·callbackasm1(SB) + MOVW $1449, R12 + B runtime·callbackasm1(SB) + MOVW $1450, R12 + B runtime·callbackasm1(SB) + MOVW $1451, R12 + B runtime·callbackasm1(SB) + MOVW $1452, R12 + B runtime·callbackasm1(SB) + MOVW $1453, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1454, R12 + B runtime·callbackasm1(SB) + MOVW $1455, R12 + B runtime·callbackasm1(SB) + MOVW $1456, R12 + B runtime·callbackasm1(SB) + MOVW $1457, R12 + B runtime·callbackasm1(SB) + MOVW $1458, R12 + B runtime·callbackasm1(SB) + MOVW $1459, R12 + B runtime·callbackasm1(SB) + MOVW $1460, R12 + B runtime·callbackasm1(SB) + MOVW $1461, R12 + B runtime·callbackasm1(SB) + MOVW $1462, R12 + B runtime·callbackasm1(SB) + MOVW $1463, R12 + B runtime·callbackasm1(SB) + MOVW $1464, R12 + B runtime·callbackasm1(SB) + MOVW $1465, R12 + B runtime·callbackasm1(SB) + MOVW $1466, R12 + B runtime·callbackasm1(SB) + MOVW $1467, R12 + B runtime·callbackasm1(SB) + MOVW $1468, R12 + B runtime·callbackasm1(SB) + MOVW $1469, R12 + B runtime·callbackasm1(SB) + MOVW $1470, R12 + B runtime·callbackasm1(SB) + MOVW $1471, R12 + B runtime·callbackasm1(SB) + MOVW $1472, R12 + B runtime·callbackasm1(SB) + MOVW $1473, R12 + B runtime·callbackasm1(SB) + MOVW $1474, R12 + B runtime·callbackasm1(SB) + MOVW $1475, R12 + B runtime·callbackasm1(SB) + MOVW $1476, R12 + B runtime·callbackasm1(SB) + MOVW $1477, R12 + B runtime·callbackasm1(SB) + MOVW $1478, R12 + B runtime·callbackasm1(SB) + MOVW $1479, R12 + B runtime·callbackasm1(SB) + MOVW $1480, R12 + B runtime·callbackasm1(SB) + MOVW $1481, R12 + B runtime·callbackasm1(SB) + MOVW $1482, R12 + B runtime·callbackasm1(SB) + MOVW $1483, R12 + B runtime·callbackasm1(SB) + MOVW $1484, R12 + B runtime·callbackasm1(SB) + MOVW $1485, R12 + B runtime·callbackasm1(SB) + MOVW $1486, R12 + B runtime·callbackasm1(SB) + MOVW $1487, R12 + B runtime·callbackasm1(SB) + MOVW $1488, R12 + B runtime·callbackasm1(SB) + MOVW $1489, R12 + B runtime·callbackasm1(SB) + MOVW $1490, R12 + B runtime·callbackasm1(SB) + MOVW $1491, R12 + B runtime·callbackasm1(SB) + MOVW $1492, R12 + B runtime·callbackasm1(SB) + MOVW $1493, R12 + B runtime·callbackasm1(SB) + MOVW $1494, R12 + B runtime·callbackasm1(SB) + MOVW $1495, R12 + B runtime·callbackasm1(SB) + MOVW $1496, R12 + B 
runtime·callbackasm1(SB) + MOVW $1497, R12 + B runtime·callbackasm1(SB) + MOVW $1498, R12 + B runtime·callbackasm1(SB) + MOVW $1499, R12 + B runtime·callbackasm1(SB) + MOVW $1500, R12 + B runtime·callbackasm1(SB) + MOVW $1501, R12 + B runtime·callbackasm1(SB) + MOVW $1502, R12 + B runtime·callbackasm1(SB) + MOVW $1503, R12 + B runtime·callbackasm1(SB) + MOVW $1504, R12 + B runtime·callbackasm1(SB) + MOVW $1505, R12 + B runtime·callbackasm1(SB) + MOVW $1506, R12 + B runtime·callbackasm1(SB) + MOVW $1507, R12 + B runtime·callbackasm1(SB) + MOVW $1508, R12 + B runtime·callbackasm1(SB) + MOVW $1509, R12 + B runtime·callbackasm1(SB) + MOVW $1510, R12 + B runtime·callbackasm1(SB) + MOVW $1511, R12 + B runtime·callbackasm1(SB) + MOVW $1512, R12 + B runtime·callbackasm1(SB) + MOVW $1513, R12 + B runtime·callbackasm1(SB) + MOVW $1514, R12 + B runtime·callbackasm1(SB) + MOVW $1515, R12 + B runtime·callbackasm1(SB) + MOVW $1516, R12 + B runtime·callbackasm1(SB) + MOVW $1517, R12 + B runtime·callbackasm1(SB) + MOVW $1518, R12 + B runtime·callbackasm1(SB) + MOVW $1519, R12 + B runtime·callbackasm1(SB) + MOVW $1520, R12 + B runtime·callbackasm1(SB) + MOVW $1521, R12 + B runtime·callbackasm1(SB) + MOVW $1522, R12 + B runtime·callbackasm1(SB) + MOVW $1523, R12 + B runtime·callbackasm1(SB) + MOVW $1524, R12 + B runtime·callbackasm1(SB) + MOVW $1525, R12 + B runtime·callbackasm1(SB) + MOVW $1526, R12 + B runtime·callbackasm1(SB) + MOVW $1527, R12 + B runtime·callbackasm1(SB) + MOVW $1528, R12 + B runtime·callbackasm1(SB) + MOVW $1529, R12 + B runtime·callbackasm1(SB) + MOVW $1530, R12 + B runtime·callbackasm1(SB) + MOVW $1531, R12 + B runtime·callbackasm1(SB) + MOVW $1532, R12 + B runtime·callbackasm1(SB) + MOVW $1533, R12 + B runtime·callbackasm1(SB) + MOVW $1534, R12 + B runtime·callbackasm1(SB) + MOVW $1535, R12 + B runtime·callbackasm1(SB) + MOVW $1536, R12 + B runtime·callbackasm1(SB) + MOVW $1537, R12 + B runtime·callbackasm1(SB) + MOVW $1538, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1539, R12 + B runtime·callbackasm1(SB) + MOVW $1540, R12 + B runtime·callbackasm1(SB) + MOVW $1541, R12 + B runtime·callbackasm1(SB) + MOVW $1542, R12 + B runtime·callbackasm1(SB) + MOVW $1543, R12 + B runtime·callbackasm1(SB) + MOVW $1544, R12 + B runtime·callbackasm1(SB) + MOVW $1545, R12 + B runtime·callbackasm1(SB) + MOVW $1546, R12 + B runtime·callbackasm1(SB) + MOVW $1547, R12 + B runtime·callbackasm1(SB) + MOVW $1548, R12 + B runtime·callbackasm1(SB) + MOVW $1549, R12 + B runtime·callbackasm1(SB) + MOVW $1550, R12 + B runtime·callbackasm1(SB) + MOVW $1551, R12 + B runtime·callbackasm1(SB) + MOVW $1552, R12 + B runtime·callbackasm1(SB) + MOVW $1553, R12 + B runtime·callbackasm1(SB) + MOVW $1554, R12 + B runtime·callbackasm1(SB) + MOVW $1555, R12 + B runtime·callbackasm1(SB) + MOVW $1556, R12 + B runtime·callbackasm1(SB) + MOVW $1557, R12 + B runtime·callbackasm1(SB) + MOVW $1558, R12 + B runtime·callbackasm1(SB) + MOVW $1559, R12 + B runtime·callbackasm1(SB) + MOVW $1560, R12 + B runtime·callbackasm1(SB) + MOVW $1561, R12 + B runtime·callbackasm1(SB) + MOVW $1562, R12 + B runtime·callbackasm1(SB) + MOVW $1563, R12 + B runtime·callbackasm1(SB) + MOVW $1564, R12 + B runtime·callbackasm1(SB) + MOVW $1565, R12 + B runtime·callbackasm1(SB) + MOVW $1566, R12 + B runtime·callbackasm1(SB) + MOVW $1567, R12 + B runtime·callbackasm1(SB) + MOVW $1568, R12 + B runtime·callbackasm1(SB) + MOVW $1569, R12 + B runtime·callbackasm1(SB) + MOVW $1570, R12 + B runtime·callbackasm1(SB) + MOVW $1571, R12 + B runtime·callbackasm1(SB) + MOVW $1572, R12 + B runtime·callbackasm1(SB) + MOVW $1573, R12 + B runtime·callbackasm1(SB) + MOVW $1574, R12 + B runtime·callbackasm1(SB) + MOVW $1575, R12 + B runtime·callbackasm1(SB) + MOVW $1576, R12 + B runtime·callbackasm1(SB) + MOVW $1577, R12 + B runtime·callbackasm1(SB) + MOVW $1578, R12 + B runtime·callbackasm1(SB) + MOVW $1579, R12 + B runtime·callbackasm1(SB) + MOVW $1580, R12 + B runtime·callbackasm1(SB) + MOVW $1581, R12 + B 
runtime·callbackasm1(SB) + MOVW $1582, R12 + B runtime·callbackasm1(SB) + MOVW $1583, R12 + B runtime·callbackasm1(SB) + MOVW $1584, R12 + B runtime·callbackasm1(SB) + MOVW $1585, R12 + B runtime·callbackasm1(SB) + MOVW $1586, R12 + B runtime·callbackasm1(SB) + MOVW $1587, R12 + B runtime·callbackasm1(SB) + MOVW $1588, R12 + B runtime·callbackasm1(SB) + MOVW $1589, R12 + B runtime·callbackasm1(SB) + MOVW $1590, R12 + B runtime·callbackasm1(SB) + MOVW $1591, R12 + B runtime·callbackasm1(SB) + MOVW $1592, R12 + B runtime·callbackasm1(SB) + MOVW $1593, R12 + B runtime·callbackasm1(SB) + MOVW $1594, R12 + B runtime·callbackasm1(SB) + MOVW $1595, R12 + B runtime·callbackasm1(SB) + MOVW $1596, R12 + B runtime·callbackasm1(SB) + MOVW $1597, R12 + B runtime·callbackasm1(SB) + MOVW $1598, R12 + B runtime·callbackasm1(SB) + MOVW $1599, R12 + B runtime·callbackasm1(SB) + MOVW $1600, R12 + B runtime·callbackasm1(SB) + MOVW $1601, R12 + B runtime·callbackasm1(SB) + MOVW $1602, R12 + B runtime·callbackasm1(SB) + MOVW $1603, R12 + B runtime·callbackasm1(SB) + MOVW $1604, R12 + B runtime·callbackasm1(SB) + MOVW $1605, R12 + B runtime·callbackasm1(SB) + MOVW $1606, R12 + B runtime·callbackasm1(SB) + MOVW $1607, R12 + B runtime·callbackasm1(SB) + MOVW $1608, R12 + B runtime·callbackasm1(SB) + MOVW $1609, R12 + B runtime·callbackasm1(SB) + MOVW $1610, R12 + B runtime·callbackasm1(SB) + MOVW $1611, R12 + B runtime·callbackasm1(SB) + MOVW $1612, R12 + B runtime·callbackasm1(SB) + MOVW $1613, R12 + B runtime·callbackasm1(SB) + MOVW $1614, R12 + B runtime·callbackasm1(SB) + MOVW $1615, R12 + B runtime·callbackasm1(SB) + MOVW $1616, R12 + B runtime·callbackasm1(SB) + MOVW $1617, R12 + B runtime·callbackasm1(SB) + MOVW $1618, R12 + B runtime·callbackasm1(SB) + MOVW $1619, R12 + B runtime·callbackasm1(SB) + MOVW $1620, R12 + B runtime·callbackasm1(SB) + MOVW $1621, R12 + B runtime·callbackasm1(SB) + MOVW $1622, R12 + B runtime·callbackasm1(SB) + MOVW $1623, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1624, R12 + B runtime·callbackasm1(SB) + MOVW $1625, R12 + B runtime·callbackasm1(SB) + MOVW $1626, R12 + B runtime·callbackasm1(SB) + MOVW $1627, R12 + B runtime·callbackasm1(SB) + MOVW $1628, R12 + B runtime·callbackasm1(SB) + MOVW $1629, R12 + B runtime·callbackasm1(SB) + MOVW $1630, R12 + B runtime·callbackasm1(SB) + MOVW $1631, R12 + B runtime·callbackasm1(SB) + MOVW $1632, R12 + B runtime·callbackasm1(SB) + MOVW $1633, R12 + B runtime·callbackasm1(SB) + MOVW $1634, R12 + B runtime·callbackasm1(SB) + MOVW $1635, R12 + B runtime·callbackasm1(SB) + MOVW $1636, R12 + B runtime·callbackasm1(SB) + MOVW $1637, R12 + B runtime·callbackasm1(SB) + MOVW $1638, R12 + B runtime·callbackasm1(SB) + MOVW $1639, R12 + B runtime·callbackasm1(SB) + MOVW $1640, R12 + B runtime·callbackasm1(SB) + MOVW $1641, R12 + B runtime·callbackasm1(SB) + MOVW $1642, R12 + B runtime·callbackasm1(SB) + MOVW $1643, R12 + B runtime·callbackasm1(SB) + MOVW $1644, R12 + B runtime·callbackasm1(SB) + MOVW $1645, R12 + B runtime·callbackasm1(SB) + MOVW $1646, R12 + B runtime·callbackasm1(SB) + MOVW $1647, R12 + B runtime·callbackasm1(SB) + MOVW $1648, R12 + B runtime·callbackasm1(SB) + MOVW $1649, R12 + B runtime·callbackasm1(SB) + MOVW $1650, R12 + B runtime·callbackasm1(SB) + MOVW $1651, R12 + B runtime·callbackasm1(SB) + MOVW $1652, R12 + B runtime·callbackasm1(SB) + MOVW $1653, R12 + B runtime·callbackasm1(SB) + MOVW $1654, R12 + B runtime·callbackasm1(SB) + MOVW $1655, R12 + B runtime·callbackasm1(SB) + MOVW $1656, R12 + B runtime·callbackasm1(SB) + MOVW $1657, R12 + B runtime·callbackasm1(SB) + MOVW $1658, R12 + B runtime·callbackasm1(SB) + MOVW $1659, R12 + B runtime·callbackasm1(SB) + MOVW $1660, R12 + B runtime·callbackasm1(SB) + MOVW $1661, R12 + B runtime·callbackasm1(SB) + MOVW $1662, R12 + B runtime·callbackasm1(SB) + MOVW $1663, R12 + B runtime·callbackasm1(SB) + MOVW $1664, R12 + B runtime·callbackasm1(SB) + MOVW $1665, R12 + B runtime·callbackasm1(SB) + MOVW $1666, R12 + B 
runtime·callbackasm1(SB) + MOVW $1667, R12 + B runtime·callbackasm1(SB) + MOVW $1668, R12 + B runtime·callbackasm1(SB) + MOVW $1669, R12 + B runtime·callbackasm1(SB) + MOVW $1670, R12 + B runtime·callbackasm1(SB) + MOVW $1671, R12 + B runtime·callbackasm1(SB) + MOVW $1672, R12 + B runtime·callbackasm1(SB) + MOVW $1673, R12 + B runtime·callbackasm1(SB) + MOVW $1674, R12 + B runtime·callbackasm1(SB) + MOVW $1675, R12 + B runtime·callbackasm1(SB) + MOVW $1676, R12 + B runtime·callbackasm1(SB) + MOVW $1677, R12 + B runtime·callbackasm1(SB) + MOVW $1678, R12 + B runtime·callbackasm1(SB) + MOVW $1679, R12 + B runtime·callbackasm1(SB) + MOVW $1680, R12 + B runtime·callbackasm1(SB) + MOVW $1681, R12 + B runtime·callbackasm1(SB) + MOVW $1682, R12 + B runtime·callbackasm1(SB) + MOVW $1683, R12 + B runtime·callbackasm1(SB) + MOVW $1684, R12 + B runtime·callbackasm1(SB) + MOVW $1685, R12 + B runtime·callbackasm1(SB) + MOVW $1686, R12 + B runtime·callbackasm1(SB) + MOVW $1687, R12 + B runtime·callbackasm1(SB) + MOVW $1688, R12 + B runtime·callbackasm1(SB) + MOVW $1689, R12 + B runtime·callbackasm1(SB) + MOVW $1690, R12 + B runtime·callbackasm1(SB) + MOVW $1691, R12 + B runtime·callbackasm1(SB) + MOVW $1692, R12 + B runtime·callbackasm1(SB) + MOVW $1693, R12 + B runtime·callbackasm1(SB) + MOVW $1694, R12 + B runtime·callbackasm1(SB) + MOVW $1695, R12 + B runtime·callbackasm1(SB) + MOVW $1696, R12 + B runtime·callbackasm1(SB) + MOVW $1697, R12 + B runtime·callbackasm1(SB) + MOVW $1698, R12 + B runtime·callbackasm1(SB) + MOVW $1699, R12 + B runtime·callbackasm1(SB) + MOVW $1700, R12 + B runtime·callbackasm1(SB) + MOVW $1701, R12 + B runtime·callbackasm1(SB) + MOVW $1702, R12 + B runtime·callbackasm1(SB) + MOVW $1703, R12 + B runtime·callbackasm1(SB) + MOVW $1704, R12 + B runtime·callbackasm1(SB) + MOVW $1705, R12 + B runtime·callbackasm1(SB) + MOVW $1706, R12 + B runtime·callbackasm1(SB) + MOVW $1707, R12 + B runtime·callbackasm1(SB) + MOVW $1708, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1709, R12 + B runtime·callbackasm1(SB) + MOVW $1710, R12 + B runtime·callbackasm1(SB) + MOVW $1711, R12 + B runtime·callbackasm1(SB) + MOVW $1712, R12 + B runtime·callbackasm1(SB) + MOVW $1713, R12 + B runtime·callbackasm1(SB) + MOVW $1714, R12 + B runtime·callbackasm1(SB) + MOVW $1715, R12 + B runtime·callbackasm1(SB) + MOVW $1716, R12 + B runtime·callbackasm1(SB) + MOVW $1717, R12 + B runtime·callbackasm1(SB) + MOVW $1718, R12 + B runtime·callbackasm1(SB) + MOVW $1719, R12 + B runtime·callbackasm1(SB) + MOVW $1720, R12 + B runtime·callbackasm1(SB) + MOVW $1721, R12 + B runtime·callbackasm1(SB) + MOVW $1722, R12 + B runtime·callbackasm1(SB) + MOVW $1723, R12 + B runtime·callbackasm1(SB) + MOVW $1724, R12 + B runtime·callbackasm1(SB) + MOVW $1725, R12 + B runtime·callbackasm1(SB) + MOVW $1726, R12 + B runtime·callbackasm1(SB) + MOVW $1727, R12 + B runtime·callbackasm1(SB) + MOVW $1728, R12 + B runtime·callbackasm1(SB) + MOVW $1729, R12 + B runtime·callbackasm1(SB) + MOVW $1730, R12 + B runtime·callbackasm1(SB) + MOVW $1731, R12 + B runtime·callbackasm1(SB) + MOVW $1732, R12 + B runtime·callbackasm1(SB) + MOVW $1733, R12 + B runtime·callbackasm1(SB) + MOVW $1734, R12 + B runtime·callbackasm1(SB) + MOVW $1735, R12 + B runtime·callbackasm1(SB) + MOVW $1736, R12 + B runtime·callbackasm1(SB) + MOVW $1737, R12 + B runtime·callbackasm1(SB) + MOVW $1738, R12 + B runtime·callbackasm1(SB) + MOVW $1739, R12 + B runtime·callbackasm1(SB) + MOVW $1740, R12 + B runtime·callbackasm1(SB) + MOVW $1741, R12 + B runtime·callbackasm1(SB) + MOVW $1742, R12 + B runtime·callbackasm1(SB) + MOVW $1743, R12 + B runtime·callbackasm1(SB) + MOVW $1744, R12 + B runtime·callbackasm1(SB) + MOVW $1745, R12 + B runtime·callbackasm1(SB) + MOVW $1746, R12 + B runtime·callbackasm1(SB) + MOVW $1747, R12 + B runtime·callbackasm1(SB) + MOVW $1748, R12 + B runtime·callbackasm1(SB) + MOVW $1749, R12 + B runtime·callbackasm1(SB) + MOVW $1750, R12 + B runtime·callbackasm1(SB) + MOVW $1751, R12 + B 
runtime·callbackasm1(SB) + MOVW $1752, R12 + B runtime·callbackasm1(SB) + MOVW $1753, R12 + B runtime·callbackasm1(SB) + MOVW $1754, R12 + B runtime·callbackasm1(SB) + MOVW $1755, R12 + B runtime·callbackasm1(SB) + MOVW $1756, R12 + B runtime·callbackasm1(SB) + MOVW $1757, R12 + B runtime·callbackasm1(SB) + MOVW $1758, R12 + B runtime·callbackasm1(SB) + MOVW $1759, R12 + B runtime·callbackasm1(SB) + MOVW $1760, R12 + B runtime·callbackasm1(SB) + MOVW $1761, R12 + B runtime·callbackasm1(SB) + MOVW $1762, R12 + B runtime·callbackasm1(SB) + MOVW $1763, R12 + B runtime·callbackasm1(SB) + MOVW $1764, R12 + B runtime·callbackasm1(SB) + MOVW $1765, R12 + B runtime·callbackasm1(SB) + MOVW $1766, R12 + B runtime·callbackasm1(SB) + MOVW $1767, R12 + B runtime·callbackasm1(SB) + MOVW $1768, R12 + B runtime·callbackasm1(SB) + MOVW $1769, R12 + B runtime·callbackasm1(SB) + MOVW $1770, R12 + B runtime·callbackasm1(SB) + MOVW $1771, R12 + B runtime·callbackasm1(SB) + MOVW $1772, R12 + B runtime·callbackasm1(SB) + MOVW $1773, R12 + B runtime·callbackasm1(SB) + MOVW $1774, R12 + B runtime·callbackasm1(SB) + MOVW $1775, R12 + B runtime·callbackasm1(SB) + MOVW $1776, R12 + B runtime·callbackasm1(SB) + MOVW $1777, R12 + B runtime·callbackasm1(SB) + MOVW $1778, R12 + B runtime·callbackasm1(SB) + MOVW $1779, R12 + B runtime·callbackasm1(SB) + MOVW $1780, R12 + B runtime·callbackasm1(SB) + MOVW $1781, R12 + B runtime·callbackasm1(SB) + MOVW $1782, R12 + B runtime·callbackasm1(SB) + MOVW $1783, R12 + B runtime·callbackasm1(SB) + MOVW $1784, R12 + B runtime·callbackasm1(SB) + MOVW $1785, R12 + B runtime·callbackasm1(SB) + MOVW $1786, R12 + B runtime·callbackasm1(SB) + MOVW $1787, R12 + B runtime·callbackasm1(SB) + MOVW $1788, R12 + B runtime·callbackasm1(SB) + MOVW $1789, R12 + B runtime·callbackasm1(SB) + MOVW $1790, R12 + B runtime·callbackasm1(SB) + MOVW $1791, R12 + B runtime·callbackasm1(SB) + MOVW $1792, R12 + B runtime·callbackasm1(SB) + MOVW $1793, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1794, R12 + B runtime·callbackasm1(SB) + MOVW $1795, R12 + B runtime·callbackasm1(SB) + MOVW $1796, R12 + B runtime·callbackasm1(SB) + MOVW $1797, R12 + B runtime·callbackasm1(SB) + MOVW $1798, R12 + B runtime·callbackasm1(SB) + MOVW $1799, R12 + B runtime·callbackasm1(SB) + MOVW $1800, R12 + B runtime·callbackasm1(SB) + MOVW $1801, R12 + B runtime·callbackasm1(SB) + MOVW $1802, R12 + B runtime·callbackasm1(SB) + MOVW $1803, R12 + B runtime·callbackasm1(SB) + MOVW $1804, R12 + B runtime·callbackasm1(SB) + MOVW $1805, R12 + B runtime·callbackasm1(SB) + MOVW $1806, R12 + B runtime·callbackasm1(SB) + MOVW $1807, R12 + B runtime·callbackasm1(SB) + MOVW $1808, R12 + B runtime·callbackasm1(SB) + MOVW $1809, R12 + B runtime·callbackasm1(SB) + MOVW $1810, R12 + B runtime·callbackasm1(SB) + MOVW $1811, R12 + B runtime·callbackasm1(SB) + MOVW $1812, R12 + B runtime·callbackasm1(SB) + MOVW $1813, R12 + B runtime·callbackasm1(SB) + MOVW $1814, R12 + B runtime·callbackasm1(SB) + MOVW $1815, R12 + B runtime·callbackasm1(SB) + MOVW $1816, R12 + B runtime·callbackasm1(SB) + MOVW $1817, R12 + B runtime·callbackasm1(SB) + MOVW $1818, R12 + B runtime·callbackasm1(SB) + MOVW $1819, R12 + B runtime·callbackasm1(SB) + MOVW $1820, R12 + B runtime·callbackasm1(SB) + MOVW $1821, R12 + B runtime·callbackasm1(SB) + MOVW $1822, R12 + B runtime·callbackasm1(SB) + MOVW $1823, R12 + B runtime·callbackasm1(SB) + MOVW $1824, R12 + B runtime·callbackasm1(SB) + MOVW $1825, R12 + B runtime·callbackasm1(SB) + MOVW $1826, R12 + B runtime·callbackasm1(SB) + MOVW $1827, R12 + B runtime·callbackasm1(SB) + MOVW $1828, R12 + B runtime·callbackasm1(SB) + MOVW $1829, R12 + B runtime·callbackasm1(SB) + MOVW $1830, R12 + B runtime·callbackasm1(SB) + MOVW $1831, R12 + B runtime·callbackasm1(SB) + MOVW $1832, R12 + B runtime·callbackasm1(SB) + MOVW $1833, R12 + B runtime·callbackasm1(SB) + MOVW $1834, R12 + B runtime·callbackasm1(SB) + MOVW $1835, R12 + B runtime·callbackasm1(SB) + MOVW $1836, R12 + B 
runtime·callbackasm1(SB) + MOVW $1837, R12 + B runtime·callbackasm1(SB) + MOVW $1838, R12 + B runtime·callbackasm1(SB) + MOVW $1839, R12 + B runtime·callbackasm1(SB) + MOVW $1840, R12 + B runtime·callbackasm1(SB) + MOVW $1841, R12 + B runtime·callbackasm1(SB) + MOVW $1842, R12 + B runtime·callbackasm1(SB) + MOVW $1843, R12 + B runtime·callbackasm1(SB) + MOVW $1844, R12 + B runtime·callbackasm1(SB) + MOVW $1845, R12 + B runtime·callbackasm1(SB) + MOVW $1846, R12 + B runtime·callbackasm1(SB) + MOVW $1847, R12 + B runtime·callbackasm1(SB) + MOVW $1848, R12 + B runtime·callbackasm1(SB) + MOVW $1849, R12 + B runtime·callbackasm1(SB) + MOVW $1850, R12 + B runtime·callbackasm1(SB) + MOVW $1851, R12 + B runtime·callbackasm1(SB) + MOVW $1852, R12 + B runtime·callbackasm1(SB) + MOVW $1853, R12 + B runtime·callbackasm1(SB) + MOVW $1854, R12 + B runtime·callbackasm1(SB) + MOVW $1855, R12 + B runtime·callbackasm1(SB) + MOVW $1856, R12 + B runtime·callbackasm1(SB) + MOVW $1857, R12 + B runtime·callbackasm1(SB) + MOVW $1858, R12 + B runtime·callbackasm1(SB) + MOVW $1859, R12 + B runtime·callbackasm1(SB) + MOVW $1860, R12 + B runtime·callbackasm1(SB) + MOVW $1861, R12 + B runtime·callbackasm1(SB) + MOVW $1862, R12 + B runtime·callbackasm1(SB) + MOVW $1863, R12 + B runtime·callbackasm1(SB) + MOVW $1864, R12 + B runtime·callbackasm1(SB) + MOVW $1865, R12 + B runtime·callbackasm1(SB) + MOVW $1866, R12 + B runtime·callbackasm1(SB) + MOVW $1867, R12 + B runtime·callbackasm1(SB) + MOVW $1868, R12 + B runtime·callbackasm1(SB) + MOVW $1869, R12 + B runtime·callbackasm1(SB) + MOVW $1870, R12 + B runtime·callbackasm1(SB) + MOVW $1871, R12 + B runtime·callbackasm1(SB) + MOVW $1872, R12 + B runtime·callbackasm1(SB) + MOVW $1873, R12 + B runtime·callbackasm1(SB) + MOVW $1874, R12 + B runtime·callbackasm1(SB) + MOVW $1875, R12 + B runtime·callbackasm1(SB) + MOVW $1876, R12 + B runtime·callbackasm1(SB) + MOVW $1877, R12 + B runtime·callbackasm1(SB) + MOVW $1878, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1879, R12 + B runtime·callbackasm1(SB) + MOVW $1880, R12 + B runtime·callbackasm1(SB) + MOVW $1881, R12 + B runtime·callbackasm1(SB) + MOVW $1882, R12 + B runtime·callbackasm1(SB) + MOVW $1883, R12 + B runtime·callbackasm1(SB) + MOVW $1884, R12 + B runtime·callbackasm1(SB) + MOVW $1885, R12 + B runtime·callbackasm1(SB) + MOVW $1886, R12 + B runtime·callbackasm1(SB) + MOVW $1887, R12 + B runtime·callbackasm1(SB) + MOVW $1888, R12 + B runtime·callbackasm1(SB) + MOVW $1889, R12 + B runtime·callbackasm1(SB) + MOVW $1890, R12 + B runtime·callbackasm1(SB) + MOVW $1891, R12 + B runtime·callbackasm1(SB) + MOVW $1892, R12 + B runtime·callbackasm1(SB) + MOVW $1893, R12 + B runtime·callbackasm1(SB) + MOVW $1894, R12 + B runtime·callbackasm1(SB) + MOVW $1895, R12 + B runtime·callbackasm1(SB) + MOVW $1896, R12 + B runtime·callbackasm1(SB) + MOVW $1897, R12 + B runtime·callbackasm1(SB) + MOVW $1898, R12 + B runtime·callbackasm1(SB) + MOVW $1899, R12 + B runtime·callbackasm1(SB) + MOVW $1900, R12 + B runtime·callbackasm1(SB) + MOVW $1901, R12 + B runtime·callbackasm1(SB) + MOVW $1902, R12 + B runtime·callbackasm1(SB) + MOVW $1903, R12 + B runtime·callbackasm1(SB) + MOVW $1904, R12 + B runtime·callbackasm1(SB) + MOVW $1905, R12 + B runtime·callbackasm1(SB) + MOVW $1906, R12 + B runtime·callbackasm1(SB) + MOVW $1907, R12 + B runtime·callbackasm1(SB) + MOVW $1908, R12 + B runtime·callbackasm1(SB) + MOVW $1909, R12 + B runtime·callbackasm1(SB) + MOVW $1910, R12 + B runtime·callbackasm1(SB) + MOVW $1911, R12 + B runtime·callbackasm1(SB) + MOVW $1912, R12 + B runtime·callbackasm1(SB) + MOVW $1913, R12 + B runtime·callbackasm1(SB) + MOVW $1914, R12 + B runtime·callbackasm1(SB) + MOVW $1915, R12 + B runtime·callbackasm1(SB) + MOVW $1916, R12 + B runtime·callbackasm1(SB) + MOVW $1917, R12 + B runtime·callbackasm1(SB) + MOVW $1918, R12 + B runtime·callbackasm1(SB) + MOVW $1919, R12 + B runtime·callbackasm1(SB) + MOVW $1920, R12 + B runtime·callbackasm1(SB) + MOVW $1921, R12 + B 
runtime·callbackasm1(SB) + MOVW $1922, R12 + B runtime·callbackasm1(SB) + MOVW $1923, R12 + B runtime·callbackasm1(SB) + MOVW $1924, R12 + B runtime·callbackasm1(SB) + MOVW $1925, R12 + B runtime·callbackasm1(SB) + MOVW $1926, R12 + B runtime·callbackasm1(SB) + MOVW $1927, R12 + B runtime·callbackasm1(SB) + MOVW $1928, R12 + B runtime·callbackasm1(SB) + MOVW $1929, R12 + B runtime·callbackasm1(SB) + MOVW $1930, R12 + B runtime·callbackasm1(SB) + MOVW $1931, R12 + B runtime·callbackasm1(SB) + MOVW $1932, R12 + B runtime·callbackasm1(SB) + MOVW $1933, R12 + B runtime·callbackasm1(SB) + MOVW $1934, R12 + B runtime·callbackasm1(SB) + MOVW $1935, R12 + B runtime·callbackasm1(SB) + MOVW $1936, R12 + B runtime·callbackasm1(SB) + MOVW $1937, R12 + B runtime·callbackasm1(SB) + MOVW $1938, R12 + B runtime·callbackasm1(SB) + MOVW $1939, R12 + B runtime·callbackasm1(SB) + MOVW $1940, R12 + B runtime·callbackasm1(SB) + MOVW $1941, R12 + B runtime·callbackasm1(SB) + MOVW $1942, R12 + B runtime·callbackasm1(SB) + MOVW $1943, R12 + B runtime·callbackasm1(SB) + MOVW $1944, R12 + B runtime·callbackasm1(SB) + MOVW $1945, R12 + B runtime·callbackasm1(SB) + MOVW $1946, R12 + B runtime·callbackasm1(SB) + MOVW $1947, R12 + B runtime·callbackasm1(SB) + MOVW $1948, R12 + B runtime·callbackasm1(SB) + MOVW $1949, R12 + B runtime·callbackasm1(SB) + MOVW $1950, R12 + B runtime·callbackasm1(SB) + MOVW $1951, R12 + B runtime·callbackasm1(SB) + MOVW $1952, R12 + B runtime·callbackasm1(SB) + MOVW $1953, R12 + B runtime·callbackasm1(SB) + MOVW $1954, R12 + B runtime·callbackasm1(SB) + MOVW $1955, R12 + B runtime·callbackasm1(SB) + MOVW $1956, R12 + B runtime·callbackasm1(SB) + MOVW $1957, R12 + B runtime·callbackasm1(SB) + MOVW $1958, R12 + B runtime·callbackasm1(SB) + MOVW $1959, R12 + B runtime·callbackasm1(SB) + MOVW $1960, R12 + B runtime·callbackasm1(SB) + MOVW $1961, R12 + B runtime·callbackasm1(SB) + MOVW $1962, R12 + B runtime·callbackasm1(SB) + MOVW $1963, R12 + B runtime·callbackasm1(SB) 
+ MOVW $1964, R12 + B runtime·callbackasm1(SB) + MOVW $1965, R12 + B runtime·callbackasm1(SB) + MOVW $1966, R12 + B runtime·callbackasm1(SB) + MOVW $1967, R12 + B runtime·callbackasm1(SB) + MOVW $1968, R12 + B runtime·callbackasm1(SB) + MOVW $1969, R12 + B runtime·callbackasm1(SB) + MOVW $1970, R12 + B runtime·callbackasm1(SB) + MOVW $1971, R12 + B runtime·callbackasm1(SB) + MOVW $1972, R12 + B runtime·callbackasm1(SB) + MOVW $1973, R12 + B runtime·callbackasm1(SB) + MOVW $1974, R12 + B runtime·callbackasm1(SB) + MOVW $1975, R12 + B runtime·callbackasm1(SB) + MOVW $1976, R12 + B runtime·callbackasm1(SB) + MOVW $1977, R12 + B runtime·callbackasm1(SB) + MOVW $1978, R12 + B runtime·callbackasm1(SB) + MOVW $1979, R12 + B runtime·callbackasm1(SB) + MOVW $1980, R12 + B runtime·callbackasm1(SB) + MOVW $1981, R12 + B runtime·callbackasm1(SB) + MOVW $1982, R12 + B runtime·callbackasm1(SB) + MOVW $1983, R12 + B runtime·callbackasm1(SB) + MOVW $1984, R12 + B runtime·callbackasm1(SB) + MOVW $1985, R12 + B runtime·callbackasm1(SB) + MOVW $1986, R12 + B runtime·callbackasm1(SB) + MOVW $1987, R12 + B runtime·callbackasm1(SB) + MOVW $1988, R12 + B runtime·callbackasm1(SB) + MOVW $1989, R12 + B runtime·callbackasm1(SB) + MOVW $1990, R12 + B runtime·callbackasm1(SB) + MOVW $1991, R12 + B runtime·callbackasm1(SB) + MOVW $1992, R12 + B runtime·callbackasm1(SB) + MOVW $1993, R12 + B runtime·callbackasm1(SB) + MOVW $1994, R12 + B runtime·callbackasm1(SB) + MOVW $1995, R12 + B runtime·callbackasm1(SB) + MOVW $1996, R12 + B runtime·callbackasm1(SB) + MOVW $1997, R12 + B runtime·callbackasm1(SB) + MOVW $1998, R12 + B runtime·callbackasm1(SB) + MOVW $1999, R12 + B runtime·callbackasm1(SB) diff --git a/src/sort/sort.go b/src/sort/sort.go index 7282b26ec4b6f..dd5bb3762e364 100644 --- a/src/sort/sort.go +++ b/src/sort/sort.go @@ -131,7 +131,7 @@ func doPivot(data Interface, lo, hi int) (midlo, midhi int) { c-- } // If hi-c<3 then there are duplicates (by property of median of nine). 
- // Let be a bit more conservative, and set border to 5. + // Let's be a bit more conservative, and set border to 5. protect := hi-c < 5 if !protect && hi-c < (hi-lo)/4 { // Lets test some points for equality to pivot diff --git a/src/strconv/atoi.go b/src/strconv/atoi.go index bebed048204fb..ff33d555e4983 100644 --- a/src/strconv/atoi.go +++ b/src/strconv/atoi.go @@ -44,7 +44,7 @@ const intSize = 32 << (^uint(0) >> 63) // IntSize is the size in bits of an int or uint value. const IntSize = intSize -const maxUint64 = (1<<64 - 1) +const maxUint64 = 1<<64 - 1 // ParseUint is like ParseInt but for unsigned numbers. func ParseUint(s string, base int, bitSize int) (uint64, error) { @@ -198,7 +198,7 @@ func ParseInt(s string, base int, bitSize int) (i int64, err error) { return n, nil } -// Atoi returns the result of ParseInt(s, 10, 0) converted to type int. +// Atoi is equivalent to ParseInt(s, 10, 0), converted to type int. func Atoi(s string) (int, error) { const fnAtoi = "Atoi" diff --git a/src/strconv/doc.go b/src/strconv/doc.go index cba898426afe7..8db725f96ae5c 100644 --- a/src/strconv/doc.go +++ b/src/strconv/doc.go @@ -46,8 +46,8 @@ // The latter guarantees that the result is an ASCII string, by escaping // any non-ASCII Unicode with \u: // -// q := Quote("Hello, 世界") -// q := QuoteToASCII("Hello, 世界") +// q := strconv.Quote("Hello, 世界") +// q := strconv.QuoteToASCII("Hello, 世界") // // QuoteRune and QuoteRuneToASCII are similar but accept runes and // return quoted Go rune literals. 
diff --git a/src/strconv/example_test.go b/src/strconv/example_test.go index 5c2e8a9b5608b..2d1a2a9dbfd50 100644 --- a/src/strconv/example_test.go +++ b/src/strconv/example_test.go @@ -167,6 +167,22 @@ func ExampleFormatUint() { // string, 2a } +func ExampleIsGraphic() { + shamrock := strconv.IsGraphic('☘') + fmt.Println(shamrock) + + a := strconv.IsGraphic('a') + fmt.Println(a) + + bel := strconv.IsGraphic('\007') + fmt.Println(bel) + + // Output: + // true + // true + // false +} + func ExampleIsPrint() { c := strconv.IsPrint('\u263a') fmt.Println(c) @@ -249,7 +265,7 @@ func ExampleParseUint() { } func ExampleQuote() { - s := strconv.Quote(`"Fran & Freddie's Diner ☺"`) + s := strconv.Quote(`"Fran & Freddie's Diner ☺"`) // there is a tab character inside the string literal fmt.Println(s) // Output: @@ -272,14 +288,50 @@ func ExampleQuoteRuneToASCII() { // '\u263a' } +func ExampleQuoteRuneToGraphic() { + s := strconv.QuoteRuneToGraphic('☺') + fmt.Println(s) + + s = strconv.QuoteRuneToGraphic('\u263a') + fmt.Println(s) + + s = strconv.QuoteRuneToGraphic('\u000a') + fmt.Println(s) + + s = strconv.QuoteRuneToGraphic(' ') // tab character + fmt.Println(s) + + // Output: + // '☺' + // '☺' + // '\n' + // '\t' +} + func ExampleQuoteToASCII() { - s := strconv.QuoteToASCII(`"Fran & Freddie's Diner ☺"`) + s := strconv.QuoteToASCII(`"Fran & Freddie's Diner ☺"`) // there is a tab character inside the string literal fmt.Println(s) // Output: // "\"Fran & Freddie's Diner\t\u263a\"" } +func ExampleQuoteToGraphic() { + s := strconv.QuoteToGraphic("☺") + fmt.Println(s) + + s = strconv.QuoteToGraphic("This is a \u263a \u000a") // there is a tab character inside the string literal + fmt.Println(s) + + s = strconv.QuoteToGraphic(`" This is a ☺ \n "`) + fmt.Println(s) + + // Output: + // "☺" + // "This is a ☺\t\n" + // "\" This is a ☺ \\n \"" +} + func ExampleUnquote() { s, err := strconv.Unquote("You can't unquote a string without quotes") fmt.Printf("%q, %v\n", s, err) diff --git 
a/src/strconv/itoa.go b/src/strconv/itoa.go index 8afe7af2517db..45e4192c82e16 100644 --- a/src/strconv/itoa.go +++ b/src/strconv/itoa.go @@ -30,7 +30,7 @@ func FormatInt(i int64, base int) string { return s } -// Itoa is shorthand for FormatInt(int64(i), 10). +// Itoa is equivalent to FormatInt(int64(i), 10). func Itoa(i int) string { return FormatInt(int64(i), 10) } @@ -152,10 +152,14 @@ func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s } } else if isPowerOfTwo(base) { - // It is known that base is a power of two and - // 2 <= base <= len(digits). // Use shifts and masks instead of / and %. - shift := uint(bits.TrailingZeros(uint(base))) & 31 + // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36. + // The largest power of 2 below or equal to 36 is 32, which is 1 << 5; + // i.e., the largest possible shift count is 5. By &-ind that value with + // the constant 7 we tell the compiler that the shift count is always + // less than 8 which is smaller than any register width. This allows + // the compiler to generate better code for the shift operation. + shift := uint(bits.TrailingZeros(uint(base))) & 7 b := uint64(base) m := uint(base) - 1 // == 1<= b { diff --git a/src/strconv/quote.go b/src/strconv/quote.go index 9b7194a0f041d..6cd2f93068c03 100644 --- a/src/strconv/quote.go +++ b/src/strconv/quote.go @@ -6,7 +6,10 @@ package strconv -import "unicode/utf8" +import ( + "internal/bytealg" + "unicode/utf8" +) const lowerhex = "0123456789abcdef" @@ -424,12 +427,7 @@ func Unquote(s string) (string, error) { // contains reports whether the string contains the byte c. func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false + return bytealg.IndexByteString(s, c) != -1 } // bsearch16 returns the smallest i such that a[i] >= x. 
diff --git a/src/strings/builder.go b/src/strings/builder.go index ac58f34e1dec2..3f33a87508764 100644 --- a/src/strings/builder.go +++ b/src/strings/builder.go @@ -50,6 +50,11 @@ func (b *Builder) String() string { // Len returns the number of accumulated bytes; b.Len() == len(b.String()). func (b *Builder) Len() int { return len(b.buf) } +// Cap returns the capacity of the builder's underlying byte slice. It is the +// total space allocated for the string being built and includes any bytes +// already written. +func (b *Builder) Cap() int { return cap(b.buf) } + // Reset resets the Builder to be empty. func (b *Builder) Reset() { b.addr = nil diff --git a/src/strings/builder_test.go b/src/strings/builder_test.go index 949f214619d5e..9e597015d88f4 100644 --- a/src/strings/builder_test.go +++ b/src/strings/builder_test.go @@ -20,6 +20,9 @@ func check(t *testing.T, b *Builder, want string) { if n := b.Len(); n != len(got) { t.Errorf("Len: got %d; but len(String()) is %d", n, len(got)) } + if n := b.Cap(); n < len(got) { + t.Errorf("Cap: got %d; but len(String()) is %d", n, len(got)) + } } func TestBuilder(t *testing.T) { @@ -89,6 +92,9 @@ func TestBuilderGrow(t *testing.T) { allocs := testing.AllocsPerRun(100, func() { var b Builder b.Grow(growLen) // should be only alloc, when growLen > 0 + if b.Cap() < growLen { + t.Fatalf("growLen=%d: Cap() is lower than growLen", growLen) + } b.Write(p) if b.String() != string(p) { t.Fatalf("growLen=%d: bad data written after Grow", growLen) @@ -226,6 +232,16 @@ func TestBuilderCopyPanic(t *testing.T) { b.Len() }, }, + { + name: "Cap", + wantPanic: false, + fn: func() { + var a Builder + a.WriteByte('x') + b := a + b.Cap() + }, + }, { name: "Reset", wantPanic: false, diff --git a/src/strings/compare_test.go b/src/strings/compare_test.go index 5d5334461c310..94554e0af794c 100644 --- a/src/strings/compare_test.go +++ b/src/strings/compare_test.go @@ -11,6 +11,7 @@ import ( "internal/testenv" . 
"strings" "testing" + "unsafe" ) var compareTests = []struct { @@ -53,6 +54,12 @@ func TestCompareIdenticalString(t *testing.T) { } func TestCompareStrings(t *testing.T) { + // unsafeString converts a []byte to a string with no allocation. + // The caller must not modify b while the result string is in use. + unsafeString := func(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) + } + lengths := make([]int, 0) // lengths to test in ascending order for i := 0; i <= 128; i++ { lengths = append(lengths, i) @@ -79,7 +86,7 @@ func TestCompareStrings(t *testing.T) { b[i] = 9 } - sa, sb := string(a), string(b) + sa, sb := unsafeString(a), unsafeString(b) cmp := Compare(sa[:len], sb[:len]) if cmp != 0 { t.Errorf(`CompareIdentical(%d) = %d`, len, cmp) @@ -96,12 +103,12 @@ func TestCompareStrings(t *testing.T) { } for k := lastLen; k < len; k++ { b[k] = a[k] - 1 - cmp = Compare(string(a[:len]), string(b[:len])) + cmp = Compare(unsafeString(a[:len]), unsafeString(b[:len])) if cmp != 1 { t.Errorf(`CompareAbigger(%d,%d) = %d`, len, k, cmp) } b[k] = a[k] + 1 - cmp = Compare(string(a[:len]), string(b[:len])) + cmp = Compare(unsafeString(a[:len]), unsafeString(b[:len])) if cmp != -1 { t.Errorf(`CompareBbigger(%d,%d) = %d`, len, k, cmp) } diff --git a/src/strings/example_test.go b/src/strings/example_test.go index 607e4a0a70e7a..e31054a4e0b31 100644 --- a/src/strings/example_test.go +++ b/src/strings/example_test.go @@ -205,6 +205,12 @@ func ExampleReplace() { // moo moo moo } +func ExampleReplaceAll() { + fmt.Println(strings.ReplaceAll("oink oink oink", "oink", "moo")) + // Output: + // moo moo moo +} + func ExampleSplit() { fmt.Printf("%q\n", strings.Split("a,b,c", ",")) fmt.Printf("%q\n", strings.Split("a man a plan a canal panama", "a ")) diff --git a/src/strings/export_test.go b/src/strings/export_test.go index 17c806aa56373..b39cee6b1ded1 100644 --- a/src/strings/export_test.go +++ b/src/strings/export_test.go @@ -5,10 +5,12 @@ package strings func (r *Replacer) 
Replacer() interface{} { + r.once.Do(r.buildOnce) return r.r } func (r *Replacer) PrintTrie() string { + r.once.Do(r.buildOnce) gen := r.r.(*genericReplacer) return gen.printNode(&gen.root, 0) } diff --git a/src/strings/reader.go b/src/strings/reader.go index 6c1a5064c0d5e..eb2fa1164c0fc 100644 --- a/src/strings/reader.go +++ b/src/strings/reader.go @@ -13,6 +13,7 @@ import ( // A Reader implements the io.Reader, io.ReaderAt, io.Seeker, io.WriterTo, // io.ByteScanner, and io.RuneScanner interfaces by reading // from a string. +// The zero value for Reader operates like a Reader of an empty string. type Reader struct { s string i int64 // current reading index @@ -70,10 +71,10 @@ func (r *Reader) ReadByte() (byte, error) { } func (r *Reader) UnreadByte() error { - r.prevRune = -1 if r.i <= 0 { return errors.New("strings.Reader.UnreadByte: at beginning of string") } + r.prevRune = -1 r.i-- return nil } @@ -94,6 +95,9 @@ func (r *Reader) ReadRune() (ch rune, size int, err error) { } func (r *Reader) UnreadRune() error { + if r.i <= 0 { + return errors.New("strings.Reader.UnreadRune: at beginning of string") + } if r.prevRune < 0 { return errors.New("strings.Reader.UnreadRune: previous operation was not ReadRune") } diff --git a/src/strings/reader_test.go b/src/strings/reader_test.go index bf40eb1a31eb4..a4c211d699f97 100644 --- a/src/strings/reader_test.go +++ b/src/strings/reader_test.go @@ -190,3 +190,45 @@ func TestReaderReset(t *testing.T) { t.Errorf("ReadAll: got %q, want %q", got, want) } } + +func TestReaderZero(t *testing.T) { + if l := (&strings.Reader{}).Len(); l != 0 { + t.Errorf("Len: got %d, want 0", l) + } + + if n, err := (&strings.Reader{}).Read(nil); n != 0 || err != io.EOF { + t.Errorf("Read: got %d, %v; want 0, io.EOF", n, err) + } + + if n, err := (&strings.Reader{}).ReadAt(nil, 11); n != 0 || err != io.EOF { + t.Errorf("ReadAt: got %d, %v; want 0, io.EOF", n, err) + } + + if b, err := (&strings.Reader{}).ReadByte(); b != 0 || err != io.EOF { + 
t.Errorf("ReadByte: got %d, %v; want 0, io.EOF", b, err) + } + + if ch, size, err := (&strings.Reader{}).ReadRune(); ch != 0 || size != 0 || err != io.EOF { + t.Errorf("ReadRune: got %d, %d, %v; want 0, 0, io.EOF", ch, size, err) + } + + if offset, err := (&strings.Reader{}).Seek(11, io.SeekStart); offset != 11 || err != nil { + t.Errorf("Seek: got %d, %v; want 11, nil", offset, err) + } + + if s := (&strings.Reader{}).Size(); s != 0 { + t.Errorf("Size: got %d, want 0", s) + } + + if (&strings.Reader{}).UnreadByte() == nil { + t.Errorf("UnreadByte: got nil, want error") + } + + if (&strings.Reader{}).UnreadRune() == nil { + t.Errorf("UnreadRune: got nil, want error") + } + + if n, err := (&strings.Reader{}).WriteTo(ioutil.Discard); n != 0 || err != nil { + t.Errorf("WriteTo: got %d, %v; want 0, nil", n, err) + } +} diff --git a/src/strings/replace.go b/src/strings/replace.go index 58a11a63dbeb4..ace0b8d646e38 100644 --- a/src/strings/replace.go +++ b/src/strings/replace.go @@ -4,12 +4,17 @@ package strings -import "io" +import ( + "io" + "sync" +) // Replacer replaces a list of strings with replacements. // It is safe for concurrent use by multiple goroutines. type Replacer struct { - r replacer + once sync.Once // guards buildOnce method + r replacer + oldnew []string } // replacer is the interface that a replacement algorithm needs to implement. 
@@ -25,15 +30,24 @@ func NewReplacer(oldnew ...string) *Replacer { if len(oldnew)%2 == 1 { panic("strings.NewReplacer: odd argument count") } + return &Replacer{oldnew: append([]string(nil), oldnew...)} +} + +func (r *Replacer) buildOnce() { + r.r = r.build() + r.oldnew = nil +} +func (b *Replacer) build() replacer { + oldnew := b.oldnew if len(oldnew) == 2 && len(oldnew[0]) > 1 { - return &Replacer{r: makeSingleStringReplacer(oldnew[0], oldnew[1])} + return makeSingleStringReplacer(oldnew[0], oldnew[1]) } allNewBytes := true for i := 0; i < len(oldnew); i += 2 { if len(oldnew[i]) != 1 { - return &Replacer{r: makeGenericReplacer(oldnew)} + return makeGenericReplacer(oldnew) } if len(oldnew[i+1]) != 1 { allNewBytes = false @@ -52,7 +66,7 @@ func NewReplacer(oldnew ...string) *Replacer { n := oldnew[i+1][0] r[o] = n } - return &Replacer{r: &r} + return &r } r := byteStringReplacer{toReplace: make([]string, 0, len(oldnew)/2)} @@ -71,16 +85,18 @@ func NewReplacer(oldnew ...string) *Replacer { r.replacements[o] = []byte(n) } - return &Replacer{r: &r} + return &r } // Replace returns a copy of s with all replacements performed. func (r *Replacer) Replace(s string) string { + r.once.Do(r.buildOnce) return r.r.Replace(s) } // WriteString writes s to w with all replacements performed. 
func (r *Replacer) WriteString(w io.Writer, s string) (n int, err error) { + r.once.Do(r.buildOnce) return r.r.WriteString(w, s) } @@ -292,10 +308,6 @@ func (w *appendSliceWriter) WriteString(s string) (int, error) { return len(s), nil } -type stringWriterIface interface { - WriteString(string) (int, error) -} - type stringWriter struct { w io.Writer } @@ -304,8 +316,8 @@ func (w stringWriter) WriteString(s string) (int, error) { return w.w.Write([]byte(s)) } -func getStringWriter(w io.Writer) stringWriterIface { - sw, ok := w.(stringWriterIface) +func getStringWriter(w io.Writer) io.StringWriter { + sw, ok := w.(io.StringWriter) if !ok { sw = stringWriter{w} } @@ -447,7 +459,7 @@ func (r *byteReplacer) WriteString(w io.Writer, s string) (n int, err error) { buf := make([]byte, bufsize) for len(s) > 0 { - ncopy := copy(buf, s[:]) + ncopy := copy(buf, s) s = s[ncopy:] for i, b := range buf[:ncopy] { buf[i] = r[b] diff --git a/src/strings/strings.go b/src/strings/strings.go index 20868be269461..a98f5d8ff1394 100644 --- a/src/strings/strings.go +++ b/src/strings/strings.go @@ -146,6 +146,11 @@ func LastIndex(s, substr string) int { return -1 } +// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s. +func IndexByte(s string, c byte) int { + return bytealg.IndexByteString(s, c) +} + // IndexRune returns the index of the first instance of the Unicode code point // r, or -1 if rune is not present in s. // If r is utf8.RuneError, it returns the first instance of any @@ -423,27 +428,20 @@ func Join(a []string, sep string) string { return "" case 1: return a[0] - case 2: - // Special case for common small values. - // Remove if golang.org/issue/6714 is fixed - return a[0] + sep + a[1] - case 3: - // Special case for common small values. 
- // Remove if golang.org/issue/6714 is fixed - return a[0] + sep + a[1] + sep + a[2] } n := len(sep) * (len(a) - 1) for i := 0; i < len(a); i++ { n += len(a[i]) } - b := make([]byte, n) - bp := copy(b, a[0]) + var b Builder + b.Grow(n) + b.WriteString(a[0]) for _, s := range a[1:] { - bp += copy(b[bp:], sep) - bp += copy(b[bp:], s) + b.WriteString(sep) + b.WriteString(s) } - return string(b) + return b.String() } // HasPrefix tests whether the string s begins with prefix. @@ -466,68 +464,56 @@ func Map(mapping func(rune) rune, s string) string { // The output buffer b is initialized on demand, the first // time a character differs. - var b []byte - // nbytes is the number of bytes encoded in b. - var nbytes int + var b Builder for i, c := range s { r := mapping(c) - if r == c { + if r == c && c != utf8.RuneError { continue } - b = make([]byte, len(s)+utf8.UTFMax) - nbytes = copy(b, s[:i]) - if r >= 0 { - if r < utf8.RuneSelf { - b[nbytes] = byte(r) - nbytes++ - } else { - nbytes += utf8.EncodeRune(b[nbytes:], r) + var width int + if c == utf8.RuneError { + c, width = utf8.DecodeRuneInString(s[i:]) + if width != 1 && r == c { + continue } + } else { + width = utf8.RuneLen(c) } - if c == utf8.RuneError { - // RuneError is the result of either decoding - // an invalid sequence or '\uFFFD'. Determine - // the correct number of bytes we need to advance. - _, w := utf8.DecodeRuneInString(s[i:]) - i += w - } else { - i += utf8.RuneLen(c) + b.Grow(len(s) + utf8.UTFMax) + b.WriteString(s[:i]) + if r >= 0 { + b.WriteRune(r) } - s = s[i:] + s = s[i+width:] break } - if b == nil { + // Fast path for unchanged input + if b.Cap() == 0 { // didn't call b.Grow above return s } for _, c := range s { r := mapping(c) - // common case - if (0 <= r && r < utf8.RuneSelf) && nbytes < len(b) { - b[nbytes] = byte(r) - nbytes++ - continue - } - - // b is not big enough or r is not a ASCII rune. if r >= 0 { - if nbytes+utf8.UTFMax >= len(b) { - // Grow the buffer. 
- nb := make([]byte, 2*len(b)) - copy(nb, b[:nbytes]) - b = nb + // common case + // Due to inlining, it is more performant to determine if WriteByte should be + // invoked rather than always call WriteRune + if r < utf8.RuneSelf { + b.WriteByte(byte(r)) + } else { + // r is not a ASCII rune. + b.WriteRune(r) } - nbytes += utf8.EncodeRune(b[nbytes:], r) } } - return string(b[:nbytes]) + return b.String() } // Repeat returns a new string consisting of count copies of the string s. @@ -535,23 +521,33 @@ func Map(mapping func(rune) rune, s string) string { // It panics if count is negative or if // the result of (len(s) * count) overflows. func Repeat(s string, count int) string { + if count == 0 { + return "" + } + // Since we cannot return an error on overflow, // we should panic if the repeat will generate // an overflow. // See Issue golang.org/issue/16237 if count < 0 { panic("strings: negative Repeat count") - } else if count > 0 && len(s)*count/count != len(s) { + } else if len(s)*count/count != len(s) { panic("strings: Repeat count causes overflow") } - b := make([]byte, len(s)*count) - bp := copy(b, s) - for bp < len(b) { - copy(b[bp:], b[:bp]) - bp *= 2 + n := len(s) * count + var b Builder + b.Grow(n) + b.WriteString(s) + for b.Len() < n { + if b.Len() <= n/2 { + b.WriteString(b.String()) + } else { + b.WriteString(b.String()[:n-b.Len()]) + break + } } - return string(b) + return b.String() } // ToUpper returns a copy of the string s with all Unicode letters mapped to their upper case. 
@@ -570,15 +566,16 @@ func ToUpper(s string) string { if !hasLower { return s } - b := make([]byte, len(s)) + var b Builder + b.Grow(len(s)) for i := 0; i < len(s); i++ { c := s[i] if c >= 'a' && c <= 'z' { c -= 'a' - 'A' } - b[i] = c + b.WriteByte(c) } - return string(b) + return b.String() } return Map(unicode.ToUpper, s) } @@ -599,15 +596,16 @@ func ToLower(s string) string { if !hasUpper { return s } - b := make([]byte, len(s)) + var b Builder + b.Grow(len(s)) for i := 0; i < len(s); i++ { c := s[i] if c >= 'A' && c <= 'Z' { c += 'a' - 'A' } - b[i] = c + b.WriteByte(c) } - return string(b) + return b.String() } return Map(unicode.ToLower, s) } @@ -616,21 +614,21 @@ func ToLower(s string) string { func ToTitle(s string) string { return Map(unicode.ToTitle, s) } // ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their -// upper case, giving priority to the special casing rules. +// upper case using the case mapping specified by c. func ToUpperSpecial(c unicode.SpecialCase, s string) string { - return Map(func(r rune) rune { return c.ToUpper(r) }, s) + return Map(c.ToUpper, s) } // ToLowerSpecial returns a copy of the string s with all Unicode letters mapped to their -// lower case, giving priority to the special casing rules. +// lower case using the case mapping specified by c. func ToLowerSpecial(c unicode.SpecialCase, s string) string { - return Map(func(r rune) rune { return c.ToLower(r) }, s) + return Map(c.ToLower, s) } // ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their // title case, giving priority to the special casing rules. func ToTitleSpecial(c unicode.SpecialCase, s string) string { - return Map(func(r rune) rune { return c.ToTitle(r) }, s) + return Map(c.ToTitle, s) } // isSeparator reports whether the rune could mark a word boundary. 
@@ -881,6 +879,15 @@ func Replace(s, old, new string, n int) string { return string(t[0:w]) } +// ReplaceAll returns a copy of the string s with all +// non-overlapping instances of old replaced by new. +// If old is empty, it matches at the beginning of the string +// and after each UTF-8 sequence, yielding up to k+1 replacements +// for a k-rune string. +func ReplaceAll(s, old, new string) string { + return Replace(s, old, new, -1) +} + // EqualFold reports whether s and t, interpreted as UTF-8 strings, // are equal under Unicode case-folding. func EqualFold(s, t string) bool { @@ -956,21 +963,22 @@ func Index(s, substr string) int { if len(s) <= bytealg.MaxBruteForce { return bytealg.IndexString(s, substr) } - c := substr[0] + c0 := substr[0] + c1 := substr[1] i := 0 - t := s[:len(s)-n+1] + t := len(s) - n + 1 fails := 0 - for i < len(t) { - if t[i] != c { + for i < t { + if s[i] != c0 { // IndexByte is faster than bytealg.IndexString, so use it as long as // we're not getting lots of false positives. - o := IndexByte(t[i:], c) + o := IndexByte(s[i:t], c0) if o < 0 { return -1 } i += o } - if s[i:i+n] == substr { + if s[i+1] == c1 && s[i:i+n] == substr { return i } fails++ @@ -986,24 +994,25 @@ func Index(s, substr string) int { } return -1 } - c := substr[0] + c0 := substr[0] + c1 := substr[1] i := 0 - t := s[:len(s)-n+1] + t := len(s) - n + 1 fails := 0 - for i < len(t) { - if t[i] != c { - o := IndexByte(t[i:], c) + for i < t { + if s[i] != c0 { + o := IndexByte(s[i:t], c0) if o < 0 { return -1 } i += o } - if s[i:i+n] == substr { + if s[i+1] == c1 && s[i:i+n] == substr { return i } i++ fails++ - if fails >= 4+i>>4 && i < len(t) { + if fails >= 4+i>>4 && i < t { // See comment in ../bytes/bytes_generic.go. 
j := indexRabinKarp(s[i:], substr) if j < 0 { @@ -1036,5 +1045,4 @@ func indexRabinKarp(s, substr string) int { } } return -1 - } diff --git a/src/strings/strings_decl.go b/src/strings/strings_decl.go deleted file mode 100644 index 98194445e1cfc..0000000000000 --- a/src/strings/strings_decl.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package strings - -// IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s. -func IndexByte(s string, c byte) int // in internal/bytealg diff --git a/src/strings/strings_test.go b/src/strings/strings_test.go index 78bc573e5f0bf..eee2dd55dfd09 100644 --- a/src/strings/strings_test.go +++ b/src/strings/strings_test.go @@ -10,6 +10,7 @@ import ( "io" "math/rand" "reflect" + "strconv" . "strings" "testing" "unicode" @@ -645,10 +646,10 @@ func TestMap(t *testing.T) { if unicode.Is(unicode.Latin, r) { return r } - return '?' + return utf8.RuneError } m = Map(replaceNotLatin, "Hello\255World") - expect = "Hello?World" + expect = "Hello\uFFFDWorld" if m != expect { t.Errorf("replace invalid sequence: expected %q got %q", expect, m) } @@ -673,6 +674,19 @@ func TestMap(t *testing.T) { if m != s { t.Errorf("encoding not handled correctly: expected %q got %q", s, m) } + + // 9. 
Check mapping occurs in the front, middle and back + trimSpaces := func(r rune) rune { + if unicode.IsSpace(r) { + return -1 + } + return r + } + m = Map(trimSpaces, " abc 123 ") + expect = "abc123" + if m != expect { + t.Errorf("trimSpaces: expected %q got %q", expect, m) + } } func TestToUpper(t *testing.T) { runStringTests(t, ToUpper, "ToUpper", upperTests) } @@ -1229,6 +1243,12 @@ func TestReplace(t *testing.T) { if s := Replace(tt.in, tt.old, tt.new, tt.n); s != tt.out { t.Errorf("Replace(%q, %q, %q, %d) = %q, want %q", tt.in, tt.old, tt.new, tt.n, s, tt.out) } + if tt.n == -1 { + s := ReplaceAll(tt.in, tt.old, tt.new) + if s != tt.out { + t.Errorf("ReplaceAll(%q, %q, %q) = %q, want %q", tt.in, tt.old, tt.new, s, tt.out) + } + } } } @@ -1647,8 +1667,15 @@ func BenchmarkSplitNMultiByteSeparator(b *testing.B) { } func BenchmarkRepeat(b *testing.B) { - for i := 0; i < b.N; i++ { - Repeat("-", 80) + s := "0123456789" + for _, n := range []int{5, 10} { + for _, c := range []int{1, 2, 6} { + b.Run(fmt.Sprintf("%dx%d", n, c), func(b *testing.B) { + for i := 0; i < b.N; i++ { + Repeat(s[:n], c) + } + }) + } } } @@ -1691,3 +1718,16 @@ func BenchmarkIndexPeriodic(b *testing.B) { }) } } + +func BenchmarkJoin(b *testing.B) { + vals := []string{"red", "yellow", "pink", "green", "purple", "orange", "blue"} + for l := 0; l <= len(vals); l++ { + b.Run(strconv.Itoa(l), func(b *testing.B) { + b.ReportAllocs() + vals := vals[:l] + for i := 0; i < b.N; i++ { + Join(vals, " and ") + } + }) + } +} diff --git a/src/sync/atomic/doc.go b/src/sync/atomic/doc.go index 7c007d7a150c6..108b76b804903 100644 --- a/src/sync/atomic/doc.go +++ b/src/sync/atomic/doc.go @@ -47,7 +47,8 @@ import ( // // On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core. 
// -// On both ARM and x86-32, it is the caller's responsibility to arrange for 64-bit +// On ARM, x86-32, and 32-bit MIPS, +// it is the caller's responsibility to arrange for 64-bit // alignment of 64-bit words accessed atomically. The first word in a // variable or in an allocated struct, array, or slice can be relied upon to be // 64-bit aligned. diff --git a/src/sync/map.go b/src/sync/map.go index c4a0dc4194ab6..c6aa308856584 100644 --- a/src/sync/map.go +++ b/src/sync/map.go @@ -167,18 +167,14 @@ func (m *Map) Store(key, value interface{}) { // If the entry is expunged, tryStore returns false and leaves the entry // unchanged. func (e *entry) tryStore(i *interface{}) bool { - p := atomic.LoadPointer(&e.p) - if p == expunged { - return false - } for { - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - p = atomic.LoadPointer(&e.p) + p := atomic.LoadPointer(&e.p) if p == expunged { return false } + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } } } diff --git a/src/sync/runtime.go b/src/sync/runtime.go index be16bcc8f7b15..b6b9e480a4eab 100644 --- a/src/sync/runtime.go +++ b/src/sync/runtime.go @@ -54,7 +54,7 @@ func init() { } // Active spinning runtime support. -// runtime_canSpin returns true is spinning makes sense at the moment. +// runtime_canSpin reports whether spinning makes sense at the moment. func runtime_canSpin(i int) bool // runtime_doSpin does active spinning. diff --git a/src/syscall/asm_aix_ppc64.s b/src/syscall/asm_aix_ppc64.s new file mode 100644 index 0000000000000..7eb9ffb7e7b36 --- /dev/null +++ b/src/syscall/asm_aix_ppc64.s @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// +// System calls for aix/ppc64 are implemented in ../runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0 + JMP runtime·syscall_syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0 + JMP runtime·syscall_rawSyscall6(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0 + JMP runtime·syscall_RawSyscall(SB) + +TEXT ·Syscall(SB),NOSPLIT,$0 + JMP runtime·syscall_Syscall(SB) diff --git a/src/syscall/asm_nacl_386.s b/src/syscall/asm_nacl_386.s index 9d1e541c7c860..b9891711200b5 100644 --- a/src/syscall/asm_nacl_386.s +++ b/src/syscall/asm_nacl_386.s @@ -42,4 +42,4 @@ ok: MOVL DX, r2+20(FP) MOVL $0, err+24(FP) CALL runtime·exitsyscall(SB) - RET + RET diff --git a/src/syscall/asm_nacl_amd64p32.s b/src/syscall/asm_nacl_amd64p32.s index b8c097b53900b..816f7dccfbf98 100644 --- a/src/syscall/asm_nacl_amd64p32.s +++ b/src/syscall/asm_nacl_amd64p32.s @@ -39,4 +39,4 @@ ok: MOVL DX, r2+20(FP) MOVL $0, err+24(FP) CALL runtime·exitsyscall(SB) - RET + RET diff --git a/src/syscall/asm_nacl_arm.s b/src/syscall/asm_nacl_arm.s index 3e7df1aadf758..6092afd9e6cba 100644 --- a/src/syscall/asm_nacl_arm.s +++ b/src/syscall/asm_nacl_arm.s @@ -41,4 +41,4 @@ ok: MOVW $0, R2 MOVW R2, err+24(FP) BL runtime·exitsyscall(SB) - RET + RET diff --git a/src/syscall/asm_plan9_386.s b/src/syscall/asm_plan9_386.s index 65ae6c77fb8bc..7a2c2daaaaecc 100644 --- a/src/syscall/asm_plan9_386.s +++ b/src/syscall/asm_plan9_386.s @@ -45,11 +45,11 @@ TEXT ·Syscall(SB),NOSPLIT,$148-32 CALL runtime·gostring(SB) LEAL str-144(SP), SI JMP copyresult3 - + ok3: CALL runtime·exitsyscall(SB) LEAL ·emptystring(SB), SI - + copyresult3: LEAL err+24(FP), DI @@ -78,7 +78,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$148-44 MOVL $0, r2+32(FP) CMPL AX, $-1 JNE ok4 - + LEAL errbuf-128(SP), AX MOVL AX, sysargs-144(SP) MOVL $128, sysargs1-140(SP) @@ -90,11 +90,11 @@ TEXT ·Syscall6(SB),NOSPLIT,$148-44 CALL runtime·gostring(SB) LEAL str-144(SP), SI JMP copyresult4 - + ok4: CALL runtime·exitsyscall(SB) LEAL ·emptystring(SB), SI - + 
copyresult4: LEAL err+36(FP), DI @@ -144,7 +144,7 @@ TEXT ·seek(SB),NOSPLIT,$24-36 NO_LOCAL_POINTERS LEAL newoffset+20(FP), AX MOVL AX, placeholder+0(FP) - + // copy args down LEAL placeholder+0(FP), SI LEAL sysargs-20(SP), DI @@ -156,19 +156,19 @@ TEXT ·seek(SB),NOSPLIT,$24-36 MOVSL MOVL $SYS_SEEK, AX // syscall entry INT $64 - + CMPL AX, $-1 JNE ok6 MOVL AX, newoffset_lo+20(FP) MOVL AX, newoffset_hi+24(FP) - + CALL syscall·errstr(SB) MOVL SP, SI JMP copyresult6 - + ok6: LEAL ·emptystring(SB), SI - + copyresult6: LEAL err+28(FP), DI diff --git a/src/syscall/asm_plan9_amd64.s b/src/syscall/asm_plan9_amd64.s index bba4012e5cc6b..d5c9f6c63f1e2 100644 --- a/src/syscall/asm_plan9_amd64.s +++ b/src/syscall/asm_plan9_amd64.s @@ -44,11 +44,11 @@ TEXT ·Syscall(SB),NOSPLIT,$168-64 CALL runtime·gostring(SB) LEAQ str-160(SP), SI JMP copyresult3 - + ok3: CALL runtime·exitsyscall(SB) LEAQ ·emptystring(SB), SI - + copyresult3: LEAQ err+48(FP), DI @@ -77,7 +77,7 @@ TEXT ·Syscall6(SB),NOSPLIT,$168-88 MOVQ $0, r2+64(FP) CMPL AX, $-1 JNE ok4 - + LEAQ errbuf-128(SP), AX MOVQ AX, sysargs-160(SP) MOVQ $128, sysargs1-152(SP) @@ -89,11 +89,11 @@ TEXT ·Syscall6(SB),NOSPLIT,$168-88 CALL runtime·gostring(SB) LEAQ str-160(SP), SI JMP copyresult4 - + ok4: CALL runtime·exitsyscall(SB) LEAQ ·emptystring(SB), SI - + copyresult4: LEAQ err+72(FP), DI @@ -143,7 +143,7 @@ TEXT ·seek(SB),NOSPLIT,$48-56 NO_LOCAL_POINTERS LEAQ newoffset+32(FP), AX MOVQ AX, placeholder+0(FP) - + // copy args down LEAQ placeholder+0(FP), SI LEAQ sysargs-40(SP), DI @@ -155,18 +155,18 @@ TEXT ·seek(SB),NOSPLIT,$48-56 MOVSQ MOVQ $SYS_SEEK, BP // syscall entry SYSCALL - + CMPL AX, $-1 JNE ok6 MOVQ AX, newoffset+32(FP) - + CALL syscall·errstr(SB) MOVQ SP, SI JMP copyresult6 - + ok6: LEAQ ·emptystring(SB), SI - + copyresult6: LEAQ err+40(FP), DI diff --git a/src/syscall/asm_solaris_amd64.s b/src/syscall/asm_solaris_amd64.s index 6fa041866d7f6..c61e04a42fab4 100644 --- a/src/syscall/asm_solaris_amd64.s +++ 
b/src/syscall/asm_solaris_amd64.s @@ -23,6 +23,10 @@ TEXT ·chroot1(SB),NOSPLIT,$0 TEXT ·close(SB),NOSPLIT,$0 JMP runtime·syscall_close(SB) +TEXT ·dup2child(SB),NOSPLIT,$0 + JMP runtime·syscall_dup2(SB) + RET + TEXT ·execve(SB),NOSPLIT,$0 JMP runtime·syscall_execve(SB) diff --git a/src/syscall/asm_windows.s b/src/syscall/asm_windows.s new file mode 100644 index 0000000000000..e96591433048e --- /dev/null +++ b/src/syscall/asm_windows.s @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// abi0Syms is a dummy symbol that creates ABI0 wrappers for Go +// functions called from assembly in other packages. +TEXT abi0Syms<>(SB),NOSPLIT,$0-0 + CALL ·getprocaddress(SB) + CALL ·loadlibrary(SB) diff --git a/src/syscall/bpf_bsd.go b/src/syscall/bpf_bsd.go index 8b587559edbcb..f67ee6064bf73 100644 --- a/src/syscall/bpf_bsd.go +++ b/src/syscall/bpf_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build dragonfly freebsd netbsd openbsd // Berkeley packet filter for BSD variants diff --git a/src/syscall/bpf_darwin.go b/src/syscall/bpf_darwin.go new file mode 100644 index 0000000000000..fb86049ae92de --- /dev/null +++ b/src/syscall/bpf_darwin.go @@ -0,0 +1,185 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Berkeley packet filter for Darwin + +package syscall + +import ( + "unsafe" +) + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfStmt(code, k int) *BpfInsn { + return &BpfInsn{Code: uint16(code), K: uint32(k)} +} + +// Deprecated: Use golang.org/x/net/bpf instead. 
+func BpfJump(code, k, jt, jf int) *BpfInsn { + return &BpfInsn{Code: uint16(code), Jt: uint8(jt), Jf: uint8(jf), K: uint32(k)} +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfBuflen(fd int) (int, error) { + var l int + err := ioctlPtr(fd, BIOCGBLEN, unsafe.Pointer(&l)) + if err != nil { + return 0, err + } + return l, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfBuflen(fd, l int) (int, error) { + err := ioctlPtr(fd, BIOCSBLEN, unsafe.Pointer(&l)) + if err != nil { + return 0, err + } + return l, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfDatalink(fd int) (int, error) { + var t int + err := ioctlPtr(fd, BIOCGDLT, unsafe.Pointer(&t)) + if err != nil { + return 0, err + } + return t, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfDatalink(fd, t int) (int, error) { + err := ioctlPtr(fd, BIOCSDLT, unsafe.Pointer(&t)) + if err != nil { + return 0, err + } + return t, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfPromisc(fd, m int) error { + err := ioctlPtr(fd, BIOCPROMISC, unsafe.Pointer(&m)) + if err != nil { + return err + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func FlushBpf(fd int) error { + err := ioctlPtr(fd, BIOCFLUSH, nil) + if err != nil { + return err + } + return nil +} + +type ivalue struct { + name [IFNAMSIZ]byte + value int16 +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfInterface(fd int, name string) (string, error) { + var iv ivalue + err := ioctlPtr(fd, BIOCGETIF, unsafe.Pointer(&iv)) + if err != nil { + return "", err + } + return name, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfInterface(fd int, name string) error { + var iv ivalue + copy(iv.name[:], []byte(name)) + err := ioctlPtr(fd, BIOCSETIF, unsafe.Pointer(&iv)) + if err != nil { + return err + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. 
+func BpfTimeout(fd int) (*Timeval, error) { + var tv Timeval + err := ioctlPtr(fd, BIOCGRTIMEOUT, unsafe.Pointer(&tv)) + if err != nil { + return nil, err + } + return &tv, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfTimeout(fd int, tv *Timeval) error { + err := ioctlPtr(fd, BIOCSRTIMEOUT, unsafe.Pointer(tv)) + if err != nil { + return err + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfStats(fd int) (*BpfStat, error) { + var s BpfStat + err := ioctlPtr(fd, BIOCGSTATS, unsafe.Pointer(&s)) + if err != nil { + return nil, err + } + return &s, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpfImmediate(fd, m int) error { + err := ioctlPtr(fd, BIOCIMMEDIATE, unsafe.Pointer(&m)) + if err != nil { + return err + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func SetBpf(fd int, i []BpfInsn) error { + var p BpfProgram + p.Len = uint32(len(i)) + p.Insns = (*BpfInsn)(unsafe.Pointer(&i[0])) + err := ioctlPtr(fd, BIOCSETF, unsafe.Pointer(&p)) + if err != nil { + return err + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func CheckBpfVersion(fd int) error { + var v BpfVersion + err := ioctlPtr(fd, BIOCVERSION, unsafe.Pointer(&v)) + if err != nil { + return err + } + if v.Major != BPF_MAJOR_VERSION || v.Minor != BPF_MINOR_VERSION { + return EINVAL + } + return nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. +func BpfHeadercmpl(fd int) (int, error) { + var f int + err := ioctlPtr(fd, BIOCGHDRCMPLT, unsafe.Pointer(&f)) + if err != nil { + return 0, err + } + return f, nil +} + +// Deprecated: Use golang.org/x/net/bpf instead. 
+func SetBpfHeadercmpl(fd, f int) error { + err := ioctlPtr(fd, BIOCSHDRCMPLT, unsafe.Pointer(&f)) + if err != nil { + return err + } + return nil +} diff --git a/src/syscall/dirent.go b/src/syscall/dirent.go index 26cbbbce2ad54..5c7af42b0c985 100644 --- a/src/syscall/dirent.go +++ b/src/syscall/dirent.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package syscall diff --git a/src/syscall/dirent_bsd_test.go b/src/syscall/dirent_bsd_test.go new file mode 100644 index 0000000000000..e5f5eb3f8aa1d --- /dev/null +++ b/src/syscall/dirent_bsd_test.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,!arm,!arm64 dragonfly freebsd netbsd openbsd + +package syscall_test + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "syscall" + "testing" +) + +func TestDirent(t *testing.T) { + const ( + direntBufSize = 2048 + filenameMinSize = 11 + ) + + d, err := ioutil.TempDir("", "dirent-test") + if err != nil { + t.Fatalf("tempdir: %v", err) + } + defer os.RemoveAll(d) + t.Logf("tmpdir: %s", d) + + for i, c := range []byte("0123456789") { + name := string(bytes.Repeat([]byte{c}, filenameMinSize+i)) + err = ioutil.WriteFile(filepath.Join(d, name), nil, 0644) + if err != nil { + t.Fatalf("writefile: %v", err) + } + } + + buf := bytes.Repeat([]byte("DEADBEAF"), direntBufSize/8) + fd, err := syscall.Open(d, syscall.O_RDONLY, 0) + defer syscall.Close(fd) + if err != nil { + t.Fatalf("syscall.open: %v", err) + } + n, err := syscall.ReadDirent(fd, buf) + if err != nil { + t.Fatalf("syscall.readdir: %v", err) + } + buf = buf[:n] + + names := make([]string, 0, 10) + for 
len(buf) > 0 { + var bc int + bc, _, names = syscall.ParseDirent(buf, -1, names) + buf = buf[bc:] + } + + sort.Strings(names) + t.Logf("names: %q", names) + + if len(names) != 10 { + t.Errorf("got %d names; expected 10", len(names)) + } + for i, name := range names { + ord, err := strconv.Atoi(name[:1]) + if err != nil { + t.Fatalf("names[%d] is non-integer %q: %v", i, names[i], err) + } + if expected := string(strings.Repeat(name[:1], filenameMinSize+ord)); name != expected { + t.Errorf("names[%d] is %q (len %d); expected %q (len %d)", i, name, len(name), expected, len(expected)) + } + } +} diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go index 2ee85a0d77da1..c57cd34f8225b 100644 --- a/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go @@ -26,6 +26,7 @@ func Syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err func Syscall9(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) func Syscall12(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2 uintptr, err Errno) func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2 uintptr, err Errno) +func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) func loadlibrary(filename *uint16) (handle uintptr, err Errno) func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno) func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err Errno) @@ -131,7 +132,7 @@ func (p *Proc) Addr() uintptr { //go:uintptrescapes -// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// Call executes procedure p with arguments a. It will panic if more than 18 arguments // are supplied. // // The returned error is always non-nil, constructed from the result of GetLastError. 
@@ -172,6 +173,12 @@ func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { return Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) case 15: return Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + case 16: + return Syscall18(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], 0, 0) + case 17: + return Syscall18(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], a[16], 0) + case 18: + return Syscall18(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15], a[16], a[17]) default: panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") } diff --git a/src/syscall/env_unix.go b/src/syscall/env_unix.go index 1ebc0b17f2be9..0b6b711a8ffeb 100644 --- a/src/syscall/env_unix.go +++ b/src/syscall/env_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris // Unix environment variables. diff --git a/src/syscall/exec_aix_test.go b/src/syscall/exec_aix_test.go new file mode 100644 index 0000000000000..22b752cf271d4 --- /dev/null +++ b/src/syscall/exec_aix_test.go @@ -0,0 +1,37 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build aix + +package syscall + +import "unsafe" + +//go:cgo_import_dynamic libc_Getpgid getpgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getpgrp getpgrp "libc.a/shr_64.o" + +//go:linkname libc_Getpgid libc_Getpgid +//go:linkname libc_Getpgrp libc_Getpgrp + +var ( + libc_Getpgid, + libc_Getpgrp libcFunc +) + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Getpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + pgid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +func Getpgrp() (pgrp int) { + r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getpgrp)), 0, 0, 0, 0, 0, 0, 0) + pgrp = int(r0) + return +} + +var Ioctl = ioctl diff --git a/src/syscall/exec_bsd.go b/src/syscall/exec_bsd.go index 17ca6f06cf1f5..30b88eba7a5b3 100644 --- a/src/syscall/exec_bsd.go +++ b/src/syscall/exec_bsd.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd netbsd openbsd +// +build dragonfly freebsd netbsd openbsd package syscall import ( - "runtime" "unsafe" ) @@ -43,7 +42,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr // Declare all variables at top in case any // declarations require heap allocation (e.g., err1). var ( - r1, r2 uintptr + r1 uintptr err1 Errno nextfd int i int @@ -62,25 +61,15 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr } nextfd++ - darwin := runtime.GOOS == "darwin" - // About to call fork. // No more allocation or calls of non-assembly functions. runtime_BeforeFork() - r1, r2, err1 = RawSyscall(SYS_FORK, 0, 0, 0) + r1, _, err1 = RawSyscall(SYS_FORK, 0, 0, 0) if err1 != 0 { runtime_AfterFork() return 0, err1 } - // On Darwin: - // r1 = child pid in both parent and child. - // r2 = 0 in parent, 1 in child. - // Convert to normal Unix r1 = 0 in child. 
- if darwin && r2 == 1 { - r1 = 0 - } - if r1 != 0 { // parent; return PID runtime_AfterFork() diff --git a/src/syscall/exec_darwin.go b/src/syscall/exec_darwin.go new file mode 100644 index 0000000000000..f860f4628ebe2 --- /dev/null +++ b/src/syscall/exec_darwin.go @@ -0,0 +1,248 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +import ( + "unsafe" +) + +type SysProcAttr struct { + Chroot string // Chroot. + Credential *Credential // Credential. + Ptrace bool // Enable tracing. + Setsid bool // Create session. + Setpgid bool // Set process group ID to Pgid, or, if Pgid == 0, to new pid. + Setctty bool // Set controlling terminal to fd Ctty + Noctty bool // Detach fd 0 from controlling terminal + Ctty int // Controlling TTY fd + Foreground bool // Place child's process group in foreground. (Implies Setpgid. Uses Ctty as fd of controlling TTY) + Pgid int // Child's process group ID if Setpgid. +} + +// Implemented in runtime package. +func runtime_BeforeFork() +func runtime_AfterFork() +func runtime_AfterForkInChild() + +// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child. +// If a dup or exec fails, write the errno error to pipe. +// (Pipe is close-on-exec so if exec succeeds, it will be closed.) +// In the child, this function must not acquire any locks, because +// they might have been locked at the time of the fork. This means +// no rescheduling, no malloc calls, and no new stack segments. +// For the same reason compiler does not race instrument it. +// The calls to rawSyscall are okay because they are assembly +// functions that do not grow the stack. +//go:norace +func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) { + // Declare all variables at top in case any + // declarations require heap allocation (e.g., err1). 
+ var ( + r1 uintptr + err1 Errno + nextfd int + i int + ) + + // guard against side effects of shuffling fds below. + // Make sure that nextfd is beyond any currently open files so + // that we can't run the risk of overwriting any of them. + fd := make([]int, len(attr.Files)) + nextfd = len(attr.Files) + for i, ufd := range attr.Files { + if nextfd < int(ufd) { + nextfd = int(ufd) + } + fd[i] = int(ufd) + } + nextfd++ + + // About to call fork. + // No more allocation or calls of non-assembly functions. + runtime_BeforeFork() + r1, _, err1 = rawSyscall(funcPC(libc_fork_trampoline), 0, 0, 0) + if err1 != 0 { + runtime_AfterFork() + return 0, err1 + } + + if r1 != 0 { + // parent; return PID + runtime_AfterFork() + return int(r1), 0 + } + + // Fork succeeded, now in child. + + runtime_AfterForkInChild() + + // Enable tracing if requested. + if sys.Ptrace { + _, _, err1 = rawSyscall(funcPC(libc_ptrace_trampoline), uintptr(PTRACE_TRACEME), 0, 0) + if err1 != 0 { + goto childerror + } + } + + // Session ID + if sys.Setsid { + _, _, err1 = rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) + if err1 != 0 { + goto childerror + } + } + + // Set process group + if sys.Setpgid || sys.Foreground { + // Place child in process group. + _, _, err1 = rawSyscall(funcPC(libc_setpgid_trampoline), 0, uintptr(sys.Pgid), 0) + if err1 != 0 { + goto childerror + } + } + + if sys.Foreground { + pgrp := sys.Pgid + if pgrp == 0 { + r1, _, err1 = rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) + if err1 != 0 { + goto childerror + } + + pgrp = int(r1) + } + + // Place process group in foreground. 
+ _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp))) + if err1 != 0 { + goto childerror + } + } + + // Chroot + if chroot != nil { + _, _, err1 = rawSyscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(chroot)), 0, 0) + if err1 != 0 { + goto childerror + } + } + + // User and groups + if cred := sys.Credential; cred != nil { + ngroups := uintptr(len(cred.Groups)) + groups := uintptr(0) + if ngroups > 0 { + groups = uintptr(unsafe.Pointer(&cred.Groups[0])) + } + if !cred.NoSetGroups { + _, _, err1 = rawSyscall(funcPC(libc_setgroups_trampoline), ngroups, groups, 0) + if err1 != 0 { + goto childerror + } + } + _, _, err1 = rawSyscall(funcPC(libc_setgid_trampoline), uintptr(cred.Gid), 0, 0) + if err1 != 0 { + goto childerror + } + _, _, err1 = rawSyscall(funcPC(libc_setuid_trampoline), uintptr(cred.Uid), 0, 0) + if err1 != 0 { + goto childerror + } + } + + // Chdir + if dir != nil { + _, _, err1 = rawSyscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(dir)), 0, 0) + if err1 != 0 { + goto childerror + } + } + + // Pass 1: look for fd[i] < i and move those up above len(fd) + // so that pass 2 won't stomp on an fd it needs later. + if pipe < nextfd { + _, _, err1 = rawSyscall(funcPC(libc_dup2_trampoline), uintptr(pipe), uintptr(nextfd), 0) + if err1 != 0 { + goto childerror + } + rawSyscall(funcPC(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC) + pipe = nextfd + nextfd++ + } + for i = 0; i < len(fd); i++ { + if fd[i] >= 0 && fd[i] < int(i) { + if nextfd == pipe { // don't stomp on pipe + nextfd++ + } + _, _, err1 = rawSyscall(funcPC(libc_dup2_trampoline), uintptr(fd[i]), uintptr(nextfd), 0) + if err1 != 0 { + goto childerror + } + rawSyscall(funcPC(libc_fcntl_trampoline), uintptr(nextfd), F_SETFD, FD_CLOEXEC) + fd[i] = nextfd + nextfd++ + } + } + + // Pass 2: dup fd[i] down onto i. 
+ for i = 0; i < len(fd); i++ { + if fd[i] == -1 { + rawSyscall(funcPC(libc_close_trampoline), uintptr(i), 0, 0) + continue + } + if fd[i] == int(i) { + // dup2(i, i) won't clear close-on-exec flag on Linux, + // probably not elsewhere either. + _, _, err1 = rawSyscall(funcPC(libc_fcntl_trampoline), uintptr(fd[i]), F_SETFD, 0) + if err1 != 0 { + goto childerror + } + continue + } + // The new fd is created NOT close-on-exec, + // which is exactly what we want. + _, _, err1 = rawSyscall(funcPC(libc_dup2_trampoline), uintptr(fd[i]), uintptr(i), 0) + if err1 != 0 { + goto childerror + } + } + + // By convention, we don't close-on-exec the fds we are + // started with, so if len(fd) < 3, close 0, 1, 2 as needed. + // Programs that know they inherit fds >= 3 will need + // to set them close-on-exec. + for i = len(fd); i < 3; i++ { + rawSyscall(funcPC(libc_close_trampoline), uintptr(i), 0, 0) + } + + // Detach fd 0 from tty + if sys.Noctty { + _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), 0, uintptr(TIOCNOTTY), 0) + if err1 != 0 { + goto childerror + } + } + + // Set the controlling TTY to Ctty + if sys.Setctty { + _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0) + if err1 != 0 { + goto childerror + } + } + + // Time to exec. 
+ _, _, err1 = rawSyscall(funcPC(libc_execve_trampoline), + uintptr(unsafe.Pointer(argv0)), + uintptr(unsafe.Pointer(&argv[0])), + uintptr(unsafe.Pointer(&envv[0]))) + +childerror: + // send error code on pipe + rawSyscall(funcPC(libc_write_trampoline), uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1)) + for { + rawSyscall(funcPC(libc_exit_trampoline), 253, 0, 0) + } +} diff --git a/src/syscall/exec_solaris.go b/src/syscall/exec_libc.go similarity index 91% rename from src/syscall/exec_solaris.go rename to src/syscall/exec_libc.go index 9735ae570671c..0133139000b69 100644 --- a/src/syscall/exec_solaris.go +++ b/src/syscall/exec_libc.go @@ -2,6 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// +build aix solaris + +// This file handles forkAndExecInChild function for OS using libc syscall like AIX or Solaris. + package syscall import ( @@ -28,6 +32,7 @@ func runtime_AfterForkInChild() func chdir(path uintptr) (err Errno) func chroot1(path uintptr) (err Errno) func close(fd uintptr) (err Errno) +func dup2child(old uintptr, new uintptr) (val uintptr, err Errno) func execve(path uintptr, argv uintptr, envp uintptr) (err Errno) func exit(code uintptr) func fcntl1(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err Errno) @@ -43,7 +48,7 @@ func write1(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err Errno) // syscall defines this global on our behalf to avoid a build dependency on other platforms func init() { - execveSolaris = execve + execveLibc = execve } // Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child. @@ -119,14 +124,14 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr } if sys.Foreground { - pgrp := sys.Pgid + pgrp := _Pid_t(sys.Pgid) if pgrp == 0 { r1, err1 = getpid() if err1 != 0 { goto childerror } - pgrp = int(r1) + pgrp = _Pid_t(r1) } // Place process group in foreground. 
@@ -178,7 +183,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. if pipe < nextfd { - _, err1 = fcntl1(uintptr(pipe), F_DUP2FD, uintptr(nextfd)) + _, err1 = dup2child(uintptr(pipe), uintptr(nextfd)) if err1 != 0 { goto childerror } @@ -191,11 +196,14 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr if nextfd == pipe { // don't stomp on pipe nextfd++ } - _, err1 = fcntl1(uintptr(fd[i]), F_DUP2FD, uintptr(nextfd)) + _, err1 = dup2child(uintptr(fd[i]), uintptr(nextfd)) + if err1 != 0 { + goto childerror + } + _, err1 = fcntl1(uintptr(nextfd), F_SETFD, FD_CLOEXEC) if err1 != 0 { goto childerror } - fcntl1(uintptr(nextfd), F_SETFD, FD_CLOEXEC) fd[i] = nextfd nextfd++ } @@ -218,7 +226,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr } // The new fd is created NOT close-on-exec, // which is exactly what we want. - _, err1 = fcntl1(uintptr(fd[i]), F_DUP2FD, uintptr(i)) + _, err1 = dup2child(uintptr(fd[i]), uintptr(i)) if err1 != 0 { goto childerror } @@ -242,6 +250,11 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr // Set the controlling TTY to Ctty if sys.Setctty { + // On AIX, TIOCSCTTY is undefined + if TIOCSCTTY == 0 { + err1 = ENOSYS + goto childerror + } err1 = ioctl(uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0) if err1 != 0 { goto childerror diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index 7ae3177fdc087..6c761f85c4418 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -20,9 +20,12 @@ type SysProcIDMap struct { } type SysProcAttr struct { - Chroot string // Chroot. - Credential *Credential // Credential. - Ptrace bool // Enable tracing. + Chroot string // Chroot. + Credential *Credential // Credential. + // Ptrace tells the child to call ptrace(PTRACE_TRACEME). 
+ // Call runtime.LockOSThread before starting a process with this set, + // and don't call UnlockOSThread until done with PtraceSyscall calls. + Ptrace bool Setsid bool // Create session. Setpgid bool // Set process group ID to Pgid, or, if Pgid == 0, to new pid. Setctty bool // Set controlling terminal to fd Ctty (only meaningful if Setsid is set) diff --git a/src/syscall/exec_linux_test.go b/src/syscall/exec_linux_test.go index f551e87736d98..ac5745bc80bca 100644 --- a/src/syscall/exec_linux_test.go +++ b/src/syscall/exec_linux_test.go @@ -16,6 +16,7 @@ import ( "os/exec" "os/user" "path/filepath" + "runtime" "strconv" "strings" "syscall" @@ -524,6 +525,11 @@ func TestAmbientCaps(t *testing.T) { t.Skip("skipping test on Kubernetes-based builders; see Issue 12815") } + // skip on android, due to lack of lookup support + if runtime.GOOS == "android" { + t.Skip("skipping test on android; see Issue 27327") + } + caps, err := getCaps() if err != nil { t.Fatal(err) diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 9a950ac17fdbe..997ccab07e8f7 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Fork, exec, wait, etc. @@ -246,9 +246,10 @@ func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle func runtime_BeforeExec() func runtime_AfterExec() -// execveSolaris is non-nil on Solaris, set to execve in exec_solaris.go; this +// execveLibc is non-nil on OS using libc syscall, set to execve in exec_libc.go; this // avoids a build dependency for other platforms. 
-var execveSolaris func(path uintptr, argv uintptr, envp uintptr) (err Errno) +var execveLibc func(path uintptr, argv uintptr, envp uintptr) Errno +var execveDarwin func(path *byte, argv **byte, envp **byte) error // Exec invokes the execve(2) system call. func Exec(argv0 string, argv []string, envv []string) (err error) { @@ -266,13 +267,16 @@ func Exec(argv0 string, argv []string, envv []string) (err error) { } runtime_BeforeExec() - var err1 Errno - if runtime.GOOS == "solaris" { - // RawSyscall should never be used on Solaris. - err1 = execveSolaris( + var err1 error + if runtime.GOOS == "solaris" || runtime.GOOS == "aix" { + // RawSyscall should never be used on Solaris or AIX. + err1 = execveLibc( uintptr(unsafe.Pointer(argv0p)), uintptr(unsafe.Pointer(&argvp[0])), uintptr(unsafe.Pointer(&envvp[0]))) + } else if runtime.GOOS == "darwin" { + // Similarly on Darwin. + err1 = execveDarwin(argv0p, &argvp[0], &envvp[0]) } else { _, _, err1 = RawSyscall(SYS_EXECVE, uintptr(unsafe.Pointer(argv0p)), diff --git a/src/syscall/exec_unix_test.go b/src/syscall/exec_unix_test.go index 9bb95c0f39508..33614f5221233 100644 --- a/src/syscall/exec_unix_test.go +++ b/src/syscall/exec_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package syscall_test diff --git a/src/syscall/export_freebsd_test.go b/src/syscall/export_freebsd_test.go new file mode 100644 index 0000000000000..d47f09024f763 --- /dev/null +++ b/src/syscall/export_freebsd_test.go @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syscall + +type Dirent_freebsd11 = dirent_freebsd11 + +var ( + Roundup = roundup + ConvertFromDirents11 = convertFromDirents11 +) diff --git a/src/syscall/flock.go b/src/syscall/flock.go index 62736ae9dcb4c..568efca7d4950 100644 --- a/src/syscall/flock.go +++ b/src/syscall/flock.go @@ -1,4 +1,4 @@ -// +build linux darwin freebsd openbsd netbsd dragonfly +// +build linux freebsd openbsd netbsd dragonfly // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/src/syscall/flock_aix.go b/src/syscall/flock_aix.go new file mode 100644 index 0000000000000..c9eab43b6bc2f --- /dev/null +++ b/src/syscall/flock_aix.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +import "unsafe" + +// On AIX, there is no flock() system call. + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/syscall/flock_darwin.go b/src/syscall/flock_darwin.go new file mode 100644 index 0000000000000..d2bd84130c339 --- /dev/null +++ b/src/syscall/flock_darwin.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syscall + +import "unsafe" + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. 
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} diff --git a/src/syscall/forkpipe.go b/src/syscall/forkpipe.go index 71890a29badcc..d9999cb8b860b 100644 --- a/src/syscall/forkpipe.go +++ b/src/syscall/forkpipe.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly solaris +// +build aix darwin dragonfly solaris package syscall diff --git a/src/syscall/fs_js.go b/src/syscall/fs_js.go index 00d6c769791ef..fcc5f038b823c 100644 --- a/src/syscall/fs_js.go +++ b/src/syscall/fs_js.go @@ -81,15 +81,15 @@ func Open(path string, openmode int, perm uint32) (int, error) { return 0, errors.New("syscall.Open: O_SYNC is not supported by js/wasm") } - jsFD, err := fsCall("openSync", path, flags, perm) + jsFD, err := fsCall("open", path, flags, perm) if err != nil { return 0, err } fd := jsFD.Int() var entries []string - if stat, err := fsCall("fstatSync", fd); err == nil && stat.Call("isDirectory").Bool() { - dir, err := fsCall("readdirSync", path) + if stat, err := fsCall("fstat", fd); err == nil && stat.Call("isDirectory").Bool() { + dir, err := fsCall("readdir", path) if err != nil { return 0, err } @@ -113,7 +113,7 @@ func Close(fd int) error { filesMu.Lock() delete(files, fd) filesMu.Unlock() - _, err := fsCall("closeSync", fd) + _, err := fsCall("close", fd) return err } @@ -125,7 +125,7 @@ func Mkdir(path string, perm uint32) error { if err := checkPath(path); err != nil { return err } - _, err := fsCall("mkdirSync", path, perm) + _, err := fsCall("mkdir", path, perm) return err } @@ -182,7 +182,7 @@ func Stat(path string, st *Stat_t) error { if err := checkPath(path); err != nil { return err } - jsSt, err := fsCall("statSync", path) + jsSt, err := fsCall("stat", path) if err != nil { return err } @@ -194,7 +194,7 @@ func Lstat(path string, st *Stat_t) error { if err := checkPath(path); err != nil { 
return err } - jsSt, err := fsCall("lstatSync", path) + jsSt, err := fsCall("lstat", path) if err != nil { return err } @@ -203,7 +203,7 @@ func Lstat(path string, st *Stat_t) error { } func Fstat(fd int, st *Stat_t) error { - jsSt, err := fsCall("fstatSync", fd) + jsSt, err := fsCall("fstat", fd) if err != nil { return err } @@ -215,7 +215,7 @@ func Unlink(path string) error { if err := checkPath(path); err != nil { return err } - _, err := fsCall("unlinkSync", path) + _, err := fsCall("unlink", path) return err } @@ -223,7 +223,7 @@ func Rmdir(path string) error { if err := checkPath(path); err != nil { return err } - _, err := fsCall("rmdirSync", path) + _, err := fsCall("rmdir", path) return err } @@ -231,12 +231,12 @@ func Chmod(path string, mode uint32) error { if err := checkPath(path); err != nil { return err } - _, err := fsCall("chmodSync", path, mode) + _, err := fsCall("chmod", path, mode) return err } func Fchmod(fd int, mode uint32) error { - _, err := fsCall("fchmodSync", fd, mode) + _, err := fsCall("fchmod", fd, mode) return err } @@ -267,7 +267,7 @@ func UtimesNano(path string, ts []Timespec) error { } atime := ts[0].Sec mtime := ts[1].Sec - _, err := fsCall("utimesSync", path, atime, mtime) + _, err := fsCall("utimes", path, atime, mtime) return err } @@ -278,7 +278,7 @@ func Rename(from, to string) error { if err := checkPath(to); err != nil { return err } - _, err := fsCall("renameSync", from, to) + _, err := fsCall("rename", from, to) return err } @@ -286,12 +286,12 @@ func Truncate(path string, length int64) error { if err := checkPath(path); err != nil { return err } - _, err := fsCall("truncateSync", path, length) + _, err := fsCall("truncate", path, length) return err } func Ftruncate(fd int, length int64) error { - _, err := fsCall("ftruncateSync", fd, length) + _, err := fsCall("ftruncate", fd, length) return err } @@ -299,7 +299,7 @@ func Getcwd(buf []byte) (n int, err error) { defer recoverErr(&err) cwd := 
jsProcess.Call("cwd").String() n = copy(buf, cwd) - return n, nil + return } func Chdir(path string) (err error) { @@ -323,7 +323,7 @@ func Readlink(path string, buf []byte) (n int, err error) { if err := checkPath(path); err != nil { return 0, err } - dst, err := fsCall("readlinkSync", path) + dst, err := fsCall("readlink", path) if err != nil { return 0, err } @@ -338,7 +338,7 @@ func Link(path, link string) error { if err := checkPath(link); err != nil { return err } - _, err := fsCall("linkSync", path, link) + _, err := fsCall("link", path, link) return err } @@ -349,12 +349,12 @@ func Symlink(path, link string) error { if err := checkPath(link); err != nil { return err } - _, err := fsCall("symlinkSync", path, link) + _, err := fsCall("symlink", path, link) return err } func Fsync(fd int) error { - _, err := fsCall("fsyncSync", fd) + _, err := fsCall("fsync", fd) return err } @@ -371,7 +371,7 @@ func Read(fd int, b []byte) (int, error) { } a := js.TypedArrayOf(b) - n, err := fsCall("readSync", fd, a, 0, len(b)) + n, err := fsCall("read", fd, a, 0, len(b), nil) a.Release() if err != nil { return 0, err @@ -394,7 +394,7 @@ func Write(fd int, b []byte) (int, error) { } a := js.TypedArrayOf(b) - n, err := fsCall("writeSync", fd, a, 0, len(b)) + n, err := fsCall("write", fd, a, 0, len(b), nil) a.Release() if err != nil { return 0, err @@ -406,7 +406,7 @@ func Write(fd int, b []byte) (int, error) { func Pread(fd int, b []byte, offset int64) (int, error) { a := js.TypedArrayOf(b) - n, err := fsCall("readSync", fd, a, 0, len(b), offset) + n, err := fsCall("read", fd, a, 0, len(b), offset) a.Release() if err != nil { return 0, err @@ -416,7 +416,7 @@ func Pread(fd int, b []byte, offset int64) (int, error) { func Pwrite(fd int, b []byte, offset int64) (int, error) { a := js.TypedArrayOf(b) - n, err := fsCall("writeSync", fd, a, 0, len(b), offset) + n, err := fsCall("write", fd, a, 0, len(b), offset) a.Release() if err != nil { return 0, err @@ -467,10 +467,32 @@ func 
Pipe(fd []int) error { return ENOSYS } -func fsCall(name string, args ...interface{}) (res js.Value, err error) { - defer recoverErr(&err) - res = jsFS.Call(name, args...) - return +func fsCall(name string, args ...interface{}) (js.Value, error) { + type callResult struct { + val js.Value + err error + } + + c := make(chan callResult, 1) + jsFS.Call(name, append(args, js.FuncOf(func(this js.Value, args []js.Value) interface{} { + var res callResult + + if len(args) >= 1 { // on Node.js 8, fs.utimes calls the callback without any arguments + if jsErr := args[0]; jsErr != js.Null() { + res.err = mapJSError(jsErr) + } + } + + res.val = js.Undefined() + if len(args) >= 2 { + res.val = args[1] + } + + c <- res + return nil + }))...) + res := <-c + return res.val, res.err } // checkPath checks that the path is not empty and that it contains no null characters. @@ -492,10 +514,15 @@ func recoverErr(errPtr *error) { if !ok { panic(err) } - errno, ok := errnoByCode[jsErr.Get("code").String()] - if !ok { - panic(err) - } - *errPtr = errnoErr(Errno(errno)) + *errPtr = mapJSError(jsErr.Value) + } +} + +// mapJSError maps an error given by Node.js to the appropriate Go error +func mapJSError(jsErr js.Value) error { + errno, ok := errnoByCode[jsErr.Get("code").String()] + if !ok { + panic(jsErr) } + return errnoErr(Errno(errno)) } diff --git a/src/syscall/js/callback.go b/src/syscall/js/callback.go deleted file mode 100644 index 9d573074cbd39..0000000000000 --- a/src/syscall/js/callback.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build js,wasm - -package js - -import "sync" - -var ( - pendingCallbacks = Global().Get("Array").New() - makeCallbackHelper = Global().Get("Go").Get("_makeCallbackHelper") - makeEventCallbackHelper = Global().Get("Go").Get("_makeEventCallbackHelper") -) - -var ( - callbacksMu sync.Mutex - callbacks = make(map[uint32]func([]Value)) - nextCallbackID uint32 = 1 -) - -// Callback is a Go function that got wrapped for use as a JavaScript callback. -type Callback struct { - Value // the JavaScript function that queues the callback for execution - id uint32 -} - -// NewCallback returns a wrapped callback function. -// -// Invoking the callback in JavaScript will queue the Go function fn for execution. -// This execution happens asynchronously on a special goroutine that handles all callbacks and preserves -// the order in which the callbacks got called. -// As a consequence, if one callback blocks this goroutine, other callbacks will not be processed. -// A blocking callback should therefore explicitly start a new goroutine. -// -// Callback.Release must be called to free up resources when the callback will not be used any more. -func NewCallback(fn func(args []Value)) Callback { - callbackLoopOnce.Do(func() { - go callbackLoop() - }) - - callbacksMu.Lock() - id := nextCallbackID - nextCallbackID++ - callbacks[id] = fn - callbacksMu.Unlock() - return Callback{ - Value: makeCallbackHelper.Invoke(id, pendingCallbacks, jsGo), - id: id, - } -} - -type EventCallbackFlag int - -const ( - // PreventDefault can be used with NewEventCallback to call event.preventDefault synchronously. - PreventDefault EventCallbackFlag = 1 << iota - // StopPropagation can be used with NewEventCallback to call event.stopPropagation synchronously. - StopPropagation - // StopImmediatePropagation can be used with NewEventCallback to call event.stopImmediatePropagation synchronously. 
- StopImmediatePropagation -) - -// NewEventCallback returns a wrapped callback function, just like NewCallback, but the callback expects to have -// exactly one argument, the event. Depending on flags, it will synchronously call event.preventDefault, -// event.stopPropagation and/or event.stopImmediatePropagation before queuing the Go function fn for execution. -func NewEventCallback(flags EventCallbackFlag, fn func(event Value)) Callback { - c := NewCallback(func(args []Value) { - fn(args[0]) - }) - return Callback{ - Value: makeEventCallbackHelper.Invoke( - flags&PreventDefault != 0, - flags&StopPropagation != 0, - flags&StopImmediatePropagation != 0, - c, - ), - id: c.id, - } -} - -// Release frees up resources allocated for the callback. -// The callback must not be invoked after calling Release. -func (c Callback) Release() { - callbacksMu.Lock() - delete(callbacks, c.id) - callbacksMu.Unlock() -} - -var callbackLoopOnce sync.Once - -func callbackLoop() { - for !jsGo.Get("_callbackShutdown").Bool() { - sleepUntilCallback() - for { - cb := pendingCallbacks.Call("shift") - if cb == Undefined() { - break - } - - id := uint32(cb.Get("id").Int()) - callbacksMu.Lock() - f, ok := callbacks[id] - callbacksMu.Unlock() - if !ok { - Global().Get("console").Call("error", "call to closed callback") - continue - } - - argsObj := cb.Get("args") - args := make([]Value, argsObj.Length()) - for i := range args { - args[i] = argsObj.Index(i) - } - f(args) - } - } -} - -// sleepUntilCallback is defined in the runtime package -func sleepUntilCallback() diff --git a/src/syscall/js/func.go b/src/syscall/js/func.go new file mode 100644 index 0000000000000..6b7f39b8784a1 --- /dev/null +++ b/src/syscall/js/func.go @@ -0,0 +1,92 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build js,wasm + +package js + +import "sync" + +var ( + funcsMu sync.Mutex + funcs = make(map[uint32]func(Value, []Value) interface{}) + nextFuncID uint32 = 1 +) + +var _ Wrapper = Func{} // Func must implement Wrapper + +// Func is a wrapped Go function to be called by JavaScript. +type Func struct { + Value // the JavaScript function that invokes the Go function + id uint32 +} + +// FuncOf returns a wrapped function. +// +// Invoking the JavaScript function will synchronously call the Go function fn with the value of JavaScript's +// "this" keyword and the arguments of the invocation. +// The return value of the invocation is the result of the Go function mapped back to JavaScript according to ValueOf. +// +// A wrapped function triggered during a call from Go to JavaScript gets executed on the same goroutine. +// A wrapped function triggered by JavaScript's event loop gets executed on an extra goroutine. +// Blocking operations in the wrapped function will block the event loop. +// As a consequence, if one wrapped function blocks, other wrapped funcs will not be processed. +// A blocking function should therefore explicitly start a new goroutine. +// +// Func.Release must be called to free up resources when the function will not be used any more. +func FuncOf(fn func(this Value, args []Value) interface{}) Func { + funcsMu.Lock() + id := nextFuncID + nextFuncID++ + funcs[id] = fn + funcsMu.Unlock() + return Func{ + id: id, + Value: jsGo.Call("_makeFuncWrapper", id), + } +} + +// Release frees up resources allocated for the function. +// The function must not be invoked after calling Release. +func (c Func) Release() { + funcsMu.Lock() + delete(funcs, c.id) + funcsMu.Unlock() +} + +// setEventHandler is defined in the runtime package. 
+func setEventHandler(fn func()) + +func init() { + setEventHandler(handleEvent) +} + +func handleEvent() { + cb := jsGo.Get("_pendingEvent") + if cb == Null() { + return + } + jsGo.Set("_pendingEvent", Null()) + + id := uint32(cb.Get("id").Int()) + if id == 0 { // zero indicates deadlock + select {} + } + funcsMu.Lock() + f, ok := funcs[id] + funcsMu.Unlock() + if !ok { + Global().Get("console").Call("error", "call to released function") + return + } + + this := cb.Get("this") + argsObj := cb.Get("args") + args := make([]Value, argsObj.Length()) + for i := range args { + args[i] = argsObj.Index(i) + } + result := f(this, args) + cb.Set("result", result) +} diff --git a/src/syscall/js/js.go b/src/syscall/js/js.go index 336586ca2dd18..0893db022d356 100644 --- a/src/syscall/js/js.go +++ b/src/syscall/js/js.go @@ -16,19 +16,32 @@ import ( ) // ref is used to identify a JavaScript value, since the value itself can not be passed to WebAssembly. -// A JavaScript number (64-bit float, except NaN) is represented by its IEEE 754 binary representation. +// +// The JavaScript value "undefined" is represented by the value 0. +// A JavaScript number (64-bit float, except 0 and NaN) is represented by its IEEE 754 binary representation. // All other values are represented as an IEEE 754 binary representation of NaN with bits 0-31 used as // an ID and bits 32-33 used to differentiate between string, symbol, function and object. type ref uint64 -// nanHead are the upper 32 bits of a ref which are set if the value is not a JavaScript number or NaN itself. +// nanHead are the upper 32 bits of a ref which are set if the value is not encoded as an IEEE 754 number (see above). const nanHead = 0x7FF80000 -// Value represents a JavaScript value. +// Wrapper is implemented by types that are backed by a JavaScript value. +type Wrapper interface { + // JSValue returns a JavaScript value associated with an object. + JSValue() Value +} + +// Value represents a JavaScript value. 
The zero value is the JavaScript value "undefined". type Value struct { ref ref } +// JSValue implements Wrapper interface. +func (v Value) JSValue() Value { + return v +} + func makeValue(v ref) Value { return Value{ref: v} } @@ -38,6 +51,9 @@ func predefValue(id uint32) Value { } func floatValue(f float64) Value { + if f == 0 { + return valueZero + } if f != f { return valueNaN } @@ -56,8 +72,9 @@ func (e Error) Error() string { } var ( + valueUndefined = Value{ref: 0} valueNaN = predefValue(0) - valueUndefined = predefValue(1) + valueZero = predefValue(1) valueNull = predefValue(2) valueTrue = predefValue(3) valueFalse = predefValue(4) @@ -90,21 +107,21 @@ func Global() Value { // | ---------------------- | ---------------------- | // | js.Value | [its value] | // | js.TypedArray | typed array | -// | js.Callback | function | +// | js.Func | function | // | nil | null | // | bool | boolean | // | integers and floats | number | // | string | string | // | []interface{} | new array | // | map[string]interface{} | new object | +// +// Panics if x is not one of the expected types. 
func ValueOf(x interface{}) Value { switch x := x.(type) { - case Value: + case Value: // should precede Wrapper to avoid a loop return x - case TypedArray: - return x.Value - case Callback: - return x.Value + case Wrapper: + return x.JSValue() case nil: return valueNull case bool: @@ -318,13 +335,18 @@ func (v Value) New(args ...interface{}) Value { func valueNew(v ref, args []ref) (ref, bool) func (v Value) isNumber() bool { - return v.ref>>32&nanHead != nanHead || v.ref == valueNaN.ref + return v.ref == valueZero.ref || + v.ref == valueNaN.ref || + (v.ref != valueUndefined.ref && v.ref>>32&nanHead != nanHead) } func (v Value) float(method string) float64 { if !v.isNumber() { panic(&ValueError{method, v.Type()}) } + if v.ref == valueZero.ref { + return 0 + } return *(*float64)(unsafe.Pointer(&v.ref)) } @@ -350,6 +372,26 @@ func (v Value) Bool() bool { } } +// Truthy returns the JavaScript "truthiness" of the value v. In JavaScript, +// false, 0, "", null, undefined, and NaN are "falsy", and everything else is +// "truthy". See https://developer.mozilla.org/en-US/docs/Glossary/Truthy. +func (v Value) Truthy() bool { + switch v.Type() { + case TypeUndefined, TypeNull: + return false + case TypeBoolean: + return v.Bool() + case TypeNumber: + return v.ref != valueNaN.ref && v.ref != valueZero.ref + case TypeString: + return v.String() != "" + case TypeSymbol, TypeFunction, TypeObject: + return true + default: + panic("bad type") + } +} + // String returns the value v converted to string according to JavaScript type conversions. func (v Value) String() string { str, length := valuePrepareString(v.ref) diff --git a/src/syscall/js/js_test.go b/src/syscall/js/js_test.go index 9cc931a31d38d..c14d2cc24c90b 100644 --- a/src/syscall/js/js_test.go +++ b/src/syscall/js/js_test.go @@ -4,6 +4,15 @@ // +build js,wasm +// To run these tests: +// +// - Install Node +// - Add /path/to/go/misc/wasm to your $PATH (so that "go test" can find +// "go_js_wasm_exec"). 
+// - GOOS=js GOARCH=wasm go test +// +// See -exec in "go help test", and "go help run" for details. + package js_test import ( @@ -19,10 +28,19 @@ var dummys = js.Global().Call("eval", `({ someInt: 42, someFloat: 42.123, someArray: [41, 42, 43], + someDate: new Date(), add: function(a, b) { return a + b; }, + zero: 0, + stringZero: "0", NaN: NaN, + emptyObj: {}, + emptyArray: [], + Infinity: Infinity, + NegInfinity: -Infinity, + objNumber0: new Number(0), + objBooleanFalse: new Boolean(false), })`) func TestBool(t *testing.T) { @@ -74,6 +92,9 @@ func TestInt(t *testing.T) { if dummys.Get("someInt") != dummys.Get("someInt") { t.Errorf("same value not equal") } + if got := dummys.Get("zero").Int(); got != 0 { + t.Errorf("got %#v, want %#v", got, 0) + } } func TestIntConversion(t *testing.T) { @@ -237,6 +258,9 @@ func TestType(t *testing.T) { if got, want := js.ValueOf(true).Type(), js.TypeBoolean; got != want { t.Errorf("got %s, want %s", got, want) } + if got, want := js.ValueOf(0).Type(), js.TypeNumber; got != want { + t.Errorf("got %s, want %s", got, want) + } if got, want := js.ValueOf(42).Type(), js.TypeNumber; got != want { t.Errorf("got %s, want %s", got, want) } @@ -269,51 +293,89 @@ func TestValueOf(t *testing.T) { } } -func TestCallback(t *testing.T) { +func TestZeroValue(t *testing.T) { + var v js.Value + if v != js.Undefined() { + t.Error("zero js.Value is not js.Undefined()") + } +} + +func TestFuncOf(t *testing.T) { c := make(chan struct{}) - cb := js.NewCallback(func(args []js.Value) { + cb := js.FuncOf(func(this js.Value, args []js.Value) interface{} { if got := args[0].Int(); got != 42 { t.Errorf("got %#v, want %#v", got, 42) } c <- struct{}{} + return nil }) defer cb.Release() js.Global().Call("setTimeout", cb, 0, 42) <-c } -func TestEventCallback(t *testing.T) { - for _, name := range []string{"preventDefault", "stopPropagation", "stopImmediatePropagation"} { - c := make(chan struct{}) - var flags js.EventCallbackFlag - switch name { - case 
"preventDefault": - flags = js.PreventDefault - case "stopPropagation": - flags = js.StopPropagation - case "stopImmediatePropagation": - flags = js.StopImmediatePropagation - } - cb := js.NewEventCallback(flags, func(event js.Value) { - c <- struct{}{} +func TestInvokeFunction(t *testing.T) { + called := false + cb := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + cb2 := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + called = true + return 42 }) - defer cb.Release() - - event := js.Global().Call("eval", fmt.Sprintf("({ called: false, %s: function() { this.called = true; } })", name)) - cb.Invoke(event) - if !event.Get("called").Bool() { - t.Errorf("%s not called", name) - } - - <-c + defer cb2.Release() + return cb2.Invoke() + }) + defer cb.Release() + if got := cb.Invoke().Int(); got != 42 { + t.Errorf("got %#v, want %#v", got, 42) + } + if !called { + t.Error("function not called") } } -func ExampleNewCallback() { - var cb js.Callback - cb = js.NewCallback(func(args []js.Value) { +func ExampleFuncOf() { + var cb js.Func + cb = js.FuncOf(func(this js.Value, args []js.Value) interface{} { fmt.Println("button clicked") - cb.Release() // release the callback if the button will not be clicked again + cb.Release() // release the function if the button will not be clicked again + return nil }) js.Global().Get("document").Call("getElementById", "myButton").Call("addEventListener", "click", cb) } + +// See +// - https://developer.mozilla.org/en-US/docs/Glossary/Truthy +// - https://stackoverflow.com/questions/19839952/all-falsey-values-in-javascript/19839953#19839953 +// - http://www.ecma-international.org/ecma-262/5.1/#sec-9.2 +func TestTruthy(t *testing.T) { + want := true + for _, key := range []string{ + "someBool", "someString", "someInt", "someFloat", "someArray", "someDate", + "stringZero", // "0" is truthy + "add", // functions are truthy + "emptyObj", "emptyArray", "Infinity", "NegInfinity", + // All objects are truthy, even if 
they're Number(0) or Boolean(false). + "objNumber0", "objBooleanFalse", + } { + if got := dummys.Get(key).Truthy(); got != want { + t.Errorf("%s: got %#v, want %#v", key, got, want) + } + } + + want = false + if got := dummys.Get("zero").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := dummys.Get("NaN").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.ValueOf("").Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.Null().Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } + if got := js.Undefined().Truthy(); got != want { + t.Errorf("got %#v, want %#v", got, want) + } +} diff --git a/src/syscall/js/typedarray.go b/src/syscall/js/typedarray.go index afa15488ec7a4..aa56cf69f3c5e 100644 --- a/src/syscall/js/typedarray.go +++ b/src/syscall/js/typedarray.go @@ -22,6 +22,8 @@ var ( float64Array = Global().Get("Float64Array") ) +var _ Wrapper = TypedArray{} // TypedArray must implement Wrapper + // TypedArray represents a JavaScript typed array. 
type TypedArray struct { Value diff --git a/src/syscall/mkall.sh b/src/syscall/mkall.sh index b381b93161d88..61f45f57905b6 100755 --- a/src/syscall/mkall.sh +++ b/src/syscall/mkall.sh @@ -83,6 +83,7 @@ mksysctl="" zsysctl="zsysctl_$GOOSARCH.go" mksysnum= mktypes= +mkasm= run="sh" case "$1" in @@ -115,21 +116,38 @@ _* | *_ | _) echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 exit 1 ;; +aix_ppc64) + mkerrors="$mkerrors -maix64" + mksyscall="./mksyscall_libc.pl -aix" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + ;; darwin_386) mkerrors="$mkerrors -m32" - mksyscall="./mksyscall.pl -l32" + mksyscall="./mksyscall.pl -l32 -darwin" mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mkasm="go run mkasm_darwin.go" ;; darwin_amd64) mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -darwin" mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mkasm="go run mkasm_darwin.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" + mksyscall="./mksyscall.pl -darwin" + mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" + mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mkasm="go run mkasm_darwin.go" + ;; +darwin_arm) + mkerrors="$mkerrors -m32" + mksyscall="./mksyscall.pl -l32 -darwin" mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h" mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mkasm="go run mkasm_darwin.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -292,7 +310,9 @@ openbsd_arm) mksysctl="./mksysctl_openbsd.pl" zsysctl="zsysctl_openbsd.go" mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" + # Let the type of C char be signed to make the bare syscall + # API consistent between platforms. 
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; plan9_386) mkerrors= @@ -301,7 +321,7 @@ plan9_386) mktypes="XXX" ;; solaris_amd64) - mksyscall="./mksyscall_solaris.pl" + mksyscall="./mksyscall_libc.pl -solaris" mkerrors="$mkerrors -m64" mksysnum= mktypes="GOARCH=$GOARCH go tool cgo -godefs" @@ -327,5 +347,10 @@ esac if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi - if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go"; fi + if [ -n "$mktypes" ]; then + # ztypes_$GOOSARCH.go could be erased before "go run mkpost.go" is called. + # Therefore, "go run" tries to recompile syscall package but ztypes is empty and it fails. + echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go.NEW && mv ztypes_$GOOSARCH.go.NEW ztypes_$GOOSARCH.go"; + fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi ) | $run diff --git a/src/syscall/mkasm_darwin.go b/src/syscall/mkasm_darwin.go new file mode 100644 index 0000000000000..f6f75f99f6b69 --- /dev/null +++ b/src/syscall/mkasm_darwin.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. +//This program must be run after mksyscall.pl. 
+package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "strings" +) + +func main() { + in1, err := ioutil.ReadFile("syscall_darwin.go") + if err != nil { + log.Fatalf("can't open syscall_darwin.go: %s", err) + } + arch := os.Args[1] + in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) + } + in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) + } + in := string(in1) + string(in2) + string(in3) + + trampolines := map[string]bool{} + + var out bytes.Buffer + + fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) + fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") + fmt.Fprintf(&out, "#include \"textflag.h\"\n") + for _, line := range strings.Split(in, "\n") { + if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { + continue + } + fn := line[5 : len(line)-13] + if !trampolines[fn] { + trampolines[fn] = true + fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) + fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) + } + } + err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) + if err != nil { + log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) + } +} diff --git a/src/syscall/mkerrors.sh b/src/syscall/mkerrors.sh index 93d6f7d2b60d8..d5880dcaf2dac 100755 --- a/src/syscall/mkerrors.sh +++ b/src/syscall/mkerrors.sh @@ -20,6 +20,16 @@ fi uname=$(uname) +includes_AIX=' +#include +#include +#include +#include +#include +#include +#include +' + includes_Darwin=' #define _DARWIN_C_SOURCE #define KERNEL diff --git a/src/syscall/mkpost.go b/src/syscall/mkpost.go index e75ba1502a94a..d5f5c8d6d62b2 100644 --- a/src/syscall/mkpost.go +++ b/src/syscall/mkpost.go @@ -30,7 +30,8 @@ func main() { goarch := os.Getenv("GOARCH") goos := 
os.Getenv("GOOS") - if goarch == "s390x" && goos == "linux" { + switch { + case goarch == "s390x" && goos == "linux": // Export the types of PtraceRegs fields. re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)") s = re.ReplaceAllString(s, "Ptrace$1") @@ -53,6 +54,11 @@ func main() { // the existing gccgo API. re = regexp.MustCompile("(Data\\s+\\[14\\])uint8") s = re.ReplaceAllString(s, "${1}int8") + + case goos == "freebsd": + // Keep pre-FreeBSD 10 / non-POSIX 2008 names for timespec fields + re := regexp.MustCompile("(A|M|C|Birth)tim\\s+Timespec") + s = re.ReplaceAllString(s, "${1}timespec Timespec") } // gofmt diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl index ccce82e172fdd..079b08dcb912e 100755 --- a/src/syscall/mksyscall.pl +++ b/src/syscall/mksyscall.pl @@ -25,6 +25,7 @@ my $errors = 0; my $_32bit = ""; my $plan9 = 0; +my $darwin = 0; my $openbsd = 0; my $netbsd = 0; my $dragonfly = 0; @@ -43,6 +44,10 @@ $plan9 = 1; shift; } +if($ARGV[0] eq "-darwin") { + $darwin = 1; + shift; +} if($ARGV[0] eq "-openbsd") { $openbsd = 1; shift; @@ -94,6 +99,9 @@ ($) return ($1, $2); } +# set of trampolines we've already generated +my %trampolines; + my $text = ""; while(<>) { chomp; @@ -211,6 +219,11 @@ ($) $asm = "RawSyscall"; } } + if ($darwin) { + # Call unexported syscall functions (which take + # libc functions instead of syscall numbers). + $asm = lcfirst($asm); + } if(@args <= 3) { while(@args < 3) { push @args, "0"; @@ -229,7 +242,16 @@ ($) print STDERR "$ARGV:$.: too many arguments to system call\n"; } + if ($darwin) { + # Use extended versions for calls that generate a 64-bit result. + my ($name, $type) = parseparam($out[0]); + if ($type eq "int64" || ($type eq "uintptr" && $_32bit eq "")) { + $asm .= "X"; + } + } + # System call number. 
+ my $funcname = ""; if($sysname eq "") { $sysname = "SYS_$func"; $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar @@ -237,6 +259,18 @@ ($) if($nacl) { $sysname =~ y/A-Z/a-z/; } + if($darwin) { + $sysname =~ y/A-Z/a-z/; + $sysname = substr $sysname, 4; + $funcname = "libc_$sysname"; + } + } + if($darwin) { + if($funcname eq "") { + $sysname = substr $sysname, 4; + $funcname = "libc_$sysname"; + } + $sysname = "funcPC(${funcname}_trampoline)"; } # Actual call. @@ -306,6 +340,19 @@ ($) } $text .= "\treturn\n"; $text .= "}\n\n"; + if($darwin) { + if (not exists $trampolines{$funcname}) { + $trampolines{$funcname} = 1; + # The assembly trampoline that jumps to the libc routine. + $text .= "func ${funcname}_trampoline()\n"; + # Map syscall.funcname to just plain funcname. + # (The jump to this function is in the assembly trampoline, generated by mksyscallasm_darwin.go.) + $text .= "//go:linkname $funcname $funcname\n"; + # Tell the linker that funcname can be found in libSystem using varname without the libc_ prefix. + my $basename = substr $funcname, 5; + $text .= "//go:cgo_import_dynamic $funcname $basename \"/usr/lib/libSystem.B.dylib\"\n"; + } + } } chomp $text; diff --git a/src/syscall/mksyscall_solaris.pl b/src/syscall/mksyscall_libc.pl similarity index 86% rename from src/syscall/mksyscall_solaris.pl rename to src/syscall/mksyscall_libc.pl index 91729759147da..5ceedc812a364 100755 --- a/src/syscall/mksyscall_solaris.pl +++ b/src/syscall/mksyscall_libc.pl @@ -19,10 +19,12 @@ use strict; -my $cmdline = "mksyscall_solaris.pl " . join(' ', @ARGV); +my $cmdline = "mksyscall_libc.pl " . 
join(' ', @ARGV); my $errors = 0; my $_32bit = ""; my $tags = ""; # build tags +my $aix = 0; +my $solaris = 0; binmode STDOUT; @@ -33,14 +35,23 @@ $_32bit = "little-endian"; shift; } +if($ARGV[0] eq "-aix") { + $aix = 1; + shift; +} +if($ARGV[0] eq "-solaris") { + $solaris = 1; + shift; +} if($ARGV[0] eq "-tags") { shift; $tags = $ARGV[0]; shift; } + if($ARGV[0] =~ /^-/) { - print STDERR "usage: mksyscall_solaris.pl [-b32 | -l32] [-tags x,y] [file ...]\n"; + print STDERR "usage: mksyscall_libc.pl [-b32 | -l32] [-aix | -solaris] [-tags x,y] [file ...]\n"; exit 1; } @@ -95,9 +106,28 @@ ($) my @in = parseparamlist($in); my @out = parseparamlist($out); + # Try in vain to keep people from editing this file. + # The theory is that they jump into the middle of the file + # without reading the header. + $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; + # So file name. - if($modname eq "") { - $modname = "libc"; + if($aix) { + if($modname eq "") { + $modname = "libc.a/shr_64.o"; + } else { + print STDERR "$func: only syscall using libc are available\n"; + $errors = 1; + next; + } + + } + if($solaris) { + if($modname eq "") { + $modname = "libc"; + } + $modname .= ".so"; + } # System call name. @@ -114,7 +144,7 @@ ($) $sysname =~ y/A-Z/a-z/; # All libc functions are lowercase. # Runtime import of function to allow cross-platform builds. - $dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname.so\"\n"; + $dynimports .= "//go:cgo_import_dynamic ${sysvarname} ${sysname} \"$modname\"\n"; # Link symbol to proc address variable. $linknames .= "//go:linkname ${sysvarname} ${sysvarname}\n"; # Library proc address variable. @@ -184,10 +214,21 @@ ($) } my $nargs = @args; + my $asmfuncname=""; + my $asmrawfuncname=""; + + if($aix){ + $asmfuncname="syscall6"; + $asmrawfuncname="rawSyscall6"; + } else { + $asmfuncname="sysvicall6"; + $asmrawfuncname="rawSysvicall6"; + } + # Determine which form to use; pad args with zeros. 
- my $asm = "${syscalldot}sysvicall6"; + my $asm = "${syscalldot}${asmfuncname}"; if ($nonblock) { - $asm = "${syscalldot}rawSysvicall6"; + $asm = "${syscalldot}${asmrawfuncname}"; } if(@args <= 6) { while(@args < 6) { diff --git a/src/syscall/mksyscall_windows.go b/src/syscall/mksyscall_windows.go index 5fd3a756f87ad..ee2123f9393fc 100644 --- a/src/syscall/mksyscall_windows.go +++ b/src/syscall/mksyscall_windows.go @@ -22,7 +22,7 @@ like func declarations if //sys is replaced by func, but: * If the return parameter is an error number, it must be named err. -* If go func name needs to be different from it's winapi dll name, +* If go func name needs to be different from its winapi dll name, the winapi name could be specified at the end, after "=" sign, like //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA @@ -694,7 +694,7 @@ func (src *Source) ParseFile(path string) error { return nil } -// IsStdRepo returns true if src is part of standard library. +// IsStdRepo reports whether src is part of standard library. func (src *Source) IsStdRepo() (bool, error) { if len(src.Files) == 0 { return false, errors.New("no input files provided") diff --git a/src/syscall/mmap_unix_test.go b/src/syscall/mmap_unix_test.go index 01f7783022c57..d0b3644b59cb2 100644 --- a/src/syscall/mmap_unix_test.go +++ b/src/syscall/mmap_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd linux netbsd openbsd +// +build aix darwin dragonfly freebsd linux netbsd openbsd package syscall_test diff --git a/src/syscall/net.go b/src/syscall/net.go index 272d3afc38745..531fa80d8f1af 100644 --- a/src/syscall/net.go +++ b/src/syscall/net.go @@ -26,7 +26,7 @@ type RawConn interface { Write(f func(fd uintptr) (done bool)) error } -// Conn is implemented by some types in the net package to provide +// Conn is implemented by some types in the net and os packages to provide // access to the underlying file descriptor or handle. type Conn interface { // SyscallConn returns a raw network connection. diff --git a/src/syscall/route_freebsd.go b/src/syscall/route_freebsd.go index 2c2de7474a441..2b47faff42984 100644 --- a/src/syscall/route_freebsd.go +++ b/src/syscall/route_freebsd.go @@ -6,11 +6,7 @@ package syscall import "unsafe" -// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. -var freebsdVersion uint32 - func init() { - freebsdVersion, _ = SysctlUint32("kern.osreldate") conf, _ := Sysctl("kern.conftxt") for i, j := 0, 0; j < len(conf); j++ { if conf[j] != '\n' { diff --git a/src/syscall/route_freebsd_32bit.go b/src/syscall/route_freebsd_32bit.go index ec6f6b7f8bcf7..aed8682237a03 100644 --- a/src/syscall/route_freebsd_32bit.go +++ b/src/syscall/route_freebsd_32bit.go @@ -22,7 +22,7 @@ func (any *anyMessage) parseInterfaceMessage(b []byte) *InterfaceMessage { // FreeBSD 10 and beyond have a restructured mbuf // packet header view. // See https://svnweb.freebsd.org/base?view=revision&revision=254804. 
- if freebsdVersion >= 1000000 { + if supportsABI(1000000) { m := (*ifMsghdr)(unsafe.Pointer(any)) p.Header.Data.Hwassist = uint32(m.Data.Hwassist) p.Header.Data.Epoch = m.Data.Epoch diff --git a/src/syscall/sockcmsg_unix.go b/src/syscall/sockcmsg_unix.go index 5712bf13f223f..954148012fac1 100644 --- a/src/syscall/sockcmsg_unix.go +++ b/src/syscall/sockcmsg_unix.go @@ -2,23 +2,36 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // Socket control messages package syscall -import "unsafe" +import ( + "runtime" + "unsafe" +) // Round the length of a raw sockaddr up to align it properly. func cmsgAlignOf(salen int) int { salign := sizeofPtr - // NOTE: It seems like 64-bit Darwin, DragonFly BSD and - // Solaris kernels still require 32-bit aligned access to - // network subsystem. - if darwin64Bit || dragonfly64Bit || solaris64Bit { - salign = 4 + + switch runtime.GOOS { + case "darwin", "dragonfly", "solaris": + // NOTE: It seems like 64-bit Darwin, DragonFly BSD and + // Solaris kernels still require 32-bit aligned access to + // network subsystem. + if sizeofPtr == 8 { + salign = 4 + } + case "openbsd": + // OpenBSD armv7 requires 64-bit alignment. + if runtime.GOARCH == "arm" { + salign = 8 + } } + return (salen + salign - 1) & ^(salign - 1) } diff --git a/src/syscall/syscall_aix.go b/src/syscall/syscall_aix.go new file mode 100644 index 0000000000000..6512761c3381a --- /dev/null +++ b/src/syscall/syscall_aix.go @@ -0,0 +1,651 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Aix system calls. +// This file is compiled as ordinary Go code, +// but it is also input to mksyscall, +// which parses the //sys lines and generates system call stubs. 
+// Note that sometimes we use a lowercase //sys name and +// wrap it in our own nicer implementation. + +package syscall + +import ( + "unsafe" +) + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +// Constant expected by package but not supported +const ( + _ = iota + TIOCSCTTY + F_DUPFD_CLOEXEC + SYS_EXECVE + SYS_FCNTL +) + +/* + * Wrapped + */ + +// fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX +// There is no way to create a custom fcntl and to keep //sys fcntl easily, +// because we need fcntl name for its libc symbol. This is linked with the script. +// But, as fcntl is currently not exported and isn't called with F_DUP2FD, +// it doesn't matter. +//sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys dup2(old int, new int) (val int, err error) + +//sysnb pipe(p *[2]_C_int) (err error) +func Pipe(p []int) (err error) { + if len(p) != 2 { + return EINVAL + } + var pp [2]_C_int + err = pipe(&pp) + p[0] = int(pp[0]) + p[1] = int(pp[1]) + return +} + +//sys readlink(path string, buf []byte, bufSize uint64) (n int, err error) +func Readlink(path string, buf []byte) (n int, err error) { + s := uint64(len(buf)) + return readlink(path, buf, s) +} + +//sys utimes(path string, times *[2]Timeval) (err error) +func Utimes(path string, tv []Timeval) error { + if len(tv) != 2 { + return EINVAL + } + return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) +} + +//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) +func UtimesNano(path string, ts []Timespec) error { + if len(ts) != 2 { + return EINVAL + } + return utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +//sys unlinkat(dirfd int, path string, flags int) (err error) +func Unlinkat(dirfd int, path string) (err error) { + return unlinkat(dirfd, 
path, 0) +} + +//sys getcwd(buf *byte, size uint64) (err error) + +const ImplementsGetwd = true + +func Getwd() (ret string, err error) { + for len := uint64(4096); ; len *= 2 { + b := make([]byte, len) + err := getcwd(&b[0], len) + if err == nil { + i := 0 + for b[i] != 0 { + i++ + } + return string(b[0:i]), nil + } + if err != ERANGE { + return "", err + } + } +} + +func Getcwd(buf []byte) (n int, err error) { + err = getcwd(&buf[0], uint64(len(buf))) + if err == nil { + i := 0 + for buf[i] != 0 { + i++ + } + n = i + 1 + } + return +} + +//sysnb getgroups(ngid int, gid *_Gid_t) (n int, err error) +//sysnb setgroups(ngid int, gid *_Gid_t) (err error) + +func Getgroups() (gids []int, err error) { + n, err := getgroups(0, nil) + if err != nil { + return nil, err + } + if n == 0 { + return nil, nil + } + + // Sanity check group count. Max is 16 on BSD. + if n < 0 || n > 1000 { + return nil, EINVAL + } + + a := make([]_Gid_t, n) + n, err = getgroups(n, &a[0]) + if err != nil { + return nil, err + } + gids = make([]int, n) + for i, v := range a[0:n] { + gids[i] = int(v) + } + return +} + +func Setgroups(gids []int) (err error) { + if len(gids) == 0 { + return setgroups(0, nil) + } + + a := make([]_Gid_t, len(gids)) + for i, v := range gids { + a[i] = _Gid_t(v) + } + return setgroups(len(a), &a[0]) +} + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} + +func Gettimeofday(tv *Timeval) (err error) { + err = gettimeofday(tv, nil) + return +} + +// TODO +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + return -1, ENOSYS +} + +//sys 
getdirent(fd int, buf []byte) (n int, err error) +func ReadDirent(fd int, buf []byte) (n int, err error) { + return getdirent(fd, buf) +} + +//sys wait4(pid _Pid_t, status *_C_int, options int, rusage *Rusage) (wpid _Pid_t, err error) +func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + var status _C_int + var r _Pid_t + err = ERESTART + // AIX wait4 may return with ERESTART errno, while the processus is still + // active. + for err == ERESTART { + r, err = wait4(_Pid_t(pid), &status, options, rusage) + } + wpid = int(r) + if wstatus != nil { + *wstatus = WaitStatus(status) + } + return +} + +/* + * Socket + */ +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys Getkerninfo(op int32, where uintptr, size uintptr, arg int64) (i int32, err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys Listen(s int, backlog int) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sys socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys Shutdown(s int, how int) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET + p := 
(*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil +} + +func (sa *RawSockaddrUnix) setLen(n int) { + sa.Len = uint8(3 + n) // 2 for Family, Len; 1 for NUL. +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + sa.raw.setLen(n) + for i := 0; i < n; i++ { + sa.raw.Path[i] = uint8(name[i]) + } + // length is family (uint16), name, NUL. 
+ sl := _Socklen(2) + if n > 0 { + sl += _Socklen(n) + 1 + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +func Getsockname(fd int) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + if err = getsockname(fd, &rsa, &len); err != nil { + return + } + return anyToSockaddr(&rsa) +} + +//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) +func Accept(fd int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept(fd, &rsa, &len) + if err != nil { + return + } + sa, err = anyToSockaddr(&rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var msg Msghdr + var rsa RawSockaddrAny + msg.Name = (*byte)(unsafe.Pointer(&rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return + } + // receive at least one normal byte + if sockType != SOCK_DGRAM && len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = recvmsg(fd, &msg, flags); err != nil { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + // source address is only specified if the socket is unconnected + if rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(&rsa) + } + return +} + +func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { + _, err = SendmsgN(fd, p, oob, to, flags) + return +} + +func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + 
if err != nil { + return 0, err + } + } + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var iov Iovec + if len(p) > 0 { + iov.Base = (*byte)(unsafe.Pointer(&p[0])) + iov.SetLen(len(p)) + } + var dummy byte + if len(oob) > 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return 0, err + } + // send at least one normal byte + if sockType != SOCK_DGRAM && len(p) == 0 { + iov.Base = &dummy + iov.SetLen(1) + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + msg.Iov = &iov + msg.Iovlen = 1 + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && len(p) == 0 { + n = 0 + } + return n, nil +} + +func (sa *RawSockaddrUnix) getLen() (int, error) { + // Some versions of AIX have a bug in getsockname (see IV78655). + // We can't rely on sa.Len being set correctly. + n := SizeofSockaddrUnix - 3 // substract leading Family, Len, terminating NUL. 
+ for i := 0; i < n; i++ { + if sa.Path[i] == 0 { + n = i + break + } + } + return n, nil +} + +func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + n, err := pp.getLen() + if err != nil { + return nil, err + } + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0])) + sa.Name = string(bytes[0:n]) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, EAFNOSUPPORT +} + +/* + * Wait + */ + +type WaitStatus uint32 + +func (w WaitStatus) Stopped() bool { return w&0x40 != 0 } +func (w WaitStatus) StopSignal() Signal { + if !w.Stopped() { + return -1 + } + return Signal(w>>8) & 0xFF +} + +func (w WaitStatus) Exited() bool { return w&0xFF == 0 } +func (w WaitStatus) ExitStatus() int { + if !w.Exited() { + return -1 + } + return int((w >> 8) & 0xFF) +} + +func (w WaitStatus) Signaled() bool { return w&0x40 == 0 && w&0xFF != 0 } +func (w WaitStatus) Signal() Signal { + if !w.Signaled() { + return -1 + } + return Signal(w>>16) & 0xFF +} + +func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } + +func (w WaitStatus) CoreDump() bool { return w&0x200 == 0 } + +func (w WaitStatus) TrapCause() int { return -1 } + +/* + * ptrace + */ + +//sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) +//sys ptrace64(request int, id int64, addr int64, data int, buff uintptr) (err error) + +func raw_ptrace(request int, pid int, addr 
*byte, data *byte) Errno { + if request == PTRACE_TRACEME { + // Convert to AIX ptrace call. + err := ptrace64(PT_TRACE_ME, 0, 0, 0, 0) + if err != nil { + return err.(Errno) + } + return 0 + } + return ENOSYS +} + +func ptracePeek(pid int, addr uintptr, out []byte) (count int, err error) { + n := 0 + for len(out) > 0 { + bsize := len(out) + if bsize > 1024 { + bsize = 1024 + } + err = ptrace64(PT_READ_BLOCK, int64(pid), int64(addr), bsize, uintptr(unsafe.Pointer(&out[0]))) + if err != nil { + return 0, err + } + addr += uintptr(bsize) + n += bsize + out = out[n:] + } + return n, nil +} + +func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(pid, addr, out) +} + +func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { + return ptracePeek(pid, addr, out) +} + +func ptracePoke(pid int, addr uintptr, data []byte) (count int, err error) { + n := 0 + for len(data) > 0 { + bsize := len(data) + if bsize > 1024 { + bsize = 1024 + } + err = ptrace64(PT_WRITE_BLOCK, int64(pid), int64(addr), bsize, uintptr(unsafe.Pointer(&data[0]))) + if err != nil { + return 0, err + } + addr += uintptr(bsize) + n += bsize + data = data[n:] + } + return n, nil +} + +func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(pid, addr, data) +} + +func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) { + return ptracePoke(pid, addr, data) +} + +func PtraceCont(pid int, signal int) (err error) { + return ptrace64(PT_CONTINUE, int64(pid), 1, signal, 0) +} + +func PtraceSingleStep(pid int) (err error) { return ptrace64(PT_STEP, int64(pid), 1, 0, 0) } + +func PtraceAttach(pid int) (err error) { return ptrace64(PT_ATTACH, int64(pid), 0, 0, 0) } + +func PtraceDetach(pid int) (err error) { return ptrace64(PT_DETACH, int64(pid), 0, 0, 0) } + +/* + * Direct access + */ + +//sys Acct(path string) (err error) +//sys Chdir(path string) (err error) +//sys Chmod(path 
string, mode uint32) (err error) +//sys Chown(path string, uid int, gid int) (err error) +//sys Close(fd int) (err error) +//sys Dup(fd int) (nfd int, err error) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchdir(fd int) (err error) +//sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) +//sys Fpathconf(fd int, name int) (val int, err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sys Fsync(fd int) (err error) +//sysnb Getgid() (gid int) +//sysnb Getpid() (pid int) +//sys Geteuid() (euid int) +//sys Getegid() (egid int) +//sys Getppid() (ppid int) +//sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getuid() (uid int) +//sys Kill(pid int, signum Signal) (err error) +//sys Lchown(path string, uid int, gid int) (err error) +//sys Link(path string, link string) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys Mkdir(path string, mode uint32) (err error) +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Open(path string, mode int, perm uint32) (fd int, err error) +//sys Pread(fd int, p []byte, offset int64) (n int, err error) +//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) +//sys read(fd int, p []byte) (n int, err error) +//sys Reboot(how int) (err error) +//sys Rename(from string, to string) (err error) +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) +//sys Rmdir(path string) (err error) +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek +//sysnb Setegid(egid int) (err error) +//sysnb Seteuid(euid int) (err 
error) +//sysnb Setgid(gid int) (err error) +//sysnb Setpgid(pid int, pgid int) (err error) +//sysnb Setregid(rgid int, egid int) (err error) +//sysnb Setreuid(ruid int, euid int) (err error) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys Symlink(path string, link string) (err error) +//sys Truncate(path string, length int64) (err error) +//sys Umask(newmask int) (oldmask int) +//sys Unlink(path string) (err error) +//sysnb Uname(buf *Utsname) (err error) +//sys write(fd int, p []byte) (n int, err error) + +//sys gettimeofday(tv *Timeval, tzp *Timezone) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: int32(usec)} +} + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_read)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +/* + * Map + */ + +var mapper = &mmapper{ + active: make(map[*byte][]byte), + mmap: mmap, + munmap: munmap, +} + +//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) +//sys munmap(addr uintptr, length uintptr) (err error) + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} diff --git a/src/syscall/syscall_aix_ppc64.go b/src/syscall/syscall_aix_ppc64.go new file mode 100644 index 0000000000000..21ad5bc296b61 --- /dev/null +++ b/src/syscall/syscall_aix_ppc64.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syscall + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} diff --git a/src/syscall/syscall_bsd.go b/src/syscall/syscall_bsd.go index 7337454abed30..3d04349387173 100644 --- a/src/syscall/syscall_bsd.go +++ b/src/syscall/syscall_bsd.go @@ -447,8 +447,6 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e return kevent(kq, change, len(changes), event, len(events), timeout) } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - func Sysctl(name string) (value string, err error) { // Translate name to mib number. mib, err := nametomib(name) diff --git a/src/syscall/syscall_darwin.go b/src/syscall/syscall_darwin.go index 4d6aa4fcf23e4..80e42b0aec8cb 100644 --- a/src/syscall/syscall_darwin.go +++ b/src/syscall/syscall_darwin.go @@ -120,8 +120,8 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( return nil, err } - _, _, e1 := Syscall6( - SYS_GETATTRLIST, + _, _, e1 := syscall6( + funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(&attrList)), uintptr(unsafe.Pointer(&attrBuf[0])), @@ -163,13 +163,21 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( return } -//sysnb pipe() (r int, w int, err error) +func libc_getattrlist_trampoline() + +//go:linkname libc_getattrlist libc_getattrlist +//go:cgo_import_dynamic libc_getattrlist getattrlist "/usr/lib/libSystem.B.dylib" + +//sysnb pipe(p *[2]int32) (err error) func Pipe(p []int) (err error) { if len(p) != 2 { return EINVAL } - p[0], p[1], err = pipe() + var q [2]int32 + err = pipe(&q) + p[0] = int(q[0]) + p[1] = int(q[1]) return } @@ -180,7 +188,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { _p0 = 
unsafe.Pointer(&buf[0]) bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_getfsstat64_trampoline), uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 @@ -188,6 +196,11 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +func libc_getfsstat64_trampoline() + +//go:linkname libc_getfsstat64 libc_getfsstat64 +//go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" + func setattrlistTimes(path string, times []Timespec) error { _p0, err := BytePtrFromString(path) if err != nil { @@ -201,8 +214,8 @@ func setattrlistTimes(path string, times []Timespec) error { // order is mtime, atime: the opposite of Chtimes attributes := [2]Timespec{times[1], times[0]} const options = 0 - _, _, e1 := Syscall6( - SYS_SETATTRLIST, + _, _, e1 := syscall6( + funcPC(libc_setattrlist_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(&attrList)), uintptr(unsafe.Pointer(&attributes)), @@ -216,6 +229,11 @@ func setattrlistTimes(path string, times []Timespec) error { return nil } +func libc_setattrlist_trampoline() + +//go:linkname libc_setattrlist libc_setattrlist +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + func utimensat(dirfd int, path string, times *[2]Timespec, flag int) error { // Darwin doesn't support SYS_UTIMENSAT return ENOSYS @@ -249,11 +267,9 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 -//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys Fsync(fd int) (err error) +// Fsync is not called for os.File.Sync(). 
Please see internal/poll/fd_fsync_darwin.go //sys Ftruncate(fd int, length int64) (err error) -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -272,7 +288,6 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) //sys Listen(s int, backlog int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Mkdir(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) @@ -290,7 +305,7 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sys Rename(from string, to string) (err error) //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) -//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK +//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_lseek //sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) @@ -305,8 +320,6 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 -//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 //sys Symlink(path string, link string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) @@ -315,7 +328,51 @@ func Kill(pid int, signum Signal) (err error) { return kill(pid, int(signum), 1) //sys Unlink(path string) (err error) //sys Unmount(path string, flags int) (err error) //sys 
write(fd int, p []byte) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (cnt uintptr, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE +//sysnb fork() (pid int, err error) +//sysnb ioctl(fd int, req int, arg int) (err error) +//sysnb ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_ioctl +//sysnb execve(path *byte, argv **byte, envp **byte) (err error) +//sysnb exit(res int) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) = SYS_fcntl +//sys unlinkat(fd int, path string, flags int) (err error) +//sys openat(fd int, path string, flags int, perm uint32) (fdret int, err error) + +func init() { + execveDarwin = execve +} + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +// Find the 
entry point for f. See comments in runtime/proc.go for the +// function of the same name. +//go:nosplit +func funcPC(f func()) uintptr { + return **(**uintptr)(unsafe.Pointer(&f)) +} diff --git a/src/syscall/syscall_darwin_386.go b/src/syscall/syscall_darwin_386.go index 05d02fc747145..045ebc726b813 100644 --- a/src/syscall/syscall_darwin_386.go +++ b/src/syscall/syscall_darwin_386.go @@ -14,23 +14,14 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) error { - // The tv passed to gettimeofday must be non-nil. - // Before macOS Sierra (10.12), tv was otherwise unused and - // the answers came back in the two registers. - // As of Sierra, gettimeofday return zeros and populates - // tv itself. - sec, usec, err := gettimeofday(tv) - if err != nil { - return err - } - if sec != 0 || usec != 0 { - tv.Sec = int32(sec) - tv.Usec = int32(usec) - } - return nil -} +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_fstat64 +//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_fstatfs64 +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS___getdirentries64 +//sysnb Gettimeofday(tp *Timeval) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_lstat64 +//sys Stat(path string, stat *Stat_t) (err error) = SYS_stat64 +//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_statfs64 +//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_fstatat64 func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) @@ -53,7 +44,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var length = uint64(count) - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + _, _, e1 := 
Syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) written = int(length) @@ -63,4 +54,12 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func libc_sendfile_trampoline() + +//go:linkname libc_sendfile libc_sendfile +//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin_32.go) +func syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) + func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // sic diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go index b15bd68dc76ba..7b6493bf9fe58 100644 --- a/src/syscall/syscall_darwin_amd64.go +++ b/src/syscall/syscall_darwin_amd64.go @@ -14,23 +14,14 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) error { - // The tv passed to gettimeofday must be non-nil. - // Before macOS Sierra (10.12), tv was otherwise unused and - // the answers came back in the two registers. - // As of Sierra, gettimeofday return zeros and populates - // tv itself. 
- sec, usec, err := gettimeofday(tv) - if err != nil { - return err - } - if sec != 0 || usec != 0 { - tv.Sec = sec - tv.Usec = usec - } - return nil -} +//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_fstat64 +//sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_fstatfs64 +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS___getdirentries64 +//sysnb Gettimeofday(tp *Timeval) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) = SYS_lstat64 +//sys Stat(path string, stat *Stat_t) (err error) = SYS_stat64 +//sys Statfs(path string, stat *Statfs_t) (err error) = SYS_statfs64 +//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_fstatat64 func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) @@ -53,7 +44,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var length = uint64(count) - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + _, _, e1 := syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) written = int(length) @@ -63,4 +54,12 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func libc_sendfile_trampoline() + +//go:linkname libc_sendfile libc_sendfile +//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin_64.go) +func syscallX(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) + func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) diff --git a/src/syscall/syscall_darwin_arm.go b/src/syscall/syscall_darwin_arm.go index 73bf83f41c223..cb7489ed7beed 100644 --- a/src/syscall/syscall_darwin_arm.go +++ b/src/syscall/syscall_darwin_arm.go @@ -14,20 +14,18 @@ func setTimeval(sec, 
usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) error { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - if err != nil { - return err - } - if sec != 0 || usec != 0 { - tv.Sec = int32(sec) - tv.Usec = int32(usec) - } - return nil +//sys closedir(dir uintptr) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sysnb Gettimeofday(tp *Timeval) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys readdir_r(dir uintptr, entry uintptr, result uintptr) (res int) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS } func SetKevent(k *Kevent_t, fd, mode, flags int) { @@ -51,7 +49,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var length = uint64(count) - _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) + _, _, e1 := Syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(*offset>>32), uintptr(unsafe.Pointer(&length)), 0, 0, 0, 0) written = int(length) @@ -61,4 +59,27 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func libc_sendfile_trampoline() + +//go:linkname libc_sendfile libc_sendfile +//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := 
syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fdopendir_trampoline() + +//go:linkname libc_fdopendir libc_fdopendir +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin_32.go) +func syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) + func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // sic diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go index 6c8f9961f0809..57902d45c6358 100644 --- a/src/syscall/syscall_darwin_arm64.go +++ b/src/syscall/syscall_darwin_arm64.go @@ -14,20 +14,18 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int64(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) error { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - if err != nil { - return err - } - if sec != 0 || usec != 0 { - tv.Sec = sec - tv.Usec = usec - } - return nil +//sys closedir(dir uintptr) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sysnb Gettimeofday(tp *Timeval) (err error) +//sys Lstat(path string, stat *Stat_t) (err error) +//sys readdir_r(dirp uintptr, entry uintptr, result uintptr) (res int) +//sys Stat(path string, stat *Stat_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) +//sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS } func SetKevent(k *Kevent_t, fd, mode, flags int) { @@ -51,7 +49,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var length = uint64(count) - _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) + _, _, e1 := syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(unsafe.Pointer(&length)), 0, 0) written = int(length) @@ -61,4 +59,27 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +func libc_sendfile_trampoline() + +//go:linkname libc_sendfile libc_sendfile +//go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscallXPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fdopendir_trampoline() + +//go:linkname libc_fdopendir libc_fdopendir +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin_64.go) +func syscallX(fn, a1, a2, a3 uintptr) 
(r1, r2 uintptr, err Errno) +func syscallXPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) + func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // sic diff --git a/src/syscall/syscall_dragonfly.go b/src/syscall/syscall_dragonfly.go index 3dbbe342cfbb2..56dd0d76e9198 100644 --- a/src/syscall/syscall_dragonfly.go +++ b/src/syscall/syscall_dragonfly.go @@ -217,3 +217,4 @@ func setattrlistTimes(path string, times []Timespec) error { //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) //sys getcwd(buf []byte) (n int, err error) = SYS___GETCWD +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/src/syscall/syscall_freebsd.go b/src/syscall/syscall_freebsd.go index d5738ba1c1344..87a27b1ff74c2 100644 --- a/src/syscall/syscall_freebsd.go +++ b/src/syscall/syscall_freebsd.go @@ -12,7 +12,34 @@ package syscall -import "unsafe" +import ( + "sync" + "unsafe" +) + +const ( + _SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } + _SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ + _SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ + _SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ + _SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ + _SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ + _SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ +) + +// See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. 
+var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h +const _ino64First = 1200031 + +func supportsABI(ver uint32) bool { + osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) + return osreldate >= ver +} type SockaddrDatalink struct { Len uint8 @@ -113,17 +140,39 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer - var bufsize uintptr + var ( + _p0 unsafe.Pointer + bufsize uintptr + oldBuf []statfs_freebsd11_t + needsConvert bool + ) + if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + if supportsABI(_ino64First) { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) + } else { + n := len(buf) + oldBuf = make([]statfs_freebsd11_t, n) + _p0 = unsafe.Pointer(&oldBuf[0]) + bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) + needsConvert = true + } + } + var sysno uintptr = SYS_GETFSSTAT + if supportsABI(_ino64First) { + sysno = _SYS_GETFSSTAT_FREEBSD12 } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } + if e1 == 0 && needsConvert { + for i := range oldBuf { + buf[i].convertFrom(&oldBuf[i]) + } + } return } @@ -132,6 +181,221 @@ func setattrlistTimes(path string, times []Timespec) error { return ENOSYS } +func Stat(path string, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstatat_freebsd12(_AT_FDCWD, path, st, 0) + } + err = stat(path, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Lstat(path string, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstatat_freebsd12(_AT_FDCWD, path, st, 
_AT_SYMLINK_NOFOLLOW) + } + err = lstat(path, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Fstat(fd int, st *Stat_t) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstat_freebsd12(fd, st) + } + err = fstat(fd, &oldStat) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { + var oldStat stat_freebsd11_t + if supportsABI(_ino64First) { + return fstatat_freebsd12(fd, path, st, flags) + } + err = fstatat(fd, path, &oldStat, flags) + if err != nil { + return err + } + + st.convertFrom(&oldStat) + return nil +} + +func Statfs(path string, st *Statfs_t) (err error) { + var oldStatfs statfs_freebsd11_t + if supportsABI(_ino64First) { + return statfs_freebsd12(path, st) + } + err = statfs(path, &oldStatfs) + if err != nil { + return err + } + + st.convertFrom(&oldStatfs) + return nil +} + +func Fstatfs(fd int, st *Statfs_t) (err error) { + var oldStatfs statfs_freebsd11_t + if supportsABI(_ino64First) { + return fstatfs_freebsd12(fd, st) + } + err = fstatfs(fd, &oldStatfs) + if err != nil { + return err + } + + st.convertFrom(&oldStatfs) + return nil +} + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + if supportsABI(_ino64First) { + return getdirentries_freebsd12(fd, buf, basep) + } + + // The old syscall entries are smaller than the new. Use 1/4 of the original + // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). 
+ oldBufLen := roundup(len(buf)/4, _dirblksiz) + oldBuf := make([]byte, oldBufLen) + n, err = getdirentries(fd, oldBuf, basep) + if err == nil && n > 0 { + n = convertFromDirents11(buf, oldBuf[:n]) + } + return +} + +func Mknod(path string, mode uint32, dev uint64) (err error) { + var oldDev int + if supportsABI(_ino64First) { + return mknodat_freebsd12(_AT_FDCWD, path, mode, dev) + } + oldDev = int(dev) + return mknod(path, mode, oldDev) +} + +// round x to the nearest multiple of y, larger or equal to x. +// +// from /usr/include/sys/param.h Macros for counting and rounding. +// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +func roundup(x, y int) int { + return ((x + y - 1) / y) * y +} + +func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { + *s = Stat_t{ + Dev: uint64(old.Dev), + Ino: uint64(old.Ino), + Nlink: uint64(old.Nlink), + Mode: old.Mode, + Uid: old.Uid, + Gid: old.Gid, + Rdev: uint64(old.Rdev), + Atimespec: old.Atimespec, + Mtimespec: old.Mtimespec, + Ctimespec: old.Ctimespec, + Birthtimespec: old.Birthtimespec, + Size: old.Size, + Blocks: old.Blocks, + Blksize: old.Blksize, + Flags: old.Flags, + Gen: uint64(old.Gen), + } +} + +func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { + *s = Statfs_t{ + Version: _statfsVersion, + Type: old.Type, + Flags: old.Flags, + Bsize: old.Bsize, + Iosize: old.Iosize, + Blocks: old.Blocks, + Bfree: old.Bfree, + Bavail: old.Bavail, + Files: old.Files, + Ffree: old.Ffree, + Syncwrites: old.Syncwrites, + Asyncwrites: old.Asyncwrites, + Syncreads: old.Syncreads, + Asyncreads: old.Asyncreads, + // Spare + Namemax: old.Namemax, + Owner: old.Owner, + Fsid: old.Fsid, + // Charspare + // Fstypename + // Mntfromname + // Mntonname + } + + sl := old.Fstypename[:] + n := clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Fstypename[:], old.Fstypename[:n]) + + sl = old.Mntfromname[:] + n = clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Mntfromname[:], old.Mntfromname[:n]) + + sl = old.Mntonname[:] + n = 
clen(*(*[]byte)(unsafe.Pointer(&sl))) + copy(s.Mntonname[:], old.Mntonname[:n]) +} + +func convertFromDirents11(buf []byte, old []byte) int { + const ( + fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) + oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) + ) + + dstPos := 0 + srcPos := 0 + for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { + dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos])) + srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) + + reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) + if dstPos+reclen > len(buf) { + break + } + + dstDirent.Fileno = uint64(srcDirent.Fileno) + dstDirent.Off = 0 + dstDirent.Reclen = uint16(reclen) + dstDirent.Type = srcDirent.Type + dstDirent.Pad0 = 0 + dstDirent.Namlen = uint16(srcDirent.Namlen) + dstDirent.Pad1 = 0 + + copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) + padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] + for i := range padding { + padding[i] = 0 + } + + dstPos += int(dstDirent.Reclen) + srcPos += int(srcDirent.Reclen) + } + + return dstPos +} + /* * Exposed directly */ @@ -151,11 +415,16 @@ func setattrlistTimes(path string, times []Timespec) error { //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys Fstat(fd int, stat *Stat_t) (err error) -//sys Fstatfs(fd int, stat *Statfs_t) (err error) +//sys fstat(fd int, stat *stat_freebsd11_t) (err error) +//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) = _SYS_FSTAT_FREEBSD12 +//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) +//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) = _SYS_FSTATAT_FREEBSD12 +//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) +//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) = _SYS_FSTATFS_FREEBSD12 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) 
(err error) -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) +//sys getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) = _SYS_GETDIRENTRIES_FREEBSD12 //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -176,10 +445,11 @@ func setattrlistTimes(path string, times []Timespec) error { //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) //sys Listen(s int, backlog int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) +//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys Mknod(path string, mode uint32, dev int) (err error) +//sys mknod(path string, mode uint32, dev int) (err error) +//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) = _SYS_MKNODAT_FREEBSD12 //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -204,8 +474,9 @@ func setattrlistTimes(path string, times []Timespec) error { //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys Stat(path string, stat *Stat_t) (err error) -//sys Statfs(path string, stat *Statfs_t) (err error) +//sys stat(path string, stat *stat_freebsd11_t) (err error) +//sys statfs(path string, stat *statfs_freebsd11_t) (err error) +//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) = _SYS_STATFS_FREEBSD12 //sys Symlink(path string, link string) (err error) //sys Sync() (err error) //sys Truncate(path string, length int64) (err error) @@ -221,3 +492,4 @@ func setattrlistTimes(path string, times []Timespec) error { //sys accept4(fd int, rsa 
*RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) //sys getcwd(buf []byte) (n int, err error) = SYS___GETCWD +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/src/syscall/syscall_freebsd_test.go b/src/syscall/syscall_freebsd_test.go new file mode 100644 index 0000000000000..3ccfe5d463f0e --- /dev/null +++ b/src/syscall/syscall_freebsd_test.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd + +package syscall_test + +import ( + "fmt" + "syscall" + "testing" + "unsafe" +) + +func TestConvertFromDirent11(t *testing.T) { + const ( + filenameFmt = "%04d" + numFiles = 64 + fixedHdrSize = int(unsafe.Offsetof(syscall.Dirent_freebsd11{}.Name)) + ) + + namlen := len(fmt.Sprintf(filenameFmt, 0)) + reclen := syscall.Roundup(fixedHdrSize+namlen+1, 4) + old := make([]byte, numFiles*reclen) + for i := 0; i < numFiles; i++ { + dent := syscall.Dirent_freebsd11{ + Fileno: uint32(i + 1), + Reclen: uint16(reclen), + Type: syscall.DT_REG, + Namlen: uint8(namlen), + } + rec := make([]byte, reclen) + copy(rec, (*[fixedHdrSize]byte)(unsafe.Pointer(&dent))[:]) + copy(rec[fixedHdrSize:], fmt.Sprintf(filenameFmt, i+1)) + copy(old[i*reclen:], rec) + } + + buf := make([]byte, 2*len(old)) + n := syscall.ConvertFromDirents11(buf, old) + + names := make([]string, 0, numFiles) + _, _, names = syscall.ParseDirent(buf[:n], -1, names) + + if len(names) != numFiles { + t.Errorf("expected %d files, have %d; names: %q", numFiles, len(names), names) + } + + for i, name := range names { + if expected := fmt.Sprintf(filenameFmt, i+1); name != expected { + t.Errorf("expected names[%d] to be %q; got %q", i, expected, name) + } + } +} diff --git a/src/syscall/syscall_js.go 
b/src/syscall/syscall_js.go index 6822eec8359ca..2e1a9ec9f1fef 100644 --- a/src/syscall/syscall_js.go +++ b/src/syscall/syscall_js.go @@ -74,6 +74,7 @@ const ( SIGKILL SIGTRAP SIGQUIT + SIGTERM ) func (s Signal) Signal() {} diff --git a/src/syscall/syscall_linux_386.go b/src/syscall/syscall_linux_386.go index 49db72450fbb7..6e162ebb41f34 100644 --- a/src/syscall/syscall_linux_386.go +++ b/src/syscall/syscall_linux_386.go @@ -62,8 +62,6 @@ func Pipe2(p []int, flags int) (err error) { //sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) -//sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32 -//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 @@ -74,7 +72,6 @@ func Pipe2(p []int, flags int) (err error) { //sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 //sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) -//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) = SYS_GETGROUPS32 @@ -84,6 +81,18 @@ func Pipe2(p []int, flags int) (err error) { //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +func Stat(path string, stat *Stat_t) (err error) { + return fstatat(_AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid 
int) (err error) { + return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW) +} + func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { page := uintptr(offset / 4096) if offset != int64(page)*4096 { diff --git a/src/syscall/syscall_linux_amd64.go b/src/syscall/syscall_linux_amd64.go index 1a21d9db6f2b4..f740ab4e72d1a 100644 --- a/src/syscall/syscall_linux_amd64.go +++ b/src/syscall/syscall_linux_amd64.go @@ -22,9 +22,7 @@ const ( //sysnb InotifyInit() (fd int, err error) //sys Ioperm(from int, num int, on int) (err error) //sys Iopl(level int) (err error) -//sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -66,6 +64,14 @@ func Stat(path string, stat *Stat_t) (err error) { return fstatat(_AT_FDCWD, path, stat, 0) } +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW) +} + //go:noescape func gettimeofday(tv *Timeval) (err Errno) diff --git a/src/syscall/syscall_linux_arm.go b/src/syscall/syscall_linux_arm.go index b0c0ac7c4fae6..65543193e15eb 100644 --- a/src/syscall/syscall_linux_arm.go +++ b/src/syscall/syscall_linux_arm.go @@ -83,9 +83,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sysnb Getgid() (gid int) = SYS_GETGID32 //sysnb Getuid() (uid int) = SYS_GETUID32 //sysnb InotifyInit() (fd int, err error) -//sys Lchown(path string, 
uid int, gid int) (err error) = SYS_LCHOWN32 //sys Listen(s int, n int) (err error) -//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 @@ -96,7 +94,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) -//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 // Vsyscalls on amd64. //sysnb Gettimeofday(tv *Timeval) (err error) @@ -110,6 +107,18 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +func Stat(path string, stat *Stat_t) (err error) { + return fstatat(_AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(_AT_FDCWD, path, uid, gid, _AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return fstatat(_AT_FDCWD, path, stat, _AT_SYMLINK_NOFOLLOW) +} + func Fstatfs(fd int, buf *Statfs_t) (err error) { _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf))) if e != 0 { diff --git a/src/syscall/syscall_linux_ppc64x.go b/src/syscall/syscall_linux_ppc64x.go index 88a520e3fd613..1cdc5f9a44155 100644 --- a/src/syscall/syscall_linux_ppc64x.go +++ b/src/syscall/syscall_linux_ppc64x.go @@ -45,7 +45,6 @@ const ( //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path 
string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) -//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE2 //sys Truncate(path string, length int64) (err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) //sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) @@ -120,3 +119,11 @@ func (cmsg *Cmsghdr) SetLen(length int) { func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) { panic("not implemented") } + +//sys syncFileRange2(fd int, flags int, off int64, n int64) (err error) = SYS_SYNC_FILE_RANGE2 + +func SyncFileRange(fd int, off int64, n int64, flags int) error { + // The sync_file_range and sync_file_range2 syscalls differ only in the + // order of their arguments. + return syncFileRange2(fd, flags, off, n) +} diff --git a/src/syscall/syscall_linux_test.go b/src/syscall/syscall_linux_test.go index 99de6ebaf2fcb..293549a841d05 100644 --- a/src/syscall/syscall_linux_test.go +++ b/src/syscall/syscall_linux_test.go @@ -19,6 +19,7 @@ import ( "syscall" "testing" "time" + "unsafe" ) // chtmpdir changes the working directory to a new temporary directory and @@ -294,7 +295,7 @@ func TestSyscallNoError(t *testing.T) { // On Linux there are currently no syscalls which don't fail and return // a value larger than 0xfffffffffffff001 so we could test RawSyscall // vs. RawSyscallNoError on 64bit architectures. 
- if runtime.GOARCH != "386" && runtime.GOARCH != "arm" { + if unsafe.Sizeof(uintptr(0)) != 4 { t.Skip("skipping on non-32bit architecture") } @@ -302,6 +303,10 @@ func TestSyscallNoError(t *testing.T) { t.Skip("skipping root only test") } + if runtime.GOOS == "android" { + t.Skip("skipping on rooted android, see issue 27364") + } + // Copy the test binary to a location that a non-root user can read/execute // after we drop privileges tempDir, err := ioutil.TempDir("", "TestSyscallNoError") diff --git a/src/syscall/syscall_netbsd.go b/src/syscall/syscall_netbsd.go index 18ed885f14ca6..fc13b706b5da3 100644 --- a/src/syscall/syscall_netbsd.go +++ b/src/syscall/syscall_netbsd.go @@ -52,7 +52,6 @@ func sysctlNodes(mib []_C_int) (nodes []Sysctlnode, err error) { } func nametomib(name string) (mib []_C_int, err error) { - // Split name into components. var parts []string last := 0 @@ -234,3 +233,4 @@ func setattrlistTimes(path string, times []Timespec) error { //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) //sys getcwd(buf []byte) (n int, err error) = SYS___GETCWD +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/src/syscall/syscall_openbsd.go b/src/syscall/syscall_openbsd.go index d2f58e67d7610..eebb5ceb1a860 100644 --- a/src/syscall/syscall_openbsd.go +++ b/src/syscall/syscall_openbsd.go @@ -29,7 +29,6 @@ type SockaddrDatalink struct { func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) func nametomib(name string) (mib []_C_int, err error) { - // Perform lookup via a binary search left := 0 right := len(sysctlMib) - 1 @@ -212,3 +211,4 @@ func setattrlistTimes(path string, times []Timespec) error { //sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) 
//sys getcwd(buf []byte) (n int, err error) = SYS___GETCWD +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/src/syscall/syscall_unix.go b/src/syscall/syscall_unix.go index c9c0f62dd2f51..4336851554801 100644 --- a/src/syscall/syscall_unix.go +++ b/src/syscall/syscall_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package syscall diff --git a/src/syscall/syscall_unix_test.go b/src/syscall/syscall_unix_test.go index 637aece439596..085afb2941121 100644 --- a/src/syscall/syscall_unix_test.go +++ b/src/syscall/syscall_unix_test.go @@ -315,6 +315,12 @@ func TestRlimit(t *testing.T) { } set := rlimit set.Cur = set.Max - 1 + if runtime.GOOS == "darwin" && set.Cur > 10240 { + // The max file limit is 10240, even though + // the max returned by Getrlimit is 1<<63-1. + // This is OPEN_MAX in sys/syslimits.h. + set.Cur = 10240 + } err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &set) if err != nil { t.Fatalf("Setrlimit: set failed: %#v %v", set, err) @@ -326,15 +332,11 @@ func TestRlimit(t *testing.T) { } set = rlimit set.Cur = set.Max - 1 + if runtime.GOOS == "darwin" && set.Cur > 10240 { + set.Cur = 10240 + } if set != get { - // Seems like Darwin requires some privilege to - // increase the soft limit of rlimit sandbox, though - // Setrlimit never reports an error. 
- switch runtime.GOOS { - case "darwin": - default: - t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) - } + t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get) } err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) if err != nil { diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go index b234f3d67d4ce..de05840386280 100644 --- a/src/syscall/syscall_windows.go +++ b/src/syscall/syscall_windows.go @@ -9,6 +9,7 @@ package syscall import ( errorspkg "errors" "internal/race" + "runtime" "sync" "unicode/utf16" "unsafe" @@ -122,14 +123,14 @@ func compileCallback(fn interface{}, cleanstack bool) uintptr // NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. func NewCallback(fn interface{}) uintptr { return compileCallback(fn, true) } // NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. // This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. 
func NewCallbackCDecl(fn interface{}) uintptr { return compileCallback(fn, false) } @@ -340,12 +341,19 @@ const ptrSize = unsafe.Sizeof(uintptr(0)) // See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365542(v=vs.85).aspx func setFilePointerEx(handle Handle, distToMove int64, newFilePointer *int64, whence uint32) error { var e1 Errno - if ptrSize == 8 { + switch runtime.GOARCH { + default: + panic("unsupported architecture") + case "amd64": _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 4, uintptr(handle), uintptr(distToMove), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0, 0) - } else { + case "386": // distToMove is a LARGE_INTEGER: // https://msdn.microsoft.com/en-us/library/windows/desktop/aa383713(v=vs.85).aspx _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 5, uintptr(handle), uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence), 0) + case "arm": + // distToMove must be 8-byte aligned per ARM calling convention + // https://msdn.microsoft.com/en-us/library/dn736986.aspx#Anchor_7 + _, _, e1 = Syscall6(procSetFilePointerEx.Addr(), 6, uintptr(handle), 0, uintptr(distToMove), uintptr(distToMove>>32), uintptr(unsafe.Pointer(newFilePointer)), uintptr(whence)) } if e1 != 0 { return errnoErr(e1) @@ -626,7 +634,7 @@ type RawSockaddr struct { type RawSockaddrAny struct { Addr RawSockaddr - Pad [96]int8 + Pad [100]int8 } type Sockaddr interface { @@ -675,19 +683,69 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil } +type RawSockaddrUnix struct { + Family uint16 + Path [UNIX_PATH_MAX]int8 +} + type SockaddrUnix struct { Name string + raw RawSockaddrUnix } func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { - // TODO(brainman): implement SockaddrUnix.sockaddr() - return nil, 0, EWINDOWS + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, EINVAL + } + if n == 
len(sa.raw.Path) && name[0] != '@' { + return nil, 0, EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := int32(2) + if n > 0 { + sl += int32(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil } func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: - return nil, EWINDOWS + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. + n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + sa.Name = string(bytes) + return sa, nil case AF_INET: pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) diff --git a/src/syscall/timestruct.go b/src/syscall/timestruct.go index 84a00a77d8be9..d17811c1214bf 100644 --- a/src/syscall/timestruct.go +++ b/src/syscall/timestruct.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package syscall diff --git a/src/syscall/types_aix.go b/src/syscall/types_aix.go new file mode 100644 index 0000000000000..b961bdb197eaa --- /dev/null +++ b/src/syscall/types_aix.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package syscall + +/* +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics; for internal use. 
+ +const ( + sizeofPtr = C.sizeofPtr + sizeofShort = C.sizeof_short + sizeofInt = C.sizeof_int + sizeofLong = C.sizeof_long + sizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Timezone C.struct_timezone + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Pid_t C.pid_t + +type _Gid_t C.gid_t + +// Files + +type Flock_t C.struct_flock + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Fsid64_t C.fsid64_t + +type StTimespec_t C.st_timespec_t + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Cmsghdr C.struct_cmsghdr + +type ICMPv6Filter C.struct_icmp6_filter + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Linger C.struct_linger + +type Msghdr C.struct_msghdr + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr +) + +type IfMsgHdr 
C.struct_if_msghdr + +// Misc + +type Utsname C.struct_utsname + +const ( + _AT_FDCWD = C.AT_FDCWD + _AT_REMOVEDIR = C.AT_REMOVEDIR + _AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) diff --git a/src/syscall/types_dragonfly.go b/src/syscall/types_dragonfly.go index 0c060d932ece8..53bc12403b2fa 100644 --- a/src/syscall/types_dragonfly.go +++ b/src/syscall/types_dragonfly.go @@ -113,6 +113,8 @@ const ( // Directory mode bits S_IRUSR = C.S_IRUSR S_IWUSR = C.S_IWUSR S_IXUSR = C.S_IXUSR + S_IRWXG = C.S_IRWXG + S_IRWXO = C.S_IRWXO ) type Stat_t C.struct_stat diff --git a/src/syscall/types_freebsd.go b/src/syscall/types_freebsd.go index 020045bf84b71..f6860211211bf 100644 --- a/src/syscall/types_freebsd.go +++ b/src/syscall/types_freebsd.go @@ -14,7 +14,11 @@ Input to cgo -godefs. See also mkerrors.sh and mkall.sh package syscall /* -#define KERNEL +#define _WANT_FREEBSD11_STAT 1 +#define _WANT_FREEBSD11_STATFS 1 +#define _WANT_FREEBSD11_DIRENT 1 +#define _WANT_FREEBSD11_KEVENT 1 + #include #include #include @@ -60,50 +64,6 @@ struct sockaddr_any { char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; }; -// This structure is a duplicate of stat on FreeBSD 8-STABLE. -// See /usr/include/sys/stat.h. 
-struct stat8 { -#undef st_atimespec st_atim -#undef st_mtimespec st_mtim -#undef st_ctimespec st_ctim -#undef st_birthtimespec st_birthtim - __dev_t st_dev; - ino_t st_ino; - mode_t st_mode; - nlink_t st_nlink; - uid_t st_uid; - gid_t st_gid; - __dev_t st_rdev; -#if __BSD_VISIBLE - struct timespec st_atimespec; - struct timespec st_mtimespec; - struct timespec st_ctimespec; -#else - time_t st_atime; - long __st_atimensec; - time_t st_mtime; - long __st_mtimensec; - time_t st_ctime; - long __st_ctimensec; -#endif - off_t st_size; - blkcnt_t st_blocks; - blksize_t st_blksize; - fflags_t st_flags; - __uint32_t st_gen; - __int32_t st_lspare; -#if __BSD_VISIBLE - struct timespec st_birthtimespec; - unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); - unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec)); -#else - time_t st_birthtime; - long st_birthtimensec; - unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); - unsigned int :(8 / 2) * (16 - (int)sizeof(struct __timespec)); -#endif -}; - // This structure is a duplicate of if_data on FreeBSD 8-STABLE. // See /usr/include/net/if.h. struct if_data8 { @@ -130,7 +90,10 @@ struct if_data8 { u_long ifi_iqdrops; u_long ifi_noproto; u_long ifi_hwassist; +// FIXME: these are now unions, so maybe need to change definitions? 
+#undef ifi_epoch time_t ifi_epoch; +#undef ifi_lastchange struct timeval ifi_lastchange; }; @@ -198,16 +161,29 @@ const ( // Directory mode bits S_IRUSR = C.S_IRUSR S_IWUSR = C.S_IWUSR S_IXUSR = C.S_IXUSR + S_IRWXG = C.S_IRWXG + S_IRWXO = C.S_IRWXO ) -type Stat_t C.struct_stat8 +const ( + _statfsVersion = C.STATFS_VERSION + _dirblksiz = C.DIRBLKSIZ +) + +type Stat_t C.struct_stat + +type stat_freebsd11_t C.struct_freebsd11_stat type Statfs_t C.struct_statfs +type statfs_freebsd11_t C.struct_freebsd11_statfs + type Flock_t C.struct_flock type Dirent C.struct_dirent +type dirent_freebsd11 C.struct_freebsd11_dirent + type Fsid C.struct_fsid // File system limits @@ -279,7 +255,7 @@ const ( // Events (kqueue, kevent) -type Kevent_t C.struct_kevent +type Kevent_t C.struct_kevent_freebsd11 // Select @@ -346,7 +322,9 @@ type BpfZbufHeader C.struct_bpf_zbuf_header // Misc const ( - _AT_FDCWD = C.AT_FDCWD + _AT_FDCWD = C.AT_FDCWD + _AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + _AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW ) // Terminal handling diff --git a/src/syscall/types_linux.go b/src/syscall/types_linux.go index 3c4c2f2cfd767..ccc5c54f0bfd6 100644 --- a/src/syscall/types_linux.go +++ b/src/syscall/types_linux.go @@ -53,7 +53,6 @@ package syscall #include #include #include -#include #include enum { @@ -124,6 +123,15 @@ struct my_epoll_event { int32_t pad; }; +// ustat is deprecated and glibc 2.28 removed ustat.h. Provide the type here for +// backwards compatibility. 
Copied from /usr/include/bits/ustat.h +struct ustat { + __daddr_t f_tfree; + __ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + */ import "C" diff --git a/src/syscall/types_openbsd.go b/src/syscall/types_openbsd.go index 93456c31a0c29..922864815bc7d 100644 --- a/src/syscall/types_openbsd.go +++ b/src/syscall/types_openbsd.go @@ -114,6 +114,8 @@ const ( // Directory mode bits S_IRUSR = C.S_IRUSR S_IWUSR = C.S_IWUSR S_IXUSR = C.S_IXUSR + S_IRWXG = C.S_IRWXG + S_IRWXO = C.S_IRWXO ) type Stat_t C.struct_stat diff --git a/src/syscall/types_solaris.go b/src/syscall/types_solaris.go index a219a437d534a..76a74508d2b4e 100644 --- a/src/syscall/types_solaris.go +++ b/src/syscall/types_solaris.go @@ -101,6 +101,8 @@ type Rusage C.struct_rusage type Rlimit C.struct_rlimit +type _Pid_t C.pid_t + type _Gid_t C.gid_t // Files @@ -120,6 +122,8 @@ const ( // Directory mode bits S_IRUSR = C.S_IRUSR S_IWUSR = C.S_IWUSR S_IXUSR = C.S_IXUSR + S_IRWXG = C.S_IRWXG + S_IRWXO = C.S_IRWXO ) type Stat_t C.struct_stat diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go index 6911fe509cbbc..0b839339d2f19 100644 --- a/src/syscall/types_windows.go +++ b/src/syscall/types_windows.go @@ -1139,3 +1139,5 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 _SYMLINK_FLAG_RELATIVE = 1 ) + +const UNIX_PATH_MAX = 108 // defined in afunix.h diff --git a/src/syscall/types_windows_arm.go b/src/syscall/types_windows_arm.go new file mode 100644 index 0000000000000..e72e9f5ced2bd --- /dev/null +++ b/src/syscall/types_windows_arm.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syscall + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} diff --git a/src/syscall/zerrors_aix_ppc64.go b/src/syscall/zerrors_aix_ppc64.go new file mode 100644 index 0000000000000..60130099a75ae --- /dev/null +++ b/src/syscall/zerrors_aix_ppc64.go @@ -0,0 +1,1248 @@ +// mkerrors.sh -maix64 +// Code generated by the command above; DO NOT EDIT. + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -maix64 _const.go + +package syscall + +const ( + AF_APPLETALK = 0x10 + AF_BYPASS = 0x19 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_INTF = 0x14 + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_MAX = 0x1e + AF_NDD = 0x17 + AF_NETWARE = 0x16 + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_RIF = 0x15 + AF_ROUTE = 0x11 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ARPHRD_802_3 = 0x6 + ARPHRD_802_5 = 0x6 + ARPHRD_ETHER = 0x1 + ARPHRD_FDDI = 0x1 + B0 = 0x0 + B110 = 0x3 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2400 = 0xb + B300 = 0x7 + B38400 = 0xf + B4800 = 0xc + B50 = 0x1 + B600 = 0x8 + B75 = 0x2 + B9600 = 0xd + CFLUSH = 0xf + CSIOCGIFCONF = -0x3fef96dc + CSTART = '\021' + CSTOP = '\023' + CSUSP = 0x1a + ECHO = 0x8 + ECH_ICMPID = 0x2 + ETHERNET_CSMACD = 0x6 + EVENP = 0x80 + EXCONTINUE = 0x0 + EXDLOK = 0x3 + EXIO = 0x2 + EXPGIO = 0x0 + EXRESUME = 0x2 + EXRETURN = 0x1 + EXSIG = 0x4 + EXTA = 0xe + EXTB = 0xf + EXTRAP = 0x1 + EYEC_RTENTRYA = 0x257274656e747241 + EYEC_RTENTRYF = 0x257274656e747246 + E_ACC = 0x0 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0xfffe + FLUSHBAND = 0x40 + FLUSHLOW = 0x8 + FLUSHO = 0x100000 + FLUSHR = 0x1 + 
FLUSHRW = 0x3 + FLUSHW = 0x2 + F_CLOSEM = 0xa + F_DUP2FD = 0xe + F_DUPFD = 0x0 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETLK64 = 0xb + F_GETOWN = 0x8 + F_LOCK = 0x1 + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLK64 = 0xc + F_SETLKW = 0xd + F_SETLKW64 = 0xd + F_SETOWN = 0x9 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_TSTLK = 0xf + F_ULOCK = 0x0 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + ICMP6_FILTER = 0x26 + ICMP6_SEC_SEND_DEL = 0x46 + ICMP6_SEC_SEND_GET = 0x47 + ICMP6_SEC_SEND_SET = 0x44 + ICMP6_SEC_SEND_SET_CGA_ADDR = 0x45 + IFA_FIRSTALIAS = 0x2000 + IFA_ROUTE = 0x1 + IFF_64BIT = 0x4000000 + IFF_ALLCAST = 0x20000 + IFF_ALLMULTI = 0x200 + IFF_BPF = 0x8000000 + IFF_BRIDGE = 0x40000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x80c52 + IFF_CHECKSUM_OFFLOAD = 0x10000000 + IFF_D1 = 0x8000 + IFF_D2 = 0x4000 + IFF_D3 = 0x2000 + IFF_D4 = 0x1000 + IFF_DEBUG = 0x4 + IFF_DEVHEALTH = 0x4000 + IFF_DO_HW_LOOPBACK = 0x10000 + IFF_GROUP_ROUTING = 0x2000000 + IFF_IFBUFMGT = 0x800000 + IFF_LINK0 = 0x100000 + IFF_LINK1 = 0x200000 + IFF_LINK2 = 0x400000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x80000 + IFF_NOARP = 0x80 + IFF_NOECHO = 0x800 + IFF_NOTRAILERS = 0x20 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_PSEG = 0x40000000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_SNAP = 0x8000 + IFF_TCP_DISABLE_CKSUM = 0x20000000 + IFF_TCP_NOCKSUM = 0x1000000 + IFF_UP = 0x1 + IFF_VIPA = 0x80000000 + IFNAMSIZ = 0x10 + IFO_FLUSH = 0x1 + IFT_1822 = 0x2 + IFT_AAL5 = 0x31 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 + IFT_CEPT = 0x13 + IFT_CLUSTER = 0x3e + IFT_DS3 = 0x1e + IFT_EON = 0x19 + IFT_ETHER = 0x6 + IFT_FCS = 0x3a + IFT_FDDI = 0xf + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_GIFTUNNEL = 0x3c + IFT_HDH1822 = 0x3 + IFT_HF = 0x3d + IFT_HIPPI = 0x2f + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IB = 0xc7 + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + 
IFT_ISO88025 = 0x9 + IFT_ISO88026 = 0xa + IFT_LAPB = 0x10 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_NSIP = 0x1b + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PPP = 0x17 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PTPSERIAL = 0x16 + IFT_RS232 = 0x21 + IFT_SDLC = 0x11 + IFT_SIP = 0x1f + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SN = 0x38 + IFT_SONET = 0x27 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SP = 0x39 + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TUNNEL = 0x3b + IFT_ULTRA = 0x1d + IFT_V35 = 0x2d + IFT_VIPA = 0x37 + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_USE = 0x1 + IPPROTO_AH = 0x33 + IPPROTO_BIP = 0x53 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GIF = 0x8c + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_LOCAL = 0x3f + IPPROTO_MAX = 0x100 + IPPROTO_MH = 0x87 + IPPROTO_NONE = 0x3b + IPPROTO_PUP = 0xc + IPPROTO_QOS = 0x2d + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPV6_ADDRFORM = 0x16 + IPV6_ADDR_PREFERENCES = 0x4a + IPV6_ADD_MEMBERSHIP = 0xc + IPV6_AIXRAWSOCKET = 0x39 + IPV6_CHECKSUM = 0x27 + IPV6_DONTFRAG = 0x2d + IPV6_DROP_MEMBERSHIP = 0xd + IPV6_DSTOPTS = 0x36 + 
IPV6_FLOWINFO_FLOWLABEL = 0xffffff + IPV6_FLOWINFO_PRIFLOW = 0xfffffff + IPV6_FLOWINFO_PRIORITY = 0xf000000 + IPV6_FLOWINFO_SRFLAG = 0x10000000 + IPV6_FLOWINFO_VERSION = 0xf0000000 + IPV6_HOPLIMIT = 0x28 + IPV6_HOPOPTS = 0x34 + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MIPDSTOPTS = 0x36 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_NOPROBE = 0x1c + IPV6_PATHMTU = 0x2e + IPV6_PKTINFO = 0x21 + IPV6_PKTOPTIONS = 0x24 + IPV6_PRIORITY_10 = 0xa000000 + IPV6_PRIORITY_11 = 0xb000000 + IPV6_PRIORITY_12 = 0xc000000 + IPV6_PRIORITY_13 = 0xd000000 + IPV6_PRIORITY_14 = 0xe000000 + IPV6_PRIORITY_15 = 0xf000000 + IPV6_PRIORITY_8 = 0x8000000 + IPV6_PRIORITY_9 = 0x9000000 + IPV6_PRIORITY_BULK = 0x4000000 + IPV6_PRIORITY_CONTROL = 0x7000000 + IPV6_PRIORITY_FILLER = 0x1000000 + IPV6_PRIORITY_INTERACTIVE = 0x6000000 + IPV6_PRIORITY_RESERVED1 = 0x3000000 + IPV6_PRIORITY_RESERVED2 = 0x5000000 + IPV6_PRIORITY_UNATTENDED = 0x2000000 + IPV6_PRIORITY_UNCHARACTERIZED = 0x0 + IPV6_RECVDSTOPTS = 0x38 + IPV6_RECVHOPLIMIT = 0x29 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVHOPS = 0x22 + IPV6_RECVIF = 0x1e + IPV6_RECVPATHMTU = 0x2f + IPV6_RECVPKTINFO = 0x23 + IPV6_RECVRTHDR = 0x33 + IPV6_RECVSRCRT = 0x1d + IPV6_RECVTCLASS = 0x2a + IPV6_RTHDR = 0x32 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RTHDR_TYPE_2 = 0x2 + IPV6_SENDIF = 0x1f + IPV6_SRFLAG_LOOSE = 0x0 + IPV6_SRFLAG_STRICT = 0x10000000 + IPV6_TCLASS = 0x2b + IPV6_TOKEN_LENGTH = 0x40 + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2c + IPV6_V6ONLY = 0x25 + IPV6_VERSION = 0x60000000 + IP_ADDRFORM = 0x16 + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x3c + IP_BLOCK_SOURCE = 0x3a + IP_BROADCAST_IF = 0x10 + IP_CACHE_LINE_SIZE = 0x80 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DHCPMODE = 0x11 + IP_DONTFRAG = 0x19 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x3d + IP_FINDPMTU = 0x1a + IP_HDRINCL = 
0x2 + IP_INC_MEMBERSHIPS = 0x14 + IP_INIT_MEMBERSHIP = 0x14 + IP_MAXPACKET = 0xffff + IP_MF = 0x2000 + IP_MSS = 0x240 + IP_MULTICAST_HOPS = 0xa + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OPT = 0x1b + IP_OPTIONS = 0x1 + IP_PMTUAGE = 0x1b + IP_RECVDSTADDR = 0x7 + IP_RECVIF = 0x14 + IP_RECVIFINFO = 0xf + IP_RECVINTERFACE = 0x20 + IP_RECVMACHDR = 0xe + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVTTL = 0x22 + IP_RETOPTS = 0x8 + IP_SOURCE_FILTER = 0x48 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x3b + IP_UNICAST_HOPS = 0x4 + I_FLUSH = 0x20005305 + LNOFLSH = 0x8000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x10 + MAP_ANONYMOUS = 0x10 + MAP_FILE = 0x0 + MAP_FIXED = 0x100 + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_TYPE = 0xf0 + MAP_VARIABLE = 0x0 + MCL_CURRENT = 0x100 + MCL_FUTURE = 0x200 + MSG_ANY = 0x4 + MSG_ARGEXT = 0x400 + MSG_BAND = 0x2 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_EOR = 0x8 + MSG_HIPRI = 0x1 + MSG_MAXIOVLEN = 0x10 + MSG_MPEG2 = 0x80 + MSG_NONBLOCK = 0x4000 + MSG_NOSIGNAL = 0x100 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x200 + MS_ASYNC = 0x10 + MS_EINTR = 0x80 + MS_INVALIDATE = 0x40 + MS_PER_SEC = 0x3e8 + MS_SYNC = 0x20 + NOFLUSH = 0x80000000 + O_ACCMODE = 0x23 + O_APPEND = 0x8 + O_CIO = 0x80 + O_CIOR = 0x800000000 + O_CLOEXEC = 0x800000 + O_CREAT = 0x100 + O_DEFER = 0x2000 + O_DELAY = 0x4000 + O_DIRECT = 0x8000000 + O_DIRECTORY = 0x80000 + O_DSYNC = 0x400000 + O_EFSOFF = 0x400000000 + O_EFSON = 0x200000000 + O_EXCL = 0x400 + O_EXEC = 0x20 + O_LARGEFILE = 0x4000000 + O_NDELAY = 0x8000 + O_NOCACHE = 0x100000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x1000000 + O_NONBLOCK = 0x4 + O_NONE = 0x3 + O_NSHARE = 0x10000 + O_RAW = 0x100000000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + 
O_RSHARE = 0x1000 + O_RSYNC = 0x200000 + O_SEARCH = 0x20 + O_SNAPSHOT = 0x40 + O_SYNC = 0x10 + O_TRUNC = 0x200 + O_TTY_INIT = 0x0 + O_WRONLY = 0x1 + PENDIN = 0x20000000 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_64BIT = 0x20 + PR_ADDR = 0x2 + PR_ARGEXT = 0x400 + PR_ATOMIC = 0x1 + PR_CONNREQUIRED = 0x4 + PR_FASTHZ = 0x5 + PR_INP = 0x40 + PR_INTRLEVEL = 0x8000 + PR_MLS = 0x100 + PR_MLS_1_LABEL = 0x200 + PR_NOEOR = 0x4000 + PR_RIGHTS = 0x10 + PR_SLOWHZ = 0x2 + PR_WANTRCVD = 0x8 + PT_ATTACH = 0x1e + PT_CLEAR = 0x26 + PT_COMMAND_MAX = 0x45 + PT_CONTINUE = 0x7 + PT_DETACH = 0x1f + PT_GET_UKEY = 0x40 + PT_KILL = 0x8 + PT_LDINFO = 0x22 + PT_LDXINFO = 0x27 + PT_MULTI = 0x23 + PT_NEXT = 0x24 + PT_QUERY = 0x28 + PT_READ_BLOCK = 0x11 + PT_READ_D = 0x2 + PT_READ_FPR = 0xc + PT_READ_GPR = 0xb + PT_READ_I = 0x1 + PT_REATT = 0x21 + PT_REGSET = 0x20 + PT_SET = 0x25 + PT_STEP = 0x9 + PT_TRACE_ME = 0x0 + PT_WATCH = 0x29 + PT_WRITE_BLOCK = 0x13 + PT_WRITE_D = 0x5 + PT_WRITE_FPR = 0xf + PT_WRITE_GPR = 0xe + PT_WRITE_I = 0x4 + RLIMIT_AS = 0x6 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_NOFILE = 0x7 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DOWNSTREAM = 0x100 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_ACTIVE_DGD = 0x1000000 + RTF_BCE = 0x80000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_BUL = 0x2000 + RTF_CLONE = 0x10000 + RTF_CLONED = 0x20000 + RTF_CLONING = 0x100 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FREE_IN_PROG = 0x4000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MASK = 0x80 + RTF_MODIFIED 
= 0x20 + RTF_MULTICAST = 0x800000 + RTF_PERMANENT6 = 0x8000000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_SMALLMTU = 0x40000 + RTF_STATIC = 0x800 + RTF_STOPSRCH = 0x2000000 + RTF_UNREACHABLE = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_EXPIRE = 0xf + RTM_GET = 0x4 + RTM_GETNEXT = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_OLDADD = 0x9 + RTM_OLDDEL = 0xa + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTLOST = 0x10 + RTM_RTTUNIT = 0xf4240 + RTM_SAMEADDR = 0x12 + RTM_SET = 0x13 + RTM_VERSION = 0x2 + RTM_VERSION_GR = 0x4 + RTM_VERSION_GR_COMPAT = 0x3 + RTM_VERSION_POLICY = 0x5 + RTM_VERSION_POLICY_EXT = 0x6 + RTM_VERSION_POLICY_PRFN = 0x7 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIGQUEUE_MAX = 0x20 + SIOCADDIFVIPA = 0x20006942 + SIOCADDMTU = -0x7ffb9690 + SIOCADDMULTI = -0x7fdf96cf + SIOCADDNETID = -0x7fd796a9 + SIOCADDRT = -0x7fc78df6 + SIOCAIFADDR = -0x7fbf96e6 + SIOCATMARK = 0x40047307 + SIOCDARP = -0x7fb396e0 + SIOCDELIFVIPA = 0x20006943 + SIOCDELMTU = -0x7ffb968f + SIOCDELMULTI = -0x7fdf96ce + SIOCDELPMTU = -0x7fd78ff6 + SIOCDELRT = -0x7fc78df5 + SIOCDIFADDR = -0x7fd796e7 + SIOCDNETOPT = -0x3ffe9680 + SIOCDX25XLATE = -0x7fd7969b + SIOCFIFADDR = -0x7fdf966d + SIOCGARP = -0x3fb396da + SIOCGETMTUS = 0x2000696f + SIOCGETSGCNT = -0x3feb8acc + SIOCGETVIFCNT = -0x3feb8acd + SIOCGHIWAT = 0x40047301 + SIOCGIFADDR = -0x3fd796df + SIOCGIFADDRS = 0x2000698c + SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBRDADDR = -0x3fd796dd + SIOCGIFCONF = -0x3fef96bb + SIOCGIFCONFGLOB = -0x3fef9670 + SIOCGIFDSTADDR = -0x3fd796de + 
SIOCGIFFLAGS = -0x3fd796ef + SIOCGIFGIDLIST = 0x20006968 + SIOCGIFHWADDR = -0x3fab966b + SIOCGIFMETRIC = -0x3fd796e9 + SIOCGIFMTU = -0x3fd796aa + SIOCGIFNETMASK = -0x3fd796db + SIOCGIFOPTIONS = -0x3fd796d6 + SIOCGISNO = -0x3fd79695 + SIOCGLOADF = -0x3ffb967e + SIOCGLOWAT = 0x40047303 + SIOCGNETOPT = -0x3ffe96a5 + SIOCGNETOPT1 = -0x3fdf967f + SIOCGNMTUS = 0x2000696e + SIOCGPGRP = 0x40047309 + SIOCGSIZIFCONF = 0x4004696a + SIOCGSRCFILTER = -0x3fe796cb + SIOCGTUNEPHASE = -0x3ffb9676 + SIOCGX25XLATE = -0x3fd7969c + SIOCIFATTACH = -0x7fdf9699 + SIOCIFDETACH = -0x7fdf969a + SIOCIFGETPKEY = -0x7fdf969b + SIOCIF_ATM_DARP = -0x7fdf9683 + SIOCIF_ATM_DUMPARP = -0x7fdf9685 + SIOCIF_ATM_GARP = -0x7fdf9682 + SIOCIF_ATM_IDLE = -0x7fdf9686 + SIOCIF_ATM_SARP = -0x7fdf9681 + SIOCIF_ATM_SNMPARP = -0x7fdf9687 + SIOCIF_ATM_SVC = -0x7fdf9684 + SIOCIF_ATM_UBR = -0x7fdf9688 + SIOCIF_DEVHEALTH = -0x7ffb966c + SIOCIF_IB_ARP_INCOMP = -0x7fdf9677 + SIOCIF_IB_ARP_TIMER = -0x7fdf9678 + SIOCIF_IB_CLEAR_PINFO = -0x3fdf966f + SIOCIF_IB_DEL_ARP = -0x7fdf967f + SIOCIF_IB_DEL_PINFO = -0x3fdf9670 + SIOCIF_IB_DUMP_ARP = -0x7fdf9680 + SIOCIF_IB_GET_ARP = -0x7fdf967e + SIOCIF_IB_GET_INFO = -0x3f879675 + SIOCIF_IB_GET_STATS = -0x3f879672 + SIOCIF_IB_NOTIFY_ADDR_REM = -0x3f87966a + SIOCIF_IB_RESET_STATS = -0x3f879671 + SIOCIF_IB_RESIZE_CQ = -0x7fdf9679 + SIOCIF_IB_SET_ARP = -0x7fdf967d + SIOCIF_IB_SET_PKEY = -0x7fdf967c + SIOCIF_IB_SET_PORT = -0x7fdf967b + SIOCIF_IB_SET_QKEY = -0x7fdf9676 + SIOCIF_IB_SET_QSIZE = -0x7fdf967a + SIOCLISTIFVIPA = 0x20006944 + SIOCSARP = -0x7fb396e2 + SIOCSHIWAT = 0xffffffff80047300 + SIOCSIFADDR = -0x7fd796f4 + SIOCSIFADDRORI = -0x7fdb9673 + SIOCSIFBRDADDR = -0x7fd796ed + SIOCSIFDSTADDR = -0x7fd796f2 + SIOCSIFFLAGS = -0x7fd796f0 + SIOCSIFGIDLIST = 0x20006969 + SIOCSIFMETRIC = -0x7fd796e8 + SIOCSIFMTU = -0x7fd796a8 + SIOCSIFNETDUMP = -0x7fd796e4 + SIOCSIFNETMASK = -0x7fd796ea + SIOCSIFOPTIONS = -0x7fd796d7 + SIOCSIFSUBCHAN = -0x7fd796e5 + SIOCSISNO = -0x7fd79694 + SIOCSLOADF = 
-0x3ffb967d + SIOCSLOWAT = 0xffffffff80047302 + SIOCSNETOPT = -0x7ffe96a6 + SIOCSPGRP = 0xffffffff80047308 + SIOCSX25XLATE = -0x7fd7969d + SOCK_CONN_DGRAM = 0x6 + SOCK_DGRAM = 0x2 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x400 + SO_ACCEPTCONN = 0x2 + SO_AUDIT = 0x8000 + SO_BROADCAST = 0x20 + SO_CKSUMRECV = 0x800 + SO_DEBUG = 0x1 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_KERNACCEPT = 0x2000 + SO_LINGER = 0x80 + SO_NOMULTIPATH = 0x4000 + SO_NOREUSEADDR = 0x1000 + SO_OOBINLINE = 0x100 + SO_PEERID = 0x1009 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMPNS = 0x100a + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USE_IFBUFS = 0x400 + S_BANDURG = 0x400 + S_EMODFMT = 0x3c000000 + S_ENFMT = 0x400 + S_ERROR = 0x100 + S_HANGUP = 0x200 + S_HIPRI = 0x2 + S_ICRYPTO = 0x80000 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFJOURNAL = 0x10000 + S_IFLNK = 0xa000 + S_IFMPX = 0x2200 + S_IFMT = 0xf000 + S_IFPDIR = 0x4000000 + S_IFPSDIR = 0x8000000 + S_IFPSSDIR = 0xc000000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFSYSEA = 0x30000000 + S_INPUT = 0x1 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_ITCB = 0x1000000 + S_ITP = 0x800000 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXACL = 0x2000000 + S_IXATTR = 0x40000 + S_IXGRP = 0x8 + S_IXINTERFACE = 0x100000 + S_IXMOD = 0x40000000 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + S_MSG = 0x8 + S_OUTPUT = 0x4 + S_RDBAND = 0x20 + S_RDNORM = 0x10 + S_RESERVED1 = 0x20000 + S_RESERVED2 = 0x200000 + S_RESERVED3 = 0x400000 + S_RESERVED4 = 0x80000000 + S_RESFMT1 = 0x10000000 + S_RESFMT10 = 0x34000000 + S_RESFMT11 = 
0x38000000 + S_RESFMT12 = 0x3c000000 + S_RESFMT2 = 0x14000000 + S_RESFMT3 = 0x18000000 + S_RESFMT4 = 0x1c000000 + S_RESFMT5 = 0x20000000 + S_RESFMT6 = 0x24000000 + S_RESFMT7 = 0x28000000 + S_RESFMT8 = 0x2c000000 + S_WRBAND = 0x80 + S_WRNORM = 0x40 + TCP_24DAYS_WORTH_OF_SLOWTICKS = 0x3f4800 + TCP_ACLADD = 0x23 + TCP_ACLBIND = 0x26 + TCP_ACLCLEAR = 0x22 + TCP_ACLDEL = 0x24 + TCP_ACLDENY = 0x8 + TCP_ACLFLUSH = 0x21 + TCP_ACLGID = 0x1 + TCP_ACLLS = 0x25 + TCP_ACLSUBNET = 0x4 + TCP_ACLUID = 0x2 + TCP_CWND_DF = 0x16 + TCP_CWND_IF = 0x15 + TCP_DELAY_ACK_FIN = 0x2 + TCP_DELAY_ACK_SYN = 0x1 + TCP_FASTNAME = 0x101080a + TCP_KEEPCNT = 0x13 + TCP_KEEPIDLE = 0x11 + TCP_KEEPINTVL = 0x12 + TCP_LSPRIV = 0x29 + TCP_LUID = 0x20 + TCP_MAXBURST = 0x8 + TCP_MAXDF = 0x64 + TCP_MAXIF = 0x64 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAXWINDOWSCALE = 0xe + TCP_MAX_SACK = 0x4 + TCP_MSS = 0x5b4 + TCP_NODELAY = 0x1 + TCP_NODELAYACK = 0x14 + TCP_NOREDUCE_CWND_EXIT_FRXMT = 0x19 + TCP_NOREDUCE_CWND_IN_FRXMT = 0x18 + TCP_NOTENTER_SSTART = 0x17 + TCP_OPT = 0x19 + TCP_RFC1323 = 0x4 + TCP_SETPRIV = 0x27 + TCP_STDURG = 0x10 + TCP_TIMESTAMP_OPTLEN = 0xc + TCP_UNSETPRIV = 0x28 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0xffffffff80047462 + TIOCEXCL = 0x2000740d + TIOCFLUSH = 0xffffffff80047410 + TIOCGETC = 0x40067412 + TIOCGETD = 0x40047400 + TIOCGETP = 0x40067408 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047448 + TIOCGSIZE = 0x40087468 + TIOCGWINSZ = 0x40087468 + TIOCHPCL = 0x20007402 + TIOCLBIC = 0xffffffff8004747e + TIOCLBIS = 0xffffffff8004747f + TIOCLGET = 0x4004747c + TIOCLSET = 0xffffffff8004747d + TIOCMBIC = 0xffffffff8004746b + TIOCMBIS = 0xffffffff8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0xffffffff80047464 + TIOCMODG = 0x40047403 + TIOCMODS = 0xffffffff80047404 + TIOCMSET = 0xffffffff8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + 
TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0xffffffff80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0xffffffff80047469 + TIOCSBRK = 0x2000747b + TIOCSDTR = 0x20007479 + TIOCSETC = 0xffffffff80067411 + TIOCSETD = 0xffffffff80047401 + TIOCSETN = 0xffffffff8006740a + TIOCSETP = 0xffffffff80067409 + TIOCSLTC = 0xffffffff80067475 + TIOCSPGRP = 0xffffffff80047476 + TIOCSSIZE = 0xffffffff80087467 + TIOCSTART = 0x2000746e + TIOCSTI = 0xffffffff80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0xffffffff80087467 + TIOCUCNTL = 0xffffffff80047466 + TOSTOP = 0x10000 + VTDELAY = 0x2000 + WPARSTART = 0x1 + WPARSTOP = 0x2 + WPARTTYNAME = "Global" + _FDATAFLUSH = 0x2000000000 +) + +// Errors +const ( + E2BIG = Errno(0x7) + EACCES = Errno(0xd) + EADDRINUSE = Errno(0x43) + EADDRNOTAVAIL = Errno(0x44) + EAFNOSUPPORT = Errno(0x42) + EAGAIN = Errno(0xb) + EALREADY = Errno(0x38) + EBADF = Errno(0x9) + EBADMSG = Errno(0x78) + EBUSY = Errno(0x10) + ECANCELED = Errno(0x75) + ECHILD = Errno(0xa) + ECHRNG = Errno(0x25) + ECLONEME = Errno(0x52) + ECONNABORTED = Errno(0x48) + ECONNREFUSED = Errno(0x4f) + ECONNRESET = Errno(0x49) + ECORRUPT = Errno(0x59) + EDEADLK = Errno(0x2d) + EDESTADDREQ = Errno(0x3a) + EDESTADDRREQ = Errno(0x3a) + EDIST = Errno(0x35) + EDOM = Errno(0x21) + EDQUOT = Errno(0x58) + EEXIST = Errno(0x11) + EFAULT = Errno(0xe) + EFBIG = Errno(0x1b) + EFORMAT = Errno(0x30) + EHOSTDOWN = Errno(0x50) + EHOSTUNREACH = Errno(0x51) + EIDRM = Errno(0x24) + EILSEQ = Errno(0x74) + EINPROGRESS = Errno(0x37) + EINTR = Errno(0x4) + EINVAL = Errno(0x16) + EIO = Errno(0x5) + EISCONN = Errno(0x4b) + EISDIR = Errno(0x15) + EL2HLT = Errno(0x2c) + EL2NSYNC = Errno(0x26) + EL3HLT = Errno(0x27) + EL3RST = Errno(0x28) + ELNRNG = Errno(0x29) + ELOOP = Errno(0x55) + EMEDIA 
= Errno(0x6e) + EMFILE = Errno(0x18) + EMLINK = Errno(0x1f) + EMSGSIZE = Errno(0x3b) + EMULTIHOP = Errno(0x7d) + ENAMETOOLONG = Errno(0x56) + ENETDOWN = Errno(0x45) + ENETRESET = Errno(0x47) + ENETUNREACH = Errno(0x46) + ENFILE = Errno(0x17) + ENOATTR = Errno(0x70) + ENOBUFS = Errno(0x4a) + ENOCONNECT = Errno(0x32) + ENOCSI = Errno(0x2b) + ENODATA = Errno(0x7a) + ENODEV = Errno(0x13) + ENOENT = Errno(0x2) + ENOEXEC = Errno(0x8) + ENOLCK = Errno(0x31) + ENOLINK = Errno(0x7e) + ENOMEM = Errno(0xc) + ENOMSG = Errno(0x23) + ENOPROTOOPT = Errno(0x3d) + ENOSPC = Errno(0x1c) + ENOSR = Errno(0x76) + ENOSTR = Errno(0x7b) + ENOSYS = Errno(0x6d) + ENOTBLK = Errno(0xf) + ENOTCONN = Errno(0x4c) + ENOTDIR = Errno(0x14) + ENOTEMPTY = Errno(0x11) + ENOTREADY = Errno(0x2e) + ENOTRECOVERABLE = Errno(0x5e) + ENOTRUST = Errno(0x72) + ENOTSOCK = Errno(0x39) + ENOTSUP = Errno(0x7c) + ENOTTY = Errno(0x19) + ENXIO = Errno(0x6) + EOPNOTSUPP = Errno(0x40) + EOVERFLOW = Errno(0x7f) + EOWNERDEAD = Errno(0x5f) + EPERM = Errno(0x1) + EPFNOSUPPORT = Errno(0x41) + EPIPE = Errno(0x20) + EPROCLIM = Errno(0x53) + EPROTO = Errno(0x79) + EPROTONOSUPPORT = Errno(0x3e) + EPROTOTYPE = Errno(0x3c) + ERANGE = Errno(0x22) + EREMOTE = Errno(0x5d) + ERESTART = Errno(0x52) + EROFS = Errno(0x1e) + ESAD = Errno(0x71) + ESHUTDOWN = Errno(0x4d) + ESOCKTNOSUPPORT = Errno(0x3f) + ESOFT = Errno(0x6f) + ESPIPE = Errno(0x1d) + ESRCH = Errno(0x3) + ESTALE = Errno(0x34) + ESYSERROR = Errno(0x5a) + ETIME = Errno(0x77) + ETIMEDOUT = Errno(0x4e) + ETOOMANYREFS = Errno(0x73) + ETXTBSY = Errno(0x1a) + EUNATCH = Errno(0x2a) + EUSERS = Errno(0x54) + EWOULDBLOCK = Errno(0xb) + EWRPROTECT = Errno(0x2f) + EXDEV = Errno(0x12) +) + +// Signals +const ( + SIGABRT = Signal(0x6) + SIGAIO = Signal(0x17) + SIGALRM = Signal(0xe) + SIGALRM1 = Signal(0x26) + SIGBUS = Signal(0xa) + SIGCAPI = Signal(0x31) + SIGCHLD = Signal(0x14) + SIGCLD = Signal(0x14) + SIGCONT = Signal(0x13) + SIGCPUFAIL = Signal(0x3b) + SIGDANGER = Signal(0x21) + SIGEMT = 
Signal(0x7) + SIGFPE = Signal(0x8) + SIGGRANT = Signal(0x3c) + SIGHUP = Signal(0x1) + SIGILL = Signal(0x4) + SIGINT = Signal(0x2) + SIGIO = Signal(0x17) + SIGIOINT = Signal(0x10) + SIGIOT = Signal(0x6) + SIGKAP = Signal(0x3c) + SIGKILL = Signal(0x9) + SIGLOST = Signal(0x6) + SIGMAX = Signal(0xff) + SIGMAX32 = Signal(0x3f) + SIGMAX64 = Signal(0xff) + SIGMIGRATE = Signal(0x23) + SIGMSG = Signal(0x1b) + SIGPIPE = Signal(0xd) + SIGPOLL = Signal(0x17) + SIGPRE = Signal(0x24) + SIGPROF = Signal(0x20) + SIGPTY = Signal(0x17) + SIGPWR = Signal(0x1d) + SIGQUIT = Signal(0x3) + SIGRECONFIG = Signal(0x3a) + SIGRETRACT = Signal(0x3d) + SIGSAK = Signal(0x3f) + SIGSEGV = Signal(0xb) + SIGSOUND = Signal(0x3e) + SIGSTOP = Signal(0x11) + SIGSYS = Signal(0xc) + SIGSYSERROR = Signal(0x30) + SIGTALRM = Signal(0x26) + SIGTERM = Signal(0xf) + SIGTRAP = Signal(0x5) + SIGTSTP = Signal(0x12) + SIGTTIN = Signal(0x15) + SIGTTOU = Signal(0x16) + SIGURG = Signal(0x10) + SIGUSR1 = Signal(0x1e) + SIGUSR2 = Signal(0x1f) + SIGVIRT = Signal(0x25) + SIGVTALRM = Signal(0x22) + SIGWAITING = Signal(0x27) + SIGWINCH = Signal(0x1c) + SIGXCPU = Signal(0x18) + SIGXFSZ = Signal(0x19) +) + +// Error table +var errors = [...]string{ + 1: "not owner", + 2: "no such file or directory", + 3: "no such process", + 4: "interrupted system call", + 5: "I/O error", + 6: "no such device or address", + 7: "arg list too long", + 8: "exec format error", + 9: "bad file number", + 10: "no child processes", + 11: "resource temporarily unavailable", + 12: "not enough space", + 13: "permission denied", + 14: "bad address", + 15: "block device required", + 16: "device busy", + 17: "file exists", + 18: "cross-device link", + 19: "no such device", + 20: "not a directory", + 21: "is a directory", + 22: "invalid argument", + 23: "file table overflow", + 24: "too many open files", + 25: "not a typewriter", + 26: "text file busy", + 27: "file too large", + 28: "no space left on device", + 29: "illegal seek", + 30: "read-only file 
system", + 31: "too many links", + 32: "broken pipe", + 33: "argument out of domain", + 34: "result too large", + 35: "no message of desired type", + 36: "identifier removed", + 37: "channel number out of range", + 38: "level 2 not synchronized", + 39: "level 3 halted", + 40: "level 3 reset", + 41: "link number out of range", + 42: "protocol driver not attached", + 43: "no CSI structure available", + 44: "level 2 halted", + 45: "deadlock condition if locked", + 46: "device not ready", + 47: "write-protected media", + 48: "unformatted or incompatible media", + 49: "no locks available", + 50: "cannot Establish Connection", + 52: "missing file or filesystem", + 53: "requests blocked by Administrator", + 55: "operation now in progress", + 56: "operation already in progress", + 57: "socket operation on non-socket", + 58: "destination address required", + 59: "message too long", + 60: "protocol wrong type for socket", + 61: "protocol not available", + 62: "protocol not supported", + 63: "socket type not supported", + 64: "operation not supported on socket", + 65: "protocol family not supported", + 66: "addr family not supported by protocol", + 67: "address already in use", + 68: "can't assign requested address", + 69: "network is down", + 70: "network is unreachable", + 71: "network dropped connection on reset", + 72: "software caused connection abort", + 73: "connection reset by peer", + 74: "no buffer space available", + 75: "socket is already connected", + 76: "socket is not connected", + 77: "can't send after socket shutdown", + 78: "connection timed out", + 79: "connection refused", + 80: "host is down", + 81: "no route to host", + 82: "restart the system call", + 83: "too many processes", + 84: "too many users", + 85: "too many levels of symbolic links", + 86: "file name too long", + 88: "disk quota exceeded", + 89: "invalid file system control data detected", + 90: "for future use ", + 93: "item is not local to host", + 94: "state not recoverable ", + 95: 
"previous owner died ", + 109: "function not implemented", + 110: "media surface error", + 111: "I/O completed, but needs relocation", + 112: "no attribute found", + 113: "security Authentication Denied", + 114: "not a Trusted Program", + 115: "too many references: can't splice", + 116: "invalid wide character", + 117: "asynchronous I/O cancelled", + 118: "out of STREAMS resources", + 119: "system call timed out", + 120: "next message has wrong type", + 121: "error in protocol", + 122: "no message on stream head read q", + 123: "fd not associated with a stream", + 124: "unsupported attribute value", + 125: "multihop is not allowed", + 126: "the server link has been severed", + 127: "value too large to be stored in data type", +} + +// Signal table +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/BPT trap", + 6: "IOT/Abort trap", + 7: "EMT trap", + 8: "floating point exception", + 9: "killed", + 10: "bus error", + 11: "segmentation fault", + 12: "bad system call", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", + 16: "urgent I/O condition", + 17: "stopped (signal)", + 18: "stopped", + 19: "continued", + 20: "child exited", + 21: "stopped (tty input)", + 22: "stopped (tty output)", + 23: "I/O possible/complete", + 24: "cputime limit exceeded", + 25: "filesize limit exceeded", + 27: "input device data", + 28: "window size changes", + 29: "power-failure", + 30: "user defined signal 1", + 31: "user defined signal 2", + 32: "profiling timer expired", + 33: "paging space low", + 34: "virtual timer expired", + 35: "signal 35", + 36: "signal 36", + 37: "signal 37", + 38: "signal 38", + 39: "signal 39", + 48: "signal 48", + 49: "signal 49", + 58: "signal 58", + 59: "CPU Failure Predicted", + 60: "monitor mode granted", + 61: "monitor mode retracted", + 62: "sound completed", + 63: "secure attention", + 255: "signal 255", +} diff --git a/src/syscall/zsyscall_aix_ppc64.go 
b/src/syscall/zsyscall_aix_ppc64.go new file mode 100644 index 0000000000000..fe27dcadf2802 --- /dev/null +++ b/src/syscall/zsyscall_aix_ppc64.go @@ -0,0 +1,1341 @@ +// mksyscall_libc.pl -aix -tags aix,ppc64 syscall_aix.go syscall_aix_ppc64.go +// Code generated by the command above; DO NOT EDIT. + +// +build aix,ppc64 + +package syscall + +import "unsafe" + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_dup2 dup2 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_readlink readlink "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_utimes utimes "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_utimensat utimensat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getcwd getcwd "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getgroups getgroups "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setgroups setgroups "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getdirent getdirent "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_wait4 wait4 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_bind bind "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_connect connect "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getkerninfo getkerninfo "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Listen listen "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_socket socket "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_socketpair socketpair "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getpeername getpeername "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_getsockname getsockname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sendto sendto "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Shutdown shutdown "libc.a/shr_64.o" 
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_accept accept "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Openat openat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_ptrace64 ptrace64 "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Acct acct "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Chdir chdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Chmod chmod "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Chown chown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Close close "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Dup dup "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Faccessat faccessat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fchdir fchdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fchmod fchmod "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fchmodat fchmodat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fchown fchown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fchownat fchownat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fpathconf fpathconf "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fstat fstat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fstatfs fstatfs "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Ftruncate ftruncate "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Fsync fsync "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getgid getgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getpid getpid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Geteuid geteuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getegid getegid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getppid getppid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getrlimit getrlimit "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Getuid getuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Kill kill "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Lchown lchown "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Link link 
"libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Lstat lstat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Mkdir mkdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Mkdirat mkdirat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Mknodat mknodat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Open open "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Pread pread "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Pwrite pwrite "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_read read "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Reboot reboot "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Rename rename "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Renameat renameat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Rmdir rmdir "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Setegid setegid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Seteuid seteuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Setgid setgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Setpgid setpgid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Setregid setregid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Setreuid setreuid "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Stat stat "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Statfs statfs "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Symlink symlink "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Truncate truncate "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Umask umask "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Unlink unlink "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_Uname uname "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_write write "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_mmap mmap "libc.a/shr_64.o" +//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o" + +//go:linkname libc_fcntl libc_fcntl +//go:linkname libc_dup2 libc_dup2 +//go:linkname libc_pipe libc_pipe 
+//go:linkname libc_readlink libc_readlink +//go:linkname libc_utimes libc_utimes +//go:linkname libc_utimensat libc_utimensat +//go:linkname libc_unlinkat libc_unlinkat +//go:linkname libc_getcwd libc_getcwd +//go:linkname libc_getgroups libc_getgroups +//go:linkname libc_setgroups libc_setgroups +//go:linkname libc_getdirent libc_getdirent +//go:linkname libc_wait4 libc_wait4 +//go:linkname libc_bind libc_bind +//go:linkname libc_connect libc_connect +//go:linkname libc_Getkerninfo libc_Getkerninfo +//go:linkname libc_getsockopt libc_getsockopt +//go:linkname libc_Listen libc_Listen +//go:linkname libc_setsockopt libc_setsockopt +//go:linkname libc_socket libc_socket +//go:linkname libc_socketpair libc_socketpair +//go:linkname libc_getpeername libc_getpeername +//go:linkname libc_getsockname libc_getsockname +//go:linkname libc_recvfrom libc_recvfrom +//go:linkname libc_sendto libc_sendto +//go:linkname libc_Shutdown libc_Shutdown +//go:linkname libc_recvmsg libc_recvmsg +//go:linkname libc_sendmsg libc_sendmsg +//go:linkname libc_accept libc_accept +//go:linkname libc_Openat libc_Openat +//go:linkname libc_ptrace64 libc_ptrace64 +//go:linkname libc_Acct libc_Acct +//go:linkname libc_Chdir libc_Chdir +//go:linkname libc_Chmod libc_Chmod +//go:linkname libc_Chown libc_Chown +//go:linkname libc_Close libc_Close +//go:linkname libc_Dup libc_Dup +//go:linkname libc_Faccessat libc_Faccessat +//go:linkname libc_Fchdir libc_Fchdir +//go:linkname libc_Fchmod libc_Fchmod +//go:linkname libc_Fchmodat libc_Fchmodat +//go:linkname libc_Fchown libc_Fchown +//go:linkname libc_Fchownat libc_Fchownat +//go:linkname libc_Fpathconf libc_Fpathconf +//go:linkname libc_Fstat libc_Fstat +//go:linkname libc_Fstatfs libc_Fstatfs +//go:linkname libc_Ftruncate libc_Ftruncate +//go:linkname libc_Fsync libc_Fsync +//go:linkname libc_Getgid libc_Getgid +//go:linkname libc_Getpid libc_Getpid +//go:linkname libc_Geteuid libc_Geteuid +//go:linkname libc_Getegid libc_Getegid +//go:linkname 
libc_Getppid libc_Getppid +//go:linkname libc_Getrlimit libc_Getrlimit +//go:linkname libc_Getuid libc_Getuid +//go:linkname libc_Kill libc_Kill +//go:linkname libc_Lchown libc_Lchown +//go:linkname libc_Link libc_Link +//go:linkname libc_Lstat libc_Lstat +//go:linkname libc_Mkdir libc_Mkdir +//go:linkname libc_Mkdirat libc_Mkdirat +//go:linkname libc_Mknodat libc_Mknodat +//go:linkname libc_Open libc_Open +//go:linkname libc_Pread libc_Pread +//go:linkname libc_Pwrite libc_Pwrite +//go:linkname libc_read libc_read +//go:linkname libc_Reboot libc_Reboot +//go:linkname libc_Rename libc_Rename +//go:linkname libc_Renameat libc_Renameat +//go:linkname libc_Rmdir libc_Rmdir +//go:linkname libc_lseek libc_lseek +//go:linkname libc_Setegid libc_Setegid +//go:linkname libc_Seteuid libc_Seteuid +//go:linkname libc_Setgid libc_Setgid +//go:linkname libc_Setpgid libc_Setpgid +//go:linkname libc_Setregid libc_Setregid +//go:linkname libc_Setreuid libc_Setreuid +//go:linkname libc_Stat libc_Stat +//go:linkname libc_Statfs libc_Statfs +//go:linkname libc_Symlink libc_Symlink +//go:linkname libc_Truncate libc_Truncate +//go:linkname libc_Umask libc_Umask +//go:linkname libc_Unlink libc_Unlink +//go:linkname libc_Uname libc_Uname +//go:linkname libc_write libc_write +//go:linkname libc_gettimeofday libc_gettimeofday +//go:linkname libc_mmap libc_mmap +//go:linkname libc_munmap libc_munmap + +type libcFunc uintptr + +var ( + libc_fcntl, + libc_dup2, + libc_pipe, + libc_readlink, + libc_utimes, + libc_utimensat, + libc_unlinkat, + libc_getcwd, + libc_getgroups, + libc_setgroups, + libc_getdirent, + libc_wait4, + libc_bind, + libc_connect, + libc_Getkerninfo, + libc_getsockopt, + libc_Listen, + libc_setsockopt, + libc_socket, + libc_socketpair, + libc_getpeername, + libc_getsockname, + libc_recvfrom, + libc_sendto, + libc_Shutdown, + libc_recvmsg, + libc_sendmsg, + libc_accept, + libc_Openat, + libc_ptrace64, + libc_Acct, + libc_Chdir, + libc_Chmod, + libc_Chown, + libc_Close, + 
libc_Dup, + libc_Faccessat, + libc_Fchdir, + libc_Fchmod, + libc_Fchmodat, + libc_Fchown, + libc_Fchownat, + libc_Fpathconf, + libc_Fstat, + libc_Fstatfs, + libc_Ftruncate, + libc_Fsync, + libc_Getgid, + libc_Getpid, + libc_Geteuid, + libc_Getegid, + libc_Getppid, + libc_Getrlimit, + libc_Getuid, + libc_Kill, + libc_Lchown, + libc_Link, + libc_Lstat, + libc_Mkdir, + libc_Mkdirat, + libc_Mknodat, + libc_Open, + libc_Pread, + libc_Pwrite, + libc_read, + libc_Reboot, + libc_Rename, + libc_Renameat, + libc_Rmdir, + libc_lseek, + libc_Setegid, + libc_Seteuid, + libc_Setgid, + libc_Setpgid, + libc_Setregid, + libc_Setreuid, + libc_Stat, + libc_Statfs, + libc_Symlink, + libc_Truncate, + libc_Umask, + libc_Unlink, + libc_Uname, + libc_write, + libc_gettimeofday, + libc_mmap, + libc_munmap libcFunc +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func dup2(old int, new int) (val int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_dup2)), 2, uintptr(old), uintptr(new), 0, 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe(p *[2]_C_int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_pipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlink(path string, buf []byte, bufSize uint64) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + if len(buf) > 0 { + _p1 = &buf[0] + } + r0, _, e1 := 
syscall6(uintptr(unsafe.Pointer(&libc_readlink)), 4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), uintptr(bufSize), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, times *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_utimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_utimensat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_unlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getcwd(buf *byte, size uint64) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getcwd)), 2, uintptr(unsafe.Pointer(buf)), uintptr(size), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_getgroups)), 2, 
uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_setgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirent(fd int, buf []byte) (n int, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getdirent)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid _Pid_t, status *_C_int, options int, rusage *Rusage) (wpid _Pid_t, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_wait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(status)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = _Pid_t(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getkerninfo(op int32, where uintptr, size uintptr, arg int64) (i int32, err 
error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Getkerninfo)), 4, uintptr(op), uintptr(where), uintptr(size), uintptr(arg), 0, 0) + i = int32(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_setsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_getpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_getsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_recvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Shutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { 
+ r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_accept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Openat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace64(request int, id int64, addr int64, data int, buff uintptr) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_ptrace64)), 5, uintptr(request), uintptr(id), uintptr(addr), uintptr(data), uintptr(buff), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Acct)), 1, uintptr(unsafe.Pointer(_p0)), 
0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Chown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Close)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Dup)), 1, uintptr(fd), 0, 0, 0, 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
syscall6(uintptr(unsafe.Pointer(&libc_Faccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, 
err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fstatfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Ftruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getgid)), 0, 0, 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getpid)), 0, 0, 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Geteuid)), 0, 0, 0, 0, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getegid)), 0, 0, 0, 0, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Getppid)), 0, 0, 0, 0, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := rawSyscall6(uintptr(unsafe.Pointer(&libc_Getuid)), 0, 0, 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum Signal) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Kill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Lchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Link)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err 
= errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Lstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Mknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Open)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) + 
fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Pread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Pwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_read)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Reboot(how int) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Reboot)), 1, uintptr(how), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Rename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Renameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Rmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Seteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Setreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Stat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Statfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Symlink)), 2, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Truncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall6(uintptr(unsafe.Pointer(&libc_Umask)), 1, uintptr(newmask), 0, 0, 0, 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_Unlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := rawSyscall6(uintptr(unsafe.Pointer(&libc_Uname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 *byte + if len(p) > 0 { + _p0 = &p[0] + } + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_write)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func gettimeofday(tv *Timeval, tzp *Timezone) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_gettimeofday)), 2, uintptr(unsafe.Pointer(tv)), 
uintptr(unsafe.Pointer(tzp)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_mmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_munmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/syscall/zsyscall_darwin_386.go b/src/syscall/zsyscall_darwin_386.go index 44fc684f7a335..758ff7b129dd2 100644 --- a/src/syscall/zsyscall_darwin_386.go +++ b/src/syscall/zsyscall_darwin_386.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// mksyscall.pl -l32 -darwin -tags darwin,386 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go // Code generated by the command above; DO NOT EDIT. 
// +build darwin,386 @@ -10,7 +10,7 @@ import "unsafe" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -18,20 +18,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +func libc_getgroups_trampoline() + +//go:linkname libc_getgroups libc_getgroups +//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgroups_trampoline() + +//go:linkname libc_setgroups libc_setgroups +//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -39,10 +47,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +func libc_wait4_trampoline() + +//go:linkname libc_wait4 libc_wait4 +//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -50,30 +62,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +func libc_accept_trampoline() + +//go:linkname libc_accept libc_accept +//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_bind_trampoline() + +//go:linkname libc_bind libc_bind +//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_connect_trampoline() + +//go:linkname libc_connect libc_connect +//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ 
-81,66 +105,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +func libc_socket_trampoline() + +//go:linkname libc_socket libc_socket +//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockopt_trampoline() + +//go:linkname libc_getsockopt libc_getsockopt +//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setsockopt_trampoline() + +//go:linkname libc_setsockopt libc_setsockopt +//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getpeername_trampoline() + 
+//go:linkname libc_getpeername libc_getpeername +//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockname_trampoline() + +//go:linkname libc_getsockname libc_getsockname +//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_shutdown_trampoline() + +//go:linkname libc_shutdown libc_shutdown +//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_socketpair_trampoline() + +//go:linkname libc_socketpair libc_socketpair +//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ 
-150,7 +202,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -158,6 +210,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +func libc_recvfrom_trampoline() + +//go:linkname libc_recvfrom libc_recvfrom +//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -167,17 +223,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sendto_trampoline() + +//go:linkname libc_sendto libc_sendto +//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -185,10 +245,14 @@ func recvmsg(s int, msg *Msghdr, flags 
int) (n int, err error) { return } +func libc_recvmsg_trampoline() + +//go:linkname libc_recvmsg libc_recvmsg +//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -196,10 +260,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +func libc_sendmsg_trampoline() + +//go:linkname libc_sendmsg libc_sendmsg +//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -207,22 +275,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} +func libc_kevent_trampoline() +//go:linkname libc_kevent libc_kevent +//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -231,27 +287,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_utimes_trampoline() + +//go:linkname libc_utimes libc_utimes +//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_futimes_trampoline() + +//go:linkname libc_futimes libc_futimes +//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -259,38 +323,52 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace(request int, pid int, addr uintptr, data uintptr) 
(err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe(p *[2]int32) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_pipe_trampoline() + +//go:linkname libc_pipe libc_pipe +//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_kill_trampoline() + +//go:linkname libc_kill libc_kill +//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -299,23 +377,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_access_trampoline() + +//go:linkname libc_access libc_access +//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_adjtime_trampoline() + +//go:linkname libc_adjtime libc_adjtime +//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -324,13 +410,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chdir_trampoline() + +//go:linkname libc_chdir libc_chdir +//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -339,13 +429,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chflags_trampoline() + +//go:linkname libc_chflags libc_chflags +//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -354,13 +448,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := 
syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chmod_trampoline() + +//go:linkname libc_chmod libc_chmod +//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -369,13 +467,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chown_trampoline() + +//go:linkname libc_chown libc_chown +//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -384,27 +486,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chroot_trampoline() + +//go:linkname libc_chroot libc_chroot +//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_close_trampoline() + +//go:linkname libc_close libc_close +//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := 
syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -412,16 +522,24 @@ func Dup(fd int) (nfd int, err error) { return } +func libc_dup_trampoline() + +//go:linkname libc_dup libc_dup +//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_dup2_trampoline() + +//go:linkname libc_dup2 libc_dup2 +//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exchangedata(path1 string, path2 string, options int) (err error) { @@ -435,67 +553,91 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_exchangedata_trampoline() + +//go:linkname libc_exchangedata libc_exchangedata +//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchdir_trampoline() + +//go:linkname libc_fchdir libc_fchdir +//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, 
e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchflags_trampoline() + +//go:linkname libc_fchflags libc_fchflags +//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchmod_trampoline() + +//go:linkname libc_fchmod libc_fchmod +//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchown_trampoline() + +//go:linkname libc_fchown libc_fchown +//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_flock_trampoline() + +//go:linkname libc_flock libc_flock +//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) val = int(r0) 
if e1 != 0 { err = errnoErr(e1) @@ -503,99 +645,90 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_fpathconf_trampoline() +//go:linkname libc_fpathconf libc_fpathconf +//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fsync_trampoline() + +//go:linkname libc_fsync libc_fsync +//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + _, _, e1 := syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} +func libc_ftruncate_trampoline() +//go:linkname libc_ftruncate libc_ftruncate +//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + r0, _, _ := syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) return } +func libc_getdtablesize_trampoline() + +//go:linkname libc_getdtablesize libc_getdtablesize +//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) egid = int(r0) return } +func libc_getegid_trampoline() + +//go:linkname libc_getegid libc_getegid +//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_geteuid_trampoline() + +//go:linkname libc_geteuid libc_geteuid +//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) gid = int(r0) return } +func libc_getgid_trampoline() + +//go:linkname libc_getgid libc_getgid +//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { 
err = errnoErr(e1) @@ -603,34 +736,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +func libc_getpgid_trampoline() + +//go:linkname libc_getpgid libc_getpgid +//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) pgrp = int(r0) return } +func libc_getpgrp_trampoline() + +//go:linkname libc_getpgrp libc_getpgrp +//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) pid = int(r0) return } +func libc_getpid_trampoline() + +//go:linkname libc_getpid libc_getpid +//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) ppid = int(r0) return } +func libc_getppid_trampoline() + +//go:linkname libc_getppid libc_getppid +//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -638,30 +787,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } +func libc_getpriority_trampoline() + +//go:linkname libc_getpriority libc_getpriority +//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrlimit_trampoline() + +//go:linkname libc_getrlimit libc_getrlimit +//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrusage_trampoline() + +//go:linkname libc_getrusage libc_getrusage +//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -669,26 +830,38 @@ func Getsid(pid int) (sid int, err error) { return } +func libc_getsid_trampoline() + +//go:linkname libc_getsid libc_getsid +//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_getuid_trampoline() + +//go:linkname libc_getuid libc_getuid +//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted 
bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) tainted = bool(r0 != 0) return } +func libc_issetugid_trampoline() + +//go:linkname libc_issetugid libc_issetugid +//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -696,6 +869,10 @@ func Kqueue() (fd int, err error) { return } +func libc_kqueue_trampoline() + +//go:linkname libc_kqueue libc_kqueue +//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -704,13 +881,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lchown_trampoline() + +//go:linkname libc_lchown libc_lchown +//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -724,38 +905,31 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_link_trampoline() + +//go:linkname libc_link libc_link +//go:cgo_import_dynamic libc_link link 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_listen_trampoline() +//go:linkname libc_listen libc_listen +//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -764,13 +938,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkdir_trampoline() + +//go:linkname libc_mkdir libc_mkdir +//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -779,13 +957,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkfifo_trampoline() + +//go:linkname libc_mkfifo libc_mkfifo +//go:cgo_import_dynamic libc_mkfifo mkfifo 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -794,13 +976,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mknod_trampoline() + +//go:linkname libc_mknod libc_mknod +//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -810,23 +996,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlock_trampoline() + +//go:linkname libc_mlock libc_mlock +//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlockall_trampoline() + +//go:linkname libc_mlockall libc_mlockall +//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -836,13 +1030,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), 
uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mprotect_trampoline() + +//go:linkname libc_mprotect libc_mprotect +//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -852,23 +1050,31 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlock_trampoline() + +//go:linkname libc_munlock libc_munlock +//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlockall_trampoline() + +//go:linkname libc_munlockall libc_munlockall +//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -877,7 +1083,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -885,6 +1091,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +func libc_open_trampoline() + +//go:linkname libc_open libc_open +//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Pathconf(path string, name int) (val int, err error) { @@ -893,7 +1103,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -901,6 +1111,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +func libc_pathconf_trampoline() + +//go:linkname libc_pathconf libc_pathconf +//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { @@ -910,7 +1124,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -918,6 +1132,10 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pread_trampoline() + +//go:linkname libc_pread libc_pread +//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -927,7 +1145,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -935,6 +1153,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pwrite_trampoline() + +//go:linkname libc_pwrite libc_pwrite +//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -944,7 +1166,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -952,6 +1174,10 @@ func read(fd int, p []byte) (n int, err error) { return } +func libc_read_trampoline() + +//go:linkname libc_read libc_read +//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -966,7 +1192,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -974,6 +1200,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +func libc_readlink_trampoline() + +//go:linkname libc_readlink libc_readlink +//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -987,13 +1217,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := 
syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rename_trampoline() + +//go:linkname libc_rename libc_rename +//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1002,13 +1236,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_revoke_trampoline() + +//go:linkname libc_revoke libc_revoke +//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1017,17 +1255,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rmdir_trampoline() + +//go:linkname libc_rmdir libc_rmdir +//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + r0, r1, e1 := syscall6X(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1035,46 +1277,66 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +func libc_lseek_trampoline() + +//go:linkname libc_lseek libc_lseek 
+//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + _, _, e1 := syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_select_trampoline() + +//go:linkname libc_select libc_select +//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setegid_trampoline() + +//go:linkname libc_setegid libc_setegid +//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_seteuid_trampoline() + +//go:linkname libc_seteuid libc_seteuid +//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgid_trampoline() + +//go:linkname libc_setgid libc_setgid +//go:cgo_import_dynamic 
libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1083,77 +1345,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setlogin_trampoline() + +//go:linkname libc_setlogin libc_setlogin +//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpgid_trampoline() + +//go:linkname libc_setpgid libc_setpgid +//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpriority_trampoline() + +//go:linkname libc_setpriority libc_setpriority +//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + _, _, e1 := syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setprivexec_trampoline() + +//go:linkname libc_setprivexec libc_setprivexec +//go:cgo_import_dynamic 
libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setregid_trampoline() + +//go:linkname libc_setregid libc_setregid +//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setreuid_trampoline() + +//go:linkname libc_setreuid libc_setreuid +//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setrlimit_trampoline() + +//go:linkname libc_setrlimit libc_setrlimit +//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1161,164 +1451,410 @@ func Setsid() (pid int, err error) { return } +func libc_setsid_trampoline() + +//go:linkname libc_setsid libc_setsid +//go:cgo_import_dynamic libc_setsid setsid 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_settimeofday_trampoline() + +//go:linkname libc_settimeofday libc_settimeofday +//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setuid_trampoline() + +//go:linkname libc_setuid libc_setuid +//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_symlink_trampoline() + +//go:linkname libc_symlink libc_symlink +//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func Sync() (err error) { + _, _, e1 := syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sync_trampoline() + 
+//go:linkname libc_sync libc_sync +//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_truncate_trampoline() + +//go:linkname libc_truncate libc_truncate +//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlink(path string, link string) (err error) { +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +func libc_umask_trampoline() + +//go:linkname libc_umask libc_umask +//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_undelete_trampoline() + +//go:linkname libc_undelete libc_undelete +//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlink_trampoline() + +//go:linkname libc_unlink libc_unlink +//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Truncate(path string, length int64) (err error) { +func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + _, _, e1 := syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unmount_trampoline() + +//go:linkname libc_unmount libc_unmount +//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } +func libc_write_trampoline() + +//go:linkname libc_write libc_write +//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = 
unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + r0, _, e1 := syscall(funcPC(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + cnt = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_writev_trampoline() + +//go:linkname libc_writev libc_writev +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlink(path string) (err error) { +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_mmap_trampoline() + +//go:linkname libc_mmap libc_mmap +//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_munmap_trampoline() + +//go:linkname libc_munmap libc_munmap +//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fork() (pid int, err error) { + r0, _, e1 := rawSyscall(funcPC(libc_fork_trampoline), 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fork_trampoline() + +//go:linkname libc_fork libc_fork +//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req int, arg int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), 
uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ioctl_trampoline() + +//go:linkname libc_ioctl libc_ioctl +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func execve(path *byte, argv **byte, envp **byte) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_execve_trampoline() + +//go:linkname libc_execve libc_execve +//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exit(res int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_exit_trampoline), uintptr(res), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_exit_trampoline() + +//go:linkname libc_exit libc_exit +//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl 
"/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) { + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(fd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlinkat_trampoline() + +//go:linkname libc_unlinkat libc_unlinkat +//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unmount(path string, flags int) (err error) { +func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + r0, _, e1 := syscall6(funcPC(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0) + fdret = int(r0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_openat_trampoline() + +//go:linkname libc_openat libc_openat +//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func write(fd int, p []byte) (n int, err error) { +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstat64_trampoline() + +//go:linkname 
libc_fstat64 libc_fstat64 +//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatfs64_trampoline() + +//go:linkname libc_fstatfs64 libc_fstatfs64 +//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1326,57 +1862,97 @@ func write(fd int, p []byte) (n int, err error) { return } +func libc___getdirentries64_trampoline() + +//go:linkname libc___getdirentries64 libc___getdirentries64 +//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) - ret = uintptr(r0) +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_gettimeofday_trampoline() + +//go:linkname 
libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lstat64_trampoline() + +//go:linkname libc_lstat64 libc_lstat64 +//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_stat64_trampoline() + +//go:linkname libc_stat64 libc_stat64 +//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func 
libc_statfs64_trampoline() + +//go:linkname libc_statfs64 libc_statfs64 +//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) +func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +func libc_fstatat64_trampoline() + +//go:linkname libc_fstatat64 libc_fstatat64 +//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_386.s b/src/syscall/zsyscall_darwin_386.s new file mode 100644 index 0000000000000..a688192501b32 --- /dev/null +++ b/src/syscall/zsyscall_darwin_386.s @@ -0,0 +1,249 @@ +// go run mkasm_darwin.go 386 +// Code generated by the command above; DO NOT EDIT. 
+#include "textflag.h" +TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getattrlist(SB) +TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getfsstat64(SB) +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendfile(SB) +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) +TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pipe(SB) +TEXT 
·libc_kill_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exchangedata(SB) +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getdtablesize(SB) +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +TEXT 
·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +TEXT 
·libc_select_trampoline(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setprivexec(SB) +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + JMP libc_undelete(SB) +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fork(SB) +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 + JMP libc_execve(SB) +TEXT 
·libc_exit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstat64(SB) +TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatfs64(SB) +TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 + JMP libc___getdirentries64(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lstat64(SB) +TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_stat64(SB) +TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_statfs64(SB) +TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatat64(SB) diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go index a2a95006a214a..afc3d72d8d7cd 100644 --- a/src/syscall/zsyscall_darwin_amd64.go +++ b/src/syscall/zsyscall_darwin_amd64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// mksyscall.pl -darwin -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; DO NOT EDIT. 
// +build darwin,amd64 @@ -10,7 +10,7 @@ import "unsafe" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -18,20 +18,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +func libc_getgroups_trampoline() + +//go:linkname libc_getgroups libc_getgroups +//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgroups_trampoline() + +//go:linkname libc_setgroups libc_setgroups +//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -39,10 +47,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +func libc_wait4_trampoline() + +//go:linkname libc_wait4 libc_wait4 +//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -50,30 +62,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +func libc_accept_trampoline() + +//go:linkname libc_accept libc_accept +//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_bind_trampoline() + +//go:linkname libc_bind libc_bind +//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_connect_trampoline() + +//go:linkname libc_connect libc_connect +//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ 
-81,66 +105,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +func libc_socket_trampoline() + +//go:linkname libc_socket libc_socket +//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockopt_trampoline() + +//go:linkname libc_getsockopt libc_getsockopt +//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setsockopt_trampoline() + +//go:linkname libc_setsockopt libc_setsockopt +//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getpeername_trampoline() + 
+//go:linkname libc_getpeername libc_getpeername +//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockname_trampoline() + +//go:linkname libc_getsockname libc_getsockname +//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_shutdown_trampoline() + +//go:linkname libc_shutdown libc_shutdown +//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_socketpair_trampoline() + +//go:linkname libc_socketpair libc_socketpair +//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ 
-150,7 +202,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -158,6 +210,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +func libc_recvfrom_trampoline() + +//go:linkname libc_recvfrom libc_recvfrom +//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -167,17 +223,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sendto_trampoline() + +//go:linkname libc_sendto libc_sendto +//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -185,10 +245,14 @@ func recvmsg(s int, msg *Msghdr, flags 
int) (n int, err error) { return } +func libc_recvmsg_trampoline() + +//go:linkname libc_recvmsg libc_recvmsg +//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -196,10 +260,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +func libc_sendmsg_trampoline() + +//go:linkname libc_sendmsg libc_sendmsg +//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -207,22 +275,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} +func libc_kevent_trampoline() +//go:linkname libc_kevent libc_kevent +//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -231,27 +287,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_utimes_trampoline() + +//go:linkname libc_utimes libc_utimes +//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_futimes_trampoline() + +//go:linkname libc_futimes libc_futimes +//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -259,38 +323,52 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace(request int, pid int, addr uintptr, data uintptr) 
(err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe(p *[2]int32) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_pipe_trampoline() + +//go:linkname libc_pipe libc_pipe +//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_kill_trampoline() + +//go:linkname libc_kill libc_kill +//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -299,23 +377,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_access_trampoline() + +//go:linkname libc_access libc_access +//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_adjtime_trampoline() + +//go:linkname libc_adjtime libc_adjtime +//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -324,13 +410,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chdir_trampoline() + +//go:linkname libc_chdir libc_chdir +//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -339,13 +429,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chflags_trampoline() + +//go:linkname libc_chflags libc_chflags +//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -354,13 +448,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := 
syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chmod_trampoline() + +//go:linkname libc_chmod libc_chmod +//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -369,13 +467,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chown_trampoline() + +//go:linkname libc_chown libc_chown +//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -384,27 +486,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chroot_trampoline() + +//go:linkname libc_chroot libc_chroot +//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_close_trampoline() + +//go:linkname libc_close libc_close +//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := 
syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -412,16 +522,24 @@ func Dup(fd int) (nfd int, err error) { return } +func libc_dup_trampoline() + +//go:linkname libc_dup libc_dup +//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_dup2_trampoline() + +//go:linkname libc_dup2 libc_dup2 +//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exchangedata(path1 string, path2 string, options int) (err error) { @@ -435,67 +553,91 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_exchangedata_trampoline() + +//go:linkname libc_exchangedata libc_exchangedata +//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchdir_trampoline() + +//go:linkname libc_fchdir libc_fchdir +//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, 
e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchflags_trampoline() + +//go:linkname libc_fchflags libc_fchflags +//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchmod_trampoline() + +//go:linkname libc_fchmod libc_fchmod +//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchown_trampoline() + +//go:linkname libc_fchown libc_fchown +//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_flock_trampoline() + +//go:linkname libc_flock libc_flock +//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) val = int(r0) 
if e1 != 0 { err = errnoErr(e1) @@ -503,99 +645,90 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_fpathconf_trampoline() +//go:linkname libc_fpathconf libc_fpathconf +//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fsync_trampoline() + +//go:linkname libc_fsync libc_fsync +//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func 
libc_ftruncate_trampoline() +//go:linkname libc_ftruncate libc_ftruncate +//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + r0, _, _ := syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) return } +func libc_getdtablesize_trampoline() + +//go:linkname libc_getdtablesize libc_getdtablesize +//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) egid = int(r0) return } +func libc_getegid_trampoline() + +//go:linkname libc_getegid libc_getegid +//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_geteuid_trampoline() + +//go:linkname libc_geteuid libc_geteuid +//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) gid = int(r0) return } +func libc_getgid_trampoline() + +//go:linkname libc_getgid libc_getgid +//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -603,34 +736,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +func libc_getpgid_trampoline() + +//go:linkname libc_getpgid libc_getpgid +//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) pgrp = int(r0) return } +func libc_getpgrp_trampoline() + +//go:linkname libc_getpgrp libc_getpgrp +//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) pid = int(r0) return } +func libc_getpid_trampoline() + +//go:linkname libc_getpid libc_getpid +//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) ppid = int(r0) return } +func libc_getppid_trampoline() + +//go:linkname libc_getppid libc_getppid +//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -638,30 +787,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } +func libc_getpriority_trampoline() + +//go:linkname libc_getpriority libc_getpriority +//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrlimit_trampoline() + +//go:linkname libc_getrlimit libc_getrlimit +//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrusage_trampoline() + +//go:linkname libc_getrusage libc_getrusage +//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -669,26 +830,38 @@ func Getsid(pid int) (sid int, err error) { return } +func libc_getsid_trampoline() + +//go:linkname libc_getsid libc_getsid +//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_getuid_trampoline() + +//go:linkname libc_getuid libc_getuid +//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - 
r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) tainted = bool(r0 != 0) return } +func libc_issetugid_trampoline() + +//go:linkname libc_issetugid libc_issetugid +//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -696,6 +869,10 @@ func Kqueue() (fd int, err error) { return } +func libc_kqueue_trampoline() + +//go:linkname libc_kqueue libc_kqueue +//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -704,13 +881,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lchown_trampoline() + +//go:linkname libc_lchown libc_lchown +//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -724,38 +905,31 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_link_trampoline() + +//go:linkname libc_link libc_link +//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_listen_trampoline() +//go:linkname libc_listen libc_listen +//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -764,13 +938,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkdir_trampoline() + +//go:linkname libc_mkdir libc_mkdir +//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -779,13 +957,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkfifo_trampoline() + +//go:linkname libc_mkfifo libc_mkfifo +//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -794,13 +976,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mknod_trampoline() + +//go:linkname libc_mknod libc_mknod +//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -810,23 +996,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlock_trampoline() + +//go:linkname libc_mlock libc_mlock +//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlockall_trampoline() + +//go:linkname libc_mlockall libc_mlockall +//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -836,13 +1030,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } 
return } +func libc_mprotect_trampoline() + +//go:linkname libc_mprotect libc_mprotect +//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -852,23 +1050,31 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlock_trampoline() + +//go:linkname libc_munlock libc_munlock +//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlockall_trampoline() + +//go:linkname libc_munlockall libc_munlockall +//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -877,7 +1083,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -885,6 +1091,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +func libc_open_trampoline() + +//go:linkname libc_open libc_open +//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ 
-893,7 +1103,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -901,6 +1111,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +func libc_pathconf_trampoline() + +//go:linkname libc_pathconf libc_pathconf +//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { @@ -910,7 +1124,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -918,6 +1132,10 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pread_trampoline() + +//go:linkname libc_pread libc_pread +//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -927,7 +1145,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -935,6 +1153,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pwrite_trampoline() + 
+//go:linkname libc_pwrite libc_pwrite +//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -944,7 +1166,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -952,6 +1174,10 @@ func read(fd int, p []byte) (n int, err error) { return } +func libc_read_trampoline() + +//go:linkname libc_read libc_read +//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -966,7 +1192,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -974,6 +1200,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +func libc_readlink_trampoline() + +//go:linkname libc_readlink libc_readlink +//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -987,13 +1217,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } 
+func libc_rename_trampoline() + +//go:linkname libc_rename libc_rename +//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1002,13 +1236,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_revoke_trampoline() + +//go:linkname libc_revoke libc_revoke +//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1017,17 +1255,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rmdir_trampoline() + +//go:linkname libc_rmdir libc_rmdir +//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscallX(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1035,46 +1277,66 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +func libc_lseek_trampoline() + +//go:linkname libc_lseek libc_lseek +//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 
:= Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + _, _, e1 := syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_select_trampoline() + +//go:linkname libc_select libc_select +//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setegid_trampoline() + +//go:linkname libc_setegid libc_setegid +//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_seteuid_trampoline() + +//go:linkname libc_seteuid libc_seteuid +//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgid_trampoline() + +//go:linkname libc_setgid libc_setgid +//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1083,77 +1345,105 @@ func Setlogin(name string) (err error) { if err != 
nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setlogin_trampoline() + +//go:linkname libc_setlogin libc_setlogin +//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpgid_trampoline() + +//go:linkname libc_setpgid libc_setpgid +//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpriority_trampoline() + +//go:linkname libc_setpriority libc_setpriority +//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + _, _, e1 := syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setprivexec_trampoline() + +//go:linkname libc_setprivexec libc_setprivexec +//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), 
uintptr(egid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setregid_trampoline() + +//go:linkname libc_setregid libc_setregid +//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setreuid_trampoline() + +//go:linkname libc_setreuid libc_setreuid +//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setrlimit_trampoline() + +//go:linkname libc_setrlimit libc_setrlimit +//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1161,164 +1451,410 @@ func Setsid() (pid int, err error) { return } +func libc_setsid_trampoline() + +//go:linkname libc_setsid libc_setsid +//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := 
rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_settimeofday_trampoline() + +//go:linkname libc_settimeofday libc_settimeofday +//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setuid_trampoline() + +//go:linkname libc_setuid libc_setuid +//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_symlink_trampoline() + +//go:linkname libc_symlink libc_symlink +//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func Sync() (err error) { + _, _, e1 := syscall(funcPC(libc_sync_trampoline), 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sync_trampoline() + +//go:linkname libc_sync libc_sync +//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { var _p0 *byte 
_p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_truncate_trampoline() + +//go:linkname libc_truncate libc_truncate +//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Symlink(path string, link string) (err error) { +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +func libc_umask_trampoline() + +//go:linkname libc_umask libc_umask +//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(link) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_undelete_trampoline() + +//go:linkname libc_undelete libc_undelete +//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlink_trampoline() + +//go:linkname libc_unlink libc_unlink 
+//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Truncate(path string, length int64) (err error) { +func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unmount_trampoline() + +//go:linkname libc_unmount libc_unmount +//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) - oldmask = int(r0) +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } return } +func libc_write_trampoline() + +//go:linkname libc_write libc_write +//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Undelete(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return +func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + r0, _, e1 := syscallX(funcPC(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + cnt = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } return } +func 
libc_writev_trampoline() + +//go:linkname libc_writev libc_writev +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unlink(path string) (err error) { +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := syscall6X(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_mmap_trampoline() + +//go:linkname libc_mmap libc_mmap +//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_munmap_trampoline() + +//go:linkname libc_munmap libc_munmap +//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fork() (pid int, err error) { + r0, _, e1 := rawSyscall(funcPC(libc_fork_trampoline), 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fork_trampoline() + +//go:linkname libc_fork libc_fork +//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req int, arg int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ioctl_trampoline() + +//go:linkname libc_ioctl libc_ioctl +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req 
uint, arg unsafe.Pointer) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func execve(path *byte, argv **byte, envp **byte) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_execve_trampoline() + +//go:linkname libc_execve libc_execve +//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exit(res int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_exit_trampoline), uintptr(res), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_exit_trampoline() + +//go:linkname libc_exit libc_exit +//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) { + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(fd int, path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlinkat_trampoline() + +//go:linkname libc_unlinkat libc_unlinkat +//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Unmount(path string, flags int) (err error) { +func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + r0, _, e1 := syscall6(funcPC(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0) + fdret = int(r0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_openat_trampoline() + +//go:linkname libc_openat libc_openat +//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func write(fd int, p []byte) (n int, err error) { +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstat64_trampoline() + +//go:linkname libc_fstat64 libc_fstat64 +//go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstatfs64_trampoline), uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatfs64_trampoline() + +//go:linkname libc_fstatfs64 libc_fstatfs64 +//go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1326,57 +1862,97 @@ func write(fd int, p []byte) (n int, err error) { return } +func libc___getdirentries64_trampoline() + +//go:linkname libc___getdirentries64 libc___getdirentries64 +//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) - ret = uintptr(r0) +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) +func Lstat(path 
string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_lstat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lstat64_trampoline() + +//go:linkname libc_lstat64 libc_lstat64 +//go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_stat64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_stat64_trampoline() + +//go:linkname libc_stat64 libc_stat64 +//go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_statfs64_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_statfs64_trampoline() + +//go:linkname libc_statfs64 libc_statfs64 +//go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, 
uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) +func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(funcPC(libc_fstatat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +func libc_fstatat64_trampoline() + +//go:linkname libc_fstatat64 libc_fstatat64 +//go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_amd64.s b/src/syscall/zsyscall_darwin_amd64.s new file mode 100644 index 0000000000000..21ab38e3eedde --- /dev/null +++ b/src/syscall/zsyscall_darwin_amd64.s @@ -0,0 +1,249 @@ +// go run mkasm_darwin.go amd64 +// Code generated by the command above; DO NOT EDIT. +#include "textflag.h" +TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getattrlist(SB) +TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getfsstat64(SB) +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendfile(SB) +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +TEXT 
·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) +TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pipe(SB) +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exchangedata(SB) +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + 
JMP libc_fchown(SB) +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getdtablesize(SB) +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + JMP 
libc_mprotect(SB) +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setprivexec(SB) +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + JMP 
libc_sync(SB) +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + JMP libc_undelete(SB) +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fork(SB) +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 + JMP libc_execve(SB) +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstat64(SB) +TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatfs64(SB) +TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 + JMP libc___getdirentries64(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lstat64(SB) +TEXT ·libc_stat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_stat64(SB) +TEXT ·libc_statfs64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_statfs64(SB) +TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatat64(SB) diff --git a/src/syscall/zsyscall_darwin_arm.go b/src/syscall/zsyscall_darwin_arm.go index 419fd3ab1cd00..80ef9e514fbb1 100644 --- a/src/syscall/zsyscall_darwin_arm.go +++ b/src/syscall/zsyscall_darwin_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -l32 -tags darwin,arm syscall_bsd.go 
syscall_darwin.go syscall_darwin_arm.go +// mksyscall.pl -l32 -darwin -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // Code generated by the command above; DO NOT EDIT. // +build darwin,arm @@ -10,7 +10,7 @@ import "unsafe" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -18,20 +18,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +func libc_getgroups_trampoline() + +//go:linkname libc_getgroups libc_getgroups +//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgroups_trampoline() + +//go:linkname libc_setgroups libc_setgroups +//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -39,10 +47,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +func 
libc_wait4_trampoline() + +//go:linkname libc_wait4 libc_wait4 +//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -50,30 +62,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +func libc_accept_trampoline() + +//go:linkname libc_accept libc_accept +//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_bind_trampoline() + +//go:linkname libc_bind libc_bind +//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_connect_trampoline() + +//go:linkname libc_connect libc_connect +//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -81,66 +105,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +func libc_socket_trampoline() + +//go:linkname libc_socket libc_socket +//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockopt_trampoline() + +//go:linkname libc_getsockopt libc_getsockopt +//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setsockopt_trampoline() + +//go:linkname libc_setsockopt libc_setsockopt +//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := 
rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getpeername_trampoline() + +//go:linkname libc_getpeername libc_getpeername +//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockname_trampoline() + +//go:linkname libc_getsockname libc_getsockname +//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_shutdown_trampoline() + +//go:linkname libc_shutdown libc_shutdown +//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_socketpair_trampoline() + +//go:linkname libc_socketpair libc_socketpair +//go:cgo_import_dynamic libc_socketpair socketpair 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -150,7 +202,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -158,6 +210,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +func libc_recvfrom_trampoline() + +//go:linkname libc_recvfrom libc_recvfrom +//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -167,17 +223,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sendto_trampoline() + +//go:linkname libc_sendto libc_sendto +//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := 
syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -185,10 +245,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +func libc_recvmsg_trampoline() + +//go:linkname libc_recvmsg libc_recvmsg +//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -196,10 +260,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +func libc_sendmsg_trampoline() + +//go:linkname libc_sendmsg libc_sendmsg +//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -207,22 +275,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := 
Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_kevent_trampoline() +//go:linkname libc_kevent libc_kevent +//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -231,27 +287,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_utimes_trampoline() + +//go:linkname libc_utimes libc_utimes +//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_futimes_trampoline() + +//go:linkname libc_futimes libc_futimes +//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -259,38 +323,52 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl 
libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe(p *[2]int32) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_pipe_trampoline() + +//go:linkname libc_pipe libc_pipe +//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_kill_trampoline() + +//go:linkname libc_kill libc_kill +//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -299,23 +377,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) 
if e1 != 0 { err = errnoErr(e1) } return } +func libc_access_trampoline() + +//go:linkname libc_access libc_access +//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_adjtime_trampoline() + +//go:linkname libc_adjtime libc_adjtime +//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -324,13 +410,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chdir_trampoline() + +//go:linkname libc_chdir libc_chdir +//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -339,13 +429,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chflags_trampoline() + +//go:linkname libc_chflags libc_chflags +//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -354,13 +448,17 @@ 
func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chmod_trampoline() + +//go:linkname libc_chmod libc_chmod +//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -369,13 +467,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chown_trampoline() + +//go:linkname libc_chown libc_chown +//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -384,27 +486,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chroot_trampoline() + +//go:linkname libc_chroot libc_chroot +//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_close_trampoline() + +//go:linkname libc_close libc_close +//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -412,16 +522,24 @@ func Dup(fd int) (nfd int, err error) { return } +func libc_dup_trampoline() + +//go:linkname libc_dup libc_dup +//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_dup2_trampoline() + +//go:linkname libc_dup2 libc_dup2 +//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exchangedata(path1 string, path2 string, options int) (err error) { @@ -435,67 +553,91 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_exchangedata_trampoline() + +//go:linkname libc_exchangedata libc_exchangedata +//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchdir_trampoline() + +//go:linkname libc_fchdir libc_fchdir +//go:cgo_import_dynamic 
libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchflags_trampoline() + +//go:linkname libc_fchflags libc_fchflags +//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchmod_trampoline() + +//go:linkname libc_fchmod libc_fchmod +//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchown_trampoline() + +//go:linkname libc_fchown libc_fchown +//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_flock_trampoline() + +//go:linkname libc_flock libc_flock +//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - 
r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -503,99 +645,90 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_fpathconf_trampoline() +//go:linkname libc_fpathconf libc_fpathconf +//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fsync_trampoline() + +//go:linkname libc_fsync libc_fsync +//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), uintptr(length>>32)) + _, _, e1 := syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := 
Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_ftruncate_trampoline() +//go:linkname libc_ftruncate libc_ftruncate +//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + r0, _, _ := syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) return } +func libc_getdtablesize_trampoline() + +//go:linkname libc_getdtablesize libc_getdtablesize +//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) egid = int(r0) return } +func libc_getegid_trampoline() + +//go:linkname libc_getegid libc_getegid +//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_geteuid_trampoline() + +//go:linkname libc_geteuid libc_geteuid +//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) gid = int(r0) return } +func libc_getgid_trampoline() + +//go:linkname libc_getgid libc_getgid +//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err 
error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -603,34 +736,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +func libc_getpgid_trampoline() + +//go:linkname libc_getpgid libc_getpgid +//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) pgrp = int(r0) return } +func libc_getpgrp_trampoline() + +//go:linkname libc_getpgrp libc_getpgrp +//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) pid = int(r0) return } +func libc_getpid_trampoline() + +//go:linkname libc_getpid libc_getpid +//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) ppid = int(r0) return } +func libc_getppid_trampoline() + +//go:linkname libc_getppid libc_getppid +//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -638,30 +787,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } +func 
libc_getpriority_trampoline() + +//go:linkname libc_getpriority libc_getpriority +//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrlimit_trampoline() + +//go:linkname libc_getrlimit libc_getrlimit +//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrusage_trampoline() + +//go:linkname libc_getrusage libc_getrusage +//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -669,26 +830,38 @@ func Getsid(pid int) (sid int, err error) { return } +func libc_getsid_trampoline() + +//go:linkname libc_getsid libc_getsid +//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_getuid_trampoline() + +//go:linkname 
libc_getuid libc_getuid +//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) tainted = bool(r0 != 0) return } +func libc_issetugid_trampoline() + +//go:linkname libc_issetugid libc_issetugid +//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -696,6 +869,10 @@ func Kqueue() (fd int, err error) { return } +func libc_kqueue_trampoline() + +//go:linkname libc_kqueue libc_kqueue +//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -704,13 +881,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lchown_trampoline() + +//go:linkname libc_lchown libc_lchown +//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -724,38 +905,31 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_link_trampoline() + +//go:linkname libc_link libc_link +//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_listen_trampoline() +//go:linkname libc_listen libc_listen +//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -764,13 +938,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkdir_trampoline() + +//go:linkname libc_mkdir libc_mkdir +//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -779,13 +957,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if 
e1 != 0 { err = errnoErr(e1) } return } +func libc_mkfifo_trampoline() + +//go:linkname libc_mkfifo libc_mkfifo +//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -794,13 +976,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mknod_trampoline() + +//go:linkname libc_mknod libc_mknod +//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -810,23 +996,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlock_trampoline() + +//go:linkname libc_mlock libc_mlock +//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlockall_trampoline() + +//go:linkname libc_mlockall libc_mlockall +//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -836,13 +1030,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } 
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mprotect_trampoline() + +//go:linkname libc_mprotect libc_mprotect +//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -852,23 +1050,31 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlock_trampoline() + +//go:linkname libc_munlock libc_munlock +//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlockall_trampoline() + +//go:linkname libc_munlockall libc_munlockall +//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -877,7 +1083,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -885,6 +1091,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +func libc_open_trampoline() + +//go:linkname 
libc_open libc_open +//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -893,7 +1103,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -901,6 +1111,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +func libc_pathconf_trampoline() + +//go:linkname libc_pathconf libc_pathconf +//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { @@ -910,7 +1124,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -918,6 +1132,10 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pread_trampoline() + +//go:linkname libc_pread libc_pread +//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -927,7 +1145,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := 
syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -935,6 +1153,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pwrite_trampoline() + +//go:linkname libc_pwrite libc_pwrite +//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -944,7 +1166,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -952,6 +1174,10 @@ func read(fd int, p []byte) (n int, err error) { return } +func libc_read_trampoline() + +//go:linkname libc_read libc_read +//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -966,7 +1192,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -974,6 +1200,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +func libc_readlink_trampoline() + +//go:linkname libc_readlink libc_readlink +//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -987,13 +1217,17 @@ func Rename(from string, to string) 
(err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rename_trampoline() + +//go:linkname libc_rename libc_rename +//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1002,13 +1236,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_revoke_trampoline() + +//go:linkname libc_revoke libc_revoke +//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1017,17 +1255,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rmdir_trampoline() + +//go:linkname libc_rmdir libc_rmdir +//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + r0, r1, e1 := syscall6X(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1035,46 +1277,66 @@ func 
Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +func libc_lseek_trampoline() + +//go:linkname libc_lseek libc_lseek +//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + _, _, e1 := syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_select_trampoline() + +//go:linkname libc_select libc_select +//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setegid_trampoline() + +//go:linkname libc_setegid libc_setegid +//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_seteuid_trampoline() + +//go:linkname libc_seteuid libc_seteuid +//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 
0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgid_trampoline() + +//go:linkname libc_setgid libc_setgid +//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1083,77 +1345,105 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setlogin_trampoline() + +//go:linkname libc_setlogin libc_setlogin +//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpgid_trampoline() + +//go:linkname libc_setpgid libc_setpgid +//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpriority_trampoline() + +//go:linkname libc_setpriority libc_setpriority +//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + _, _, e1 := syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) if e1 != 0 { 
err = errnoErr(e1) } return } +func libc_setprivexec_trampoline() + +//go:linkname libc_setprivexec libc_setprivexec +//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setregid_trampoline() + +//go:linkname libc_setregid libc_setregid +//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setreuid_trampoline() + +//go:linkname libc_setreuid libc_setreuid +//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setrlimit_trampoline() + +//go:linkname libc_setrlimit libc_setrlimit +//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1161,56 +1451,38 @@ func Setsid() (pid int, err error) { return 
} +func libc_setsid_trampoline() + +//go:linkname libc_setsid libc_setsid +//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_settimeofday_trampoline() + +//go:linkname libc_settimeofday libc_settimeofday +//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_setuid_trampoline() +//go:linkname libc_setuid libc_setuid +//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1224,23 +1496,31 @@ func Symlink(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_symlink_trampoline() + +//go:linkname libc_symlink libc_symlink +//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_sync_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sync_trampoline() + +//go:linkname libc_sync libc_sync +//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1249,21 +1529,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + _, _, e1 := syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_truncate_trampoline() + +//go:linkname libc_truncate libc_truncate +//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) oldmask = int(r0) return } +func libc_umask_trampoline() + +//go:linkname libc_umask libc_umask +//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Undelete(path string) (err error) { @@ -1272,13 +1560,17 @@ func Undelete(path 
string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_undelete_trampoline() + +//go:linkname libc_undelete libc_undelete +//go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1287,13 +1579,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlink_trampoline() + +//go:linkname libc_unlink libc_unlink +//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1302,13 +1598,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unmount_trampoline() + +//go:linkname libc_unmount libc_unmount +//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1318,7 +1618,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ 
-1326,10 +1626,35 @@ func write(fd int, p []byte) (n int, err error) { return } +func libc_write_trampoline() + +//go:linkname libc_write libc_write +//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall(funcPC(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + cnt = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_writev_trampoline() + +//go:linkname libc_writev libc_writev +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + r0, _, e1 := syscall9(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1337,46 +1662,302 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +func libc_mmap_trampoline() + +//go:linkname libc_mmap libc_mmap +//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munmap_trampoline() + +//go:linkname libc_munmap libc_munmap 
+//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func fork() (pid int, err error) { + r0, _, e1 := rawSyscall(funcPC(libc_fork_trampoline), 0, 0, 0) + pid = int(r0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fork_trampoline() + +//go:linkname libc_fork libc_fork +//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func ioctl(fd int, req int, arg int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ioctl_trampoline() + +//go:linkname libc_ioctl libc_ioctl +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func execve(path *byte, argv **byte, envp **byte) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_execve_trampoline() + +//go:linkname libc_execve libc_execve +//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + +func exit(res int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_exit_trampoline), uintptr(res), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_exit_trampoline() + +//go:linkname libc_exit libc_exit +//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) { + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(fd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_unlinkat_trampoline() + +//go:linkname libc_unlinkat libc_unlinkat +//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) { + 
var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall6(funcPC(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0) + fdret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_openat_trampoline() + +//go:linkname libc_openat libc_openat +//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstat_trampoline() + +//go:linkname libc_fstat libc_fstat +//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatfs_trampoline() + +//go:linkname libc_fstatfs libc_fstatfs +//go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname 
libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lstat_trampoline() + +//go:linkname libc_lstat libc_lstat +//go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry uintptr, result uintptr) (res int) { + r0, _, _ := syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(entry), uintptr(result)) + res = int(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } + +func libc_stat_trampoline() + +//go:linkname libc_stat libc_stat +//go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_statfs_trampoline() + +//go:linkname libc_statfs libc_statfs +//go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatat_trampoline() + +//go:linkname libc_fstatat libc_fstatat +//go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_arm.s b/src/syscall/zsyscall_darwin_arm.s new file mode 100644 index 0000000000000..f9978d755d49e --- /dev/null +++ b/src/syscall/zsyscall_darwin_arm.s @@ -0,0 +1,253 @@ +// go run mkasm_darwin.go arm +// Code generated by the command above; DO NOT EDIT. 
+#include "textflag.h" +TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getattrlist(SB) +TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getfsstat64(SB) +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendfile(SB) +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) +TEXT 
·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pipe(SB) +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exchangedata(SB) +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getdtablesize(SB) +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + 
JMP libc_getppid(SB) +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +TEXT 
·libc_select_trampoline(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setprivexec(SB) +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + JMP libc_undelete(SB) +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fork(SB) +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 + JMP libc_execve(SB) +TEXT 
·libc_exit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) +TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) diff --git a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index 1807559399294..a917176a31e18 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// mksyscall.pl -darwin -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; DO NOT EDIT. 
// +build darwin,arm64 @@ -10,7 +10,7 @@ import "unsafe" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := rawSyscall(funcPC(libc_getgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -18,20 +18,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +func libc_getgroups_trampoline() + +//go:linkname libc_getgroups libc_getgroups +//go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setgroups_trampoline), uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgroups_trampoline() + +//go:linkname libc_setgroups libc_setgroups +//go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_wait4_trampoline), uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -39,10 +47,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +func libc_wait4_trampoline() + +//go:linkname libc_wait4 libc_wait4 +//go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall(funcPC(libc_accept_trampoline), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -50,30 +62,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +func libc_accept_trampoline() + +//go:linkname libc_accept libc_accept +//go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_bind_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_bind_trampoline() + +//go:linkname libc_bind libc_bind +//go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall(funcPC(libc_connect_trampoline), uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_connect_trampoline() + +//go:linkname libc_connect libc_connect +//go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := rawSyscall(funcPC(libc_socket_trampoline), uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ 
-81,66 +105,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +func libc_socket_trampoline() + +//go:linkname libc_socket libc_socket +//go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall6(funcPC(libc_getsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockopt_trampoline() + +//go:linkname libc_getsockopt libc_getsockopt +//go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall6(funcPC(libc_setsockopt_trampoline), uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setsockopt_trampoline() + +//go:linkname libc_setsockopt libc_setsockopt +//go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getpeername_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getpeername_trampoline() + 
+//go:linkname libc_getpeername libc_getpeername +//go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := rawSyscall(funcPC(libc_getsockname_trampoline), uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getsockname_trampoline() + +//go:linkname libc_getsockname libc_getsockname +//go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_shutdown_trampoline), uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_shutdown_trampoline() + +//go:linkname libc_shutdown libc_shutdown +//go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := rawSyscall6(funcPC(libc_socketpair_trampoline), uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_socketpair_trampoline() + +//go:linkname libc_socketpair libc_socketpair +//go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ 
-150,7 +202,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall6(funcPC(libc_recvfrom_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -158,6 +210,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +func libc_recvfrom_trampoline() + +//go:linkname libc_recvfrom libc_recvfrom +//go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -167,17 +223,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall6(funcPC(libc_sendto_trampoline), uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sendto_trampoline() + +//go:linkname libc_sendto libc_sendto +//go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_recvmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -185,10 +245,14 @@ func recvmsg(s int, msg *Msghdr, flags 
int) (n int, err error) { return } +func libc_recvmsg_trampoline() + +//go:linkname libc_recvmsg libc_recvmsg +//go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall(funcPC(libc_sendmsg_trampoline), uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -196,10 +260,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +func libc_sendmsg_trampoline() + +//go:linkname libc_sendmsg libc_sendmsg +//go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall6(funcPC(libc_kevent_trampoline), uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -207,22 +275,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} +func libc_kevent_trampoline() +//go:linkname libc_kevent libc_kevent +//go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -231,27 +287,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_utimes_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_utimes_trampoline() + +//go:linkname libc_utimes libc_utimes +//go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall(funcPC(libc_futimes_trampoline), uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_futimes_trampoline() + +//go:linkname libc_futimes libc_futimes +//go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -259,38 +323,52 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace(request int, pid int, addr uintptr, data uintptr) 
(err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + _, _, e1 := syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe() (r int, w int, err error) { - r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) - r = int(r0) - w = int(r1) +func pipe(p *[2]int32) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_pipe_trampoline), uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_pipe_trampoline() + +//go:linkname libc_pipe libc_pipe +//go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) + _, _, e1 := syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_kill_trampoline() + +//go:linkname libc_kill libc_kill +//go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -299,23 +377,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_access_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_access_trampoline() + +//go:linkname libc_access libc_access +//go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall(funcPC(libc_adjtime_trampoline), uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_adjtime_trampoline() + +//go:linkname libc_adjtime libc_adjtime +//go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -324,13 +410,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chdir_trampoline() + +//go:linkname libc_chdir libc_chdir +//go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -339,13 +429,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_chflags_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chflags_trampoline() + +//go:linkname libc_chflags libc_chflags +//go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -354,13 +448,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := 
syscall(funcPC(libc_chmod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chmod_trampoline() + +//go:linkname libc_chmod libc_chmod +//go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -369,13 +467,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_chown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chown_trampoline() + +//go:linkname libc_chown libc_chown +//go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -384,27 +486,35 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_chroot_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_chroot_trampoline() + +//go:linkname libc_chroot libc_chroot +//go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_close_trampoline() + +//go:linkname libc_close libc_close +//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := 
syscall(funcPC(libc_dup_trampoline), uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -412,16 +522,24 @@ func Dup(fd int) (nfd int, err error) { return } +func libc_dup_trampoline() + +//go:linkname libc_dup libc_dup +//go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall(funcPC(libc_dup2_trampoline), uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_dup2_trampoline() + +//go:linkname libc_dup2 libc_dup2 +//go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exchangedata(path1 string, path2 string, options int) (err error) { @@ -435,67 +553,91 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + _, _, e1 := syscall(funcPC(libc_exchangedata_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_exchangedata_trampoline() + +//go:linkname libc_exchangedata libc_exchangedata +//go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fchdir_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchdir_trampoline() + +//go:linkname libc_fchdir libc_fchdir +//go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, 
e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_fchflags_trampoline), uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchflags_trampoline() + +//go:linkname libc_fchflags libc_fchflags +//go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_fchmod_trampoline), uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchmod_trampoline() + +//go:linkname libc_fchmod libc_fchmod +//go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_fchown_trampoline), uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fchown_trampoline() + +//go:linkname libc_fchown libc_fchown +//go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall(funcPC(libc_flock_trampoline), uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_flock_trampoline() + +//go:linkname libc_flock libc_flock +//go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_fpathconf_trampoline), uintptr(fd), uintptr(name), 0) val = int(r0) 
if e1 != 0 { err = errnoErr(e1) @@ -503,99 +645,90 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_fpathconf_trampoline() +//go:linkname libc_fpathconf libc_fpathconf +//go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall(funcPC(libc_fsync_trampoline), uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_fsync_trampoline() + +//go:linkname libc_fsync libc_fsync +//go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_ftruncate_trampoline), uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func 
libc_ftruncate_trampoline() +//go:linkname libc_ftruncate libc_ftruncate +//go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { - r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + r0, _, _ := syscall(funcPC(libc_getdtablesize_trampoline), 0, 0, 0) size = int(r0) return } +func libc_getdtablesize_trampoline() + +//go:linkname libc_getdtablesize libc_getdtablesize +//go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getegid_trampoline), 0, 0, 0) egid = int(r0) return } +func libc_getegid_trampoline() + +//go:linkname libc_getegid libc_getegid +//go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_geteuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_geteuid_trampoline() + +//go:linkname libc_geteuid libc_geteuid +//go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getgid_trampoline), 0, 0, 0) gid = int(r0) return } +func libc_getgid_trampoline() + +//go:linkname libc_getgid libc_getgid +//go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getpgid_trampoline), uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = 
errnoErr(e1) @@ -603,34 +736,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +func libc_getpgid_trampoline() + +//go:linkname libc_getpgid libc_getpgid +//go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpgrp_trampoline), 0, 0, 0) pgrp = int(r0) return } +func libc_getpgrp_trampoline() + +//go:linkname libc_getpgrp libc_getpgrp +//go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getpid_trampoline), 0, 0, 0) pid = int(r0) return } +func libc_getpid_trampoline() + +//go:linkname libc_getpid libc_getpid +//go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getppid_trampoline), 0, 0, 0) ppid = int(r0) return } +func libc_getppid_trampoline() + +//go:linkname libc_getppid libc_getppid +//go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall(funcPC(libc_getpriority_trampoline), uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -638,30 +787,42 @@ func Getpriority(which int, who int) (prio int, err error) { return } +func libc_getpriority_trampoline() + +//go:linkname libc_getpriority libc_getpriority +//go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrlimit_trampoline() + +//go:linkname libc_getrlimit libc_getrlimit +//go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := rawSyscall(funcPC(libc_getrusage_trampoline), uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_getrusage_trampoline() + +//go:linkname libc_getrusage libc_getrusage +//go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_getsid_trampoline), uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -669,26 +830,38 @@ func Getsid(pid int) (sid int, err error) { return } +func libc_getsid_trampoline() + +//go:linkname libc_getsid libc_getsid +//go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) return } +func libc_getuid_trampoline() + +//go:linkname libc_getuid libc_getuid +//go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - 
r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := rawSyscall(funcPC(libc_issetugid_trampoline), 0, 0, 0) tainted = bool(r0 != 0) return } +func libc_issetugid_trampoline() + +//go:linkname libc_issetugid libc_issetugid +//go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall(funcPC(libc_kqueue_trampoline), 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -696,6 +869,10 @@ func Kqueue() (fd int, err error) { return } +func libc_kqueue_trampoline() + +//go:linkname libc_kqueue libc_kqueue +//go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -704,13 +881,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall(funcPC(libc_lchown_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_lchown_trampoline() + +//go:linkname libc_lchown libc_lchown +//go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -724,38 +905,31 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_link_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_link_trampoline() + +//go:linkname libc_link libc_link +//go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall(funcPC(libc_listen_trampoline), uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_listen_trampoline() +//go:linkname libc_listen libc_listen +//go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -764,13 +938,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkdir_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkdir_trampoline() + +//go:linkname libc_mkdir libc_mkdir +//go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -779,13 +957,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall(funcPC(libc_mkfifo_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mkfifo_trampoline() + +//go:linkname libc_mkfifo libc_mkfifo +//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -794,13 +976,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall(funcPC(libc_mknod_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mknod_trampoline() + +//go:linkname libc_mknod libc_mknod +//go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -810,23 +996,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_mlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlock_trampoline() + +//go:linkname libc_mlock libc_mlock +//go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall(funcPC(libc_mlockall_trampoline), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_mlockall_trampoline() + +//go:linkname libc_mlockall libc_mlockall +//go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -836,13 +1030,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := syscall(funcPC(libc_mprotect_trampoline), uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } 
return } +func libc_mprotect_trampoline() + +//go:linkname libc_mprotect libc_mprotect +//go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -852,23 +1050,31 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall(funcPC(libc_munlock_trampoline), uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlock_trampoline() + +//go:linkname libc_munlock libc_munlock +//go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_munlockall_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munlockall_trampoline() + +//go:linkname libc_munlockall libc_munlockall +//go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -877,7 +1083,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall(funcPC(libc_open_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -885,6 +1091,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +func libc_open_trampoline() + +//go:linkname libc_open libc_open +//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ 
-893,7 +1103,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall(funcPC(libc_pathconf_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -901,6 +1111,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +func libc_pathconf_trampoline() + +//go:linkname libc_pathconf libc_pathconf +//go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { @@ -910,7 +1124,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_pread_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -918,6 +1132,10 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pread_trampoline() + +//go:linkname libc_pread libc_pread +//go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -927,7 +1145,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := syscall6(funcPC(libc_pwrite_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -935,6 +1153,10 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +func libc_pwrite_trampoline() + 
+//go:linkname libc_pwrite libc_pwrite +//go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -944,7 +1166,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_read_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -952,6 +1174,10 @@ func read(fd int, p []byte) (n int, err error) { return } +func libc_read_trampoline() + +//go:linkname libc_read libc_read +//go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -966,7 +1192,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall(funcPC(libc_readlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -974,6 +1200,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +func libc_readlink_trampoline() + +//go:linkname libc_readlink libc_readlink +//go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -987,13 +1217,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_rename_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } 
+func libc_rename_trampoline() + +//go:linkname libc_rename libc_rename +//go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1002,13 +1236,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_revoke_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_revoke_trampoline() + +//go:linkname libc_revoke libc_revoke +//go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1017,17 +1255,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_rmdir_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_rmdir_trampoline() + +//go:linkname libc_rmdir libc_rmdir +//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := syscallX(funcPC(libc_lseek_trampoline), uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1035,46 +1277,66 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +func libc_lseek_trampoline() + +//go:linkname libc_lseek libc_lseek +//go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 
:= Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + _, _, e1 := syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_select_trampoline() + +//go:linkname libc_select libc_select +//go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall(funcPC(libc_setegid_trampoline), uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setegid_trampoline() + +//go:linkname libc_setegid libc_setegid +//go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_seteuid_trampoline), uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_seteuid_trampoline() + +//go:linkname libc_seteuid libc_seteuid +//go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setgid_trampoline), uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setgid_trampoline() + +//go:linkname libc_setgid libc_setgid +//go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1083,77 +1345,105 @@ func Setlogin(name string) (err error) { if err != 
nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_setlogin_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setlogin_trampoline() + +//go:linkname libc_setlogin libc_setlogin +//go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setpgid_trampoline), uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpgid_trampoline() + +//go:linkname libc_setpgid libc_setpgid +//go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall(funcPC(libc_setpriority_trampoline), uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setpriority_trampoline() + +//go:linkname libc_setpriority libc_setpriority +//go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) + _, _, e1 := syscall(funcPC(libc_setprivexec_trampoline), uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setprivexec_trampoline() + +//go:linkname libc_setprivexec libc_setprivexec +//go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), 
uintptr(egid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setregid_trampoline), uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setregid_trampoline() + +//go:linkname libc_setregid libc_setregid +//go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := rawSyscall(funcPC(libc_setreuid_trampoline), uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setreuid_trampoline() + +//go:linkname libc_setreuid libc_setreuid +//go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := rawSyscall(funcPC(libc_setrlimit_trampoline), uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_setrlimit_trampoline() + +//go:linkname libc_setrlimit libc_setrlimit +//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := rawSyscall(funcPC(libc_setsid_trampoline), 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1161,56 +1451,38 @@ func Setsid() (pid int, err error) { return } +func libc_setsid_trampoline() + +//go:linkname libc_setsid libc_setsid +//go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := 
rawSyscall(funcPC(libc_settimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_settimeofday_trampoline() + +//go:linkname libc_settimeofday libc_settimeofday +//go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := rawSyscall(funcPC(libc_setuid_trampoline), uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statfs(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +func libc_setuid_trampoline() +//go:linkname libc_setuid libc_setuid +//go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1224,23 +1496,31 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall(funcPC(libc_symlink_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_symlink_trampoline() + +//go:linkname libc_symlink libc_symlink 
+//go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall(funcPC(libc_sync_trampoline), 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_sync_trampoline() + +//go:linkname libc_sync libc_sync +//go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1249,21 +1529,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_truncate_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_truncate_trampoline() + +//go:linkname libc_truncate libc_truncate +//go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall(funcPC(libc_umask_trampoline), uintptr(newmask), 0, 0) oldmask = int(r0) return } +func libc_umask_trampoline() + +//go:linkname libc_umask libc_umask +//go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Undelete(path string) (err error) { @@ -1272,13 +1560,17 @@ func Undelete(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_undelete_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_undelete_trampoline() + +//go:linkname libc_undelete libc_undelete +//go:cgo_import_dynamic libc_undelete undelete 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1287,13 +1579,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall(funcPC(libc_unlink_trampoline), uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unlink_trampoline() + +//go:linkname libc_unlink libc_unlink +//go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1302,13 +1598,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall(funcPC(libc_unmount_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_unmount_trampoline() + +//go:linkname libc_unmount libc_unmount +//go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1318,7 +1618,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall(funcPC(libc_write_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1326,10 +1626,35 @@ func write(fd int, p []byte) (n int, err error) { return } +func libc_write_trampoline() + +//go:linkname libc_write libc_write +//go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (cnt uintptr, err error) { + var _p0 unsafe.Pointer + if 
len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscallX(funcPC(libc_writev_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + cnt = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_writev_trampoline() + +//go:linkname libc_writev libc_writev +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := syscall6X(funcPC(libc_mmap_trampoline), uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1337,46 +1662,302 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +func libc_mmap_trampoline() + +//go:linkname libc_mmap libc_mmap +//go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall(funcPC(libc_munmap_trampoline), uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +func libc_munmap_trampoline() + +//go:linkname libc_munmap libc_munmap +//go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func fork() (pid int, err error) { + r0, _, e1 := rawSyscall(funcPC(libc_fork_trampoline), 0, 0, 0) + pid = int(r0) if e1 != 0 { err = 
errnoErr(e1) } return } +func libc_fork_trampoline() + +//go:linkname libc_fork libc_fork +//go:cgo_import_dynamic libc_fork fork "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) +func ioctl(fd int, req int, arg int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ioctl_trampoline() + +//go:linkname libc_ioctl libc_ioctl +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func execve(path *byte, argv **byte, envp **byte) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_execve_trampoline), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(argv)), uintptr(unsafe.Pointer(envp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_execve_trampoline() + +//go:linkname libc_execve libc_execve +//go:cgo_import_dynamic libc_execve execve "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exit(res int) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_exit_trampoline), uintptr(res), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_exit_trampoline() + +//go:linkname libc_exit libc_exit +//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new 
*byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (val int, err error) { + r0, _, e1 := syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unlinkat(fd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_unlinkat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_unlinkat_trampoline() + +//go:linkname libc_unlinkat libc_unlinkat +//go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(fd int, path string, flags int, perm uint32) (fdret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall6(funcPC(libc_openat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(perm), 0, 0) + fdret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_openat_trampoline() + +//go:linkname libc_openat libc_openat +//go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstat_trampoline() + +//go:linkname libc_fstat libc_fstat +//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall(funcPC(libc_fstatfs_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatfs_trampoline() + +//go:linkname libc_fstatfs libc_fstatfs +//go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_lstat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { 
err = errnoErr(e1) } return } +func libc_lstat_trampoline() + +//go:linkname libc_lstat libc_lstat +//go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dirp uintptr, entry uintptr, result uintptr) (res int) { + r0, _, _ := syscall(funcPC(libc_readdir_r_trampoline), uintptr(dirp), uintptr(entry), uintptr(result)) + res = int(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_stat_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } + +func libc_stat_trampoline() + +//go:linkname libc_stat libc_stat +//go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall(funcPC(libc_statfs_trampoline), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_statfs_trampoline() + +//go:linkname libc_statfs libc_statfs +//go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall6(funcPC(libc_fstatat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fstatat_trampoline() + +//go:linkname libc_fstatat libc_fstatat +//go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" diff --git a/src/syscall/zsyscall_darwin_arm64.s b/src/syscall/zsyscall_darwin_arm64.s new file mode 100644 index 0000000000000..7ef24e534d066 --- /dev/null +++ b/src/syscall/zsyscall_darwin_arm64.s @@ -0,0 +1,253 @@ +// go run mkasm_darwin.go arm64 +// Code generated by the command above; DO NOT EDIT. +#include "textflag.h" +TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getattrlist(SB) +TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getfsstat64(SB) +TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendfile(SB) +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_getgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +TEXT ·libc_setgroups_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +TEXT ·libc_wait4_trampoline(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +TEXT ·libc_accept_trampoline(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +TEXT ·libc_bind_trampoline(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +TEXT ·libc_connect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +TEXT ·libc_socket_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +TEXT ·libc_getsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +TEXT ·libc_setsockopt_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +TEXT ·libc_getpeername_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +TEXT ·libc_getsockname_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +TEXT ·libc_shutdown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) 
+TEXT ·libc_socketpair_trampoline(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +TEXT ·libc_recvfrom_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +TEXT ·libc_sendto_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +TEXT ·libc_recvmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) +TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pipe(SB) +TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +TEXT ·libc_adjtime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +TEXT ·libc_chdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +TEXT ·libc_chflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +TEXT ·libc_chmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +TEXT ·libc_dup2_trampoline(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +TEXT ·libc_exchangedata_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exchangedata(SB) +TEXT ·libc_fchdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +TEXT ·libc_fchflags_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +TEXT ·libc_fchmod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +TEXT ·libc_fchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +TEXT ·libc_flock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +TEXT ·libc_fpathconf_trampoline(SB),NOSPLIT,$0-0 + JMP 
libc_fpathconf(SB) +TEXT ·libc_fsync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +TEXT ·libc_ftruncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +TEXT ·libc_getdtablesize_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getdtablesize(SB) +TEXT ·libc_getegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +TEXT ·libc_geteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +TEXT ·libc_getgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +TEXT ·libc_getpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +TEXT ·libc_getpgrp_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +TEXT ·libc_getpid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +TEXT ·libc_getppid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +TEXT ·libc_getpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +TEXT ·libc_getrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +TEXT ·libc_getrusage_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +TEXT ·libc_getsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +TEXT ·libc_getuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +TEXT ·libc_issetugid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +TEXT ·libc_kqueue_trampoline(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +TEXT ·libc_lchown_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +TEXT ·libc_link_trampoline(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +TEXT ·libc_listen_trampoline(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +TEXT ·libc_mkdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +TEXT ·libc_mkfifo_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +TEXT ·libc_mknod_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +TEXT ·libc_mlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +TEXT ·libc_mlockall_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +TEXT ·libc_mprotect_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 + JMP 
libc_munlockall(SB) +TEXT ·libc_open_trampoline(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +TEXT ·libc_pathconf_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +TEXT ·libc_pread_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +TEXT ·libc_pwrite_trampoline(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +TEXT ·libc_read_trampoline(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +TEXT ·libc_readlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +TEXT ·libc_rename_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +TEXT ·libc_revoke_trampoline(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +TEXT ·libc_rmdir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +TEXT ·libc_lseek_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +TEXT ·libc_select_trampoline(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +TEXT ·libc_setegid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +TEXT ·libc_seteuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +TEXT ·libc_setgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +TEXT ·libc_setlogin_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +TEXT ·libc_setpgid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +TEXT ·libc_setpriority_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +TEXT ·libc_setprivexec_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setprivexec(SB) +TEXT ·libc_setregid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +TEXT ·libc_setreuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +TEXT ·libc_setrlimit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setrlimit(SB) +TEXT ·libc_setsid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +TEXT ·libc_settimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +TEXT ·libc_setuid_trampoline(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +TEXT ·libc_symlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +TEXT ·libc_sync_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +TEXT ·libc_truncate_trampoline(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +TEXT ·libc_umask_trampoline(SB),NOSPLIT,$0-0 + JMP 
libc_umask(SB) +TEXT ·libc_undelete_trampoline(SB),NOSPLIT,$0-0 + JMP libc_undelete(SB) +TEXT ·libc_unlink_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +TEXT ·libc_unmount_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +TEXT ·libc_write_trampoline(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +TEXT ·libc_writev_trampoline(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +TEXT ·libc_fork_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fork(SB) +TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +TEXT ·libc_execve_trampoline(SB),NOSPLIT,$0-0 + JMP libc_execve(SB) +TEXT ·libc_exit_trampoline(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +TEXT ·libc_unlinkat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +TEXT ·libc_openat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_fstat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +TEXT ·libc_fstatfs_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +TEXT ·libc_lstat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) +TEXT ·libc_stat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +TEXT ·libc_statfs_trampoline(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +TEXT ·libc_fstatat_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) diff --git a/src/syscall/zsyscall_freebsd_386.go b/src/syscall/zsyscall_freebsd_386.go index 451da4d6fe3c9..8f4234c7e9d20 100644 --- a/src/syscall/zsyscall_freebsd_386.go +++ b/src/syscall/zsyscall_freebsd_386.go @@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat 
*Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -473,7 +473,47 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -483,6 +523,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -503,7 +553,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -520,6 +570,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -721,7 +788,7 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -766,7 +833,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -781,6 +848,21 @@ func Mknod(path string, mode uint32, dev 
int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1093,7 +1175,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1108,7 +1190,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1123,6 +1205,21 @@ func Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/syscall/zsyscall_freebsd_amd64.go 
b/src/syscall/zsyscall_freebsd_amd64.go index 0312ca347cecb..baa7d68a7d2e6 100644 --- a/src/syscall/zsyscall_freebsd_amd64.go +++ b/src/syscall/zsyscall_freebsd_amd64.go @@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -473,7 +473,47 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) @@ -483,6 +523,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -503,7 +553,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -520,6 +570,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -721,7 +788,7 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -766,7 +833,7 @@ func Mkfifo(path string, mode uint32) 
(err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -781,6 +848,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1093,7 +1175,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1108,7 +1190,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1123,6 +1205,21 @@ func Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/syscall/zsyscall_freebsd_arm.go b/src/syscall/zsyscall_freebsd_arm.go index fcb07337749b6..16e4bc5414438 100644 --- a/src/syscall/zsyscall_freebsd_arm.go +++ b/src/syscall/zsyscall_freebsd_arm.go @@ -463,7 +463,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstat(fd int, stat *Stat_t) (err error) { +func fstat(fd int, stat *stat_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -473,7 +473,47 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fstatfs(fd int, stat *Statfs_t) (err error) { +func fstat_freebsd12(fd int, stat *Stat_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -483,6 +523,16 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(_SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -503,7 +553,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -520,6 +570,23 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getdirentries_freebsd12(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -721,7 +788,7 @@ func Listen(s 
int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { +func lstat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -766,7 +833,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { +func mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -781,6 +848,21 @@ func Mknod(path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(_SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1093,7 +1175,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { +func stat(path string, stat *stat_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1108,7 +1190,7 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Statfs(path string, stat *Statfs_t) (err error) { +func statfs(path string, stat *statfs_freebsd11_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1123,6 +1205,21 @@ func 
Statfs(path string, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func statfs_freebsd12(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(_SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/syscall/zsyscall_linux_386.go b/src/syscall/zsyscall_linux_386.go index 62827f16dc346..0882494c470dd 100644 --- a/src/syscall/zsyscall_linux_386.go +++ b/src/syscall/zsyscall_linux_386.go @@ -1276,36 +1276,6 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -1422,21 +1392,6 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32), uintptr(flags)) if e1 != 0 { diff --git a/src/syscall/zsyscall_linux_amd64.go b/src/syscall/zsyscall_linux_amd64.go index b6638269bebb8..9f2046bf93fee 100644 --- a/src/syscall/zsyscall_linux_amd64.go +++ b/src/syscall/zsyscall_linux_amd64.go @@ -1261,21 +1261,6 @@ func Iopl(level int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, n int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) if e1 != 0 { @@ -1286,21 +1271,6 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/src/syscall/zsyscall_linux_arm.go b/src/syscall/zsyscall_linux_arm.go index bb20d6e9463ea..3d099aa16df65 
100644 --- a/src/syscall/zsyscall_linux_arm.go +++ b/src/syscall/zsyscall_linux_arm.go @@ -1415,21 +1415,6 @@ func InotifyInit() (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LCHOWN32, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Listen(s int, n int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) if e1 != 0 { @@ -1440,21 +1425,6 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lstat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1558,21 +1528,6 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := 
RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { diff --git a/src/syscall/zsyscall_linux_ppc64.go b/src/syscall/zsyscall_linux_ppc64.go index 20c78ef2d285c..74402250ebdb7 100644 --- a/src/syscall/zsyscall_linux_ppc64.go +++ b/src/syscall/zsyscall_linux_ppc64.go @@ -1504,16 +1504,6 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1764,3 +1754,13 @@ func pipe2(p *[2]_C_int, flags int) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/syscall/zsyscall_linux_ppc64le.go b/src/syscall/zsyscall_linux_ppc64le.go index 592934399374f..3b6c283c4b62b 100644 --- a/src/syscall/zsyscall_linux_ppc64le.go +++ b/src/syscall/zsyscall_linux_ppc64le.go @@ -1504,16 +1504,6 @@ func Statfs(path string, buf *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { - _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ 
-1764,3 +1754,13 @@ func pipe2(p *[2]_C_int, flags int) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off), uintptr(n), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/syscall/zsyscall_solaris_amd64.go b/src/syscall/zsyscall_solaris_amd64.go index ecd37902e0b1f..446ebfc503d8e 100644 --- a/src/syscall/zsyscall_solaris_amd64.go +++ b/src/syscall/zsyscall_solaris_amd64.go @@ -1,4 +1,4 @@ -// mksyscall_solaris.pl -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go +// mksyscall_libc.pl -solaris -tags solaris,amd64 syscall_solaris.go syscall_solaris_amd64.go // Code generated by the command above; DO NOT EDIT. // +build solaris,amd64 @@ -263,6 +263,8 @@ var ( libc_utimensat libcFunc ) +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getcwd(buf []byte) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -276,6 +278,8 @@ func Getcwd(buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_getgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) @@ -285,6 +289,8 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_setgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { @@ -293,6 +299,8 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) @@ -302,6 +310,8 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_accept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) @@ -311,6 +321,8 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -320,6 +332,8 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -333,6 +347,8 @@ func Access(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Adjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { @@ -341,6 +357,8 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -354,6 +372,8 @@ func Chdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chmod(path string, mode uint32) (err error) { var _p0 *byte _p0, 
err = BytePtrFromString(path) @@ -367,6 +387,8 @@ func Chmod(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -380,6 +402,8 @@ func Chown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Chroot(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -393,6 +417,8 @@ func Chroot(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Close)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -401,6 +427,8 @@ func Close(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Dup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) @@ -410,6 +438,8 @@ func Dup(fd int) (nfd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -418,6 +448,8 @@ func Fchdir(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { @@ -426,6 +458,8 @@ func Fchmod(fd int, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { @@ -434,6 +468,8 @@ func 
Fchown(fd int, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) @@ -443,6 +479,8 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { @@ -451,6 +489,8 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 *byte if len(buf) > 0 { @@ -464,36 +504,48 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getgid() (gid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getgid)), 0, 0, 0, 0, 0, 0, 0) gid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpid() (pid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getpid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Geteuid() (euid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Geteuid)), 0, 0, 0, 0, 0, 0, 0) euid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getegid() (egid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Getegid)), 0, 0, 0, 0, 0, 0, 0) egid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getppid() (ppid int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Getppid)), 0, 0, 0, 0, 0, 0, 0) ppid = int(r0) 
return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Getpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) @@ -503,6 +555,8 @@ func Getpriority(which int, who int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -511,6 +565,8 @@ func Getrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Gettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -519,12 +575,16 @@ func Gettimeofday(tv *Timeval) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Getuid)), 0, 0, 0, 0, 0, 0, 0) uid = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Kill(pid int, signum Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Kill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { @@ -533,6 +593,8 @@ func Kill(pid int, signum Signal) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -546,6 +608,8 @@ func Lchown(path string, uid int, gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Link(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +628,8 @@ func Link(path string, 
link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { @@ -572,6 +638,8 @@ func Listen(s int, backlog int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -585,6 +653,8 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -598,6 +668,8 @@ func Mkdir(path string, mode uint32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -611,6 +683,8 @@ func Mknod(path string, mode uint32, dev int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Nanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { @@ -619,6 +693,8 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -633,6 +709,8 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -647,6 +725,8 @@ func Pathconf(path 
string, name int) (val int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -660,6 +740,8 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -673,6 +755,8 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func read(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -686,6 +770,8 @@ func read(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Readlink(path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -704,6 +790,8 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) @@ -722,6 +810,8 @@ func Rename(from string, to string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -735,6 +825,8 @@ func Rmdir(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) @@ -744,6 +836,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(outfd 
int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_sendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -753,6 +847,8 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -761,6 +857,8 @@ func Setegid(egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Seteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -769,6 +867,8 @@ func Seteuid(euid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -777,6 +877,8 @@ func Setgid(gid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { @@ -785,6 +887,8 @@ func Setpgid(pid int, pgid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Setpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { @@ -793,6 +897,8 @@ func Setpriority(which int, who int, prio int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setregid(rgid int, egid int) (err error) { _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { @@ -801,6 +907,8 @@ func Setregid(rgid int, egid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { @@ -809,6 +917,8 @@ func Setreuid(ruid int, euid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { @@ -817,6 +927,8 @@ func Setrlimit(which int, lim *Rlimit) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) @@ -826,6 +938,8 @@ func Setsid() (pid int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_Setuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { @@ -834,6 +948,8 @@ func Setuid(uid int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_shutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { @@ -842,6 +958,8 @@ func Shutdown(s int, how int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -855,6 +973,8 @@ func Stat(path string, stat *Stat_t) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -873,6 +993,8 @@ func Symlink(path string, link string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Sync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { @@ -881,6 +1003,8 @@ func Sync() (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -894,6 +1018,8 @@ func Truncate(path string, length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Fsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -902,6 +1028,8 @@ func Fsync(fd int) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_Ftruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -910,12 +1038,16 @@ func Ftruncate(fd int, length int64) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Umask(newmask int) (oldmask int) { r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&libc_Umask)), 1, uintptr(newmask), 0, 0, 0, 0, 0) oldmask = int(r0) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -929,6 +1061,8 @@ func Unlink(path string) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimes(path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -942,6 +1076,8 @@ func utimes(path string, times *[2]Timeval) 
(err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -950,6 +1086,8 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { @@ -958,6 +1096,8 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_mmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) @@ -967,6 +1107,8 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_munmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { @@ -975,6 +1117,8 @@ func munmap(addr uintptr, length uintptr) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 *byte if len(buf) > 0 { @@ -987,6 +1131,8 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socket(domain int, typ int, proto int) (fd int, err 
error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) @@ -996,6 +1142,8 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc___xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { @@ -1004,6 +1152,8 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func write(fd int, p []byte) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1017,6 +1167,8 @@ func write(fd int, p []byte) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { @@ -1025,6 +1177,8 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&libc_getpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -1033,6 +1187,8 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&libc_getsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { @@ -1041,6 +1197,8 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_setsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { @@ -1049,6 +1207,8 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var _p0 *byte if len(p) > 0 { @@ -1062,6 +1222,8 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc___xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) @@ -1071,6 +1233,8 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getexecname() (path unsafe.Pointer, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_getexecname)), 0, 0, 0, 0, 0, 0, 0) path = unsafe.Pointer(r0) @@ -1080,6 +1244,8 @@ func getexecname() (path unsafe.Pointer, err error) { return } +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/src/syscall/ztypes_aix_ppc64.go b/src/syscall/ztypes_aix_ppc64.go 
new file mode 100644 index 0000000000000..314266ea79776 --- /dev/null +++ b/src/syscall/ztypes_aix_ppc64.go @@ -0,0 +1,272 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_aix.go | go run mkpost.go + +package syscall + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 + PathMax = 0x3ff +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Timeval32 struct { + Sec int32 + Usec int32 +} + +type Timezone struct { + Minuteswest int32 + Dsttime int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Pid_t int32 + +type _Gid_t uint32 + +type Flock_t struct { + Type int16 + Whence int16 + Sysid uint32 + Pid int32 + Vfs int32 + Start int64 + Len int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink int16 + Flag uint16 + Uid uint32 + Gid uint32 + Rdev uint64 + Ssize int32 + Pad_cgo_0 [4]byte + Atim StTimespec_t + Mtim StTimespec_t + Ctim StTimespec_t + Blksize int64 + Blocks int64 + Vfstype int32 + Vfs uint32 + Type uint32 + Gen uint32 + Reserved [9]uint32 + Padto_ll uint32 + Size int64 +} + +type Statfs_t struct { + Version int32 + Type int32 + Bsize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid64_t + Vfstype int32 + Pad_cgo_0 [4]byte + Fsize uint64 + Vfsnumber int32 + Vfsoff int32 + Vfslen int32 + Vfsvers int32 + Fname [32]uint8 + Fpack [32]uint8 + Name_max int32 + Pad_cgo_1 [4]byte +} + +type Fsid64_t struct { + Val [2]uint64 +} + +type StTimespec_t struct { + Sec int64 + 
Nsec int32 + Pad_cgo_0 [4]byte +} + +type Dirent struct { + Offset uint64 + Ino uint64 + Reclen uint16 + Namlen uint16 + Name [256]uint8 + Pad_cgo_0 [4]byte +} + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [1023]uint8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]uint8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [1012]uint8 +} + +type _Socklen uint32 + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *Iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x404 + SizeofSockaddrUnix = 0x401 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +const ( + SizeofIfMsghdr = 0x10 +) + +type IfMsgHdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Addrlen uint8 + Pad_cgo_0 [1]byte +} + +type Utsname struct { + Sysname [32]uint8 + Nodename [32]uint8 + Release [32]uint8 + Version [32]uint8 + Machine [32]uint8 +} + +const ( + _AT_FDCWD = -0x2 + _AT_REMOVEDIR = 0x1 + 
_AT_SYMLINK_NOFOLLOW = 0x1 +) diff --git a/src/syscall/ztypes_dragonfly_amd64.go b/src/syscall/ztypes_dragonfly_amd64.go index 1cb8608228543..e9e811f77643b 100644 --- a/src/syscall/ztypes_dragonfly_amd64.go +++ b/src/syscall/ztypes_dragonfly_amd64.go @@ -71,6 +71,8 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 ) type Stat_t struct { diff --git a/src/syscall/ztypes_freebsd_386.go b/src/syscall/ztypes_freebsd_386.go index c9c58f9fe7af4..27d82dea1028b 100644 --- a/src/syscall/ztypes_freebsd_386.go +++ b/src/syscall/ztypes_freebsd_386.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go | go run mkpost.go // +build 386,freebsd @@ -71,9 +71,42 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 +) + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 ) type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + Padding0 int16 + Uid uint32 + Gid uint32 + Padding1 int32 + Rdev uint64 + Atim_ext int32 + Atimespec Timespec + Mtim_ext int32 + Mtimespec Timespec + Ctim_ext int32 + Ctimespec Timespec + Btim_ext int32 + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { Dev uint32 Ino uint32 Mode uint16 @@ -86,7 +119,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize uint32 + Blksize int32 Flags uint32 Gen uint32 Lspare int32 @@ -95,6 +128,31 @@ type Stat_t struct { } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + 
Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -129,6 +187,17 @@ type Flock_t struct { } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -488,7 +557,9 @@ type BpfZbufHeader struct { } const ( - _AT_FDCWD = -0x64 + _AT_FDCWD = -0x64 + _AT_SYMLINK_FOLLOW = 0x400 + _AT_SYMLINK_NOFOLLOW = 0x200 ) type Termios struct { diff --git a/src/syscall/ztypes_freebsd_amd64.go b/src/syscall/ztypes_freebsd_amd64.go index 847527cdda28a..8abfbb45d692a 100644 --- a/src/syscall/ztypes_freebsd_amd64.go +++ b/src/syscall/ztypes_freebsd_amd64.go @@ -1,5 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go | go run mkpost.go // +build amd64,freebsd @@ -71,9 +71,38 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 +) + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 ) type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + Padding0 int16 + Uid uint32 + Gid uint32 + Padding1 int32 + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { Dev uint32 Ino uint32 Mode uint16 @@ -86,7 +115,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize uint32 + Blksize int32 Flags uint32 Gen uint32 Lspare int32 @@ -94,6 +123,31 @@ type Stat_t struct { } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads 
uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -129,6 +183,17 @@ type Flock_t struct { } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -491,7 +556,9 @@ type BpfZbufHeader struct { } const ( - _AT_FDCWD = -0x64 + _AT_FDCWD = -0x64 + _AT_SYMLINK_FOLLOW = 0x400 + _AT_SYMLINK_NOFOLLOW = 0x200 ) type Termios struct { diff --git a/src/syscall/ztypes_freebsd_arm.go b/src/syscall/ztypes_freebsd_arm.go index 83108dd1c43c4..ff552a6a63f9d 100644 --- a/src/syscall/ztypes_freebsd_arm.go +++ b/src/syscall/ztypes_freebsd_arm.go @@ -1,4 +1,4 @@ -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -fsigned-char types_freebsd.go // +build arm,freebsd @@ -73,9 +73,38 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 +) + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 ) type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + Padding0 int16 + Uid uint32 + Gid uint32 + Padding1 int32 + Rdev uint64 + Atimespec Timespec + Mtimespec Timespec + Ctimespec Timespec + Birthtimespec Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type stat_freebsd11_t struct { Dev uint32 Ino uint32 Mode uint16 @@ -88,7 +117,7 @@ type Stat_t struct { Ctimespec Timespec Size int64 Blocks int64 - Blksize uint32 + Blksize int32 Flags uint32 Gen uint32 Lspare int32 @@ -96,6 +125,31 @@ type Stat_t struct { } type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]int8 + Mntfromname [1024]int8 + Mntonname [1024]int8 +} + +type statfs_freebsd11_t struct { Version uint32 Type uint32 Flags uint64 @@ -131,6 +185,17 @@ type Flock_t struct { } type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type dirent_freebsd11 struct { Fileno uint32 Reclen uint16 Type uint8 @@ -491,7 +556,9 @@ type BpfZbufHeader struct { } const ( - _AT_FDCWD = -0x64 + _AT_FDCWD = -0x64 + _AT_SYMLINK_FOLLOW = 0x400 + _AT_SYMLINK_NOFOLLOW = 0x200 ) type Termios struct { diff --git a/src/syscall/ztypes_openbsd_386.go b/src/syscall/ztypes_openbsd_386.go index 04d53966f4609..c2a03ebdd8079 100644 --- a/src/syscall/ztypes_openbsd_386.go +++ b/src/syscall/ztypes_openbsd_386.go @@ -71,6 +71,8 @@ const ( 
S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 ) type Stat_t struct { diff --git a/src/syscall/ztypes_openbsd_amd64.go b/src/syscall/ztypes_openbsd_amd64.go index aad787a3e497b..1a659ba2feacb 100644 --- a/src/syscall/ztypes_openbsd_amd64.go +++ b/src/syscall/ztypes_openbsd_amd64.go @@ -71,6 +71,8 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 ) type Stat_t struct { diff --git a/src/syscall/ztypes_openbsd_arm.go b/src/syscall/ztypes_openbsd_arm.go index 4383b68eae230..acadf4b48ca1e 100644 --- a/src/syscall/ztypes_openbsd_arm.go +++ b/src/syscall/ztypes_openbsd_arm.go @@ -1,7 +1,5 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_openbsd.go - -// +build arm,openbsd +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -fsigned-char types_openbsd.go package syscall @@ -21,13 +19,15 @@ type ( ) type Timespec struct { - Sec int64 - Nsec int32 + Sec int64 + Nsec int32 + Pad_cgo_0 [4]byte } type Timeval struct { - Sec int64 - Usec int32 + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte } type Rusage struct { @@ -71,6 +71,8 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 ) type Stat_t struct { @@ -89,6 +91,7 @@ type Stat_t struct { Blksize int32 Flags uint32 Gen uint32 + Pad_cgo_0 [4]byte X__st_birthtim Timespec } @@ -96,6 +99,7 @@ type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 + Pad_cgo_0 [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 @@ -110,11 +114,11 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]uint8 - F_mntonname [90]uint8 - F_mntfromname [90]uint8 - F_mntfromspec [90]uint8 - Pad_cgo_0 [2]byte + F_fstypename [16]int8 + F_mntonname [90]int8 + F_mntfromname [90]int8 + F_mntfromspec [90]int8 + Pad_cgo_1 [2]byte Mount_info [160]byte } @@ -133,7 +137,7 @@ type Dirent struct { Type uint8 Namlen uint8 X__d_padding [4]uint8 - Name [256]uint8 + Name [256]int8 
} type Fsid struct { @@ -264,12 +268,14 @@ const ( ) type Kevent_t struct { - Ident uint32 - Filter int16 - Flags uint16 - Fflags uint32 - Data int64 - Udata *byte + Ident uint32 + Filter int16 + Flags uint16 + Fflags uint32 + Pad_cgo_0 [4]byte + Data int64 + Udata *byte + Pad_cgo_1 [4]byte } type FdSet struct { @@ -277,8 +283,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0x98 - SizeofIfData = 0x80 + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -307,7 +313,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -319,8 +325,10 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 + Pad_cgo_0 [4]byte Lastchange Timeval } @@ -345,7 +353,7 @@ type IfAnnounceMsghdr struct { Hdrlen uint16 Index uint16 What uint16 - Name [16]uint8 + Name [16]int8 } type RtMsghdr struct { diff --git a/src/syscall/ztypes_solaris_amd64.go b/src/syscall/ztypes_solaris_amd64.go index 12307abfaa6bf..f846666fff2cb 100644 --- a/src/syscall/ztypes_solaris_amd64.go +++ b/src/syscall/ztypes_solaris_amd64.go @@ -60,6 +60,8 @@ type Rlimit struct { Max uint64 } +type _Pid_t int32 + type _Gid_t uint32 const ( @@ -77,6 +79,8 @@ const ( S_IRUSR = 0x100 S_IWUSR = 0x80 S_IXUSR = 0x40 + S_IRWXG = 0x38 + S_IRWXO = 0x7 ) type Stat_t struct { diff --git a/src/testdata/Isaac.Newton-Opticks.txt b/src/testdata/Isaac.Newton-Opticks.txt new file mode 100644 index 0000000000000..15bb4c54d0b75 --- /dev/null +++ b/src/testdata/Isaac.Newton-Opticks.txt @@ -0,0 +1,9286 @@ +Produced by Suzanne Lybarger, steve harris, Josephine +Paolucci and the Online Distributed Proofreading Team at +http://www.pgdp.net. + + + + + + +OPTICKS: + +OR, A + +TREATISE + +OF THE + +_Reflections_, _Refractions_, +_Inflections_ and _Colours_ + +OF + +LIGHT. + +_The_ FOURTH EDITION, _corrected_. 
+ +By Sir _ISAAC NEWTON_, Knt. + +LONDON: + +Printed for WILLIAM INNYS at the West-End of St. _Paul's_. MDCCXXX. + +TITLE PAGE OF THE 1730 EDITION + + + + +SIR ISAAC NEWTON'S ADVERTISEMENTS + + + + +Advertisement I + + +_Part of the ensuing Discourse about Light was written at the Desire of +some Gentlemen of the_ Royal-Society, _in the Year 1675, and then sent +to their Secretary, and read at their Meetings, and the rest was added +about twelve Years after to complete the Theory; except the third Book, +and the last Proposition of the Second, which were since put together +out of scatter'd Papers. To avoid being engaged in Disputes about these +Matters, I have hitherto delayed the printing, and should still have +delayed it, had not the Importunity of Friends prevailed upon me. If any +other Papers writ on this Subject are got out of my Hands they are +imperfect, and were perhaps written before I had tried all the +Experiments here set down, and fully satisfied my self about the Laws of +Refractions and Composition of Colours. I have here publish'd what I +think proper to come abroad, wishing that it may not be translated into +another Language without my Consent._ + +_The Crowns of Colours, which sometimes appear about the Sun and Moon, I +have endeavoured to give an Account of; but for want of sufficient +Observations leave that Matter to be farther examined. The Subject of +the Third Book I have also left imperfect, not having tried all the +Experiments which I intended when I was about these Matters, nor +repeated some of those which I did try, until I had satisfied my self +about all their Circumstances. 
To communicate what I have tried, and +leave the rest to others for farther Enquiry, is all my Design in +publishing these Papers._ + +_In a Letter written to Mr._ Leibnitz _in the year 1679, and published +by Dr._ Wallis, _I mention'd a Method by which I had found some general +Theorems about squaring Curvilinear Figures, or comparing them with the +Conic Sections, or other the simplest Figures with which they may be +compared. And some Years ago I lent out a Manuscript containing such +Theorems, and having since met with some Things copied out of it, I have +on this Occasion made it publick, prefixing to it an_ Introduction, _and +subjoining a_ Scholium _concerning that Method. And I have joined with +it another small Tract concerning the Curvilinear Figures of the Second +Kind, which was also written many Years ago, and made known to some +Friends, who have solicited the making it publick._ + + _I. N._ + +April 1, 1704. + + +Advertisement II + +_In this Second Edition of these Opticks I have omitted the Mathematical +Tracts publish'd at the End of the former Edition, as not belonging to +the Subject. And at the End of the Third Book I have added some +Questions. And to shew that I do not take Gravity for an essential +Property of Bodies, I have added one Question concerning its Cause, +chusing to propose it by way of a Question, because I am not yet +satisfied about it for want of Experiments._ + + _I. N._ + +July 16, 1717. + + +Advertisement to this Fourth Edition + +_This new Edition of Sir_ Isaac Newton's Opticks _is carefully printed +from the Third Edition, as it was corrected by the Author's own Hand, +and left before his Death with the Bookseller. 
Since Sir_ Isaac's +Lectiones Opticæ, _which he publickly read in the University of_ +Cambridge _in the Years 1669, 1670, and 1671, are lately printed, it has +been thought proper to make at the bottom of the Pages several Citations +from thence, where may be found the Demonstrations, which the Author +omitted in these_ Opticks. + + * * * * * + +Transcriber's Note: There are several greek letters used in the +descriptions of the illustrations. They are signified by [Greek: +letter]. Square roots are noted by the letters sqrt before the equation. + + * * * * * + +THE FIRST BOOK OF OPTICKS + + + + +_PART I._ + + +My Design in this Book is not to explain the Properties of Light by +Hypotheses, but to propose and prove them by Reason and Experiments: In +order to which I shall premise the following Definitions and Axioms. + + + + +_DEFINITIONS_ + + +DEFIN. I. + +_By the Rays of Light I understand its least Parts, and those as well +Successive in the same Lines, as Contemporary in several Lines._ For it +is manifest that Light consists of Parts, both Successive and +Contemporary; because in the same place you may stop that which comes +one moment, and let pass that which comes presently after; and in the +same time you may stop it in any one place, and let it pass in any +other. For that part of Light which is stopp'd cannot be the same with +that which is let pass. The least Light or part of Light, which may be +stopp'd alone without the rest of the Light, or propagated alone, or do +or suffer any thing alone, which the rest of the Light doth not or +suffers not, I call a Ray of Light. + + +DEFIN. II. + +_Refrangibility of the Rays of Light, is their Disposition to be +refracted or turned out of their Way in passing out of one transparent +Body or Medium into another. 
And a greater or less Refrangibility of +Rays, is their Disposition to be turned more or less out of their Way in +like Incidences on the same Medium._ Mathematicians usually consider the +Rays of Light to be Lines reaching from the luminous Body to the Body +illuminated, and the refraction of those Rays to be the bending or +breaking of those lines in their passing out of one Medium into another. +And thus may Rays and Refractions be considered, if Light be propagated +in an instant. But by an Argument taken from the Æquations of the times +of the Eclipses of _Jupiter's Satellites_, it seems that Light is +propagated in time, spending in its passage from the Sun to us about +seven Minutes of time: And therefore I have chosen to define Rays and +Refractions in such general terms as may agree to Light in both cases. + + +DEFIN. III. + +_Reflexibility of Rays, is their Disposition to be reflected or turned +back into the same Medium from any other Medium upon whose Surface they +fall. And Rays are more or less reflexible, which are turned back more +or less easily._ As if Light pass out of a Glass into Air, and by being +inclined more and more to the common Surface of the Glass and Air, +begins at length to be totally reflected by that Surface; those sorts of +Rays which at like Incidences are reflected most copiously, or by +inclining the Rays begin soonest to be totally reflected, are most +reflexible. + + +DEFIN. IV. + +_The Angle of Incidence is that Angle, which the Line described by the +incident Ray contains with the Perpendicular to the reflecting or +refracting Surface at the Point of Incidence._ + + +DEFIN. V. + +_The Angle of Reflexion or Refraction, is the Angle which the line +described by the reflected or refracted Ray containeth with the +Perpendicular to the reflecting or refracting Surface at the Point of +Incidence._ + + +DEFIN. VI. 
+ +_The Sines of Incidence, Reflexion, and Refraction, are the Sines of the +Angles of Incidence, Reflexion, and Refraction._ + + +DEFIN. VII + +_The Light whose Rays are all alike Refrangible, I call Simple, +Homogeneal and Similar; and that whose Rays are some more Refrangible +than others, I call Compound, Heterogeneal and Dissimilar._ The former +Light I call Homogeneal, not because I would affirm it so in all +respects, but because the Rays which agree in Refrangibility, agree at +least in all those their other Properties which I consider in the +following Discourse. + + +DEFIN. VIII. + +_The Colours of Homogeneal Lights, I call Primary, Homogeneal and +Simple; and those of Heterogeneal Lights, Heterogeneal and Compound._ +For these are always compounded of the colours of Homogeneal Lights; as +will appear in the following Discourse. + + + + +_AXIOMS._ + + +AX. I. + +_The Angles of Reflexion and Refraction, lie in one and the same Plane +with the Angle of Incidence._ + + +AX. II. + +_The Angle of Reflexion is equal to the Angle of Incidence._ + + +AX. III. + +_If the refracted Ray be returned directly back to the Point of +Incidence, it shall be refracted into the Line before described by the +incident Ray._ + + +AX. IV. + +_Refraction out of the rarer Medium into the denser, is made towards the +Perpendicular; that is, so that the Angle of Refraction be less than the +Angle of Incidence._ + + +AX. V. + +_The Sine of Incidence is either accurately or very nearly in a given +Ratio to the Sine of Refraction._ + +Whence if that Proportion be known in any one Inclination of the +incident Ray, 'tis known in all the Inclinations, and thereby the +Refraction in all cases of Incidence on the same refracting Body may be +determined. Thus if the Refraction be made out of Air into Water, the +Sine of Incidence of the red Light is to the Sine of its Refraction as 4 +to 3. If out of Air into Glass, the Sines are as 17 to 11. 
In Light of +other Colours the Sines have other Proportions: but the difference is so +little that it need seldom be considered. + +[Illustration: FIG. 1] + +Suppose therefore, that RS [in _Fig._ 1.] represents the Surface of +stagnating Water, and that C is the point of Incidence in which any Ray +coming in the Air from A in the Line AC is reflected or refracted, and I +would know whither this Ray shall go after Reflexion or Refraction: I +erect upon the Surface of the Water from the point of Incidence the +Perpendicular CP and produce it downwards to Q, and conclude by the +first Axiom, that the Ray after Reflexion and Refraction, shall be +found somewhere in the Plane of the Angle of Incidence ACP produced. I +let fall therefore upon the Perpendicular CP the Sine of Incidence AD; +and if the reflected Ray be desired, I produce AD to B so that DB be +equal to AD, and draw CB. For this Line CB shall be the reflected Ray; +the Angle of Reflexion BCP and its Sine BD being equal to the Angle and +Sine of Incidence, as they ought to be by the second Axiom, But if the +refracted Ray be desired, I produce AD to H, so that DH may be to AD as +the Sine of Refraction to the Sine of Incidence, that is, (if the Light +be red) as 3 to 4; and about the Center C and in the Plane ACP with the +Radius CA describing a Circle ABE, I draw a parallel to the +Perpendicular CPQ, the Line HE cutting the Circumference in E, and +joining CE, this Line CE shall be the Line of the refracted Ray. For if +EF be let fall perpendicularly on the Line PQ, this Line EF shall be the +Sine of Refraction of the Ray CE, the Angle of Refraction being ECQ; and +this Sine EF is equal to DH, and consequently in Proportion to the Sine +of Incidence AD as 3 to 4. 
+ +In like manner, if there be a Prism of Glass (that is, a Glass bounded +with two Equal and Parallel Triangular ends, and three plain and well +polished Sides, which meet in three Parallel Lines running from the +three Angles of one end to the three Angles of the other end) and if the +Refraction of the Light in passing cross this Prism be desired: Let ACB +[in _Fig._ 2.] represent a Plane cutting this Prism transversly to its +three Parallel lines or edges there where the Light passeth through it, +and let DE be the Ray incident upon the first side of the Prism AC where +the Light goes into the Glass; and by putting the Proportion of the Sine +of Incidence to the Sine of Refraction as 17 to 11 find EF the first +refracted Ray. Then taking this Ray for the Incident Ray upon the second +side of the Glass BC where the Light goes out, find the next refracted +Ray FG by putting the Proportion of the Sine of Incidence to the Sine of +Refraction as 11 to 17. For if the Sine of Incidence out of Air into +Glass be to the Sine of Refraction as 17 to 11, the Sine of Incidence +out of Glass into Air must on the contrary be to the Sine of Refraction +as 11 to 17, by the third Axiom. + +[Illustration: FIG. 2.] + +Much after the same manner, if ACBD [in _Fig._ 3.] represent a Glass +spherically convex on both sides (usually called a _Lens_, such as is a +Burning-glass, or Spectacle-glass, or an Object-glass of a Telescope) +and it be required to know how Light falling upon it from any lucid +point Q shall be refracted, let QM represent a Ray falling upon any +point M of its first spherical Surface ACB, and by erecting a +Perpendicular to the Glass at the point M, find the first refracted Ray +MN by the Proportion of the Sines 17 to 11. Let that Ray in going out of +the Glass be incident upon N, and then find the second refracted Ray +N_q_ by the Proportion of the Sines 11 to 17. 
And after the same manner +may the Refraction be found when the Lens is convex on one side and +plane or concave on the other, or concave on both sides. + +[Illustration: FIG. 3.] + + +AX. VI. + +_Homogeneal Rays which flow from several Points of any Object, and fall +perpendicularly or almost perpendicularly on any reflecting or +refracting Plane or spherical Surface, shall afterwards diverge from so +many other Points, or be parallel to so many other Lines, or converge to +so many other Points, either accurately or without any sensible Error. +And the same thing will happen, if the Rays be reflected or refracted +successively by two or three or more Plane or Spherical Surfaces._ + +The Point from which Rays diverge or to which they converge may be +called their _Focus_. And the Focus of the incident Rays being given, +that of the reflected or refracted ones may be found by finding the +Refraction of any two Rays, as above; or more readily thus. + +_Cas._ 1. Let ACB [in _Fig._ 4.] be a reflecting or refracting Plane, +and Q the Focus of the incident Rays, and Q_q_C a Perpendicular to that +Plane. And if this Perpendicular be produced to _q_, so that _q_C be +equal to QC, the Point _q_ shall be the Focus of the reflected Rays: Or +if _q_C be taken on the same side of the Plane with QC, and in +proportion to QC as the Sine of Incidence to the Sine of Refraction, the +Point _q_ shall be the Focus of the refracted Rays. + +[Illustration: FIG. 4.] + +_Cas._ 2. Let ACB [in _Fig._ 5.] be the reflecting Surface of any Sphere +whose Centre is E. Bisect any Radius thereof, (suppose EC) in T, and if +in that Radius on the same side the Point T you take the Points Q and +_q_, so that TQ, TE, and T_q_, be continual Proportionals, and the Point +Q be the Focus of the incident Rays, the Point _q_ shall be the Focus of +the reflected ones. + +[Illustration: FIG. 5.] + +_Cas._ 3. Let ACB [in _Fig._ 6.] be the refracting Surface of any Sphere +whose Centre is E. 
In any Radius thereof EC produced both ways take ET +and C_t_ equal to one another and severally in such Proportion to that +Radius as the lesser of the Sines of Incidence and Refraction hath to +the difference of those Sines. And then if in the same Line you find any +two Points Q and _q_, so that TQ be to ET as E_t_ to _tq_, taking _tq_ +the contrary way from _t_ which TQ lieth from T, and if the Point Q be +the Focus of any incident Rays, the Point _q_ shall be the Focus of the +refracted ones. + +[Illustration: FIG. 6.] + +And by the same means the Focus of the Rays after two or more Reflexions +or Refractions may be found. + +[Illustration: FIG. 7.] + +_Cas._ 4. Let ACBD [in _Fig._ 7.] be any refracting Lens, spherically +Convex or Concave or Plane on either side, and let CD be its Axis (that +is, the Line which cuts both its Surfaces perpendicularly, and passes +through the Centres of the Spheres,) and in this Axis produced let F and +_f_ be the Foci of the refracted Rays found as above, when the incident +Rays on both sides the Lens are parallel to the same Axis; and upon the +Diameter F_f_ bisected in E, describe a Circle. Suppose now that any +Point Q be the Focus of any incident Rays. Draw QE cutting the said +Circle in T and _t_, and therein take _tq_ in such proportion to _t_E as +_t_E or TE hath to TQ. 
Let _tq_ lie the contrary way from _t_ which TQ +doth from T, and _q_ shall be the Focus of the refracted Rays without +any sensible Error, provided the Point Q be not so remote from the Axis, +nor the Lens so broad as to make any of the Rays fall too obliquely on +the refracting Surfaces.[A] + +And by the like Operations may the reflecting or refracting Surfaces be +found when the two Foci are given, and thereby a Lens be formed, which +shall make the Rays flow towards or from what Place you please.[B] + +So then the Meaning of this Axiom is, that if Rays fall upon any Plane +or Spherical Surface or Lens, and before their Incidence flow from or +towards any Point Q, they shall after Reflexion or Refraction flow from +or towards the Point _q_ found by the foregoing Rules. And if the +incident Rays flow from or towards several points Q, the reflected or +refracted Rays shall flow from or towards so many other Points _q_ +found by the same Rules. Whether the reflected and refracted Rays flow +from or towards the Point _q_ is easily known by the situation of that +Point. For if that Point be on the same side of the reflecting or +refracting Surface or Lens with the Point Q, and the incident Rays flow +from the Point Q, the reflected flow towards the Point _q_ and the +refracted from it; and if the incident Rays flow towards Q, the +reflected flow from _q_, and the refracted towards it. And the contrary +happens when _q_ is on the other side of the Surface. + + +AX. VII. + +_Wherever the Rays which come from all the Points of any Object meet +again in so many Points after they have been made to converge by +Reflection or Refraction, there they will make a Picture of the Object +upon any white Body on which they fall._ + +So if PR [in _Fig._ 3.] 
represent any Object without Doors, and AB be a +Lens placed at a hole in the Window-shut of a dark Chamber, whereby the +Rays that come from any Point Q of that Object are made to converge and +meet again in the Point _q_; and if a Sheet of white Paper be held at +_q_ for the Light there to fall upon it, the Picture of that Object PR +will appear upon the Paper in its proper shape and Colours. For as the +Light which comes from the Point Q goes to the Point _q_, so the Light +which comes from other Points P and R of the Object, will go to so many +other correspondent Points _p_ and _r_ (as is manifest by the sixth +Axiom;) so that every Point of the Object shall illuminate a +correspondent Point of the Picture, and thereby make a Picture like the +Object in Shape and Colour, this only excepted, that the Picture shall +be inverted. And this is the Reason of that vulgar Experiment of casting +the Species of Objects from abroad upon a Wall or Sheet of white Paper +in a dark Room. + +In like manner, when a Man views any Object PQR, [in _Fig._ 8.] the +Light which comes from the several Points of the Object is so refracted +by the transparent skins and humours of the Eye, (that is, by the +outward coat EFG, called the _Tunica Cornea_, and by the crystalline +humour AB which is beyond the Pupil _mk_) as to converge and meet again +in so many Points in the bottom of the Eye, and there to paint the +Picture of the Object upon that skin (called the _Tunica Retina_) with +which the bottom of the Eye is covered. For Anatomists, when they have +taken off from the bottom of the Eye that outward and most thick Coat +called the _Dura Mater_, can then see through the thinner Coats, the +Pictures of Objects lively painted thereon. And these Pictures, +propagated by Motion along the Fibres of the Optick Nerves into the +Brain, are the cause of Vision. For accordingly as these Pictures are +perfect or imperfect, the Object is seen perfectly or imperfectly. 
If +the Eye be tinged with any colour (as in the Disease of the _Jaundice_) +so as to tinge the Pictures in the bottom of the Eye with that Colour, +then all Objects appear tinged with the same Colour. If the Humours of +the Eye by old Age decay, so as by shrinking to make the _Cornea_ and +Coat of the _Crystalline Humour_ grow flatter than before, the Light +will not be refracted enough, and for want of a sufficient Refraction +will not converge to the bottom of the Eye but to some place beyond it, +and by consequence paint in the bottom of the Eye a confused Picture, +and according to the Indistinctness of this Picture the Object will +appear confused. This is the reason of the decay of sight in old Men, +and shews why their Sight is mended by Spectacles. For those Convex +glasses supply the defect of plumpness in the Eye, and by increasing the +Refraction make the Rays converge sooner, so as to convene distinctly at +the bottom of the Eye if the Glass have a due degree of convexity. And +the contrary happens in short-sighted Men whose Eyes are too plump. For +the Refraction being now too great, the Rays converge and convene in the +Eyes before they come at the bottom; and therefore the Picture made in +the bottom and the Vision caused thereby will not be distinct, unless +the Object be brought so near the Eye as that the place where the +converging Rays convene may be removed to the bottom, or that the +plumpness of the Eye be taken off and the Refractions diminished by a +Concave-glass of a due degree of Concavity, or lastly that by Age the +Eye grow flatter till it come to a due Figure: For short-sighted Men see +remote Objects best in Old Age, and therefore they are accounted to have +the most lasting Eyes. + +[Illustration: FIG. 8.] + + +AX. VIII. + +_An Object seen by Reflexion or Refraction, appears in that place from +whence the Rays after their last Reflexion or Refraction diverge in +falling on the Spectator's Eye._ + +[Illustration: FIG. 9.] 
+ +If the Object A [in FIG. 9.] be seen by Reflexion of a Looking-glass +_mn_, it shall appear, not in its proper place A, but behind the Glass +at _a_, from whence any Rays AB, AC, AD, which flow from one and the +same Point of the Object, do after their Reflexion made in the Points B, +C, D, diverge in going from the Glass to E, F, G, where they are +incident on the Spectator's Eyes. For these Rays do make the same +Picture in the bottom of the Eyes as if they had come from the Object +really placed at _a_ without the Interposition of the Looking-glass; and +all Vision is made according to the place and shape of that Picture. + +In like manner the Object D [in FIG. 2.] seen through a Prism, appears +not in its proper place D, but is thence translated to some other place +_d_ situated in the last refracted Ray FG drawn backward from F to _d_. + +[Illustration: FIG. 10.] + +And so the Object Q [in FIG. 10.] seen through the Lens AB, appears at +the place _q_ from whence the Rays diverge in passing from the Lens to +the Eye. Now it is to be noted, that the Image of the Object at _q_ is +so much bigger or lesser than the Object it self at Q, as the distance +of the Image at _q_ from the Lens AB is bigger or less than the distance +of the Object at Q from the same Lens. And if the Object be seen through +two or more such Convex or Concave-glasses, every Glass shall make a new +Image, and the Object shall appear in the place of the bigness of the +last Image. Which consideration unfolds the Theory of Microscopes and +Telescopes. For that Theory consists in almost nothing else than the +describing such Glasses as shall make the last Image of any Object as +distinct and large and luminous as it can conveniently be made. + +I have now given in Axioms and their Explications the sum of what hath +hitherto been treated of in Opticks. For what hath been generally +agreed on I content my self to assume under the notion of Principles, in +order to what I have farther to write. 
And this may suffice for an +Introduction to Readers of quick Wit and good Understanding not yet +versed in Opticks: Although those who are already acquainted with this +Science, and have handled Glasses, will more readily apprehend what +followeth. + +FOOTNOTES: + +[A] In our Author's _Lectiones Opticæ_, Part I. Sect. IV. Prop 29, 30, +there is an elegant Method of determining these _Foci_; not only in +spherical Surfaces, but likewise in any other curved Figure whatever: +And in Prop. 32, 33, the same thing is done for any Ray lying out of the +Axis. + +[B] _Ibid._ Prop. 34. + + + + +_PROPOSITIONS._ + + + +_PROP._ I. THEOR. I. + +_Lights which differ in Colour, differ also in Degrees of +Refrangibility._ + +The PROOF by Experiments. + +_Exper._ 1. + +I took a black oblong stiff Paper terminated by Parallel Sides, and with +a Perpendicular right Line drawn cross from one Side to the other, +distinguished it into two equal Parts. One of these parts I painted with +a red colour and the other with a blue. The Paper was very black, and +the Colours intense and thickly laid on, that the Phænomenon might be +more conspicuous. This Paper I view'd through a Prism of solid Glass, +whose two Sides through which the Light passed to the Eye were plane and +well polished, and contained an Angle of about sixty degrees; which +Angle I call the refracting Angle of the Prism. And whilst I view'd it, +I held it and the Prism before a Window in such manner that the Sides of +the Paper were parallel to the Prism, and both those Sides and the Prism +were parallel to the Horizon, and the cross Line was also parallel to +it: and that the Light which fell from the Window upon the Paper made an +Angle with the Paper, equal to that Angle which was made with the same +Paper by the Light reflected from it to the Eye. 
Beyond the Prism was +the Wall of the Chamber under the Window covered over with black Cloth, +and the Cloth was involved in Darkness that no Light might be reflected +from thence, which in passing by the Edges of the Paper to the Eye, +might mingle itself with the Light of the Paper, and obscure the +Phænomenon thereof. These things being thus ordered, I found that if the +refracting Angle of the Prism be turned upwards, so that the Paper may +seem to be lifted upwards by the Refraction, its blue half will be +lifted higher by the Refraction than its red half. But if the refracting +Angle of the Prism be turned downward, so that the Paper may seem to be +carried lower by the Refraction, its blue half will be carried something +lower thereby than its red half. Wherefore in both Cases the Light which +comes from the blue half of the Paper through the Prism to the Eye, does +in like Circumstances suffer a greater Refraction than the Light which +comes from the red half, and by consequence is more refrangible. + +_Illustration._ In the eleventh Figure, MN represents the Window, and DE +the Paper terminated with parallel Sides DJ and HE, and by the +transverse Line FG distinguished into two halfs, the one DG of an +intensely blue Colour, the other FE of an intensely red. And BAC_cab_ +represents the Prism whose refracting Planes AB_ba_ and AC_ca_ meet in +the Edge of the refracting Angle A_a_. This Edge A_a_ being upward, is +parallel both to the Horizon, and to the Parallel-Edges of the Paper DJ +and HE, and the transverse Line FG is perpendicular to the Plane of the +Window. And _de_ represents the Image of the Paper seen by Refraction +upwards in such manner, that the blue half DG is carried higher to _dg_ +than the red half FE is to _fe_, and therefore suffers a greater +Refraction. 
If the Edge of the refracting Angle be turned downward, the +Image of the Paper will be refracted downward; suppose to [Greek: de], +and the blue half will be refracted lower to [Greek: dg] than the red +half is to [Greek: pe]. + +[Illustration: FIG. 11.] + +_Exper._ 2. About the aforesaid Paper, whose two halfs were painted over +with red and blue, and which was stiff like thin Pasteboard, I lapped +several times a slender Thred of very black Silk, in such manner that +the several parts of the Thred might appear upon the Colours like so +many black Lines drawn over them, or like long and slender dark Shadows +cast upon them. I might have drawn black Lines with a Pen, but the +Threds were smaller and better defined. This Paper thus coloured and +lined I set against a Wall perpendicularly to the Horizon, so that one +of the Colours might stand to the Right Hand, and the other to the Left. +Close before the Paper, at the Confine of the Colours below, I placed a +Candle to illuminate the Paper strongly: For the Experiment was tried in +the Night. The Flame of the Candle reached up to the lower edge of the +Paper, or a very little higher. Then at the distance of six Feet, and +one or two Inches from the Paper upon the Floor I erected a Glass Lens +four Inches and a quarter broad, which might collect the Rays coming +from the several Points of the Paper, and make them converge towards so +many other Points at the same distance of six Feet, and one or two +Inches on the other side of the Lens, and so form the Image of the +coloured Paper upon a white Paper placed there, after the same manner +that a Lens at a Hole in a Window casts the Images of Objects abroad +upon a Sheet of white Paper in a dark Room. 
The aforesaid white Paper, +erected perpendicular to the Horizon, and to the Rays which fell upon it +from the Lens, I moved sometimes towards the Lens, sometimes from it, to +find the Places where the Images of the blue and red Parts of the +coloured Paper appeared most distinct. Those Places I easily knew by the +Images of the black Lines which I had made by winding the Silk about the +Paper. For the Images of those fine and slender Lines (which by reason +of their Blackness were like Shadows on the Colours) were confused and +scarce visible, unless when the Colours on either side of each Line were +terminated most distinctly, Noting therefore, as diligently as I could, +the Places where the Images of the red and blue halfs of the coloured +Paper appeared most distinct, I found that where the red half of the +Paper appeared distinct, the blue half appeared confused, so that the +black Lines drawn upon it could scarce be seen; and on the contrary, +where the blue half appeared most distinct, the red half appeared +confused, so that the black Lines upon it were scarce visible. And +between the two Places where these Images appeared distinct there was +the distance of an Inch and a half; the distance of the white Paper from +the Lens, when the Image of the red half of the coloured Paper appeared +most distinct, being greater by an Inch and an half than the distance of +the same white Paper from the Lens, when the Image of the blue half +appeared most distinct. In like Incidences therefore of the blue and red +upon the Lens, the blue was refracted more by the Lens than the red, so +as to converge sooner by an Inch and a half, and therefore is more +refrangible. + +_Illustration._ In the twelfth Figure (p. 27), DE signifies the coloured +Paper, DG the blue half, FE the red half, MN the Lens, HJ the white +Paper in that Place where the red half with its black Lines appeared +distinct, and _hi_ the same Paper in that Place where the blue half +appeared distinct. 
The Place _hi_ was nearer to the Lens MN than the +Place HJ by an Inch and an half. + +_Scholium._ The same Things succeed, notwithstanding that some of the +Circumstances be varied; as in the first Experiment when the Prism and +Paper are any ways inclined to the Horizon, and in both when coloured +Lines are drawn upon very black Paper. But in the Description of these +Experiments, I have set down such Circumstances, by which either the +Phænomenon might be render'd more conspicuous, or a Novice might more +easily try them, or by which I did try them only. The same Thing, I have +often done in the following Experiments: Concerning all which, this one +Admonition may suffice. Now from these Experiments it follows not, that +all the Light of the blue is more refrangible than all the Light of the +red: For both Lights are mixed of Rays differently refrangible, so that +in the red there are some Rays not less refrangible than those of the +blue, and in the blue there are some Rays not more refrangible than +those of the red: But these Rays, in proportion to the whole Light, are +but few, and serve to diminish the Event of the Experiment, but are not +able to destroy it. For, if the red and blue Colours were more dilute +and weak, the distance of the Images would be less than an Inch and a +half; and if they were more intense and full, that distance would be +greater, as will appear hereafter. These Experiments may suffice for the +Colours of Natural Bodies. For in the Colours made by the Refraction of +Prisms, this Proposition will appear by the Experiments which are now to +follow in the next Proposition. + + +_PROP._ II. THEOR. II. + +_The Light of the Sun consists of Rays differently Refrangible._ + +The PROOF by Experiments. + +[Illustration: FIG. 12.] + +[Illustration: FIG. 13.] + +_Exper._ 3. 
+ +In a very dark Chamber, at a round Hole, about one third Part of an Inch +broad, made in the Shut of a Window, I placed a Glass Prism, whereby the +Beam of the Sun's Light, which came in at that Hole, might be refracted +upwards toward the opposite Wall of the Chamber, and there form a +colour'd Image of the Sun. The Axis of the Prism (that is, the Line +passing through the middle of the Prism from one end of it to the other +end parallel to the edge of the Refracting Angle) was in this and the +following Experiments perpendicular to the incident Rays. About this +Axis I turned the Prism slowly, and saw the refracted Light on the Wall, +or coloured Image of the Sun, first to descend, and then to ascend. +Between the Descent and Ascent, when the Image seemed Stationary, I +stopp'd the Prism, and fix'd it in that Posture, that it should be moved +no more. For in that Posture the Refractions of the Light at the two +Sides of the refracting Angle, that is, at the Entrance of the Rays into +the Prism, and at their going out of it, were equal to one another.[C] +So also in other Experiments, as often as I would have the Refractions +on both sides the Prism to be equal to one another, I noted the Place +where the Image of the Sun formed by the refracted Light stood still +between its two contrary Motions, in the common Period of its Progress +and Regress; and when the Image fell upon that Place, I made fast the +Prism. And in this Posture, as the most convenient, it is to be +understood that all the Prisms are placed in the following Experiments, +unless where some other Posture is described. The Prism therefore being +placed in this Posture, I let the refracted Light fall perpendicularly +upon a Sheet of white Paper at the opposite Wall of the Chamber, and +observed the Figure and Dimensions of the Solar Image formed on the +Paper by that Light. This Image was Oblong and not Oval, but terminated +with two Rectilinear and Parallel Sides, and two Semicircular Ends. 
On +its Sides it was bounded pretty distinctly, but on its Ends very +confusedly and indistinctly, the Light there decaying and vanishing by +degrees. The Breadth of this Image answered to the Sun's Diameter, and +was about two Inches and the eighth Part of an Inch, including the +Penumbra. For the Image was eighteen Feet and an half distant from the +Prism, and at this distance that Breadth, if diminished by the Diameter +of the Hole in the Window-shut, that is by a quarter of an Inch, +subtended an Angle at the Prism of about half a Degree, which is the +Sun's apparent Diameter. But the Length of the Image was about ten +Inches and a quarter, and the Length of the Rectilinear Sides about +eight Inches; and the refracting Angle of the Prism, whereby so great a +Length was made, was 64 degrees. With a less Angle the Length of the +Image was less, the Breadth remaining the same. If the Prism was turned +about its Axis that way which made the Rays emerge more obliquely out of +the second refracting Surface of the Prism, the Image soon became an +Inch or two longer, or more; and if the Prism was turned about the +contrary way, so as to make the Rays fall more obliquely on the first +refracting Surface, the Image soon became an Inch or two shorter. And +therefore in trying this Experiment, I was as curious as I could be in +placing the Prism by the above-mention'd Rule exactly in such a Posture, +that the Refractions of the Rays at their Emergence out of the Prism +might be equal to that at their Incidence on it. This Prism had some +Veins running along within the Glass from one end to the other, which +scattered some of the Sun's Light irregularly, but had no sensible +Effect in increasing the Length of the coloured Spectrum. For I tried +the same Experiment with other Prisms with the same Success. 
And +particularly with a Prism which seemed free from such Veins, and whose +refracting Angle was 62-1/2 Degrees, I found the Length of the Image +9-3/4 or 10 Inches at the distance of 18-1/2 Feet from the Prism, the +Breadth of the Hole in the Window-shut being 1/4 of an Inch, as before. +And because it is easy to commit a Mistake in placing the Prism in its +due Posture, I repeated the Experiment four or five Times, and always +found the Length of the Image that which is set down above. With another +Prism of clearer Glass and better Polish, which seemed free from Veins, +and whose refracting Angle was 63-1/2 Degrees, the Length of this Image +at the same distance of 18-1/2 Feet was also about 10 Inches, or 10-1/8. +Beyond these Measures for about a 1/4 or 1/3 of an Inch at either end of +the Spectrum the Light of the Clouds seemed to be a little tinged with +red and violet, but so very faintly, that I suspected that Tincture +might either wholly, or in great Measure arise from some Rays of the +Spectrum scattered irregularly by some Inequalities in the Substance and +Polish of the Glass, and therefore I did not include it in these +Measures. Now the different Magnitude of the hole in the Window-shut, +and different thickness of the Prism where the Rays passed through it, +and different inclinations of the Prism to the Horizon, made no sensible +changes in the length of the Image. Neither did the different matter of +the Prisms make any: for in a Vessel made of polished Plates of Glass +cemented together in the shape of a Prism and filled with Water, there +is the like Success of the Experiment according to the quantity of the +Refraction. It is farther to be observed, that the Rays went on in right +Lines from the Prism to the Image, and therefore at their very going out +of the Prism had all that Inclination to one another from which the +length of the Image proceeded, that is, the Inclination of more than two +degrees and an half. 
And yet according to the Laws of Opticks vulgarly +received, they could not possibly be so much inclined to one another.[D] +For let EG [_Fig._ 13. (p. 27)] represent the Window-shut, F the hole +made therein through which a beam of the Sun's Light was transmitted +into the darkened Chamber, and ABC a Triangular Imaginary Plane whereby +the Prism is feigned to be cut transversely through the middle of the +Light. Or if you please, let ABC represent the Prism it self, looking +directly towards the Spectator's Eye with its nearer end: And let XY be +the Sun, MN the Paper upon which the Solar Image or Spectrum is cast, +and PT the Image it self whose sides towards _v_ and _w_ are Rectilinear +and Parallel, and ends towards P and T Semicircular. YKHP and XLJT are +two Rays, the first of which comes from the lower part of the Sun to the +higher part of the Image, and is refracted in the Prism at K and H, and +the latter comes from the higher part of the Sun to the lower part of +the Image, and is refracted at L and J. Since the Refractions on both +sides the Prism are equal to one another, that is, the Refraction at K +equal to the Refraction at J, and the Refraction at L equal to the +Refraction at H, so that the Refractions of the incident Rays at K and L +taken together, are equal to the Refractions of the emergent Rays at H +and J taken together: it follows by adding equal things to equal things, +that the Refractions at K and H taken together, are equal to the +Refractions at J and L taken together, and therefore the two Rays being +equally refracted, have the same Inclination to one another after +Refraction which they had before; that is, the Inclination of half a +Degree answering to the Sun's Diameter. For so great was the inclination +of the Rays to one another before Refraction. 
So then, the length of the +Image PT would by the Rules of Vulgar Opticks subtend an Angle of half a +Degree at the Prism, and by Consequence be equal to the breadth _vw_; +and therefore the Image would be round. Thus it would be were the two +Rays XLJT and YKHP, and all the rest which form the Image P_w_T_v_, +alike refrangible. And therefore seeing by Experience it is found that +the Image is not round, but about five times longer than broad, the Rays +which going to the upper end P of the Image suffer the greatest +Refraction, must be more refrangible than those which go to the lower +end T, unless the Inequality of Refraction be casual. + +This Image or Spectrum PT was coloured, being red at its least refracted +end T, and violet at its most refracted end P, and yellow green and +blue in the intermediate Spaces. Which agrees with the first +Proposition, that Lights which differ in Colour, do also differ in +Refrangibility. The length of the Image in the foregoing Experiments, I +measured from the faintest and outmost red at one end, to the faintest +and outmost blue at the other end, excepting only a little Penumbra, +whose breadth scarce exceeded a quarter of an Inch, as was said above. + +_Exper._ 4. In the Sun's Beam which was propagated into the Room through +the hole in the Window-shut, at the distance of some Feet from the hole, +I held the Prism in such a Posture, that its Axis might be perpendicular +to that Beam. Then I looked through the Prism upon the hole, and turning +the Prism to and fro about its Axis, to make the Image of the Hole +ascend and descend, when between its two contrary Motions it seemed +Stationary, I stopp'd the Prism, that the Refractions of both sides of +the refracting Angle might be equal to each other, as in the former +Experiment. 
In this situation of the Prism viewing through it the said +Hole, I observed the length of its refracted Image to be many times +greater than its breadth, and that the most refracted part thereof +appeared violet, the least refracted red, the middle parts blue, green +and yellow in order. The same thing happen'd when I removed the Prism +out of the Sun's Light, and looked through it upon the hole shining by +the Light of the Clouds beyond it. And yet if the Refraction were done +regularly according to one certain Proportion of the Sines of Incidence +and Refraction as is vulgarly supposed, the refracted Image ought to +have appeared round. + +So then, by these two Experiments it appears, that in Equal Incidences +there is a considerable inequality of Refractions. But whence this +inequality arises, whether it be that some of the incident Rays are +refracted more, and others less, constantly, or by chance, or that one +and the same Ray is by Refraction disturbed, shatter'd, dilated, and as +it were split and spread into many diverging Rays, as _Grimaldo_ +supposes, does not yet appear by these Experiments, but will appear by +those that follow. + +_Exper._ 5. Considering therefore, that if in the third Experiment the +Image of the Sun should be drawn out into an oblong Form, either by a +Dilatation of every Ray, or by any other casual inequality of the +Refractions, the same oblong Image would by a second Refraction made +sideways be drawn out as much in breadth by the like Dilatation of the +Rays, or other casual inequality of the Refractions sideways, I tried +what would be the Effects of such a second Refraction. For this end I +ordered all things as in the third Experiment, and then placed a second +Prism immediately after the first in a cross Position to it, that it +might again refract the beam of the Sun's Light which came to it through +the first Prism. In the first Prism this beam was refracted upwards, and +in the second sideways. 
And I found that by the Refraction of the second +Prism, the breadth of the Image was not increased, but its superior +part, which in the first Prism suffered the greater Refraction, and +appeared violet and blue, did again in the second Prism suffer a greater +Refraction than its inferior part, which appeared red and yellow, and +this without any Dilatation of the Image in breadth. + +[Illustration: FIG. 14] + +_Illustration._ Let S [_Fig._ 14, 15.] represent the Sun, F the hole in +the Window, ABC the first Prism, DH the second Prism, Y the round Image +of the Sun made by a direct beam of Light when the Prisms are taken +away, PT the oblong Image of the Sun made by that beam passing through +the first Prism alone, when the second Prism is taken away, and _pt_ the +Image made by the cross Refractions of both Prisms together. Now if the +Rays which tend towards the several Points of the round Image Y were +dilated and spread by the Refraction of the first Prism, so that they +should not any longer go in single Lines to single Points, but that +every Ray being split, shattered, and changed from a Linear Ray to a +Superficies of Rays diverging from the Point of Refraction, and lying in +the Plane of the Angles of Incidence and Refraction, they should go in +those Planes to so many Lines reaching almost from one end of the Image +PT to the other, and if that Image should thence become oblong: those +Rays and their several parts tending towards the several Points of the +Image PT ought to be again dilated and spread sideways by the transverse +Refraction of the second Prism, so as to compose a four square Image, +such as is represented at [Greek: pt]. For the better understanding of +which, let the Image PT be distinguished into five equal parts PQK, +KQRL, LRSM, MSVN, NVT. 
And by the same irregularity that the orbicular +Light Y is by the Refraction of the first Prism dilated and drawn out +into a long Image PT, the Light PQK which takes up a space of the same +length and breadth with the Light Y ought to be by the Refraction of the +second Prism dilated and drawn out into the long Image _[Greek: p]qkp_, +and the Light KQRL into the long Image _kqrl_, and the Lights LRSM, +MSVN, NVT, into so many other long Images _lrsm_, _msvn_, _nvt[Greek: +t]_; and all these long Images would compose the four square Images +_[Greek: pt]_. Thus it ought to be were every Ray dilated by Refraction, +and spread into a triangular Superficies of Rays diverging from the +Point of Refraction. For the second Refraction would spread the Rays one +way as much as the first doth another, and so dilate the Image in +breadth as much as the first doth in length. And the same thing ought to +happen, were some rays casually refracted more than others. But the +Event is otherwise. The Image PT was not made broader by the Refraction +of the second Prism, but only became oblique, as 'tis represented at +_pt_, its upper end P being by the Refraction translated to a greater +distance than its lower end T. So then the Light which went towards the +upper end P of the Image, was (at equal Incidences) more refracted in +the second Prism, than the Light which tended towards the lower end T, +that is the blue and violet, than the red and yellow; and therefore was +more refrangible. The same Light was by the Refraction of the first +Prism translated farther from the place Y to which it tended before +Refraction; and therefore suffered as well in the first Prism as in the +second a greater Refraction than the rest of the Light, and by +consequence was more refrangible than the rest, even before its +incidence on the first Prism. 
+ +Sometimes I placed a third Prism after the second, and sometimes also a +fourth after the third, by all which the Image might be often refracted +sideways: but the Rays which were more refracted than the rest in the +first Prism were also more refracted in all the rest, and that without +any Dilatation of the Image sideways: and therefore those Rays for their +constancy of a greater Refraction are deservedly reputed more +refrangible. + +[Illustration: FIG. 15] + +But that the meaning of this Experiment may more clearly appear, it is +to be considered that the Rays which are equally refrangible do fall +upon a Circle answering to the Sun's Disque. For this was proved in the +third Experiment. By a Circle I understand not here a perfect +geometrical Circle, but any orbicular Figure whose length is equal to +its breadth, and which, as to Sense, may seem circular. Let therefore AG +[in _Fig._ 15.] represent the Circle which all the most refrangible Rays +propagated from the whole Disque of the Sun, would illuminate and paint +upon the opposite Wall if they were alone; EL the Circle which all the +least refrangible Rays would in like manner illuminate and paint if they +were alone; BH, CJ, DK, the Circles which so many intermediate sorts of +Rays would successively paint upon the Wall, if they were singly +propagated from the Sun in successive order, the rest being always +intercepted; and conceive that there are other intermediate Circles +without Number, which innumerable other intermediate sorts of Rays would +successively paint upon the Wall if the Sun should successively emit +every sort apart. And seeing the Sun emits all these sorts at once, they +must all together illuminate and paint innumerable equal Circles, of all +which, being according to their degrees of Refrangibility placed in +order in a continual Series, that oblong Spectrum PT is composed which I +described in the third Experiment. Now if the Sun's circular Image Y [in +_Fig._ 15.] 
which is made by an unrefracted beam of Light was by any +Dilation of the single Rays, or by any other irregularity in the +Refraction of the first Prism, converted into the oblong Spectrum, PT: +then ought every Circle AG, BH, CJ, &c. in that Spectrum, by the cross +Refraction of the second Prism again dilating or otherwise scattering +the Rays as before, to be in like manner drawn out and transformed into +an oblong Figure, and thereby the breadth of the Image PT would be now +as much augmented as the length of the Image Y was before by the +Refraction of the first Prism; and thus by the Refractions of both +Prisms together would be formed a four square Figure _p[Greek: +p]t[Greek: t]_, as I described above. Wherefore since the breadth of the +Spectrum PT is not increased by the Refraction sideways, it is certain +that the Rays are not split or dilated, or otherways irregularly +scatter'd by that Refraction, but that every Circle is by a regular and +uniform Refraction translated entire into another Place, as the Circle +AG by the greatest Refraction into the place _ag_, the Circle BH by a +less Refraction into the place _bh_, the Circle CJ by a Refraction still +less into the place _ci_, and so of the rest; by which means a new +Spectrum _pt_ inclined to the former PT is in like manner composed of +Circles lying in a right Line; and these Circles must be of the same +bigness with the former, because the breadths of all the Spectrums Y, PT +and _pt_ at equal distances from the Prisms are equal. + +I considered farther, that by the breadth of the hole F through which +the Light enters into the dark Chamber, there is a Penumbra made in the +Circuit of the Spectrum Y, and that Penumbra remains in the rectilinear +Sides of the Spectrums PT and _pt_. 
I placed therefore at that hole a +Lens or Object-glass of a Telescope which might cast the Image of the +Sun distinctly on Y without any Penumbra at all, and found that the +Penumbra of the rectilinear Sides of the oblong Spectrums PT and _pt_ +was also thereby taken away, so that those Sides appeared as distinctly +defined as did the Circumference of the first Image Y. Thus it happens +if the Glass of the Prisms be free from Veins, and their sides be +accurately plane and well polished without those numberless Waves or +Curles which usually arise from Sand-holes a little smoothed in +polishing with Putty. If the Glass be only well polished and free from +Veins, and the Sides not accurately plane, but a little Convex or +Concave, as it frequently happens; yet may the three Spectrums Y, PT and +_pt_ want Penumbras, but not in equal distances from the Prisms. Now +from this want of Penumbras, I knew more certainly that every one of the +Circles was refracted according to some most regular, uniform and +constant Law. For if there were any irregularity in the Refraction, the +right Lines AE and GL, which all the Circles in the Spectrum PT do +touch, could not by that Refraction be translated into the Lines _ae_ +and _gl_ as distinct and straight as they were before, but there would +arise in those translated Lines some Penumbra or Crookedness or +Undulation, or other sensible Perturbation contrary to what is found by +Experience. Whatsoever Penumbra or Perturbation should be made in the +Circles by the cross Refraction of the second Prism, all that Penumbra +or Perturbation would be conspicuous in the right Lines _ae_ and _gl_ +which touch those Circles. And therefore since there is no such Penumbra +or Perturbation in those right Lines, there must be none in the +Circles. Since the distance between those Tangents or breadth of the +Spectrum is not increased by the Refractions, the Diameters of the +Circles are not increased thereby. 
Since those Tangents continue to be +right Lines, every Circle which in the first Prism is more or less +refracted, is exactly in the same proportion more or less refracted in +the second. And seeing all these things continue to succeed after the +same manner when the Rays are again in a third Prism, and again in a +fourth refracted sideways, it is evident that the Rays of one and the +same Circle, as to their degree of Refrangibility, continue always +uniform and homogeneal to one another, and that those of several Circles +do differ in degree of Refrangibility, and that in some certain and +constant Proportion. Which is the thing I was to prove. + +There is yet another Circumstance or two of this Experiment by which it +becomes still more plain and convincing. Let the second Prism DH [in +_Fig._ 16.] be placed not immediately after the first, but at some +distance from it; suppose in the mid-way between it and the Wall on +which the oblong Spectrum PT is cast, so that the Light from the first +Prism may fall upon it in the form of an oblong Spectrum [Greek: pt] +parallel to this second Prism, and be refracted sideways to form the +oblong Spectrum _pt_ upon the Wall. And you will find as before, that +this Spectrum _pt_ is inclined to that Spectrum PT, which the first +Prism forms alone without the second; the blue ends P and _p_ being +farther distant from one another than the red ones T and _t_, and by +consequence that the Rays which go to the blue end [Greek: p] of the +Image [Greek: pt], and which therefore suffer the greatest Refraction in +the first Prism, are again in the second Prism more refracted than the +rest. + +[Illustration: FIG. 16.] + +[Illustration: FIG. 17.] + +The same thing I try'd also by letting the Sun's Light into a dark Room +through two little round holes F and [Greek: ph] [in _Fig._ 17.] 
made in +the Window, and with two parallel Prisms ABC and [Greek: abg] placed at +those holes (one at each) refracting those two beams of Light to the +opposite Wall of the Chamber, in such manner that the two colour'd +Images PT and MN which they there painted were joined end to end and lay +in one straight Line, the red end T of the one touching the blue end M +of the other. For if these two refracted Beams were again by a third +Prism DH placed cross to the two first, refracted sideways, and the +Spectrums thereby translated to some other part of the Wall of the +Chamber, suppose the Spectrum PT to _pt_ and the Spectrum MN to _mn_, +these translated Spectrums _pt_ and _mn_ would not lie in one straight +Line with their ends contiguous as before, but be broken off from one +another and become parallel, the blue end _m_ of the Image _mn_ being by +a greater Refraction translated farther from its former place MT, than +the red end _t_ of the other Image _pt_ from the same place MT; which +puts the Proposition past Dispute. And this happens whether the third +Prism DH be placed immediately after the two first, or at a great +distance from them, so that the Light refracted in the two first Prisms +be either white and circular, or coloured and oblong when it falls on +the third. + +_Exper._ 6. In the middle of two thin Boards I made round holes a third +part of an Inch in diameter, and in the Window-shut a much broader hole +being made to let into my darkned Chamber a large Beam of the Sun's +Light; I placed a Prism behind the Shut in that beam to refract it +towards the opposite Wall, and close behind the Prism I fixed one of the +Boards, in such manner that the middle of the refracted Light might pass +through the hole made in it, and the rest be intercepted by the Board. 
+Then at the distance of about twelve Feet from the first Board I fixed +the other Board in such manner that the middle of the refracted Light +which came through the hole in the first Board, and fell upon the +opposite Wall, might pass through the hole in this other Board, and the +rest being intercepted by the Board might paint upon it the coloured +Spectrum of the Sun. And close behind this Board I fixed another Prism +to refract the Light which came through the hole. Then I returned +speedily to the first Prism, and by turning it slowly to and fro about +its Axis, I caused the Image which fell upon the second Board to move up +and down upon that Board, that all its parts might successively pass +through the hole in that Board and fall upon the Prism behind it. And in +the mean time, I noted the places on the opposite Wall to which that +Light after its Refraction in the second Prism did pass; and by the +difference of the places I found that the Light which being most +refracted in the first Prism did go to the blue end of the Image, was +again more refracted in the second Prism than the Light which went to +the red end of that Image, which proves as well the first Proposition as +the second. And this happened whether the Axis of the two Prisms were +parallel, or inclined to one another, and to the Horizon in any given +Angles. + +_Illustration._ Let F [in _Fig._ 18.] be the wide hole in the +Window-shut, through which the Sun shines upon the first Prism ABC, and +let the refracted Light fall upon the middle of the Board DE, and the +middle part of that Light upon the hole G made in the middle part of +that Board. Let this trajected part of that Light fall again upon the +middle of the second Board _de_, and there paint such an oblong coloured +Image of the Sun as was described in the third Experiment. 
By turning +the Prism ABC slowly to and fro about its Axis, this Image will be made +to move up and down the Board _de_, and by this means all its parts from +one end to the other may be made to pass successively through the hole +_g_ which is made in the middle of that Board. In the mean while another +Prism _abc_ is to be fixed next after that hole _g_, to refract the +trajected Light a second time. And these things being thus ordered, I +marked the places M and N of the opposite Wall upon which the refracted +Light fell, and found that whilst the two Boards and second Prism +remained unmoved, those places by turning the first Prism about its Axis +were changed perpetually. For when the lower part of the Light which +fell upon the second Board _de_ was cast through the hole _g_, it went +to a lower place M on the Wall and when the higher part of that Light +was cast through the same hole _g_, it went to a higher place N on the +Wall, and when any intermediate part of the Light was cast through that +hole, it went to some place on the Wall between M and N. The unchanged +Position of the holes in the Boards, made the Incidence of the Rays upon +the second Prism to be the same in all cases. And yet in that common +Incidence some of the Rays were more refracted, and others less. And +those were more refracted in this Prism, which by a greater Refraction +in the first Prism were more turned out of the way, and therefore for +their Constancy of being more refracted are deservedly called more +refrangible. + +[Illustration: FIG. 18.] + +[Illustration: FIG. 20.] + +_Exper._ 7. At two holes made near one another in my Window-shut I +placed two Prisms, one at each, which might cast upon the opposite Wall +(after the manner of the third Experiment) two oblong coloured Images of +the Sun. 
And at a little distance from the Wall I placed a long slender +Paper with straight and parallel edges, and ordered the Prisms and Paper +so, that the red Colour of one Image might fall directly upon one half +of the Paper, and the violet Colour of the other Image upon the other +half of the same Paper; so that the Paper appeared of two Colours, red +and violet, much after the manner of the painted Paper in the first and +second Experiments. Then with a black Cloth I covered the Wall behind +the Paper, that no Light might be reflected from it to disturb the +Experiment, and viewing the Paper through a third Prism held parallel +to it, I saw that half of it which was illuminated by the violet Light +to be divided from the other half by a greater Refraction, especially +when I went a good way off from the Paper. For when I viewed it too near +at hand, the two halfs of the Paper did not appear fully divided from +one another, but seemed contiguous at one of their Angles like the +painted Paper in the first Experiment. Which also happened when the +Paper was too broad. + +[Illustration: FIG. 19.] + +Sometimes instead of the Paper I used a white Thred, and this appeared +through the Prism divided into two parallel Threds as is represented in +the nineteenth Figure, where DG denotes the Thred illuminated with +violet Light from D to E and with red Light from F to G, and _defg_ are +the parts of the Thred seen by Refraction. 
If one half of the Thred be +constantly illuminated with red, and the other half be illuminated with +all the Colours successively, (which may be done by causing one of the +Prisms to be turned about its Axis whilst the other remains unmoved) +this other half in viewing the Thred through the Prism, will appear in +a continual right Line with the first half when illuminated with red, +and begin to be a little divided from it when illuminated with Orange, +and remove farther from it when illuminated with yellow, and still +farther when with green, and farther when with blue, and go yet farther +off when illuminated with Indigo, and farthest when with deep violet. +Which plainly shews, that the Lights of several Colours are more and +more refrangible one than another, in this Order of their Colours, red, +orange, yellow, green, blue, indigo, deep violet; and so proves as well +the first Proposition as the second. + +I caused also the coloured Spectrums PT [in _Fig._ 17.] and MN made in a +dark Chamber by the Refractions of two Prisms to lie in a Right Line end +to end, as was described above in the fifth Experiment, and viewing them +through a third Prism held parallel to their Length, they appeared no +longer in a Right Line, but became broken from one another, as they are +represented at _pt_ and _mn_, the violet end _m_ of the Spectrum _mn_ +being by a greater Refraction translated farther from its former Place +MT than the red end _t_ of the other Spectrum _pt_. + +I farther caused those two Spectrums PT [in _Fig._ 20.] and MN to become +co-incident in an inverted Order of their Colours, the red end of each +falling on the violet end of the other, as they are represented in the +oblong Figure PTMN; and then viewing them through a Prism DH held +parallel to their Length, they appeared not co-incident, as when view'd +with the naked Eye, but in the form of two distinct Spectrums _pt_ and +_mn_ crossing one another in the middle after the manner of the Letter +X. 
Which shews that the red of the one Spectrum and violet of the other, +which were co-incident at PN and MT, being parted from one another by a +greater Refraction of the violet to _p_ and _m_ than of the red to _n_ +and _t_, do differ in degrees of Refrangibility. + +I illuminated also a little Circular Piece of white Paper all over with +the Lights of both Prisms intermixed, and when it was illuminated with +the red of one Spectrum, and deep violet of the other, so as by the +Mixture of those Colours to appear all over purple, I viewed the Paper, +first at a less distance, and then at a greater, through a third Prism; +and as I went from the Paper, the refracted Image thereof became more +and more divided by the unequal Refraction of the two mixed Colours, and +at length parted into two distinct Images, a red one and a violet one, +whereof the violet was farthest from the Paper, and therefore suffered +the greatest Refraction. And when that Prism at the Window, which cast +the violet on the Paper was taken away, the violet Image disappeared; +but when the other Prism was taken away the red vanished; which shews, +that these two Images were nothing else than the Lights of the two +Prisms, which had been intermixed on the purple Paper, but were parted +again by their unequal Refractions made in the third Prism, through +which the Paper was view'd. This also was observable, that if one of the +Prisms at the Window, suppose that which cast the violet on the Paper, +was turned about its Axis to make all the Colours in this order, +violet, indigo, blue, green, yellow, orange, red, fall successively on +the Paper from that Prism, the violet Image changed Colour accordingly, +turning successively to indigo, blue, green, yellow and red, and in +changing Colour came nearer and nearer to the red Image made by the +other Prism, until when it was also red both Images became fully +co-incident. 
+ +I placed also two Paper Circles very near one another, the one in the +red Light of one Prism, and the other in the violet Light of the other. +The Circles were each of them an Inch in diameter, and behind them the +Wall was dark, that the Experiment might not be disturbed by any Light +coming from thence. These Circles thus illuminated, I viewed through a +Prism, so held, that the Refraction might be made towards the red +Circle, and as I went from them they came nearer and nearer together, +and at length became co-incident; and afterwards when I went still +farther off, they parted again in a contrary Order, the violet by a +greater Refraction being carried beyond the red. + +_Exper._ 8. In Summer, when the Sun's Light uses to be strongest, I +placed a Prism at the Hole of the Window-shut, as in the third +Experiment, yet so that its Axis might be parallel to the Axis of the +World, and at the opposite Wall in the Sun's refracted Light, I placed +an open Book. Then going six Feet and two Inches from the Book, I placed +there the above-mentioned Lens, by which the Light reflected from the +Book might be made to converge and meet again at the distance of six +Feet and two Inches behind the Lens, and there paint the Species of the +Book upon a Sheet of white Paper much after the manner of the second +Experiment. 
The Book and Lens being made fast, I noted the Place where +the Paper was, when the Letters of the Book, illuminated by the fullest +red Light of the Solar Image falling upon it, did cast their Species on +that Paper most distinctly: And then I stay'd till by the Motion of the +Sun, and consequent Motion of his Image on the Book, all the Colours +from that red to the middle of the blue pass'd over those Letters; and +when those Letters were illuminated by that blue, I noted again the +Place of the Paper when they cast their Species most distinctly upon it: +And I found that this last Place of the Paper was nearer to the Lens +than its former Place by about two Inches and an half, or two and three +quarters. So much sooner therefore did the Light in the violet end of +the Image by a greater Refraction converge and meet, than the Light in +the red end. But in trying this, the Chamber was as dark as I could make +it. For, if these Colours be diluted and weakned by the Mixture of any +adventitious Light, the distance between the Places of the Paper will +not be so great. This distance in the second Experiment, where the +Colours of natural Bodies were made use of, was but an Inch and an half, +by reason of the Imperfection of those Colours. Here in the Colours of +the Prism, which are manifestly more full, intense, and lively than +those of natural Bodies, the distance is two Inches and three quarters. +And were the Colours still more full, I question not but that the +distance would be considerably greater. 
For the coloured Light of the +Prism, by the interfering of the Circles described in the second Figure +of the fifth Experiment, and also by the Light of the very bright Clouds +next the Sun's Body intermixing with these Colours, and by the Light +scattered by the Inequalities in the Polish of the Prism, was so very +much compounded, that the Species which those faint and dark Colours, +the indigo and violet, cast upon the Paper were not distinct enough to +be well observed. + +_Exper._ 9. A Prism, whose two Angles at its Base were equal to one +another, and half right ones, and the third a right one, I placed in a +Beam of the Sun's Light let into a dark Chamber through a Hole in the +Window-shut, as in the third Experiment. And turning the Prism slowly +about its Axis, until all the Light which went through one of its +Angles, and was refracted by it began to be reflected by its Base, at +which till then it went out of the Glass, I observed that those Rays +which had suffered the greatest Refraction were sooner reflected than +the rest. I conceived therefore, that those Rays of the reflected Light, +which were most refrangible, did first of all by a total Reflexion +become more copious in that Light than the rest, and that afterwards the +rest also, by a total Reflexion, became as copious as these. To try +this, I made the reflected Light pass through another Prism, and being +refracted by it to fall afterwards upon a Sheet of white Paper placed +at some distance behind it, and there by that Refraction to paint the +usual Colours of the Prism. 
And then causing the first Prism to be +turned about its Axis as above, I observed that when those Rays, which +in this Prism had suffered the greatest Refraction, and appeared of a +blue and violet Colour began to be totally reflected, the blue and +violet Light on the Paper, which was most refracted in the second Prism, +received a sensible Increase above that of the red and yellow, which was +least refracted; and afterwards, when the rest of the Light which was +green, yellow, and red, began to be totally reflected in the first +Prism, the Light of those Colours on the Paper received as great an +Increase as the violet and blue had done before. Whence 'tis manifest, +that the Beam of Light reflected by the Base of the Prism, being +augmented first by the more refrangible Rays, and afterwards by the less +refrangible ones, is compounded of Rays differently refrangible. And +that all such reflected Light is of the same Nature with the Sun's Light +before its Incidence on the Base of the Prism, no Man ever doubted; it +being generally allowed, that Light by such Reflexions suffers no +Alteration in its Modifications and Properties. I do not here take +Notice of any Refractions made in the sides of the first Prism, because +the Light enters it perpendicularly at the first side, and goes out +perpendicularly at the second side, and therefore suffers none. So then, +the Sun's incident Light being of the same Temper and Constitution with +his emergent Light, and the last being compounded of Rays differently +refrangible, the first must be in like manner compounded. + +[Illustration: FIG. 21.] 
+ +_Illustration._ In the twenty-first Figure, ABC is the first Prism, BC +its Base, B and C its equal Angles at the Base, each of 45 Degrees, A +its rectangular Vertex, FM a beam of the Sun's Light let into a dark +Room through a hole F one third part of an Inch broad, M its Incidence +on the Base of the Prism, MG a less refracted Ray, MH a more refracted +Ray, MN the beam of Light reflected from the Base, VXY the second Prism +by which this beam in passing through it is refracted, N_t_ the less +refracted Light of this beam, and N_p_ the more refracted part thereof. +When the first Prism ABC is turned about its Axis according to the order +of the Letters ABC, the Rays MH emerge more and more obliquely out of +that Prism, and at length after their most oblique Emergence are +reflected towards N, and going on to _p_ do increase the Number of the +Rays N_p_. Afterwards by continuing the Motion of the first Prism, the +Rays MG are also reflected to N and increase the number of the Rays +N_t_. And therefore the Light MN admits into its Composition, first the +more refrangible Rays, and then the less refrangible Rays, and yet after +this Composition is of the same Nature with the Sun's immediate Light +FM, the Reflexion of the specular Base BC causing no Alteration therein. + +_Exper._ 10. Two Prisms, which were alike in Shape, I tied so together, +that their Axis and opposite Sides being parallel, they composed a +Parallelopiped. And, the Sun shining into my dark Chamber through a +little hole in the Window-shut, I placed that Parallelopiped in his beam +at some distance from the hole, in such a Posture, that the Axes of the +Prisms might be perpendicular to the incident Rays, and that those Rays +being incident upon the first Side of one Prism, might go on through the +two contiguous Sides of both Prisms, and emerge out of the last Side of +the second Prism. 
This Side being parallel to the first Side of the +first Prism, caused the emerging Light to be parallel to the incident. +Then, beyond these two Prisms I placed a third, which might refract that +emergent Light, and by that Refraction cast the usual Colours of the +Prism upon the opposite Wall, or upon a sheet of white Paper held at a +convenient Distance behind the Prism for that refracted Light to fall +upon it. After this I turned the Parallelopiped about its Axis, and +found that when the contiguous Sides of the two Prisms became so oblique +to the incident Rays, that those Rays began all of them to be +reflected, those Rays which in the third Prism had suffered the greatest +Refraction, and painted the Paper with violet and blue, were first of +all by a total Reflexion taken out of the transmitted Light, the rest +remaining and on the Paper painting their Colours of green, yellow, +orange and red, as before; and afterwards by continuing the Motion of +the two Prisms, the rest of the Rays also by a total Reflexion vanished +in order, according to their degrees of Refrangibility. The Light +therefore which emerged out of the two Prisms is compounded of Rays +differently refrangible, seeing the more refrangible Rays may be taken +out of it, while the less refrangible remain. But this Light being +trajected only through the parallel Superficies of the two Prisms, if it +suffer'd any change by the Refraction of one Superficies it lost that +Impression by the contrary Refraction of the other Superficies, and so +being restor'd to its pristine Constitution, became of the same Nature +and Condition as at first before its Incidence on those Prisms; and +therefore, before its Incidence, was as much compounded of Rays +differently refrangible, as afterwards. + +[Illustration: FIG. 22.] 
+ +_Illustration._ In the twenty second Figure ABC and BCD are the two +Prisms tied together in the form of a Parallelopiped, their Sides BC and +CB being contiguous, and their Sides AB and CD parallel. And HJK is the +third Prism, by which the Sun's Light propagated through the hole F into +the dark Chamber, and there passing through those sides of the Prisms +AB, BC, CB and CD, is refracted at O to the white Paper PT, falling +there partly upon P by a greater Refraction, partly upon T by a less +Refraction, and partly upon R and other intermediate places by +intermediate Refractions. By turning the Parallelopiped ACBD about its +Axis, according to the order of the Letters A, C, D, B, at length when +the contiguous Planes BC and CB become sufficiently oblique to the Rays +FM, which are incident upon them at M, there will vanish totally out of +the refracted Light OPT, first of all the most refracted Rays OP, (the +rest OR and OT remaining as before) then the Rays OR and other +intermediate ones, and lastly, the least refracted Rays OT. For when +the Plane BC becomes sufficiently oblique to the Rays incident upon it, +those Rays will begin to be totally reflected by it towards N; and first +the most refrangible Rays will be totally reflected (as was explained in +the preceding Experiment) and by Consequence must first disappear at P, +and afterwards the rest as they are in order totally reflected to N, +they must disappear in the same order at R and T. So then the Rays which +at O suffer the greatest Refraction, may be taken out of the Light MO +whilst the rest of the Rays remain in it, and therefore that Light MO is +compounded of Rays differently refrangible. And because the Planes AB +and CD are parallel, and therefore by equal and contrary Refractions +destroy one anothers Effects, the incident Light FM must be of the same +Kind and Nature with the emergent Light MO, and therefore doth also +consist of Rays differently refrangible. 
These two Lights FM and MO, +before the most refrangible Rays are separated out of the emergent Light +MO, agree in Colour, and in all other Properties so far as my +Observation reaches, and therefore are deservedly reputed of the same +Nature and Constitution, and by Consequence the one is compounded as +well as the other. But after the most refrangible Rays begin to be +totally reflected, and thereby separated out of the emergent Light MO, +that Light changes its Colour from white to a dilute and faint yellow, a +pretty good orange, a very full red successively, and then totally +vanishes. For after the most refrangible Rays which paint the Paper at +P with a purple Colour, are by a total Reflexion taken out of the beam +of Light MO, the rest of the Colours which appear on the Paper at R and +T being mix'd in the Light MO compound there a faint yellow, and after +the blue and part of the green which appear on the Paper between P and R +are taken away, the rest which appear between R and T (that is the +yellow, orange, red and a little green) being mixed in the beam MO +compound there an orange; and when all the Rays are by Reflexion taken +out of the beam MO, except the least refrangible, which at T appear of a +full red, their Colour is the same in that beam MO as afterwards at T, +the Refraction of the Prism HJK serving only to separate the differently +refrangible Rays, without making any Alteration in their Colours, as +shall be more fully proved hereafter. All which confirms as well the +first Proposition as the second. + +_Scholium._ If this Experiment and the former be conjoined and made one +by applying a fourth Prism VXY [in _Fig._ 22.] to refract the reflected +beam MN towards _tp_, the Conclusion will be clearer. 
For then the Light +N_p_ which in the fourth Prism is more refracted, will become fuller and +stronger when the Light OP, which in the third Prism HJK is more +refracted, vanishes at P; and afterwards when the less refracted Light +OT vanishes at T, the less refracted Light N_t_ will become increased +whilst the more refracted Light at _p_ receives no farther increase. And +as the trajected beam MO in vanishing is always of such a Colour as +ought to result from the mixture of the Colours which fall upon the +Paper PT, so is the reflected beam MN always of such a Colour as ought +to result from the mixture of the Colours which fall upon the Paper +_pt_. For when the most refrangible Rays are by a total Reflexion taken +out of the beam MO, and leave that beam of an orange Colour, the Excess +of those Rays in the reflected Light, does not only make the violet, +indigo and blue at _p_ more full, but also makes the beam MN change from +the yellowish Colour of the Sun's Light, to a pale white inclining to +blue, and afterward recover its yellowish Colour again, so soon as all +the rest of the transmitted Light MOT is reflected. 
+ +Now seeing that in all this variety of Experiments, whether the Trial be +made in Light reflected, and that either from natural Bodies, as in the +first and second Experiment, or specular, as in the ninth; or in Light +refracted, and that either before the unequally refracted Rays are by +diverging separated from one another, and losing their whiteness which +they have altogether, appear severally of several Colours, as in the +fifth Experiment; or after they are separated from one another, and +appear colour'd as in the sixth, seventh, and eighth Experiments; or in +Light trajected through parallel Superficies, destroying each others +Effects, as in the tenth Experiment; there are always found Rays, which +at equal Incidences on the same Medium suffer unequal Refractions, and +that without any splitting or dilating of single Rays, or contingence in +the inequality of the Refractions, as is proved in the fifth and sixth +Experiments. And seeing the Rays which differ in Refrangibility may be +parted and sorted from one another, and that either by Refraction as in +the third Experiment, or by Reflexion as in the tenth, and then the +several sorts apart at equal Incidences suffer unequal Refractions, and +those sorts are more refracted than others after Separation, which were +more refracted before it, as in the sixth and following Experiments, and +if the Sun's Light be trajected through three or more cross Prisms +successively, those Rays which in the first Prism are refracted more +than others, are in all the following Prisms refracted more than others +in the same Rate and Proportion, as appears by the fifth Experiment; +it's manifest that the Sun's Light is an heterogeneous Mixture of Rays, +some of which are constantly more refrangible than others, as was +proposed. + + +_PROP._ III. THEOR. III. 
+ +_The Sun's Light consists of Rays differing in Reflexibility, and those +Rays are more reflexible than others which are more refrangible._ + +This is manifest by the ninth and tenth Experiments: For in the ninth +Experiment, by turning the Prism about its Axis, until the Rays within +it which in going out into the Air were refracted by its Base, became so +oblique to that Base, as to begin to be totally reflected thereby; those +Rays became first of all totally reflected, which before at equal +Incidences with the rest had suffered the greatest Refraction. And the +same thing happens in the Reflexion made by the common Base of the two +Prisms in the tenth Experiment. + + +_PROP._ IV. PROB. I. + +_To separate from one another the heterogeneous Rays of compound Light._ + +[Illustration: FIG. 23.] + +The heterogeneous Rays are in some measure separated from one another by +the Refraction of the Prism in the third Experiment, and in the fifth +Experiment, by taking away the Penumbra from the rectilinear sides of +the coloured Image, that Separation in those very rectilinear sides or +straight edges of the Image becomes perfect. But in all places between +those rectilinear edges, those innumerable Circles there described, +which are severally illuminated by homogeneal Rays, by interfering with +one another, and being every where commix'd, do render the Light +sufficiently compound. But if these Circles, whilst their Centers keep +their Distances and Positions, could be made less in Diameter, their +interfering one with another, and by Consequence the Mixture of the +heterogeneous Rays would be proportionally diminish'd. 
In the twenty +third Figure let AG, BH, CJ, DK, EL, FM be the Circles which so many +sorts of Rays flowing from the same disque of the Sun, do in the third +Experiment illuminate; of all which and innumerable other intermediate +ones lying in a continual Series between the two rectilinear and +parallel edges of the Sun's oblong Image PT, that Image is compos'd, as +was explained in the fifth Experiment. And let _ag_, _bh_, _ci_, _dk_, +_el_, _fm_ be so many less Circles lying in a like continual Series +between two parallel right Lines _af_ and _gm_ with the same distances +between their Centers, and illuminated by the same sorts of Rays, that +is the Circle _ag_ with the same sort by which the corresponding Circle +AG was illuminated, and the Circle _bh_ with the same sort by which the +corresponding Circle BH was illuminated, and the rest of the Circles +_ci_, _dk_, _el_, _fm_ respectively, with the same sorts of Rays by +which the several corresponding Circles CJ, DK, EL, FM were illuminated. +In the Figure PT composed of the greater Circles, three of those Circles +AG, BH, CJ, are so expanded into one another, that the three sorts of +Rays by which those Circles are illuminated, together with other +innumerable sorts of intermediate Rays, are mixed at QR in the middle +of the Circle BH. And the like Mixture happens throughout almost the +whole length of the Figure PT. But in the Figure _pt_ composed of the +less Circles, the three less Circles _ag_, _bh_, _ci_, which answer to +those three greater, do not extend into one another; nor are there any +where mingled so much as any two of the three sorts of Rays by which +those Circles are illuminated, and which in the Figure PT are all of +them intermingled at BH. + +Now he that shall thus consider it, will easily understand that the +Mixture is diminished in the same Proportion with the Diameters of the +Circles. 
If the Diameters of the Circles whilst their Centers remain the
+same, be made three times less than before, the Mixture will be also
+three times less; if ten times less, the Mixture will be ten times less,
+and so of other Proportions. That is, the Mixture of the Rays in the
+greater Figure PT will be to their Mixture in the less _pt_, as the
+Latitude of the greater Figure is to the Latitude of the less. For the
+Latitudes of these Figures are equal to the Diameters of their Circles.
+And hence it easily follows, that the Mixture of the Rays in the
+refracted Spectrum _pt_ is to the Mixture of the Rays in the direct and
+immediate Light of the Sun, as the breadth of that Spectrum is to the
+difference between the length and breadth of the same Spectrum.
+
+So then, if we would diminish the Mixture of the Rays, we are to
+diminish the Diameters of the Circles. Now these would be diminished if
+the Sun's Diameter to which they answer could be made less than it is,
+or (which comes to the same Purpose) if without Doors, at a great
+distance from the Prism towards the Sun, some opake Body were placed,
+with a round hole in the middle of it, to intercept all the Sun's Light,
+excepting so much as coming from the middle of his Body could pass
+through that Hole to the Prism. For so the Circles AG, BH, and the rest,
+would not any longer answer to the whole Disque of the Sun, but only to
+that Part of it which could be seen from the Prism through that Hole,
+that is to say, to the apparent Magnitude of that Hole view'd from the
+Prism. But that these Circles may answer more distinctly to that Hole, a
+Lens is to be placed by the Prism to cast the Image of the Hole, (that
+is, every one of the Circles AG, BH, &c.) 
distinctly upon the Paper at PT, +after such a manner, as by a Lens placed at a Window, the Species of +Objects abroad are cast distinctly upon a Paper within the Room, and the +rectilinear Sides of the oblong Solar Image in the fifth Experiment +became distinct without any Penumbra. If this be done, it will not be +necessary to place that Hole very far off, no not beyond the Window. And +therefore instead of that Hole, I used the Hole in the Window-shut, as +follows. + +_Exper._ 11. In the Sun's Light let into my darken'd Chamber through a +small round Hole in my Window-shut, at about ten or twelve Feet from the +Window, I placed a Lens, by which the Image of the Hole might be +distinctly cast upon a Sheet of white Paper, placed at the distance of +six, eight, ten, or twelve Feet from the Lens. For, according to the +difference of the Lenses I used various distances, which I think not +worth the while to describe. Then immediately after the Lens I placed a +Prism, by which the trajected Light might be refracted either upwards or +sideways, and thereby the round Image, which the Lens alone did cast +upon the Paper might be drawn out into a long one with Parallel Sides, +as in the third Experiment. This oblong Image I let fall upon another +Paper at about the same distance from the Prism as before, moving the +Paper either towards the Prism or from it, until I found the just +distance where the Rectilinear Sides of the Image became most distinct. +For in this Case, the Circular Images of the Hole, which compose that +Image after the same manner that the Circles _ag_, _bh_, _ci_, &c. do +the Figure _pt_ [in _Fig._ 23.] were terminated most distinctly without +any Penumbra, and therefore extended into one another the least that +they could, and by consequence the Mixture of the heterogeneous Rays was +now the least of all. By this means I used to form an oblong Image (such +as is _pt_) [in _Fig._ 23, and 24.] 
of Circular Images of the Hole, +(such as are _ag_, _bh_, _ci_, &c.) and by using a greater or less Hole +in the Window-shut, I made the Circular Images _ag_, _bh_, _ci_, &c. of +which it was formed, to become greater or less at pleasure, and thereby +the Mixture of the Rays in the Image _pt_ to be as much, or as little as +I desired. + +[Illustration: FIG. 24.] + +_Illustration._ In the twenty-fourth Figure, F represents the Circular +Hole in the Window-shut, MN the Lens, whereby the Image or Species of +that Hole is cast distinctly upon a Paper at J, ABC the Prism, whereby +the Rays are at their emerging out of the Lens refracted from J towards +another Paper at _pt_, and the round Image at J is turned into an oblong +Image _pt_ falling on that other Paper. This Image _pt_ consists of +Circles placed one after another in a Rectilinear Order, as was +sufficiently explained in the fifth Experiment; and these Circles are +equal to the Circle J, and consequently answer in magnitude to the Hole +F; and therefore by diminishing that Hole they may be at pleasure +diminished, whilst their Centers remain in their Places. By this means I +made the Breadth of the Image _pt_ to be forty times, and sometimes +sixty or seventy times less than its Length. As for instance, if the +Breadth of the Hole F be one tenth of an Inch, and MF the distance of +the Lens from the Hole be 12 Feet; and if _p_B or _p_M the distance of +the Image _pt_ from the Prism or Lens be 10 Feet, and the refracting +Angle of the Prism be 62 Degrees, the Breadth of the Image _pt_ will be +one twelfth of an Inch, and the Length about six Inches, and therefore +the Length to the Breadth as 72 to 1, and by consequence the Light of +this Image 71 times less compound than the Sun's direct Light. And Light +thus far simple and homogeneal, is sufficient for trying all the +Experiments in this Book about simple Light. 
For the Composition of +heterogeneal Rays is in this Light so little, that it is scarce to be +discovered and perceiv'd by Sense, except perhaps in the indigo and +violet. For these being dark Colours do easily suffer a sensible Allay +by that little scattering Light which uses to be refracted irregularly +by the Inequalities of the Prism. + +Yet instead of the Circular Hole F, 'tis better to substitute an oblong +Hole shaped like a long Parallelogram with its Length parallel to the +Prism ABC. For if this Hole be an Inch or two long, and but a tenth or +twentieth Part of an Inch broad, or narrower; the Light of the Image +_pt_ will be as simple as before, or simpler, and the Image will become +much broader, and therefore more fit to have Experiments try'd in its +Light than before. + +Instead of this Parallelogram Hole may be substituted a triangular one +of equal Sides, whose Base, for instance, is about the tenth Part of an +Inch, and its Height an Inch or more. For by this means, if the Axis of +the Prism be parallel to the Perpendicular of the Triangle, the Image +_pt_ [in _Fig._ 25.] will now be form'd of equicrural Triangles _ag_, +_bh_, _ci_, _dk_, _el_, _fm_, &c. and innumerable other intermediate +ones answering to the triangular Hole in Shape and Bigness, and lying +one after another in a continual Series between two Parallel Lines _af_ +and _gm_. These Triangles are a little intermingled at their Bases, but +not at their Vertices; and therefore the Light on the brighter Side _af_ +of the Image, where the Bases of the Triangles are, is a little +compounded, but on the darker Side _gm_ is altogether uncompounded, and +in all Places between the Sides the Composition is proportional to the +distances of the Places from that obscurer Side _gm_. 
And having a +Spectrum _pt_ of such a Composition, we may try Experiments either in +its stronger and less simple Light near the Side _af_, or in its weaker +and simpler Light near the other Side _gm_, as it shall seem most +convenient. + +[Illustration: FIG. 25.] + +But in making Experiments of this kind, the Chamber ought to be made as +dark as can be, lest any Foreign Light mingle it self with the Light of +the Spectrum _pt_, and render it compound; especially if we would try +Experiments in the more simple Light next the Side _gm_ of the Spectrum; +which being fainter, will have a less proportion to the Foreign Light; +and so by the mixture of that Light be more troubled, and made more +compound. The Lens also ought to be good, such as may serve for optical +Uses, and the Prism ought to have a large Angle, suppose of 65 or 70 +Degrees, and to be well wrought, being made of Glass free from Bubbles +and Veins, with its Sides not a little convex or concave, as usually +happens, but truly plane, and its Polish elaborate, as in working +Optick-glasses, and not such as is usually wrought with Putty, whereby +the edges of the Sand-holes being worn away, there are left all over the +Glass a numberless Company of very little convex polite Risings like +Waves. The edges also of the Prism and Lens, so far as they may make any +irregular Refraction, must be covered with a black Paper glewed on. And +all the Light of the Sun's Beam let into the Chamber, which is useless +and unprofitable to the Experiment, ought to be intercepted with black +Paper, or other black Obstacles. For otherwise the useless Light being +reflected every way in the Chamber, will mix with the oblong Spectrum, +and help to disturb it. In trying these Things, so much diligence is not +altogether necessary, but it will promote the Success of the +Experiments, and by a very scrupulous Examiner of Things deserves to be +apply'd. 
It's difficult to get Glass Prisms fit for this Purpose, and +therefore I used sometimes prismatick Vessels made with pieces of broken +Looking-glasses, and filled with Rain Water. And to increase the +Refraction, I sometimes impregnated the Water strongly with _Saccharum +Saturni_. + + +_PROP._ V. THEOR. IV. + +_Homogeneal Light is refracted regularly without any Dilatation +splitting or shattering of the Rays, and the confused Vision of Objects +seen through refracting Bodies by heterogeneal Light arises from the +different Refrangibility of several sorts of Rays._ + +The first Part of this Proposition has been already sufficiently proved +in the fifth Experiment, and will farther appear by the Experiments +which follow. + +_Exper._ 12. In the middle of a black Paper I made a round Hole about a +fifth or sixth Part of an Inch in diameter. Upon this Paper I caused the +Spectrum of homogeneal Light described in the former Proposition, so to +fall, that some part of the Light might pass through the Hole of the +Paper. This transmitted part of the Light I refracted with a Prism +placed behind the Paper, and letting this refracted Light fall +perpendicularly upon a white Paper two or three Feet distant from the +Prism, I found that the Spectrum formed on the Paper by this Light was +not oblong, as when 'tis made (in the third Experiment) by refracting +the Sun's compound Light, but was (so far as I could judge by my Eye) +perfectly circular, the Length being no greater than the Breadth. Which +shews, that this Light is refracted regularly without any Dilatation of +the Rays. + +_Exper._ 13. In the homogeneal Light I placed a Paper Circle of a +quarter of an Inch in diameter, and in the Sun's unrefracted +heterogeneal white Light I placed another Paper Circle of the same +Bigness. And going from the Papers to the distance of some Feet, I +viewed both Circles through a Prism. 
The Circle illuminated by the Sun's +heterogeneal Light appeared very oblong, as in the fourth Experiment, +the Length being many times greater than the Breadth; but the other +Circle, illuminated with homogeneal Light, appeared circular and +distinctly defined, as when 'tis view'd with the naked Eye. Which proves +the whole Proposition. + +_Exper._ 14. In the homogeneal Light I placed Flies, and such-like +minute Objects, and viewing them through a Prism, I saw their Parts as +distinctly defined, as if I had viewed them with the naked Eye. The same +Objects placed in the Sun's unrefracted heterogeneal Light, which was +white, I viewed also through a Prism, and saw them most confusedly +defined, so that I could not distinguish their smaller Parts from one +another. I placed also the Letters of a small print, one while in the +homogeneal Light, and then in the heterogeneal, and viewing them through +a Prism, they appeared in the latter Case so confused and indistinct, +that I could not read them; but in the former they appeared so distinct, +that I could read readily, and thought I saw them as distinct, as when I +view'd them with my naked Eye. In both Cases I view'd the same Objects, +through the same Prism at the same distance from me, and in the same +Situation. There was no difference, but in the Light by which the +Objects were illuminated, and which in one Case was simple, and in the +other compound; and therefore, the distinct Vision in the former Case, +and confused in the latter, could arise from nothing else than from that +difference of the Lights. Which proves the whole Proposition. + +And in these three Experiments it is farther very remarkable, that the +Colour of homogeneal Light was never changed by the Refraction. + + +_PROP._ VI. THEOR. V. 
+ +_The Sine of Incidence of every Ray considered apart, is to its Sine of +Refraction in a given Ratio._ + +That every Ray consider'd apart, is constant to it self in some degree +of Refrangibility, is sufficiently manifest out of what has been said. +Those Rays, which in the first Refraction, are at equal Incidences most +refracted, are also in the following Refractions at equal Incidences +most refracted; and so of the least refrangible, and the rest which have +any mean Degree of Refrangibility, as is manifest by the fifth, sixth, +seventh, eighth, and ninth Experiments. And those which the first Time +at like Incidences are equally refracted, are again at like Incidences +equally and uniformly refracted, and that whether they be refracted +before they be separated from one another, as in the fifth Experiment, +or whether they be refracted apart, as in the twelfth, thirteenth and +fourteenth Experiments. The Refraction therefore of every Ray apart is +regular, and what Rule that Refraction observes we are now to shew.[E] + +The late Writers in Opticks teach, that the Sines of Incidence are in a +given Proportion to the Sines of Refraction, as was explained in the +fifth Axiom, and some by Instruments fitted for measuring of +Refractions, or otherwise experimentally examining this Proportion, do +acquaint us that they have found it accurate. But whilst they, not +understanding the different Refrangibility of several Rays, conceived +them all to be refracted according to one and the same Proportion, 'tis +to be presumed that they adapted their Measures only to the middle of +the refracted Light; so that from their Measures we may conclude only +that the Rays which have a mean Degree of Refrangibility, that is, those +which when separated from the rest appear green, are refracted according +to a given Proportion of their Sines. And therefore we are now to shew, +that the like given Proportions obtain in all the rest. 
That it should +be so is very reasonable, Nature being ever conformable to her self; but +an experimental Proof is desired. And such a Proof will be had, if we +can shew that the Sines of Refraction of Rays differently refrangible +are one to another in a given Proportion when their Sines of Incidence +are equal. For, if the Sines of Refraction of all the Rays are in given +Proportions to the Sine of Refractions of a Ray which has a mean Degree +of Refrangibility, and this Sine is in a given Proportion to the equal +Sines of Incidence, those other Sines of Refraction will also be in +given Proportions to the equal Sines of Incidence. Now, when the Sines +of Incidence are equal, it will appear by the following Experiment, that +the Sines of Refraction are in a given Proportion to one another. + +[Illustration: FIG. 26.] + +_Exper._ 15. The Sun shining into a dark Chamber through a little round +Hole in the Window-shut, let S [in _Fig._ 26.] represent his round white +Image painted on the opposite Wall by his direct Light, PT his oblong +coloured Image made by refracting that Light with a Prism placed at the +Window; and _pt_, or _2p 2t_, _3p 3t_, his oblong colour'd Image made by +refracting again the same Light sideways with a second Prism placed +immediately after the first in a cross Position to it, as was explained +in the fifth Experiment; that is to say, _pt_ when the Refraction of the +second Prism is small, _2p 2t_ when its Refraction is greater, and _3p +3t_ when it is greatest. For such will be the diversity of the +Refractions, if the refracting Angle of the second Prism be of various +Magnitudes; suppose of fifteen or twenty Degrees to make the Image _pt_, +of thirty or forty to make the Image _2p 2t_, and of sixty to make the +Image _3p 3t_. But for want of solid Glass Prisms with Angles of +convenient Bignesses, there may be Vessels made of polished Plates of +Glass cemented together in the form of Prisms and filled with Water. 
+These things being thus ordered, I observed that all the solar Images or +coloured Spectrums PT, _pt_, _2p 2t_, _3p 3t_ did very nearly converge +to the place S on which the direct Light of the Sun fell and painted his +white round Image when the Prisms were taken away. The Axis of the +Spectrum PT, that is the Line drawn through the middle of it parallel to +its rectilinear Sides, did when produced pass exactly through the middle +of that white round Image S. And when the Refraction of the second Prism +was equal to the Refraction of the first, the refracting Angles of them +both being about 60 Degrees, the Axis of the Spectrum _3p 3t_ made by +that Refraction, did when produced pass also through the middle of the +same white round Image S. But when the Refraction of the second Prism +was less than that of the first, the produced Axes of the Spectrums _tp_ +or _2t 2p_ made by that Refraction did cut the produced Axis of the +Spectrum TP in the points _m_ and _n_, a little beyond the Center of +that white round Image S. Whence the proportion of the Line 3_t_T to the +Line 3_p_P was a little greater than the Proportion of 2_t_T or 2_p_P, +and this Proportion a little greater than that of _t_T to _p_P. Now when +the Light of the Spectrum PT falls perpendicularly upon the Wall, those +Lines 3_t_T, 3_p_P, and 2_t_T, and 2_p_P, and _t_T, _p_P, are the +Tangents of the Refractions, and therefore by this Experiment the +Proportions of the Tangents of the Refractions are obtained, from whence +the Proportions of the Sines being derived, they come out equal, so far +as by viewing the Spectrums, and using some mathematical Reasoning I +could estimate. For I did not make an accurate Computation. So then the +Proposition holds true in every Ray apart, so far as appears by +Experiment. And that it is accurately true, may be demonstrated upon +this Supposition. 
_That Bodies refract Light by acting upon its Rays in +Lines perpendicular to their Surfaces._ But in order to this +Demonstration, I must distinguish the Motion of every Ray into two +Motions, the one perpendicular to the refracting Surface, the other +parallel to it, and concerning the perpendicular Motion lay down the +following Proposition. + +If any Motion or moving thing whatsoever be incident with any Velocity +on any broad and thin space terminated on both sides by two parallel +Planes, and in its Passage through that space be urged perpendicularly +towards the farther Plane by any force which at given distances from the +Plane is of given Quantities; the perpendicular velocity of that Motion +or Thing, at its emerging out of that space, shall be always equal to +the square Root of the sum of the square of the perpendicular velocity +of that Motion or Thing at its Incidence on that space; and of the +square of the perpendicular velocity which that Motion or Thing would +have at its Emergence, if at its Incidence its perpendicular velocity +was infinitely little. + +And the same Proposition holds true of any Motion or Thing +perpendicularly retarded in its passage through that space, if instead +of the sum of the two Squares you take their difference. The +Demonstration Mathematicians will easily find out, and therefore I shall +not trouble the Reader with it. + +Suppose now that a Ray coming most obliquely in the Line MC [in _Fig._ +1.] 
be refracted at C by the Plane RS into the Line CN, and if it be +required to find the Line CE, into which any other Ray AC shall be +refracted; let MC, AD, be the Sines of Incidence of the two Rays, and +NG, EF, their Sines of Refraction, and let the equal Motions of the +incident Rays be represented by the equal Lines MC and AC, and the +Motion MC being considered as parallel to the refracting Plane, let the +other Motion AC be distinguished into two Motions AD and DC, one of +which AD is parallel, and the other DC perpendicular to the refracting +Surface. In like manner, let the Motions of the emerging Rays be +distinguish'd into two, whereof the perpendicular ones are MC/NG × CG +and AD/EF × CF. And if the force of the refracting Plane begins to act +upon the Rays either in that Plane or at a certain distance from it on +the one side, and ends at a certain distance from it on the other side, +and in all places between those two limits acts upon the Rays in Lines +perpendicular to that refracting Plane, and the Actions upon the Rays at +equal distances from the refracting Plane be equal, and at unequal ones +either equal or unequal according to any rate whatever; that Motion of +the Ray which is parallel to the refracting Plane, will suffer no +Alteration by that Force; and that Motion which is perpendicular to it +will be altered according to the rule of the foregoing Proposition. If +therefore for the perpendicular velocity of the emerging Ray CN you +write MC/NG × CG as above, then the perpendicular velocity of any other +emerging Ray CE which was AD/EF × CF, will be equal to the square Root +of CD_q_ + (_MCq/NGq_ × CG_q_). And by squaring these Equals, and adding +to them the Equals AD_q_ and MC_q_ - CD_q_, and dividing the Sums by the +Equals CF_q_ + EF_q_ and CG_q_ + NG_q_, you will have _MCq/NGq_ equal to +_ADq/EFq_. Whence AD, the Sine of Incidence, is to EF the Sine of +Refraction, as MC to NG, that is, in a given _ratio_. 
And this +Demonstration being general, without determining what Light is, or by +what kind of Force it is refracted, or assuming any thing farther than +that the refracting Body acts upon the Rays in Lines perpendicular to +its Surface; I take it to be a very convincing Argument of the full +truth of this Proposition. + +So then, if the _ratio_ of the Sines of Incidence and Refraction of any +sort of Rays be found in any one case, 'tis given in all cases; and this +may be readily found by the Method in the following Proposition. + + +_PROP._ VII. THEOR. VI. + +_The Perfection of Telescopes is impeded by the different Refrangibility +of the Rays of Light._ + +The Imperfection of Telescopes is vulgarly attributed to the spherical +Figures of the Glasses, and therefore Mathematicians have propounded to +figure them by the conical Sections. To shew that they are mistaken, I +have inserted this Proposition; the truth of which will appear by the +measure of the Refractions of the several sorts of Rays; and these +measures I thus determine. + +In the third Experiment of this first Part, where the refracting Angle +of the Prism was 62-1/2 Degrees, the half of that Angle 31 deg. 15 min. +is the Angle of Incidence of the Rays at their going out of the Glass +into the Air[F]; and the Sine of this Angle is 5188, the Radius being +10000. When the Axis of this Prism was parallel to the Horizon, and the +Refraction of the Rays at their Incidence on this Prism equal to that at +their Emergence out of it, I observed with a Quadrant the Angle which +the mean refrangible Rays, (that is those which went to the middle of +the Sun's coloured Image) made with the Horizon, and by this Angle and +the Sun's altitude observed at the same time, I found the Angle which +the emergent Rays contained with the incident to be 44 deg. and 40 min. +and the half of this Angle added to the Angle of Incidence 31 deg. 15 +min. makes the Angle of Refraction, which is therefore 53 deg. 35 min. 
+and its Sine 8047. These are the Sines of Incidence and Refraction of +the mean refrangible Rays, and their Proportion in round Numbers is 20 +to 31. This Glass was of a Colour inclining to green. The last of the +Prisms mentioned in the third Experiment was of clear white Glass. Its +refracting Angle 63-1/2 Degrees. The Angle which the emergent Rays +contained, with the incident 45 deg. 50 min. The Sine of half the first +Angle 5262. The Sine of half the Sum of the Angles 8157. And their +Proportion in round Numbers 20 to 31, as before. + +From the Length of the Image, which was about 9-3/4 or 10 Inches, +subduct its Breadth, which was 2-1/8 Inches, and the Remainder 7-3/4 +Inches would be the Length of the Image were the Sun but a Point, and +therefore subtends the Angle which the most and least refrangible Rays, +when incident on the Prism in the same Lines, do contain with one +another after their Emergence. Whence this Angle is 2 deg. 0´. 7´´. For +the distance between the Image and the Prism where this Angle is made, +was 18-1/2 Feet, and at that distance the Chord 7-3/4 Inches subtends an +Angle of 2 deg. 0´. 7´´. Now half this Angle is the Angle which these +emergent Rays contain with the emergent mean refrangible Rays, and a +quarter thereof, that is 30´. 2´´. may be accounted the Angle which they +would contain with the same emergent mean refrangible Rays, were they +co-incident to them within the Glass, and suffered no other Refraction +than that at their Emergence. For, if two equal Refractions, the one at +the Incidence of the Rays on the Prism, the other at their Emergence, +make half the Angle 2 deg. 0´. 7´´. then one of those Refractions will +make about a quarter of that Angle, and this quarter added to, and +subducted from the Angle of Refraction of the mean refrangible Rays, +which was 53 deg. 35´, gives the Angles of Refraction of the most and +least refrangible Rays 54 deg. 5´ 2´´, and 53 deg. 
4´ 58´´, whose Sines +are 8099 and 7995, the common Angle of Incidence being 31 deg. 15´, and +its Sine 5188; and these Sines in the least round Numbers are in +proportion to one another, as 78 and 77 to 50. + +Now, if you subduct the common Sine of Incidence 50 from the Sines of +Refraction 77 and 78, the Remainders 27 and 28 shew, that in small +Refractions the Refraction of the least refrangible Rays is to the +Refraction of the most refrangible ones, as 27 to 28 very nearly, and +that the difference of the Refractions of the least refrangible and most +refrangible Rays is about the 27-1/2th Part of the whole Refraction of +the mean refrangible Rays. + +Whence they that are skilled in Opticks will easily understand,[G] that +the Breadth of the least circular Space, into which Object-glasses of +Telescopes can collect all sorts of Parallel Rays, is about the 27-1/2th +Part of half the Aperture of the Glass, or 55th Part of the whole +Aperture; and that the Focus of the most refrangible Rays is nearer to +the Object-glass than the Focus of the least refrangible ones, by about +the 27-1/2th Part of the distance between the Object-glass and the Focus +of the mean refrangible ones. + +And if Rays of all sorts, flowing from any one lucid Point in the Axis +of any convex Lens, be made by the Refraction of the Lens to converge to +Points not too remote from the Lens, the Focus of the most refrangible +Rays shall be nearer to the Lens than the Focus of the least refrangible +ones, by a distance which is to the 27-1/2th Part of the distance of the +Focus of the mean refrangible Rays from the Lens, as the distance +between that Focus and the lucid Point, from whence the Rays flow, is to +the distance between that lucid Point and the Lens very nearly. 
+ +Now to examine whether the Difference between the Refractions, which the +most refrangible and the least refrangible Rays flowing from the same +Point suffer in the Object-glasses of Telescopes and such-like Glasses, +be so great as is here described, I contrived the following Experiment. + +_Exper._ 16. The Lens which I used in the second and eighth Experiments, +being placed six Feet and an Inch distant from any Object, collected the +Species of that Object by the mean refrangible Rays at the distance of +six Feet and an Inch from the Lens on the other side. And therefore by +the foregoing Rule, it ought to collect the Species of that Object by +the least refrangible Rays at the distance of six Feet and 3-2/3 Inches +from the Lens, and by the most refrangible ones at the distance of five +Feet and 10-1/3 Inches from it: So that between the two Places, where +these least and most refrangible Rays collect the Species, there may be +the distance of about 5-1/3 Inches. For by that Rule, as six Feet and an +Inch (the distance of the Lens from the lucid Object) is to twelve Feet +and two Inches (the distance of the lucid Object from the Focus of the +mean refrangible Rays) that is, as One is to Two; so is the 27-1/2th +Part of six Feet and an Inch (the distance between the Lens and the same +Focus) to the distance between the Focus of the most refrangible Rays +and the Focus of the least refrangible ones, which is therefore 5-17/55 +Inches, that is very nearly 5-1/3 Inches. Now to know whether this +Measure was true, I repeated the second and eighth Experiment with +coloured Light, which was less compounded than that I there made use of: +For I now separated the heterogeneous Rays from one another by the +Method I described in the eleventh Experiment, so as to make a coloured +Spectrum about twelve or fifteen Times longer than broad. 
This Spectrum +I cast on a printed Book, and placing the above-mentioned Lens at the +distance of six Feet and an Inch from this Spectrum to collect the +Species of the illuminated Letters at the same distance on the other +side, I found that the Species of the Letters illuminated with blue were +nearer to the Lens than those illuminated with deep red by about three +Inches, or three and a quarter; but the Species of the Letters +illuminated with indigo and violet appeared so confused and indistinct, +that I could not read them: Whereupon viewing the Prism, I found it was +full of Veins running from one end of the Glass to the other; so that +the Refraction could not be regular. I took another Prism therefore +which was free from Veins, and instead of the Letters I used two or +three Parallel black Lines a little broader than the Strokes of the +Letters, and casting the Colours upon these Lines in such manner, that +the Lines ran along the Colours from one end of the Spectrum to the +other, I found that the Focus where the indigo, or confine of this +Colour and violet cast the Species of the black Lines most distinctly, +to be about four Inches, or 4-1/4 nearer to the Lens than the Focus, +where the deepest red cast the Species of the same black Lines most +distinctly. The violet was so faint and dark, that I could not discern +the Species of the Lines distinctly by that Colour; and therefore +considering that the Prism was made of a dark coloured Glass inclining +to green, I took another Prism of clear white Glass; but the Spectrum of +Colours which this Prism made had long white Streams of faint Light +shooting out from both ends of the Colours, which made me conclude that +something was amiss; and viewing the Prism, I found two or three little +Bubbles in the Glass, which refracted the Light irregularly. 
Wherefore I +covered that Part of the Glass with black Paper, and letting the Light +pass through another Part of it which was free from such Bubbles, the +Spectrum of Colours became free from those irregular Streams of Light, +and was now such as I desired. But still I found the violet so dark and +faint, that I could scarce see the Species of the Lines by the violet, +and not at all by the deepest Part of it, which was next the end of the +Spectrum. I suspected therefore, that this faint and dark Colour might +be allayed by that scattering Light which was refracted, and reflected +irregularly, partly by some very small Bubbles in the Glasses, and +partly by the Inequalities of their Polish; which Light, tho' it was but +little, yet it being of a white Colour, might suffice to affect the +Sense so strongly as to disturb the Phænomena of that weak and dark +Colour the violet, and therefore I tried, as in the 12th, 13th, and 14th +Experiments, whether the Light of this Colour did not consist of a +sensible Mixture of heterogeneous Rays, but found it did not. Nor did +the Refractions cause any other sensible Colour than violet to emerge +out of this Light, as they would have done out of white Light, and by +consequence out of this violet Light had it been sensibly compounded +with white Light. 
And therefore I concluded, that the reason why I could +not see the Species of the Lines distinctly by this Colour, was only +the Darkness of this Colour, and Thinness of its Light, and its distance +from the Axis of the Lens; I divided therefore those Parallel black +Lines into equal Parts, by which I might readily know the distances of +the Colours in the Spectrum from one another, and noted the distances of +the Lens from the Foci of such Colours, as cast the Species of the Lines +distinctly, and then considered whether the difference of those +distances bear such proportion to 5-1/3 Inches, the greatest Difference +of the distances, which the Foci of the deepest red and violet ought to +have from the Lens, as the distance of the observed Colours from one +another in the Spectrum bear to the greatest distance of the deepest red +and violet measured in the Rectilinear Sides of the Spectrum, that is, +to the Length of those Sides, or Excess of the Length of the Spectrum +above its Breadth. And my Observations were as follows. + +When I observed and compared the deepest sensible red, and the Colour in +the Confine of green and blue, which at the Rectilinear Sides of the +Spectrum was distant from it half the Length of those Sides, the Focus +where the Confine of green and blue cast the Species of the Lines +distinctly on the Paper, was nearer to the Lens than the Focus, where +the red cast those Lines distinctly on it by about 2-1/2 or 2-3/4 +Inches. For sometimes the Measures were a little greater, sometimes a +little less, but seldom varied from one another above 1/3 of an Inch. +For it was very difficult to define the Places of the Foci, without some +little Errors. Now, if the Colours distant half the Length of the +Image, (measured at its Rectilinear Sides) give 2-1/2 or 2-3/4 +Difference of the distances of their Foci from the Lens, then the +Colours distant the whole Length ought to give 5 or 5-1/2 Inches +difference of those distances. 
+ +But here it's to be noted, that I could not see the red to the full end +of the Spectrum, but only to the Center of the Semicircle which bounded +that end, or a little farther; and therefore I compared this red not +with that Colour which was exactly in the middle of the Spectrum, or +Confine of green and blue, but with that which verged a little more to +the blue than to the green: And as I reckoned the whole Length of the +Colours not to be the whole Length of the Spectrum, but the Length of +its Rectilinear Sides, so compleating the semicircular Ends into +Circles, when either of the observed Colours fell within those Circles, +I measured the distance of that Colour from the semicircular End of the +Spectrum, and subducting half this distance from the measured distance +of the two Colours, I took the Remainder for their corrected distance; +and in these Observations set down this corrected distance for the +difference of the distances of their Foci from the Lens. For, as the +Length of the Rectilinear Sides of the Spectrum would be the whole +Length of all the Colours, were the Circles of which (as we shewed) that +Spectrum consists contracted and reduced to Physical Points, so in that +Case this corrected distance would be the real distance of the two +observed Colours. + +When therefore I farther observed the deepest sensible red, and that +blue whose corrected distance from it was 7/12 Parts of the Length of +the Rectilinear Sides of the Spectrum, the difference of the distances +of their Foci from the Lens was about 3-1/4 Inches, and as 7 to 12, so +is 3-1/4 to 5-4/7. + +When I observed the deepest sensible red, and that indigo whose +corrected distance was 8/12 or 2/3 of the Length of the Rectilinear +Sides of the Spectrum, the difference of the distances of their Foci +from the Lens, was about 3-2/3 Inches, and as 2 to 3, so is 3-2/3 to +5-1/2. 
+ +When I observed the deepest sensible red, and that deep indigo whose +corrected distance from one another was 9/12 or 3/4 of the Length of the +Rectilinear Sides of the Spectrum, the difference of the distances of +their Foci from the Lens was about 4 Inches; and as 3 to 4, so is 4 to +5-1/3. + +When I observed the deepest sensible red, and that Part of the violet +next the indigo, whose corrected distance from the red was 10/12 or 5/6 +of the Length of the Rectilinear Sides of the Spectrum, the difference +of the distances of their Foci from the Lens was about 4-1/2 Inches, and +as 5 to 6, so is 4-1/2 to 5-2/5. For sometimes, when the Lens was +advantageously placed, so that its Axis respected the blue, and all +Things else were well ordered, and the Sun shone clear, and I held my +Eye very near to the Paper on which the Lens cast the Species of the +Lines, I could see pretty distinctly the Species of those Lines by that +Part of the violet which was next the indigo; and sometimes I could see +them by above half the violet, For in making these Experiments I had +observed, that the Species of those Colours only appear distinct, which +were in or near the Axis of the Lens: So that if the blue or indigo were +in the Axis, I could see their Species distinctly; and then the red +appeared much less distinct than before. Wherefore I contrived to make +the Spectrum of Colours shorter than before, so that both its Ends might +be nearer to the Axis of the Lens. And now its Length was about 2-1/2 +Inches, and Breadth about 1/5 or 1/6 of an Inch. Also instead of the +black Lines on which the Spectrum was cast, I made one black Line +broader than those, that I might see its Species more easily; and this +Line I divided by short cross Lines into equal Parts, for measuring the +distances of the observed Colours. 
And now I could sometimes see the +Species of this Line with its Divisions almost as far as the Center of +the semicircular violet End of the Spectrum, and made these farther +Observations. + +When I observed the deepest sensible red, and that Part of the violet, +whose corrected distance from it was about 8/9 Parts of the Rectilinear +Sides of the Spectrum, the Difference of the distances of the Foci of +those Colours from the Lens, was one time 4-2/3, another time 4-3/4, +another time 4-7/8 Inches; and as 8 to 9, so are 4-2/3, 4-3/4, 4-7/8, to +5-1/4, 5-11/32, 5-31/64 respectively. + +When I observed the deepest sensible red, and deepest sensible violet, +(the corrected distance of which Colours, when all Things were ordered +to the best Advantage, and the Sun shone very clear, was about 11/12 or +15/16 Parts of the Length of the Rectilinear Sides of the coloured +Spectrum) I found the Difference of the distances of their Foci from the +Lens sometimes 4-3/4 sometimes 5-1/4, and for the most part 5 Inches or +thereabouts; and as 11 to 12, or 15 to 16, so is five Inches to 5-2/2 or +5-1/3 Inches. + +And by this Progression of Experiments I satisfied my self, that had the +Light at the very Ends of the Spectrum been strong enough to make the +Species of the black Lines appear plainly on the Paper, the Focus of the +deepest violet would have been found nearer to the Lens, than the Focus +of the deepest red, by about 5-1/3 Inches at least. And this is a +farther Evidence, that the Sines of Incidence and Refraction of the +several sorts of Rays, hold the same Proportion to one another in the +smallest Refractions which they do in the greatest. + +My Progress in making this nice and troublesome Experiment I have set +down more at large, that they that shall try it after me may be aware of +the Circumspection requisite to make it succeed well. 
And if they cannot +make it succeed so well as I did, they may notwithstanding collect by +the Proportion of the distance of the Colours of the Spectrum, to the +Difference of the distances of their Foci from the Lens, what would be +the Success in the more distant Colours by a better trial. And yet, if +they use a broader Lens than I did, and fix it to a long strait Staff, +by means of which it may be readily and truly directed to the Colour +whose Focus is desired, I question not but the Experiment will succeed +better with them than it did with me. For I directed the Axis as nearly +as I could to the middle of the Colours, and then the faint Ends of the +Spectrum being remote from the Axis, cast their Species less distinctly +on the Paper than they would have done, had the Axis been successively +directed to them. + +Now by what has been said, it's certain that the Rays which differ in +Refrangibility do not converge to the same Focus; but if they flow from +a lucid Point, as far from the Lens on one side as their Foci are on the +other, the Focus of the most refrangible Rays shall be nearer to the +Lens than that of the least refrangible, by above the fourteenth Part of +the whole distance; and if they flow from a lucid Point, so very remote +from the Lens, that before their Incidence they may be accounted +parallel, the Focus of the most refrangible Rays shall be nearer to the +Lens than the Focus of the least refrangible, by about the 27th or 28th +Part of their whole distance from it. And the Diameter of the Circle in +the middle Space between those two Foci which they illuminate, when they +fall there on any Plane, perpendicular to the Axis (which Circle is the +least into which they can all be gathered) is about the 55th Part of the +Diameter of the Aperture of the Glass. So that 'tis a wonder, that +Telescopes represent Objects so distinct as they do. 
But were all the +Rays of Light equally refrangible, the Error arising only from the +Sphericalness of the Figures of Glasses would be many hundred times +less. For, if the Object-glass of a Telescope be Plano-convex, and the +Plane side be turned towards the Object, and the Diameter of the +Sphere, whereof this Glass is a Segment, be called D, and the +Semi-diameter of the Aperture of the Glass be called S, and the Sine of +Incidence out of Glass into Air, be to the Sine of Refraction as I to R; +the Rays which come parallel to the Axis of the Glass, shall in the +Place where the Image of the Object is most distinctly made, be +scattered all over a little Circle, whose Diameter is _(Rq/Iq) × (S +cub./D quad.)_ very nearly,[H] as I gather by computing the Errors of +the Rays by the Method of infinite Series, and rejecting the Terms, +whose Quantities are inconsiderable. As for instance, if the Sine of +Incidence I, be to the Sine of Refraction R, as 20 to 31, and if D the +Diameter of the Sphere, to which the Convex-side of the Glass is ground, +be 100 Feet or 1200 Inches, and S the Semi-diameter of the Aperture be +two Inches, the Diameter of the little Circle, (that is (_Rq × S +cub.)/(Iq × D quad._)) will be (31 × 31 × 8)/(20 × 20 × 1200 × 1200) (or +961/72000000) Parts of an Inch. But the Diameter of the little Circle, +through which these Rays are scattered by unequal Refrangibility, will +be about the 55th Part of the Aperture of the Object-glass, which here +is four Inches. And therefore, the Error arising from the Spherical +Figure of the Glass, is to the Error arising from the different +Refrangibility of the Rays, as 961/72000000 to 4/55, that is as 1 to +5449; and therefore being in comparison so very little, deserves not to +be considered. + +[Illustration: FIG. 27.] + +But you will say, if the Errors caused by the different Refrangibility +be so very great, how comes it to pass, that Objects appear through +Telescopes so distinct as they do? 
I answer, 'tis because the erring +Rays are not scattered uniformly over all that Circular Space, but +collected infinitely more densely in the Center than in any other Part +of the Circle, and in the Way from the Center to the Circumference, grow +continually rarer and rarer, so as at the Circumference to become +infinitely rare; and by reason of their Rarity are not strong enough to +be visible, unless in the Center and very near it. Let ADE [in _Fig._ +27.] represent one of those Circles described with the Center C, and +Semi-diameter AC, and let BFG be a smaller Circle concentrick to the +former, cutting with its Circumference the Diameter AC in B, and bisect +AC in N; and by my reckoning, the Density of the Light in any Place B, +will be to its Density in N, as AB to BC; and the whole Light within the +lesser Circle BFG, will be to the whole Light within the greater AED, as +the Excess of the Square of AC above the Square of AB, is to the Square +of AC. As if BC be the fifth Part of AC, the Light will be four times +denser in B than in N, and the whole Light within the less Circle, will +be to the whole Light within the greater, as nine to twenty-five. Whence +it's evident, that the Light within the less Circle, must strike the +Sense much more strongly, than that faint and dilated Light round about +between it and the Circumference of the greater. + +But it's farther to be noted, that the most luminous of the Prismatick +Colours are the yellow and orange. These affect the Senses more strongly +than all the rest together, and next to these in strength are the red +and green. The blue compared with these is a faint and dark Colour, and +the indigo and violet are much darker and fainter, so that these +compared with the stronger Colours are little to be regarded. 
The Images +of Objects are therefore to be placed, not in the Focus of the mean +refrangible Rays, which are in the Confine of green and blue, but in the +Focus of those Rays which are in the middle of the orange and yellow; +there where the Colour is most luminous and fulgent, that is in the +brightest yellow, that yellow which inclines more to orange than to +green. And by the Refraction of these Rays (whose Sines of Incidence and +Refraction in Glass are as 17 and 11) the Refraction of Glass and +Crystal for Optical Uses is to be measured. Let us therefore place the +Image of the Object in the Focus of these Rays, and all the yellow and +orange will fall within a Circle, whose Diameter is about the 250th +Part of the Diameter of the Aperture of the Glass. And if you add the +brighter half of the red, (that half which is next the orange) and the +brighter half of the green, (that half which is next the yellow) about +three fifth Parts of the Light of these two Colours will fall within the +same Circle, and two fifth Parts will fall without it round about; and +that which falls without will be spread through almost as much more +space as that which falls within, and so in the gross be almost three +times rarer. Of the other half of the red and green, (that is of the +deep dark red and willow green) about one quarter will fall within this +Circle, and three quarters without, and that which falls without will be +spread through about four or five times more space than that which falls +within; and so in the gross be rarer, and if compared with the whole +Light within it, will be about 25 times rarer than all that taken in the +gross; or rather more than 30 or 40 times rarer, because the deep red in +the end of the Spectrum of Colours made by a Prism is very thin and +rare, and the willow green is something rarer than the orange and +yellow. 
The Light of these Colours therefore being so very much rarer +than that within the Circle, will scarce affect the Sense, especially +since the deep red and willow green of this Light, are much darker +Colours than the rest. And for the same reason the blue and violet being +much darker Colours than these, and much more rarified, may be +neglected. For the dense and bright Light of the Circle, will obscure +the rare and weak Light of these dark Colours round about it, and +render them almost insensible. The sensible Image of a lucid Point is +therefore scarce broader than a Circle, whose Diameter is the 250th Part +of the Diameter of the Aperture of the Object-glass of a good Telescope, +or not much broader, if you except a faint and dark misty Light round +about it, which a Spectator will scarce regard. And therefore in a +Telescope, whose Aperture is four Inches, and Length an hundred Feet, it +exceeds not 2´´ 45´´´, or 3´´. And in a Telescope whose Aperture is two +Inches, and Length 20 or 30 Feet, it may be 5´´ or 6´´, and scarce +above. And this answers well to Experience: For some Astronomers have +found the Diameters of the fix'd Stars, in Telescopes of between 20 and +60 Feet in length, to be about 5´´ or 6´´, or at most 8´´ or 10´´ in +diameter. But if the Eye-Glass be tincted faintly with the Smoak of a +Lamp or Torch, to obscure the Light of the Star, the fainter Light in +the Circumference of the Star ceases to be visible, and the Star (if the +Glass be sufficiently soiled with Smoak) appears something more like a +mathematical Point. And for the same Reason, the enormous Part of the +Light in the Circumference of every lucid Point ought to be less +discernible in shorter Telescopes than in longer, because the shorter +transmit less Light to the Eye. 
+ +Now, that the fix'd Stars, by reason of their immense Distance, appear +like Points, unless so far as their Light is dilated by Refraction, may +appear from hence; that when the Moon passes over them and eclipses +them, their Light vanishes, not gradually like that of the Planets, but +all at once; and in the end of the Eclipse it returns into Sight all at +once, or certainly in less time than the second of a Minute; the +Refraction of the Moon's Atmosphere a little protracting the time in +which the Light of the Star first vanishes, and afterwards returns into +Sight. + +Now, if we suppose the sensible Image of a lucid Point, to be even 250 +times narrower than the Aperture of the Glass; yet this Image would be +still much greater than if it were only from the spherical Figure of the +Glass. For were it not for the different Refrangibility of the Rays, its +breadth in an 100 Foot Telescope whose aperture is 4 Inches, would be +but 961/72000000 parts of an Inch, as is manifest by the foregoing +Computation. And therefore in this case the greatest Errors arising from +the spherical Figure of the Glass, would be to the greatest sensible +Errors arising from the different Refrangibility of the Rays as +961/72000000 to 4/250 at most, that is only as 1 to 1200. And this +sufficiently shews that it is not the spherical Figures of Glasses, but +the different Refrangibility of the Rays which hinders the perfection of +Telescopes. + +There is another Argument by which it may appear that the different +Refrangibility of Rays, is the true cause of the imperfection of +Telescopes. For the Errors of the Rays arising from the spherical +Figures of Object-glasses, are as the Cubes of the Apertures of the +Object Glasses; and thence to make Telescopes of various Lengths magnify +with equal distinctness, the Apertures of the Object-glasses, and the +Charges or magnifying Powers ought to be as the Cubes of the square +Roots of their lengths; which doth not answer to Experience. 
But the +Errors of the Rays arising from the different Refrangibility, are as the +Apertures of the Object-glasses; and thence to make Telescopes of +various lengths, magnify with equal distinctness, their Apertures and +Charges ought to be as the square Roots of their lengths; and this +answers to Experience, as is well known. For Instance, a Telescope of 64 +Feet in length, with an Aperture of 2-2/3 Inches, magnifies about 120 +times, with as much distinctness as one of a Foot in length, with 1/3 of +an Inch aperture, magnifies 15 times. + +[Illustration: FIG. 28.] + +Now were it not for this different Refrangibility of Rays, Telescopes +might be brought to a greater perfection than we have yet describ'd, by +composing the Object-glass of two Glasses with Water between them. Let +ADFC [in _Fig._ 28.] represent the Object-glass composed of two Glasses +ABED and BEFC, alike convex on the outsides AGD and CHF, and alike +concave on the insides BME, BNE, with Water in the concavity BMEN. Let +the Sine of Incidence out of Glass into Air be as I to R, and out of +Water into Air, as K to R, and by consequence out of Glass into Water, +as I to K: and let the Diameter of the Sphere to which the convex sides +AGD and CHF are ground be D, and the Diameter of the Sphere to which the +concave sides BME and BNE, are ground be to D, as the Cube Root of +KK--KI to the Cube Root of RK--RI: and the Refractions on the concave +sides of the Glasses, will very much correct the Errors of the +Refractions on the convex sides, so far as they arise from the +sphericalness of the Figure. And by this means might Telescopes be +brought to sufficient perfection, were it not for the different +Refrangibility of several sorts of Rays. But by reason of this different +Refrangibility, I do not yet see any other means of improving Telescopes +by Refractions alone, than that of increasing their lengths, for which +end the late Contrivance of _Hugenius_ seems well accommodated. 
For very +long Tubes are cumbersome, and scarce to be readily managed, and by +reason of their length are very apt to bend, and shake by bending, so as +to cause a continual trembling in the Objects, whereby it becomes +difficult to see them distinctly: whereas by his Contrivance the Glasses +are readily manageable, and the Object-glass being fix'd upon a strong +upright Pole becomes more steady. + +Seeing therefore the Improvement of Telescopes of given lengths by +Refractions is desperate; I contrived heretofore a Perspective by +Reflexion, using instead of an Object-glass a concave Metal. The +diameter of the Sphere to which the Metal was ground concave was about +25 _English_ Inches, and by consequence the length of the Instrument +about six Inches and a quarter. The Eye-glass was Plano-convex, and the +diameter of the Sphere to which the convex side was ground was about 1/5 +of an Inch, or a little less, and by consequence it magnified between 30 +and 40 times. By another way of measuring I found that it magnified +about 35 times. The concave Metal bore an Aperture of an Inch and a +third part; but the Aperture was limited not by an opake Circle, +covering the Limb of the Metal round about, but by an opake Circle +placed between the Eyeglass and the Eye, and perforated in the middle +with a little round hole for the Rays to pass through to the Eye. For +this Circle by being placed here, stopp'd much of the erroneous Light, +which otherwise would have disturbed the Vision. By comparing it with a +pretty good Perspective of four Feet in length, made with a concave +Eye-glass, I could read at a greater distance with my own Instrument +than with the Glass. Yet Objects appeared much darker in it than in the +Glass, and that partly because more Light was lost by Reflexion in the +Metal, than by Refraction in the Glass, and partly because my Instrument +was overcharged. 
Had it magnified but 30 or 25 times, it would have made +the Object appear more brisk and pleasant. Two of these I made about 16 +Years ago, and have one of them still by me, by which I can prove the +truth of what I write. Yet it is not so good as at the first. For the +concave has been divers times tarnished and cleared again, by rubbing +it with very soft Leather. When I made these an Artist in _London_ +undertook to imitate it; but using another way of polishing them than I +did, he fell much short of what I had attained to, as I afterwards +understood by discoursing the Under-workman he had employed. The Polish +I used was in this manner. I had two round Copper Plates, each six +Inches in Diameter, the one convex, the other concave, ground very true +to one another. On the convex I ground the Object-Metal or Concave which +was to be polish'd, 'till it had taken the Figure of the Convex and was +ready for a Polish. Then I pitched over the convex very thinly, by +dropping melted Pitch upon it, and warming it to keep the Pitch soft, +whilst I ground it with the concave Copper wetted to make it spread +eavenly all over the convex. Thus by working it well I made it as thin +as a Groat, and after the convex was cold I ground it again to give it +as true a Figure as I could. Then I took Putty which I had made very +fine by washing it from all its grosser Particles, and laying a little +of this upon the Pitch, I ground it upon the Pitch with the concave +Copper, till it had done making a Noise; and then upon the Pitch I +ground the Object-Metal with a brisk motion, for about two or three +Minutes of time, leaning hard upon it. Then I put fresh Putty upon the +Pitch, and ground it again till it had done making a noise, and +afterwards ground the Object-Metal upon it as before. 
And this Work I +repeated till the Metal was polished, grinding it the last time with all +my strength for a good while together, and frequently breathing upon +the Pitch, to keep it moist without laying on any more fresh Putty. The +Object-Metal was two Inches broad, and about one third part of an Inch +thick, to keep it from bending. I had two of these Metals, and when I +had polished them both, I tried which was best, and ground the other +again, to see if I could make it better than that which I kept. And thus +by many Trials I learn'd the way of polishing, till I made those two +reflecting Perspectives I spake of above. For this Art of polishing will +be better learn'd by repeated Practice than by my Description. Before I +ground the Object-Metal on the Pitch, I always ground the Putty on it +with the concave Copper, till it had done making a noise, because if the +Particles of the Putty were not by this means made to stick fast in the +Pitch, they would by rolling up and down grate and fret the Object-Metal +and fill it full of little holes. + +But because Metal is more difficult to polish than Glass, and is +afterwards very apt to be spoiled by tarnishing, and reflects not so +much Light as Glass quick-silver'd over does: I would propound to use +instead of the Metal, a Glass ground concave on the foreside, and as +much convex on the backside, and quick-silver'd over on the convex side. +The Glass must be every where of the same thickness exactly. Otherwise +it will make Objects look colour'd and indistinct. By such a Glass I +tried about five or six Years ago to make a reflecting Telescope of four +Feet in length to magnify about 150 times, and I satisfied my self that +there wants nothing but a good Artist to bring the Design to +perfection. 
For the Glass being wrought by one of our _London_ Artists +after such a manner as they grind Glasses for Telescopes, though it +seemed as well wrought as the Object-glasses use to be, yet when it was +quick-silver'd, the Reflexion discovered innumerable Inequalities all +over the Glass. And by reason of these Inequalities, Objects appeared +indistinct in this Instrument. For the Errors of reflected Rays caused +by any Inequality of the Glass, are about six times greater than the +Errors of refracted Rays caused by the like Inequalities. Yet by this +Experiment I satisfied my self that the Reflexion on the concave side of +the Glass, which I feared would disturb the Vision, did no sensible +prejudice to it, and by consequence that nothing is wanting to perfect +these Telescopes, but good Workmen who can grind and polish Glasses +truly spherical. An Object-glass of a fourteen Foot Telescope, made by +an Artificer at _London_, I once mended considerably, by grinding it on +Pitch with Putty, and leaning very easily on it in the grinding, lest +the Putty should scratch it. Whether this way may not do well enough for +polishing these reflecting Glasses, I have not yet tried. But he that +shall try either this or any other way of polishing which he may think +better, may do well to make his Glasses ready for polishing, by grinding +them without that Violence, wherewith our _London_ Workmen press their +Glasses in grinding. For by such violent pressure, Glasses are apt to +bend a little in the grinding, and such bending will certainly spoil +their Figure. To recommend therefore the consideration of these +reflecting Glasses to such Artists as are curious in figuring Glasses, I +shall describe this optical Instrument in the following Proposition. + + +_PROP._ VIII. PROB. II. + +_To shorten Telescopes._ + +Let ABCD [in _Fig._ 29.] 
represent a Glass spherically concave on the +foreside AB, and as much convex on the backside CD, so that it be every +where of an equal thickness. Let it not be thicker on one side than on +the other, lest it make Objects appear colour'd and indistinct, and let +it be very truly wrought and quick-silver'd over on the backside; and +set in the Tube VXYZ which must be very black within. Let EFG represent +a Prism of Glass or Crystal placed near the other end of the Tube, in +the middle of it, by means of a handle of Brass or Iron FGK, to the end +of which made flat it is cemented. Let this Prism be rectangular at E, +and let the other two Angles at F and G be accurately equal to each +other, and by consequence equal to half right ones, and let the plane +sides FE and GE be square, and by consequence the third side FG a +rectangular Parallelogram, whose length is to its breadth in a +subduplicate proportion of two to one. Let it be so placed in the Tube, +that the Axis of the Speculum may pass through the middle of the square +side EF perpendicularly and by consequence through the middle of the +side FG at an Angle of 45 Degrees, and let the side EF be turned towards +the Speculum, and the distance of this Prism from the Speculum be such +that the Rays of the Light PQ, RS, &c. which are incident upon the +Speculum in Lines parallel to the Axis thereof, may enter the Prism at +the side EF, and be reflected by the side FG, and thence go out of it +through the side GE, to the Point T, which must be the common Focus of +the Speculum ABDC, and of a Plano-convex Eye-glass H, through which +those Rays must pass to the Eye. And let the Rays at their coming out of +the Glass pass through a small round hole, or aperture made in a little +plate of Lead, Brass, or Silver, wherewith the Glass is to be covered, +which hole must be no bigger than is necessary for Light enough to pass +through. 
For so it will render the Object distinct, the Plate in which +'tis made intercepting all the erroneous part of the Light which comes +from the verges of the Speculum AB. Such an Instrument well made, if it +be six Foot long, (reckoning the length from the Speculum to the Prism, +and thence to the Focus T) will bear an aperture of six Inches at the +Speculum, and magnify between two and three hundred times. But the hole +H here limits the aperture with more advantage, than if the aperture was +placed at the Speculum. If the Instrument be made longer or shorter, the +aperture must be in proportion as the Cube of the square-square Root of +the length, and the magnifying as the aperture. But it's convenient that +the Speculum be an Inch or two broader than the aperture at the least, +and that the Glass of the Speculum be thick, that it bend not in the +working. The Prism EFG must be no bigger than is necessary, and its back +side FG must not be quick-silver'd over. For without quicksilver it will +reflect all the Light incident on it from the Speculum. + +[Illustration: FIG. 29.] + +In this Instrument the Object will be inverted, but may be erected by +making the square sides FF and EG of the Prism EFG not plane but +spherically convex, that the Rays may cross as well before they come at +it as afterwards between it and the Eye-glass. If it be desired that the +Instrument bear a larger aperture, that may be also done by composing +the Speculum of two Glasses with Water between them. + +If the Theory of making Telescopes could at length be fully brought into +Practice, yet there would be certain Bounds beyond which Telescopes +could not perform. For the Air through which we look upon the Stars, is +in a perpetual Tremor; as may be seen by the tremulous Motion of Shadows +cast from high Towers, and by the twinkling of the fix'd Stars. But +these Stars do not twinkle when viewed through Telescopes which have +large apertures. 
For the Rays of Light which pass through divers parts +of the aperture, tremble each of them apart, and by means of their +various and sometimes contrary Tremors, fall at one and the same time +upon different points in the bottom of the Eye, and their trembling +Motions are too quick and confused to be perceived severally. And all +these illuminated Points constitute one broad lucid Point, composed of +those many trembling Points confusedly and insensibly mixed with one +another by very short and swift Tremors, and thereby cause the Star to +appear broader than it is, and without any trembling of the whole. Long +Telescopes may cause Objects to appear brighter and larger than short +ones can do, but they cannot be so formed as to take away that confusion +of the Rays which arises from the Tremors of the Atmosphere. The only +Remedy is a most serene and quiet Air, such as may perhaps be found on +the tops of the highest Mountains above the grosser Clouds. + +FOOTNOTES: + +[C] _See our_ Author's Lectiones Opticæ § 10. _Sect. II. § 29. and Sect. +III. Prop. 25._ + +[D] See our Author's _Lectiones Opticæ_, Part. I. Sect. 1. §5. + +[E] _This is very fully treated of in our_ Author's Lect. Optic. _Part_ +I. _Sect._ II. + +[F] _See our_ Author's Lect. Optic. Part I. Sect. II. § 29. + +[G] _This is demonstrated in our_ Author's Lect. Optic. _Part_ I. +_Sect._ IV. _Prop._ 37. + +[H] _How to do this, is shewn in our_ Author's Lect. Optic. _Part_ I. +_Sect._ IV. _Prop._ 31. + + + + +THE FIRST BOOK OF OPTICKS + + + + +_PART II._ + + +_PROP._ I. THEOR. I. + +_The Phænomena of Colours in refracted or reflected Light are not caused +by new Modifications of the Light variously impress'd, according to the +various Terminations of the Light and Shadow_. + +The PROOF by Experiments. + +_Exper._ 1. For if the Sun shine into a very dark Chamber through an +oblong hole F, [in _Fig._ 1.] 
whose breadth is the sixth or eighth part +of an Inch, or something less; and his beam FH do afterwards pass first +through a very large Prism ABC, distant about 20 Feet from the hole, and +parallel to it, and then (with its white part) through an oblong hole H, +whose breadth is about the fortieth or sixtieth part of an Inch, and +which is made in a black opake Body GI, and placed at the distance of +two or three Feet from the Prism, in a parallel Situation both to the +Prism and to the former hole, and if this white Light thus transmitted +through the hole H, fall afterwards upon a white Paper _pt_, placed +after that hole H, at the distance of three or four Feet from it, and +there paint the usual Colours of the Prism, suppose red at _t_, yellow +at _s_, green at _r_, blue at _q_, and violet at _p_; you may with an +Iron Wire, or any such like slender opake Body, whose breadth is about +the tenth part of an Inch, by intercepting the Rays at _k_, _l_, _m_, +_n_ or _o_, take away any one of the Colours at _t_, _s_, _r_, _q_ or +_p_, whilst the other Colours remain upon the Paper as before; or with +an Obstacle something bigger you may take away any two, or three, or +four Colours together, the rest remaining: So that any one of the +Colours as well as violet may become outmost in the Confine of the +Shadow towards _p_, and any one of them as well as red may become +outmost in the Confine of the Shadow towards _t_, and any one of them +may also border upon the Shadow made within the Colours by the Obstacle +R intercepting some intermediate part of the Light; and, lastly, any one +of them by being left alone, may border upon the Shadow on either hand. +All the Colours have themselves indifferently to any Confines of Shadow, +and therefore the differences of these Colours from one another, do not +arise from the different Confines of Shadow, whereby Light is variously +modified, as has hitherto been the Opinion of Philosophers. 
In trying +these things 'tis to be observed, that by how much the holes F and H are +narrower, and the Intervals between them and the Prism greater, and the +Chamber darker, by so much the better doth the Experiment succeed; +provided the Light be not so far diminished, but that the Colours at +_pt_ be sufficiently visible. To procure a Prism of solid Glass large +enough for this Experiment will be difficult, and therefore a prismatick +Vessel must be made of polish'd Glass Plates cemented together, and +filled with salt Water or clear Oil. + +[Illustration: FIG. 1.] + +_Exper._ 2. The Sun's Light let into a dark Chamber through the round +hole F, [in _Fig._ 2.] half an Inch wide, passed first through the Prism +ABC placed at the hole, and then through a Lens PT something more than +four Inches broad, and about eight Feet distant from the Prism, and +thence converged to O the Focus of the Lens distant from it about three +Feet, and there fell upon a white Paper DE. If that Paper was +perpendicular to that Light incident upon it, as 'tis represented in the +posture DE, all the Colours upon it at O appeared white. But if the +Paper being turned about an Axis parallel to the Prism, became very much +inclined to the Light, as 'tis represented in the Positions _de_ and +_[Greek: de]_; the same Light in the one case appeared yellow and red, +in the other blue. Here one and the same part of the Light in one and +the same place, according to the various Inclinations of the Paper, +appeared in one case white, in another yellow or red, in a third blue, +whilst the Confine of Light and shadow, and the Refractions of the Prism +in all these cases remained the same. + +[Illustration: FIG. 2.] + +[Illustration: FIG. 3.] + +_Exper._ 3. Such another Experiment may be more easily tried as follows. +Let a broad beam of the Sun's Light coming into a dark Chamber through a +hole in the Window-shut be refracted by a large Prism ABC, [in _Fig._ +3.] 
whose refracting Angle C is more than 60 Degrees, and so soon as it +comes out of the Prism, let it fall upon the white Paper DE glewed upon +a stiff Plane; and this Light, when the Paper is perpendicular to it, as +'tis represented in DE, will appear perfectly white upon the Paper; but +when the Paper is very much inclin'd to it in such a manner as to keep +always parallel to the Axis of the Prism, the whiteness of the whole +Light upon the Paper will according to the inclination of the Paper this +way or that way, change either into yellow and red, as in the posture +_de_, or into blue and violet, as in the posture [Greek: de]. And if the +Light before it fall upon the Paper be twice refracted the same way by +two parallel Prisms, these Colours will become the more conspicuous. +Here all the middle parts of the broad beam of white Light which fell +upon the Paper, did without any Confine of Shadow to modify it, become +colour'd all over with one uniform Colour, the Colour being always the +same in the middle of the Paper as at the edges, and this Colour changed +according to the various Obliquity of the reflecting Paper, without any +change in the Refractions or Shadow, or in the Light which fell upon the +Paper. And therefore these Colours are to be derived from some other +Cause than the new Modifications of Light by Refractions and Shadows. + +If it be asked, what then is their Cause? I answer, That the Paper in +the posture _de_, being more oblique to the more refrangible Rays than +to the less refrangible ones, is more strongly illuminated by the latter +than by the former, and therefore the less refrangible Rays are +predominant in the reflected Light. And where-ever they are predominant +in any Light, they tinge it with red or yellow, as may in some measure +appear by the first Proposition of the first Part of this Book, and will +more fully appear hereafter. 
And the contrary happens in the posture of +the Paper [Greek: de], the more refrangible Rays being then predominant +which always tinge Light with blues and violets. + +_Exper._ 4. The Colours of Bubbles with which Children play are various, +and change their Situation variously, without any respect to any Confine +or Shadow. If such a Bubble be cover'd with a concave Glass, to keep it +from being agitated by any Wind or Motion of the Air, the Colours will +slowly and regularly change their situation, even whilst the Eye and the +Bubble, and all Bodies which emit any Light, or cast any Shadow, remain +unmoved. And therefore their Colours arise from some regular Cause which +depends not on any Confine of Shadow. What this Cause is will be shewed +in the next Book. + +To these Experiments may be added the tenth Experiment of the first Part +of this first Book, where the Sun's Light in a dark Room being +trajected through the parallel Superficies of two Prisms tied together +in the form of a Parallelopipede, became totally of one uniform yellow +or red Colour, at its emerging out of the Prisms. Here, in the +production of these Colours, the Confine of Shadow can have nothing to +do. For the Light changes from white to yellow, orange and red +successively, without any alteration of the Confine of Shadow: And at +both edges of the emerging Light where the contrary Confines of Shadow +ought to produce different Effects, the Colour is one and the same, +whether it be white, yellow, orange or red: And in the middle of the +emerging Light, where there is no Confine of Shadow at all, the Colour +is the very same as at the edges, the whole Light at its very first +Emergence being of one uniform Colour, whether white, yellow, orange or +red, and going on thence perpetually without any change of Colour, such +as the Confine of Shadow is vulgarly supposed to work in refracted Light +after its Emergence. 
Neither can these Colours arise from any new +Modifications of the Light by Refractions, because they change +successively from white to yellow, orange and red, while the Refractions +remain the same, and also because the Refractions are made contrary ways +by parallel Superficies which destroy one another's Effects. They arise +not therefore from any Modifications of Light made by Refractions and +Shadows, but have some other Cause. What that Cause is we shewed above +in this tenth Experiment, and need not here repeat it. + +There is yet another material Circumstance of this Experiment. For this +emerging Light being by a third Prism HIK [in _Fig._ 22. _Part_ I.][I] +refracted towards the Paper PT, and there painting the usual Colours of +the Prism, red, yellow, green, blue, violet: If these Colours arose from +the Refractions of that Prism modifying the Light, they would not be in +the Light before its Incidence on that Prism. And yet in that Experiment +we found, that when by turning the two first Prisms about their common +Axis all the Colours were made to vanish but the red; the Light which +makes that red being left alone, appeared of the very same red Colour +before its Incidence on the third Prism. And in general we find by other +Experiments, that when the Rays which differ in Refrangibility are +separated from one another, and any one Sort of them is considered +apart, the Colour of the Light which they compose cannot be changed by +any Refraction or Reflexion whatever, as it ought to be were Colours +nothing else than Modifications of Light caused by Refractions, and +Reflexions, and Shadows. This Unchangeableness of Colour I am now to +describe in the following Proposition. + + +_PROP._ II. THEOR. II. 
+ +_All homogeneal Light has its proper Colour answering to its Degree of +Refrangibility, and that Colour cannot be changed by Reflexions and +Refractions._ + +In the Experiments of the fourth Proposition of the first Part of this +first Book, when I had separated the heterogeneous Rays from one +another, the Spectrum _pt_ formed by the separated Rays, did in the +Progress from its End _p_, on which the most refrangible Rays fell, unto +its other End _t_, on which the least refrangible Rays fell, appear +tinged with this Series of Colours, violet, indigo, blue, green, yellow, +orange, red, together with all their intermediate Degrees in a continual +Succession perpetually varying. So that there appeared as many Degrees +of Colours, as there were sorts of Rays differing in Refrangibility. + +_Exper._ 5. Now, that these Colours could not be changed by Refraction, +I knew by refracting with a Prism sometimes one very little Part of this +Light, sometimes another very little Part, as is described in the +twelfth Experiment of the first Part of this Book. For by this +Refraction the Colour of the Light was never changed in the least. If +any Part of the red Light was refracted, it remained totally of the same +red Colour as before. No orange, no yellow, no green or blue, no other +new Colour was produced by that Refraction. Neither did the Colour any +ways change by repeated Refractions, but continued always the same red +entirely as at first. The like Constancy and Immutability I found also +in the blue, green, and other Colours. So also, if I looked through a +Prism upon any Body illuminated with any part of this homogeneal Light, +as in the fourteenth Experiment of the first Part of this Book is +described; I could not perceive any new Colour generated this way. 
All +Bodies illuminated with compound Light appear through Prisms confused, +(as was said above) and tinged with various new Colours, but those +illuminated with homogeneal Light appeared through Prisms neither less +distinct, nor otherwise colour'd, than when viewed with the naked Eyes. +Their Colours were not in the least changed by the Refraction of the +interposed Prism. I speak here of a sensible Change of Colour: For the +Light which I here call homogeneal, being not absolutely homogeneal, +there ought to arise some little Change of Colour from its +Heterogeneity. But, if that Heterogeneity was so little as it might be +made by the said Experiments of the fourth Proposition, that Change was +not sensible, and therefore in Experiments, where Sense is Judge, ought +to be accounted none at all. + +_Exper._ 6. And as these Colours were not changeable by Refractions, so +neither were they by Reflexions. For all white, grey, red, yellow, +green, blue, violet Bodies, as Paper, Ashes, red Lead, Orpiment, Indico +Bise, Gold, Silver, Copper, Grass, blue Flowers, Violets, Bubbles of +Water tinged with various Colours, Peacock's Feathers, the Tincture of +_Lignum Nephriticum_, and such-like, in red homogeneal Light appeared +totally red, in blue Light totally blue, in green Light totally green, +and so of other Colours. In the homogeneal Light of any Colour they all +appeared totally of that same Colour, with this only Difference, that +some of them reflected that Light more strongly, others more faintly. I +never yet found any Body, which by reflecting homogeneal Light could +sensibly change its Colour. + +From all which it is manifest, that if the Sun's Light consisted of but +one sort of Rays, there would be but one Colour in the whole World, nor +would it be possible to produce any new Colour by Reflexions and +Refractions, and by consequence that the variety of Colours depends upon +the Composition of Light. 
+ + +_DEFINITION._ + +The homogeneal Light and Rays which appear red, or rather make Objects +appear so, I call Rubrifick or Red-making; those which make Objects +appear yellow, green, blue, and violet, I call Yellow-making, +Green-making, Blue-making, Violet-making, and so of the rest. And if at +any time I speak of Light and Rays as coloured or endued with Colours, I +would be understood to speak not philosophically and properly, but +grossly, and accordingly to such Conceptions as vulgar People in seeing +all these Experiments would be apt to frame. For the Rays to speak +properly are not coloured. In them there is nothing else than a certain +Power and Disposition to stir up a Sensation of this or that Colour. +For as Sound in a Bell or musical String, or other sounding Body, is +nothing but a trembling Motion, and in the Air nothing but that Motion +propagated from the Object, and in the Sensorium 'tis a Sense of that +Motion under the Form of Sound; so Colours in the Object are nothing but +a Disposition to reflect this or that sort of Rays more copiously than +the rest; in the Rays they are nothing but their Dispositions to +propagate this or that Motion into the Sensorium, and in the Sensorium +they are Sensations of those Motions under the Forms of Colours. + + +_PROP._ III. PROB. I. + +_To define the Refrangibility of the several sorts of homogeneal Light +answering to the several Colours._ + +For determining this Problem I made the following Experiment.[J] + +_Exper._ 7. When I had caused the Rectilinear Sides AF, GM, [in _Fig._ +4.] of the Spectrum of Colours made by the Prism to be distinctly +defined, as in the fifth Experiment of the first Part of this Book is +described, there were found in it all the homogeneal Colours in the same +Order and Situation one among another as in the Spectrum of simple +Light, described in the fourth Proposition of that Part. 
For the Circles +of which the Spectrum of compound Light PT is composed, and which in +the middle Parts of the Spectrum interfere, and are intermix'd with one +another, are not intermix'd in their outmost Parts where they touch +those Rectilinear Sides AF and GM. And therefore, in those Rectilinear +Sides when distinctly defined, there is no new Colour generated by +Refraction. I observed also, that if any where between the two outmost +Circles TMF and PGA a Right Line, as [Greek: gd], was cross to the +Spectrum, so as both Ends to fall perpendicularly upon its Rectilinear +Sides, there appeared one and the same Colour, and degree of Colour from +one End of this Line to the other. I delineated therefore in a Paper the +Perimeter of the Spectrum FAP GMT, and in trying the third Experiment of +the first Part of this Book, I held the Paper so that the Spectrum might +fall upon this delineated Figure, and agree with it exactly, whilst an +Assistant, whose Eyes for distinguishing Colours were more critical than +mine, did by Right Lines [Greek: ab, gd, ez,] &c. drawn cross the +Spectrum, note the Confines of the Colours, that is of the red M[Greek: +ab]F, of the orange [Greek: agdb], of the yellow [Greek: gezd], of the +green [Greek: eêthz], of the blue [Greek: êikth], of the indico [Greek: +ilmk], and of the violet [Greek: l]GA[Greek: m]. And this Operation +being divers times repeated both in the same, and in several Papers, I +found that the Observations agreed well enough with one another, and +that the Rectilinear Sides MG and FA were by the said cross Lines +divided after the manner of a Musical Chord. 
Let GM be produced to X, +that MX may be equal to GM, and conceive GX, [Greek: l]X, [Greek: i]X, +[Greek: ê]X, [Greek: e]X, [Greek: g]X, [Greek: a]X, MX, to be in +proportion to one another, as the Numbers, 1, 8/9, 5/6, 3/4, 2/3, 3/5, +9/16, 1/2, and so to represent the Chords of the Key, and of a Tone, a +third Minor, a fourth, a fifth, a sixth Major, a seventh and an eighth +above that Key: And the Intervals M[Greek: a], [Greek: ag], [Greek: ge], +[Greek: eê], [Greek: êi], [Greek: il], and [Greek: l]G, will be the +Spaces which the several Colours (red, orange, yellow, green, blue, +indigo, violet) take up. + +[Illustration: FIG. 4.] + +[Illustration: FIG. 5.] + +Now these Intervals or Spaces subtending the Differences of the +Refractions of the Rays going to the Limits of those Colours, that is, +to the Points M, [Greek: a], [Greek: g], [Greek: e], [Greek: ê], [Greek: +i], [Greek: l], G, may without any sensible Error be accounted +proportional to the Differences of the Sines of Refraction of those Rays +having one common Sine of Incidence, and therefore since the common Sine +of Incidence of the most and least refrangible Rays out of Glass into +Air was (by a Method described above) found in proportion to their Sines +of Refraction, as 50 to 77 and 78, divide the Difference between the +Sines of Refraction 77 and 78, as the Line GM is divided by those +Intervals, and you will have 77, 77-1/8, 77-1/5, 77-1/3, 77-1/2, 77-2/3, +77-7/9, 78, the Sines of Refraction of those Rays out of Glass into Air, +their common Sine of Incidence being 50. So then the Sines of the +Incidences of all the red-making Rays out of Glass into Air, were to the +Sines of their Refractions, not greater than 50 to 77, nor less than 50 +to 77-1/8, but they varied from one another according to all +intermediate Proportions. And the Sines of the Incidences of the +green-making Rays were to the Sines of their Refractions in all +Proportions from that of 50 to 77-1/3, unto that of 50 to 77-1/2. 
And +by the like Limits above-mentioned were the Refractions of the Rays +belonging to the rest of the Colours defined, the Sines of the +red-making Rays extending from 77 to 77-1/8, those of the orange-making +from 77-1/8 to 77-1/5, those of the yellow-making from 77-1/5 to 77-1/3, +those of the green-making from 77-1/3 to 77-1/2, those of the +blue-making from 77-1/2 to 77-2/3, those of the indigo-making from +77-2/3 to 77-7/9, and those of the violet from 77-7/9, to 78. + +These are the Laws of the Refractions made out of Glass into Air, and +thence by the third Axiom of the first Part of this Book, the Laws of +the Refractions made out of Air into Glass are easily derived. + +_Exper._ 8. I found moreover, that when Light goes out of Air through +several contiguous refracting Mediums as through Water and Glass, and +thence goes out again into Air, whether the refracting Superficies be +parallel or inclin'd to one another, that Light as often as by contrary +Refractions 'tis so corrected, that it emergeth in Lines parallel to +those in which it was incident, continues ever after to be white. But if +the emergent Rays be inclined to the incident, the Whiteness of the +emerging Light will by degrees in passing on from the Place of +Emergence, become tinged in its Edges with Colours. This I try'd by +refracting Light with Prisms of Glass placed within a Prismatick Vessel +of Water. Now those Colours argue a diverging and separation of the +heterogeneous Rays from one another by means of their unequal +Refractions, as in what follows will more fully appear. And, on the +contrary, the permanent whiteness argues, that in like Incidences of the +Rays there is no such separation of the emerging Rays, and by +consequence no inequality of their whole Refractions. Whence I seem to +gather the two following Theorems. + +1. 
The Excesses of the Sines of Refraction of several sorts of Rays +above their common Sine of Incidence when the Refractions are made out +of divers denser Mediums immediately into one and the same rarer Medium, +suppose of Air, are to one another in a given Proportion. + +2. The Proportion of the Sine of Incidence to the Sine of Refraction of +one and the same sort of Rays out of one Medium into another, is +composed of the Proportion of the Sine of Incidence to the Sine of +Refraction out of the first Medium into any third Medium, and of the +Proportion of the Sine of Incidence to the Sine of Refraction out of +that third Medium into the second Medium. + +By the first Theorem the Refractions of the Rays of every sort made out +of any Medium into Air are known by having the Refraction of the Rays of +any one sort. As for instance, if the Refractions of the Rays of every +sort out of Rain-water into Air be desired, let the common Sine of +Incidence out of Glass into Air be subducted from the Sines of +Refraction, and the Excesses will be 27, 27-1/8, 27-1/5, 27-1/3, 27-1/2, +27-2/3, 27-7/9, 28. Suppose now that the Sine of Incidence of the least +refrangible Rays be to their Sine of Refraction out of Rain-water into +Air as 3 to 4, and say as 1 the difference of those Sines is to 3 the +Sine of Incidence, so is 27 the least of the Excesses above-mentioned to +a fourth Number 81; and 81 will be the common Sine of Incidence out of +Rain-water into Air, to which Sine if you add all the above-mentioned +Excesses, you will have the desired Sines of the Refractions 108, +108-1/8, 108-1/5, 108-1/3, 108-1/2, 108-2/3, 108-7/9, 109. + +By the latter Theorem the Refraction out of one Medium into another is +gathered as often as you have the Refractions out of them both into any +third Medium. 
As if the Sine of Incidence of any Ray out of Glass into
+Air be to its Sine of Refraction, as 20 to 31, and the Sine of Incidence
+of the same Ray out of Air into Water, be to its Sine of Refraction as 4
+to 3; the Sine of Incidence of that Ray out of Glass into Water will be
+to its Sine of Refraction as 20 to 31 and 4 to 3 jointly, that is, as
+the Factum of 20 and 4 to the Factum of 31 and 3, or as 80 to 93.
+
+And these Theorems being admitted into Opticks, there would be scope
+enough of handling that Science voluminously after a new manner,[K] not
+only by teaching those things which tend to the perfection of Vision,
+but also by determining mathematically all kinds of Phænomena of Colours
+which could be produced by Refractions. For to do this, there is nothing
+else requisite than to find out the Separations of heterogeneous Rays,
+and their various Mixtures and Proportions in every Mixture. By this
+way of arguing I invented almost all the Phænomena described in these
+Books, beside some others less necessary to the Argument; and by the
+successes I met with in the Trials, I dare promise, that to him who
+shall argue truly, and then try all things with good Glasses and
+sufficient Circumspection, the expected Event will not be wanting. But
+he is first to know what Colours will arise from any others mix'd in any
+assigned Proportion.
+
+
+_PROP._ IV. THEOR. III.
+
+_Colours may be produced by Composition which shall be like to the
+Colours of homogeneal Light as to the Appearance of Colour, but not as
+to the Immutability of Colour and Constitution of Light. And those
+Colours by how much they are more compounded by so much are they less
+full and intense, and by too much Composition they may be diluted and
+weaken'd till they cease, and the Mixture becomes white or grey. 
There +may be also Colours produced by Composition, which are not fully like +any of the Colours of homogeneal Light._ + +For a Mixture of homogeneal red and yellow compounds an Orange, like in +appearance of Colour to that orange which in the series of unmixed +prismatick Colours lies between them; but the Light of one orange is +homogeneal as to Refrangibility, and that of the other is heterogeneal, +and the Colour of the one, if viewed through a Prism, remains unchanged, +that of the other is changed and resolved into its component Colours red +and yellow. And after the same manner other neighbouring homogeneal +Colours may compound new Colours, like the intermediate homogeneal ones, +as yellow and green, the Colour between them both, and afterwards, if +blue be added, there will be made a green the middle Colour of the three +which enter the Composition. For the yellow and blue on either hand, if +they are equal in quantity they draw the intermediate green equally +towards themselves in Composition, and so keep it as it were in +Æquilibrion, that it verge not more to the yellow on the one hand, and +to the blue on the other, but by their mix'd Actions remain still a +middle Colour. To this mix'd green there may be farther added some red +and violet, and yet the green will not presently cease, but only grow +less full and vivid, and by increasing the red and violet, it will grow +more and more dilute, until by the prevalence of the added Colours it be +overcome and turned into whiteness, or some other Colour. So if to the +Colour of any homogeneal Light, the Sun's white Light composed of all +sorts of Rays be added, that Colour will not vanish or change its +Species, but be diluted, and by adding more and more white it will be +diluted more and more perpetually. 
Lastly, If red and violet be mingled,
+there will be generated according to their various Proportions various
+Purples, such as are not like in appearance to the Colour of any
+homogeneal Light, and of these Purples mix'd with yellow and blue may be
+made other new Colours.
+
+
+_PROP._ V. THEOR. IV.
+
+_Whiteness and all grey Colours between white and black, may be
+compounded of Colours, and the whiteness of the Sun's Light is
+compounded of all the primary Colours mix'd in a due Proportion._
+
+The PROOF by Experiments.
+
+_Exper._ 9. The Sun shining into a dark Chamber through a little round
+hole in the Window-shut, and his Light being there refracted by a Prism
+to cast his coloured Image PT [in _Fig._ 5.] upon the opposite Wall: I
+held a white Paper V to that image in such manner that it might be
+illuminated by the colour'd Light reflected from thence, and yet not
+intercept any part of that Light in its passage from the Prism to the
+Spectrum. And I found that when the Paper was held nearer to any Colour
+than to the rest, it appeared of that Colour to which it approached
+nearest; but when it was equally or almost equally distant from all the
+Colours, so that it might be equally illuminated by them all, it appeared
+white. And in this last situation of the Paper, if some Colours were
+intercepted, the Paper lost its white Colour, and appeared of the Colour
+of the rest of the Light which was not intercepted. 
So then the Paper +was illuminated with Lights of various Colours, namely, red, yellow, +green, blue and violet, and every part of the Light retained its proper +Colour, until it was incident on the Paper, and became reflected thence +to the Eye; so that if it had been either alone (the rest of the Light +being intercepted) or if it had abounded most, and been predominant in +the Light reflected from the Paper, it would have tinged the Paper with +its own Colour; and yet being mixed with the rest of the Colours in a +due proportion, it made the Paper look white, and therefore by a +Composition with the rest produced that Colour. The several parts of the +coloured Light reflected from the Spectrum, whilst they are propagated +from thence through the Air, do perpetually retain their proper Colours, +because wherever they fall upon the Eyes of any Spectator, they make the +several parts of the Spectrum to appear under their proper Colours. They +retain therefore their proper Colours when they fall upon the Paper V, +and so by the confusion and perfect mixture of those Colours compound +the whiteness of the Light reflected from thence. + +_Exper._ 10. Let that Spectrum or solar Image PT [in _Fig._ 6.] fall now +upon the Lens MN above four Inches broad, and about six Feet distant +from the Prism ABC and so figured that it may cause the coloured Light +which divergeth from the Prism to converge and meet again at its Focus +G, about six or eight Feet distant from the Lens, and there to fall +perpendicularly upon a white Paper DE. 
And if you move this Paper to and +fro, you will perceive that near the Lens, as at _de_, the whole solar +Image (suppose at _pt_) will appear upon it intensely coloured after the +manner above-explained, and that by receding from the Lens those Colours +will perpetually come towards one another, and by mixing more and more +dilute one another continually, until at length the Paper come to the +Focus G, where by a perfect mixture they will wholly vanish and be +converted into whiteness, the whole Light appearing now upon the Paper +like a little white Circle. And afterwards by receding farther from the +Lens, the Rays which before converged will now cross one another in the +Focus G, and diverge from thence, and thereby make the Colours to appear +again, but yet in a contrary order; suppose at [Greek: de], where the +red _t_ is now above which before was below, and the violet _p_ is below +which before was above. + +Let us now stop the Paper at the Focus G, where the Light appears +totally white and circular, and let us consider its whiteness. I say, +that this is composed of the converging Colours. For if any of those +Colours be intercepted at the Lens, the whiteness will cease and +degenerate into that Colour which ariseth from the composition of the +other Colours which are not intercepted. And then if the intercepted +Colours be let pass and fall upon that compound Colour, they mix with +it, and by their mixture restore the whiteness. So if the violet, blue +and green be intercepted, the remaining yellow, orange and red will +compound upon the Paper an orange, and then if the intercepted Colours +be let pass, they will fall upon this compounded orange, and together +with it decompound a white. So also if the red and violet be +intercepted, the remaining yellow, green and blue, will compound a green +upon the Paper, and then the red and violet being let pass will fall +upon this green, and together with it decompound a white. 
And that in +this Composition of white the several Rays do not suffer any Change in +their colorific Qualities by acting upon one another, but are only +mixed, and by a mixture of their Colours produce white, may farther +appear by these Arguments. + +[Illustration: FIG. 6.] + +If the Paper be placed beyond the Focus G, suppose at [Greek: de], and +then the red Colour at the Lens be alternately intercepted, and let pass +again, the violet Colour on the Paper will not suffer any Change +thereby, as it ought to do if the several sorts of Rays acted upon one +another in the Focus G, where they cross. Neither will the red upon the +Paper be changed by any alternate stopping, and letting pass the violet +which crosseth it. + +And if the Paper be placed at the Focus G, and the white round Image at +G be viewed through the Prism HIK, and by the Refraction of that Prism +be translated to the place _rv_, and there appear tinged with various +Colours, namely, the violet at _v_ and red at _r_, and others between, +and then the red Colours at the Lens be often stopp'd and let pass by +turns, the red at _r_ will accordingly disappear, and return as often, +but the violet at _v_ will not thereby suffer any Change. And so by +stopping and letting pass alternately the blue at the Lens, the blue at +_v_ will accordingly disappear and return, without any Change made in +the red at _r_. The red therefore depends on one sort of Rays, and the +blue on another sort, which in the Focus G where they are commix'd, do +not act on one another. And there is the same Reason of the other +Colours. 
+ +I considered farther, that when the most refrangible Rays P_p_, and the +least refrangible ones T_t_, are by converging inclined to one another, +the Paper, if held very oblique to those Rays in the Focus G, might +reflect one sort of them more copiously than the other sort, and by that +Means the reflected Light would be tinged in that Focus with the Colour +of the predominant Rays, provided those Rays severally retained their +Colours, or colorific Qualities in the Composition of White made by them +in that Focus. But if they did not retain them in that White, but became +all of them severally endued there with a Disposition to strike the +Sense with the Perception of White, then they could never lose their +Whiteness by such Reflexions. I inclined therefore the Paper to the Rays +very obliquely, as in the second Experiment of this second Part of the +first Book, that the most refrangible Rays, might be more copiously +reflected than the rest, and the Whiteness at Length changed +successively into blue, indigo, and violet. Then I inclined it the +contrary Way, that the least refrangible Rays might be more copious in +the reflected Light than the rest, and the Whiteness turned successively +to yellow, orange, and red. + +Lastly, I made an Instrument XY in fashion of a Comb, whose Teeth being +in number sixteen, were about an Inch and a half broad, and the +Intervals of the Teeth about two Inches wide. Then by interposing +successively the Teeth of this Instrument near the Lens, I intercepted +Part of the Colours by the interposed Tooth, whilst the rest of them +went on through the Interval of the Teeth to the Paper DE, and there +painted a round Solar Image. 
But the Paper I had first placed so, that +the Image might appear white as often as the Comb was taken away; and +then the Comb being as was said interposed, that Whiteness by reason of +the intercepted Part of the Colours at the Lens did always change into +the Colour compounded of those Colours which were not intercepted, and +that Colour was by the Motion of the Comb perpetually varied so, that in +the passing of every Tooth over the Lens all these Colours, red, yellow, +green, blue, and purple, did always succeed one another. I caused +therefore all the Teeth to pass successively over the Lens, and when the +Motion was slow, there appeared a perpetual Succession of the Colours +upon the Paper: But if I so much accelerated the Motion, that the +Colours by reason of their quick Succession could not be distinguished +from one another, the Appearance of the single Colours ceased. There was +no red, no yellow, no green, no blue, nor purple to be seen any longer, +but from a Confusion of them all there arose one uniform white Colour. +Of the Light which now by the Mixture of all the Colours appeared white, +there was no Part really white. One Part was red, another yellow, a +third green, a fourth blue, a fifth purple, and every Part retains its +proper Colour till it strike the Sensorium. If the Impressions follow +one another slowly, so that they may be severally perceived, there is +made a distinct Sensation of all the Colours one after another in a +continual Succession. But if the Impressions follow one another so +quickly, that they cannot be severally perceived, there ariseth out of +them all one common Sensation, which is neither of this Colour alone nor +of that alone, but hath it self indifferently to 'em all, and this is a +Sensation of Whiteness. By the Quickness of the Successions, the +Impressions of the several Colours are confounded in the Sensorium, and +out of that Confusion ariseth a mix'd Sensation. 
If a burning Coal be +nimbly moved round in a Circle with Gyrations continually repeated, the +whole Circle will appear like Fire; the reason of which is, that the +Sensation of the Coal in the several Places of that Circle remains +impress'd on the Sensorium, until the Coal return again to the same +Place. And so in a quick Consecution of the Colours the Impression of +every Colour remains in the Sensorium, until a Revolution of all the +Colours be compleated, and that first Colour return again. The +Impressions therefore of all the successive Colours are at once in the +Sensorium, and jointly stir up a Sensation of them all; and so it is +manifest by this Experiment, that the commix'd Impressions of all the +Colours do stir up and beget a Sensation of white, that is, that +Whiteness is compounded of all the Colours. + +And if the Comb be now taken away, that all the Colours may at once pass +from the Lens to the Paper, and be there intermixed, and together +reflected thence to the Spectator's Eyes; their Impressions on the +Sensorium being now more subtilly and perfectly commixed there, ought +much more to stir up a Sensation of Whiteness. + +You may instead of the Lens use two Prisms HIK and LMN, which by +refracting the coloured Light the contrary Way to that of the first +Refraction, may make the diverging Rays converge and meet again in G, as +you see represented in the seventh Figure. For where they meet and mix, +they will compose a white Light, as when a Lens is used. + +_Exper._ 11. Let the Sun's coloured Image PT [in _Fig._ 8.] fall upon +the Wall of a dark Chamber, as in the third Experiment of the first +Book, and let the same be viewed through a Prism _abc_, held parallel to +the Prism ABC, by whose Refraction that Image was made, and let it now +appear lower than before, suppose in the Place S over-against the red +Colour T. 
And if you go near to the Image PT, the Spectrum S will appear +oblong and coloured like the Image PT; but if you recede from it, the +Colours of the spectrum S will be contracted more and more, and at +length vanish, that Spectrum S becoming perfectly round and white; and +if you recede yet farther, the Colours will emerge again, but in a +contrary Order. Now that Spectrum S appears white in that Case, when the +Rays of several sorts which converge from the several Parts of the Image +PT, to the Prism _abc_, are so refracted unequally by it, that in their +Passage from the Prism to the Eye they may diverge from one and the same +Point of the Spectrum S, and so fall afterwards upon one and the same +Point in the bottom of the Eye, and there be mingled. + +[Illustration: FIG. 7.] + +[Illustration: FIG. 8.] + +And farther, if the Comb be here made use of, by whose Teeth the Colours +at the Image PT may be successively intercepted; the Spectrum S, when +the Comb is moved slowly, will be perpetually tinged with successive +Colours: But when by accelerating the Motion of the Comb, the Succession +of the Colours is so quick that they cannot be severally seen, that +Spectrum S, by a confused and mix'd Sensation of them all, will appear +white. + +_Exper._ 12. The Sun shining through a large Prism ABC [in _Fig._ 9.] +upon a Comb XY, placed immediately behind the Prism, his Light which +passed through the Interstices of the Teeth fell upon a white Paper DE. +The Breadths of the Teeth were equal to their Interstices, and seven +Teeth together with their Interstices took up an Inch in Breadth. Now, +when the Paper was about two or three Inches distant from the Comb, the +Light which passed through its several Interstices painted so many +Ranges of Colours, _kl_, _mn_, _op_, _qr_, &c. which were parallel to +one another, and contiguous, and without any Mixture of white. 
And these +Ranges of Colours, if the Comb was moved continually up and down with a +reciprocal Motion, ascended and descended in the Paper, and when the +Motion of the Comb was so quick, that the Colours could not be +distinguished from one another, the whole Paper by their Confusion and +Mixture in the Sensorium appeared white. + +[Illustration: FIG. 9.] + +Let the Comb now rest, and let the Paper be removed farther from the +Prism, and the several Ranges of Colours will be dilated and expanded +into one another more and more, and by mixing their Colours will dilute +one another, and at length, when the distance of the Paper from the Comb +is about a Foot, or a little more (suppose in the Place 2D 2E) they will +so far dilute one another, as to become white. + +With any Obstacle, let all the Light be now stopp'd which passes through +any one Interval of the Teeth, so that the Range of Colours which comes +from thence may be taken away, and you will see the Light of the rest of +the Ranges to be expanded into the Place of the Range taken away, and +there to be coloured. Let the intercepted Range pass on as before, and +its Colours falling upon the Colours of the other Ranges, and mixing +with them, will restore the Whiteness. + +Let the Paper 2D 2E be now very much inclined to the Rays, so that the +most refrangible Rays may be more copiously reflected than the rest, and +the white Colour of the Paper through the Excess of those Rays will be +changed into blue and violet. Let the Paper be as much inclined the +contrary way, that the least refrangible Rays may be now more copiously +reflected than the rest, and by their Excess the Whiteness will be +changed into yellow and red. The several Rays therefore in that white +Light do retain their colorific Qualities, by which those of any sort, +whenever they become more copious than the rest, do by their Excess and +Predominance cause their proper Colour to appear. 
+ +And by the same way of arguing, applied to the third Experiment of this +second Part of the first Book, it may be concluded, that the white +Colour of all refracted Light at its very first Emergence, where it +appears as white as before its Incidence, is compounded of various +Colours. + +[Illustration: FIG. 10.] + +_Exper._ 13. In the foregoing Experiment the several Intervals of the +Teeth of the Comb do the Office of so many Prisms, every Interval +producing the Phænomenon of one Prism. Whence instead of those Intervals +using several Prisms, I try'd to compound Whiteness by mixing their +Colours, and did it by using only three Prisms, as also by using only +two as follows. Let two Prisms ABC and _abc_, [in _Fig._ 10.] whose +refracting Angles B and _b_ are equal, be so placed parallel to one +another, that the refracting Angle B of the one may touch the Angle _c_ +at the Base of the other, and their Planes CB and _cb_, at which the +Rays emerge, may lie in Directum. Then let the Light trajected through +them fall upon the Paper MN, distant about 8 or 12 Inches from the +Prisms. And the Colours generated by the interior Limits B and _c_ of +the two Prisms, will be mingled at PT, and there compound white. For if +either Prism be taken away, the Colours made by the other will appear in +that Place PT, and when the Prism is restored to its Place again, so +that its Colours may there fall upon the Colours of the other, the +Mixture of them both will restore the Whiteness. + +This Experiment succeeds also, as I have tried, when the Angle _b_ of +the lower Prism, is a little greater than the Angle B of the upper, and +between the interior Angles B and _c_, there intercedes some Space B_c_, +as is represented in the Figure, and the refracting Planes BC and _bc_, +are neither in Directum, nor parallel to one another. 
For there is +nothing more requisite to the Success of this Experiment, than that the +Rays of all sorts may be uniformly mixed upon the Paper in the Place PT. +If the most refrangible Rays coming from the superior Prism take up all +the Space from M to P, the Rays of the same sort which come from the +inferior Prism ought to begin at P, and take up all the rest of the +Space from thence towards N. If the least refrangible Rays coming from +the superior Prism take up the Space MT, the Rays of the same kind which +come from the other Prism ought to begin at T, and take up the +remaining Space TN. If one sort of the Rays which have intermediate +Degrees of Refrangibility, and come from the superior Prism be extended +through the Space MQ, and another sort of those Rays through the Space +MR, and a third sort of them through the Space MS, the same sorts of +Rays coming from the lower Prism, ought to illuminate the remaining +Spaces QN, RN, SN, respectively. And the same is to be understood of all +the other sorts of Rays. For thus the Rays of every sort will be +scattered uniformly and evenly through the whole Space MN, and so being +every where mix'd in the same Proportion, they must every where produce +the same Colour. And therefore, since by this Mixture they produce white +in the Exterior Spaces MP and TN, they must also produce white in the +Interior Space PT. This is the reason of the Composition by which +Whiteness was produced in this Experiment, and by what other way soever +I made the like Composition, the Result was Whiteness. + +Lastly, If with the Teeth of a Comb of a due Size, the coloured Lights +of the two Prisms which fall upon the Space PT be alternately +intercepted, that Space PT, when the Motion of the Comb is slow, will +always appear coloured, but by accelerating the Motion of the Comb so +much that the successive Colours cannot be distinguished from one +another, it will appear white. + +_Exper._ 14. 
Hitherto I have produced Whiteness by mixing the Colours of +Prisms. If now the Colours of natural Bodies are to be mingled, let +Water a little thicken'd with Soap be agitated to raise a Froth, and +after that Froth has stood a little, there will appear to one that shall +view it intently various Colours every where in the Surfaces of the +several Bubbles; but to one that shall go so far off, that he cannot +distinguish the Colours from one another, the whole Froth will grow +white with a perfect Whiteness. + +_Exper._ 15. Lastly, In attempting to compound a white, by mixing the +coloured Powders which Painters use, I consider'd that all colour'd +Powders do suppress and stop in them a very considerable Part of the +Light by which they are illuminated. For they become colour'd by +reflecting the Light of their own Colours more copiously, and that of +all other Colours more sparingly, and yet they do not reflect the Light +of their own Colours so copiously as white Bodies do. If red Lead, for +instance, and a white Paper, be placed in the red Light of the colour'd +Spectrum made in a dark Chamber by the Refraction of a Prism, as is +described in the third Experiment of the first Part of this Book; the +Paper will appear more lucid than the red Lead, and therefore reflects +the red-making Rays more copiously than red Lead doth. And if they be +held in the Light of any other Colour, the Light reflected by the Paper +will exceed the Light reflected by the red Lead in a much greater +Proportion. And the like happens in Powders of other Colours. And +therefore by mixing such Powders, we are not to expect a strong and +full White, such as is that of Paper, but some dusky obscure one, such +as might arise from a Mixture of Light and Darkness, or from white and +black, that is, a grey, or dun, or russet brown, such as are the Colours +of a Man's Nail, of a Mouse, of Ashes, of ordinary Stones, of Mortar, of +Dust and Dirt in High-ways, and the like. 
And such a dark white I have +often produced by mixing colour'd Powders. For thus one Part of red +Lead, and five Parts of _Viride Æris_, composed a dun Colour like that +of a Mouse. For these two Colours were severally so compounded of +others, that in both together were a Mixture of all Colours; and there +was less red Lead used than _Viride Æris_, because of the Fulness of its +Colour. Again, one Part of red Lead, and four Parts of blue Bise, +composed a dun Colour verging a little to purple, and by adding to this +a certain Mixture of Orpiment and _Viride Æris_ in a due Proportion, the +Mixture lost its purple Tincture, and became perfectly dun. But the +Experiment succeeded best without Minium thus. To Orpiment I added by +little and little a certain full bright purple, which Painters use, +until the Orpiment ceased to be yellow, and became of a pale red. Then I +diluted that red by adding a little _Viride Æris_, and a little more +blue Bise than _Viride Æris_, until it became of such a grey or pale +white, as verged to no one of the Colours more than to another. For thus +it became of a Colour equal in Whiteness to that of Ashes, or of Wood +newly cut, or of a Man's Skin. The Orpiment reflected more Light than +did any other of the Powders, and therefore conduced more to the +Whiteness of the compounded Colour than they. To assign the Proportions +accurately may be difficult, by reason of the different Goodness of +Powders of the same kind. Accordingly, as the Colour of any Powder is +more or less full and luminous, it ought to be used in a less or greater +Proportion. 
+ +Now, considering that these grey and dun Colours may be also produced by +mixing Whites and Blacks, and by consequence differ from perfect Whites, +not in Species of Colours, but only in degree of Luminousness, it is +manifest that there is nothing more requisite to make them perfectly +white than to increase their Light sufficiently; and, on the contrary, +if by increasing their Light they can be brought to perfect Whiteness, +it will thence also follow, that they are of the same Species of Colour +with the best Whites, and differ from them only in the Quantity of +Light. And this I tried as follows. I took the third of the +above-mention'd grey Mixtures, (that which was compounded of Orpiment, +Purple, Bise, and _Viride Æris_) and rubbed it thickly upon the Floor of +my Chamber, where the Sun shone upon it through the opened Casement; and +by it, in the shadow, I laid a Piece of white Paper of the same Bigness. +Then going from them to the distance of 12 or 18 Feet, so that I could +not discern the Unevenness of the Surface of the Powder, nor the little +Shadows let fall from the gritty Particles thereof; the Powder appeared +intensely white, so as to transcend even the Paper it self in Whiteness, +especially if the Paper were a little shaded from the Light of the +Clouds, and then the Paper compared with the Powder appeared of such a +grey Colour as the Powder had done before. But by laying the Paper where +the Sun shines through the Glass of the Window, or by shutting the +Window that the Sun might shine through the Glass upon the Powder, and +by such other fit Means of increasing or decreasing the Lights wherewith +the Powder and Paper were illuminated, the Light wherewith the Powder is +illuminated may be made stronger in such a due Proportion than the Light +wherewith the Paper is illuminated, that they shall both appear exactly +alike in Whiteness. 
For when I was trying this, a Friend coming to visit
+me, I stopp'd him at the Door, and before I told him what the Colours
+were, or what I was doing; I asked him, Which of the two Whites were the
+best, and wherein they differed? And after he had at that distance
+viewed them well, he answer'd, that they were both good Whites, and that
+he could not say which was best, nor wherein their Colours differed.
+Now, if you consider, that this White of the Powder in the Sun-shine was
+compounded of the Colours which the component Powders (Orpiment, Purple,
+Bise, and _Viride Æris_) have in the same Sun-shine, you must
+acknowledge by this Experiment, as well as by the former, that perfect
+Whiteness may be compounded of Colours.
+
+From what has been said it is also evident, that the Whiteness of the
+Sun's Light is compounded of all the Colours wherewith the several sorts
+of Rays whereof that Light consists, when by their several
+Refrangibilities they are separated from one another, do tinge Paper or
+any other white Body whereon they fall. For those Colours (by _Prop._
+II. _Part_ 2.) are unchangeable, and whenever all those Rays with those
+their Colours are mix'd again, they reproduce the same white Light as
+before.
+
+
+_PROP._ VI. PROB. II.
+
+_In a mixture of Primary Colours, the Quantity and Quality of each being
+given, to know the Colour of the Compound._
+
+[Illustration: FIG. 11.]
+
+With the Center O [in _Fig._ 11.] and Radius OD describe a Circle ADF,
+and distinguish its Circumference into seven Parts DE, EF, FG, GA, AB,
+BC, CD, proportional to the seven Musical Tones or Intervals of the
+eight Sounds, _Sol_, _la_, _fa_, _sol_, _la_, _mi_, _fa_, _sol_,
+contained in an eight, that is, proportional to the Number 1/9, 1/16,
+1/10, 1/9, 1/10, 1/16, 1/9. Let the first Part DE represent a red
+Colour, the second EF orange, the third FG yellow, the fourth GA green,
+the fifth AB blue, the sixth BC indigo, and the seventh CD violet. 
And +conceive that these are all the Colours of uncompounded Light gradually +passing into one another, as they do when made by Prisms; the +Circumference DEFGABCD, representing the whole Series of Colours from +one end of the Sun's colour'd Image to the other, so that from D to E be +all degrees of red, at E the mean Colour between red and orange, from E +to F all degrees of orange, at F the mean between orange and yellow, +from F to G all degrees of yellow, and so on. Let _p_ be the Center of +Gravity of the Arch DE, and _q_, _r_, _s_, _t_, _u_, _x_, the Centers of +Gravity of the Arches EF, FG, GA, AB, BC, and CD respectively, and about +those Centers of Gravity let Circles proportional to the Number of Rays +of each Colour in the given Mixture be describ'd: that is, the Circle +_p_ proportional to the Number of the red-making Rays in the Mixture, +the Circle _q_ proportional to the Number of the orange-making Rays in +the Mixture, and so of the rest. Find the common Center of Gravity of +all those Circles, _p_, _q_, _r_, _s_, _t_, _u_, _x_. Let that Center be +Z; and from the Center of the Circle ADF, through Z to the +Circumference, drawing the Right Line OY, the Place of the Point Y in +the Circumference shall shew the Colour arising from the Composition of +all the Colours in the given Mixture, and the Line OZ shall be +proportional to the Fulness or Intenseness of the Colour, that is, to +its distance from Whiteness. As if Y fall in the middle between F and G, +the compounded Colour shall be the best yellow; if Y verge from the +middle towards F or G, the compound Colour shall accordingly be a +yellow, verging towards orange or green. 
If Z fall upon the +Circumference, the Colour shall be intense and florid in the highest +Degree; if it fall in the mid-way between the Circumference and Center, +it shall be but half so intense, that is, it shall be such a Colour as +would be made by diluting the intensest yellow with an equal quantity of +whiteness; and if it fall upon the center O, the Colour shall have lost +all its intenseness, and become a white. But it is to be noted, That if +the point Z fall in or near the line OD, the main ingredients being the +red and violet, the Colour compounded shall not be any of the prismatick +Colours, but a purple, inclining to red or violet, accordingly as the +point Z lieth on the side of the line DO towards E or towards C, and in +general the compounded violet is more bright and more fiery than the +uncompounded. Also if only two of the primary Colours which in the +circle are opposite to one another be mixed in an equal proportion, the +point Z shall fall upon the center O, and yet the Colour compounded of +those two shall not be perfectly white, but some faint anonymous Colour. +For I could never yet by mixing only two primary Colours produce a +perfect white. Whether it may be compounded of a mixture of three taken +at equal distances in the circumference I do not know, but of four or +five I do not much question but it may. But these are Curiosities of +little or no moment to the understanding the Phænomena of Nature. For in +all whites produced by Nature, there uses to be a mixture of all sorts +of Rays, and by consequence a composition of all Colours. + +To give an instance of this Rule; suppose a Colour is compounded of +these homogeneal Colours, of violet one part, of indigo one part, of +blue two parts, of green three parts, of yellow five parts, of orange +six parts, and of red ten parts. 
Proportional to these parts describe +the Circles _x_, _v_, _t_, _s_, _r_, _q_, _p_, respectively, that is, so +that if the Circle _x_ be one, the Circle _v_ may be one, the Circle _t_ +two, the Circle _s_ three, and the Circles _r_, _q_ and _p_, five, six +and ten. Then I find Z the common center of gravity of these Circles, +and through Z drawing the Line OY, the Point Y falls upon the +circumference between E and F, something nearer to E than to F, and +thence I conclude, that the Colour compounded of these Ingredients will +be an orange, verging a little more to red than to yellow. Also I find +that OZ is a little less than one half of OY, and thence I conclude, +that this orange hath a little less than half the fulness or intenseness +of an uncompounded orange; that is to say, that it is such an orange as +may be made by mixing an homogeneal orange with a good white in the +proportion of the Line OZ to the Line ZY, this Proportion being not of +the quantities of mixed orange and white Powders, but of the quantities +of the Lights reflected from them. + +This Rule I conceive accurate enough for practice, though not +mathematically accurate; and the truth of it may be sufficiently proved +to Sense, by stopping any of the Colours at the Lens in the tenth +Experiment of this Book. For the rest of the Colours which are not +stopp'd, but pass on to the Focus of the Lens, will there compound +either accurately or very nearly such a Colour, as by this Rule ought to +result from their Mixture. + + +_PROP._ VII. THEOR. V. + +_All the Colours in the Universe which are made by Light, and depend not +on the Power of Imagination, are either the Colours of homogeneal +Lights, or compounded of these, and that either accurately or very +nearly, according to the Rule of the foregoing Problem._ + +For it has been proved (in _Prop. 1. 
Part 2._) that the changes of +Colours made by Refractions do not arise from any new Modifications of +the Rays impress'd by those Refractions, and by the various Terminations +of Light and Shadow, as has been the constant and general Opinion of +Philosophers. It has also been proved that the several Colours of the +homogeneal Rays do constantly answer to their degrees of Refrangibility, +(_Prop._ 1. _Part_ 1. and _Prop._ 2. _Part_ 2.) and that their degrees +of Refrangibility cannot be changed by Refractions and Reflexions +(_Prop._ 2. _Part_ 1.) and by consequence that those their Colours are +likewise immutable. It has also been proved directly by refracting and +reflecting homogeneal Lights apart, that their Colours cannot be +changed, (_Prop._ 2. _Part_ 2.) It has been proved also, that when the +several sorts of Rays are mixed, and in crossing pass through the same +space, they do not act on one another so as to change each others +colorific qualities. (_Exper._ 10. _Part_ 2.) but by mixing their +Actions in the Sensorium beget a Sensation differing from what either +would do apart, that is a Sensation of a mean Colour between their +proper Colours; and particularly when by the concourse and mixtures of +all sorts of Rays, a white Colour is produced, the white is a mixture of +all the Colours which the Rays would have apart, (_Prop._ 5. _Part_ 2.) +The Rays in that mixture do not lose or alter their several colorific +qualities, but by all their various kinds of Actions mix'd in the +Sensorium, beget a Sensation of a middling Colour between all their +Colours, which is whiteness. For whiteness is a mean between all +Colours, having it self indifferently to them all, so as with equal +facility to be tinged with any of them. 
A red Powder mixed with a little +blue, or a blue with a little red, doth not presently lose its Colour, +but a white Powder mix'd with any Colour is presently tinged with that +Colour, and is equally capable of being tinged with any Colour whatever. +It has been shewed also, that as the Sun's Light is mix'd of all sorts +of Rays, so its whiteness is a mixture of the Colours of all sorts of +Rays; those Rays having from the beginning their several colorific +qualities as well as their several Refrangibilities, and retaining them +perpetually unchanged notwithstanding any Refractions or Reflexions they +may at any time suffer, and that whenever any sort of the Sun's Rays is +by any means (as by Reflexion in _Exper._ 9, and 10. _Part_ 1. or by +Refraction as happens in all Refractions) separated from the rest, they +then manifest their proper Colours. These things have been prov'd, and +the sum of all this amounts to the Proposition here to be proved. For if +the Sun's Light is mix'd of several sorts of Rays, each of which have +originally their several Refrangibilities and colorific Qualities, and +notwithstanding their Refractions and Reflexions, and their various +Separations or Mixtures, keep those their original Properties +perpetually the same without alteration; then all the Colours in the +World must be such as constantly ought to arise from the original +colorific qualities of the Rays whereof the Lights consist by which +those Colours are seen. And therefore if the reason of any Colour +whatever be required, we have nothing else to do than to consider how +the Rays in the Sun's Light have by Reflexions or Refractions, or other +causes, been parted from one another, or mixed together; or otherwise to +find out what sorts of Rays are in the Light by which that Colour is +made, and in what Proportion; and then by the last Problem to learn the +Colour which ought to arise by mixing those Rays (or their Colours) in +that proportion. 
I speak here of Colours so far as they arise from +Light. For they appear sometimes by other Causes, as when by the power +of Phantasy we see Colours in a Dream, or a Mad-man sees things before +him which are not there; or when we see Fire by striking the Eye, or see +Colours like the Eye of a Peacock's Feather, by pressing our Eyes in +either corner whilst we look the other way. Where these and such like +Causes interpose not, the Colour always answers to the sort or sorts of +the Rays whereof the Light consists, as I have constantly found in +whatever Phænomena of Colours I have hitherto been able to examine. I +shall in the following Propositions give instances of this in the +Phænomena of chiefest note. + + +_PROP._ VIII. PROB. III. + +_By the discovered Properties of Light to explain the Colours made by +Prisms._ + +Let ABC [in _Fig._ 12.] represent a Prism refracting the Light of the +Sun, which comes into a dark Chamber through a hole F[Greek: ph] almost +as broad as the Prism, and let MN represent a white Paper on which the +refracted Light is cast, and suppose the most refrangible or deepest +violet-making Rays fall upon the Space P[Greek: p], the least +refrangible or deepest red-making Rays upon the Space T[Greek: t], the +middle sort between the indigo-making and blue-making Rays upon the +Space Q[Greek: ch], the middle sort of the green-making Rays upon the +Space R, the middle sort between the yellow-making and orange-making +Rays upon the Space S[Greek: s], and other intermediate sorts upon +intermediate Spaces. For so the Spaces upon which the several sorts +adequately fall will by reason of the different Refrangibility of those +sorts be one lower than another. 
Now if the Paper MN be so near the +Prism that the Spaces PT and [Greek: pt] do not interfere with one +another, the distance between them T[Greek: p] will be illuminated by +all the sorts of Rays in that proportion to one another which they have +at their very first coming out of the Prism, and consequently be white. +But the Spaces PT and [Greek: pt] on either hand, will not be +illuminated by them all, and therefore will appear coloured. And +particularly at P, where the outmost violet-making Rays fall alone, the +Colour must be the deepest violet. At Q where the violet-making and +indigo-making Rays are mixed, it must be a violet inclining much to +indigo. At R where the violet-making, indigo-making, blue-making, and +one half of the green-making Rays are mixed, their Colours must (by the +construction of the second Problem) compound a middle Colour between +indigo and blue. At S where all the Rays are mixed, except the +red-making and orange-making, their Colours ought by the same Rule to +compound a faint blue, verging more to green than indigo. And in the +progress from S to T, this blue will grow more and more faint and +dilute, till at T, where all the Colours begin to be mixed, it ends in +whiteness. + +[Illustration: FIG. 12.] + +So again, on the other side of the white at [Greek: t], where the least +refrangible or utmost red-making Rays are alone, the Colour must be the +deepest red. At [Greek: s] the mixture of red and orange will compound a +red inclining to orange. At [Greek: r] the mixture of red, orange, +yellow, and one half of the green must compound a middle Colour between +orange and yellow. At [Greek: ch] the mixture of all Colours but violet +and indigo will compound a faint yellow, verging more to green than to +orange. And this yellow will grow more faint and dilute continually in +its progress from [Greek: ch] to [Greek: p], where by a mixture of all +sorts of Rays it will become white. 
+ +These Colours ought to appear were the Sun's Light perfectly white: But +because it inclines to yellow, the Excess of the yellow-making Rays +whereby 'tis tinged with that Colour, being mixed with the faint blue +between S and T, will draw it to a faint green. And so the Colours in +order from P to [Greek: t] ought to be violet, indigo, blue, very faint +green, white, faint yellow, orange, red. Thus it is by the computation: +And they that please to view the Colours made by a Prism will find it so +in Nature. + +These are the Colours on both sides the white when the Paper is held +between the Prism and the Point X where the Colours meet, and the +interjacent white vanishes. For if the Paper be held still farther off +from the Prism, the most refrangible and least refrangible Rays will be +wanting in the middle of the Light, and the rest of the Rays which are +found there, will by mixture produce a fuller green than before. Also +the yellow and blue will now become less compounded, and by consequence +more intense than before. And this also agrees with experience. + +And if one look through a Prism upon a white Object encompassed with +blackness or darkness, the reason of the Colours arising on the edges is +much the same, as will appear to one that shall a little consider it. If +a black Object be encompassed with a white one, the Colours which appear +through the Prism are to be derived from the Light of the white one, +spreading into the Regions of the black, and therefore they appear in a +contrary order to that, when a white Object is surrounded with black. +And the same is to be understood when an Object is viewed, whose parts +are some of them less luminous than others. For in the borders of the +more and less luminous Parts, Colours ought always by the same +Principles to arise from the Excess of the Light of the more luminous, +and to be of the same kind as if the darker parts were black, but yet to +be more faint and dilute. 
+ +What is said of Colours made by Prisms may be easily applied to Colours +made by the Glasses of Telescopes or Microscopes, or by the Humours of +the Eye. For if the Object-glass of a Telescope be thicker on one side +than on the other, or if one half of the Glass, or one half of the Pupil +of the Eye be cover'd with any opake substance; the Object-glass, or +that part of it or of the Eye which is not cover'd, may be consider'd as +a Wedge with crooked Sides, and every Wedge of Glass or other pellucid +Substance has the effect of a Prism in refracting the Light which passes +through it.[L] + +How the Colours in the ninth and tenth Experiments of the first Part +arise from the different Reflexibility of Light, is evident by what was +there said. But it is observable in the ninth Experiment, that whilst +the Sun's direct Light is yellow, the Excess of the blue-making Rays in +the reflected beam of Light MN, suffices only to bring that yellow to a +pale white inclining to blue, and not to tinge it with a manifestly blue +Colour. To obtain therefore a better blue, I used instead of the yellow +Light of the Sun the white Light of the Clouds, by varying a little the +Experiment, as follows. + +[Illustration: FIG. 13.] + +_Exper._ 16. Let HFG [in _Fig._ 13.] represent a Prism in the open Air, +and S the Eye of the Spectator, viewing the Clouds by their Light coming +into the Prism at the Plane Side FIGK, and reflected in it by its Base +HEIG, and thence going out through its Plane Side HEFK to the Eye. And +when the Prism and Eye are conveniently placed, so that the Angles of +Incidence and Reflexion at the Base may be about 40 Degrees, the +Spectator will see a Bow MN of a blue Colour, running from one End of +the Base to the other, with the Concave Side towards him, and the Part +of the Base IMNG beyond this Bow will be brighter than the other Part +EMNH on the other Side of it. 
This blue Colour MN being made by nothing +else than by Reflexion of a specular Superficies, seems so odd a +Phænomenon, and so difficult to be explained by the vulgar Hypothesis of +Philosophers, that I could not but think it deserved to be taken Notice +of. Now for understanding the Reason of it, suppose the Plane ABC to cut +the Plane Sides and Base of the Prism perpendicularly. From the Eye to +the Line BC, wherein that Plane cuts the Base, draw the Lines S_p_ and +S_t_, in the Angles S_pc_ 50 degr. 1/9, and S_tc_ 49 degr. 1/28, and the +Point _p_ will be the Limit beyond which none of the most refrangible +Rays can pass through the Base of the Prism, and be refracted, whose +Incidence is such that they may be reflected to the Eye; and the Point +_t_ will be the like Limit for the least refrangible Rays, that is, +beyond which none of them can pass through the Base, whose Incidence is +such that by Reflexion they may come to the Eye. And the Point _r_ taken +in the middle Way between _p_ and _t_, will be the like Limit for the +meanly refrangible Rays. And therefore all the least refrangible Rays +which fall upon the Base beyond _t_, that is, between _t_ and B, and can +come from thence to the Eye, will be reflected thither: But on this side +_t_, that is, between _t_ and _c_, many of these Rays will be +transmitted through the Base. And all the most refrangible Rays which +fall upon the Base beyond _p_, that is, between _p_ and B, and can by +Reflexion come from thence to the Eye, will be reflected thither, but +every where between _p_ and _c_, many of these Rays will get through the +Base, and be refracted; and the same is to be understood of the meanly +refrangible Rays on either side of the Point _r_. Whence it follows, +that the Base of the Prism must every where between _t_ and B, by a +total Reflexion of all sorts of Rays to the Eye, look white and bright. 
+And every where between _p_ and C, by reason of the Transmission of many +Rays of every sort, look more pale, obscure, and dark. But at _r_, and +in other Places between _p_ and _t_, where all the more refrangible Rays +are reflected to the Eye, and many of the less refrangible are +transmitted, the Excess of the most refrangible in the reflected Light +will tinge that Light with their Colour, which is violet and blue. And +this happens by taking the Line C _prt_ B any where between the Ends of +the Prism HG and EI. + + +_PROP._ IX. PROB. IV. + +_By the discovered Properties of Light to explain the Colours of the +Rain-bow._ + +[Illustration: FIG. 14.] + +This Bow never appears, but where it rains in the Sun-shine, and may be +made artificially by spouting up Water which may break aloft, and +scatter into Drops, and fall down like Rain. For the Sun shining upon +these Drops certainly causes the Bow to appear to a Spectator standing +in a due Position to the Rain and Sun. And hence it is now agreed upon, +that this Bow is made by Refraction of the Sun's Light in drops of +falling Rain. This was understood by some of the Antients, and of late +more fully discover'd and explain'd by the famous _Antonius de Dominis_ +Archbishop of _Spalato_, in his book _De Radiis Visûs & Lucis_, +published by his Friend _Bartolus_ at _Venice_, in the Year 1611, and +written above 20 Years before. For he teaches there how the interior Bow +is made in round Drops of Rain by two Refractions of the Sun's Light, +and one Reflexion between them, and the exterior by two Refractions, and +two sorts of Reflexions between them in each Drop of Water, and proves +his Explications by Experiments made with a Phial full of Water, and +with Globes of Glass filled with Water, and placed in the Sun to make +the Colours of the two Bows appear in them. The same Explication +_Des-Cartes_ hath pursued in his Meteors, and mended that of the +exterior Bow. 
But whilst they understood not the true Origin of Colours, +it's necessary to pursue it here a little farther. For understanding +therefore how the Bow is made, let a Drop of Rain, or any other +spherical transparent Body be represented by the Sphere BNFG, [in _Fig._ +14.] described with the Center C, and Semi-diameter CN. And let AN be +one of the Sun's Rays incident upon it at N, and thence refracted to F, +where let it either go out of the Sphere by Refraction towards V, or be +reflected to G; and at G let it either go out by Refraction to R, or be +reflected to H; and at H let it go out by Refraction towards S, cutting +the incident Ray in Y. Produce AN and RG, till they meet in X, and upon +AX and NF, let fall the Perpendiculars CD and CE, and produce CD till it +fall upon the Circumference at L. Parallel to the incident Ray AN draw +the Diameter BQ, and let the Sine of Incidence out of Air into Water be +to the Sine of Refraction as I to R. Now, if you suppose the Point of +Incidence N to move from the Point B, continually till it come to L, the +Arch QF will first increase and then decrease, and so will the Angle AXR +which the Rays AN and GR contain; and the Arch QF and Angle AXR will be +biggest when ND is to CN as sqrt(II - RR) to sqrt(3)RR, in which +case NE will be to ND as 2R to I. Also the Angle AYS, which the Rays AN +and HS contain will first decrease, and then increase and grow least +when ND is to CN as sqrt(II - RR) to sqrt(8)RR, in which case NE +will be to ND, as 3R to I. And so the Angle which the next emergent Ray +(that is, the emergent Ray after three Reflexions) contains with the +incident Ray AN will come to its Limit when ND is to CN as sqrt(II - +RR) to sqrt(15)RR, in which case NE will be to ND as 4R to I. 
And the +Angle which the Ray next after that Emergent, that is, the Ray emergent +after four Reflexions, contains with the Incident, will come to its +Limit, when ND is to CN as sqrt(II - RR) to sqrt(24)RR, in which +case NE will be to ND as 5R to I; and so on infinitely, the Numbers 3, +8, 15, 24, &c. being gather'd by continual Addition of the Terms of the +arithmetical Progression 3, 5, 7, 9, &c. The Truth of all this +Mathematicians will easily examine.[M] + +Now it is to be observed, that as when the Sun comes to his Tropicks, +Days increase and decrease but a very little for a great while together; +so when by increasing the distance CD, these Angles come to their +Limits, they vary their quantity but very little for some time together, +and therefore a far greater number of the Rays which fall upon all the +Points N in the Quadrant BL, shall emerge in the Limits of these Angles, +than in any other Inclinations. And farther it is to be observed, that +the Rays which differ in Refrangibility will have different Limits of +their Angles of Emergence, and by consequence according to their +different Degrees of Refrangibility emerge most copiously in different +Angles, and being separated from one another appear each in their proper +Colours. And what those Angles are may be easily gather'd from the +foregoing Theorem by Computation. + +For in the least refrangible Rays the Sines I and R (as was found above) +are 108 and 81, and thence by Computation the greatest Angle AXR will be +found 42 Degrees and 2 Minutes, and the least Angle AYS, 50 Degrees and +57 Minutes. And in the most refrangible Rays the Sines I and R are 109 +and 81, and thence by Computation the greatest Angle AXR will be found +40 Degrees and 17 Minutes, and the least Angle AYS 54 Degrees and 7 +Minutes. + +Suppose now that O [in _Fig._ 15.] is the Spectator's Eye, and OP a Line +drawn parallel to the Sun's Rays and let POE, POF, POG, POH, be Angles +of 40 Degr. 17 Min. 42 Degr. 2 Min. 50 Degr. 
57 Min. and 54 Degr. 7 Min. +respectively, and these Angles turned about their common Side OP, shall +with their other Sides OE, OF; OG, OH, describe the Verges of two +Rain-bows AF, BE and CHDG. For if E, F, G, H, be drops placed any where +in the conical Superficies described by OE, OF, OG, OH, and be +illuminated by the Sun's Rays SE, SF, SG, SH; the Angle SEO being equal +to the Angle POE, or 40 Degr. 17 Min. shall be the greatest Angle in +which the most refrangible Rays can after one Reflexion be refracted to +the Eye, and therefore all the Drops in the Line OE shall send the most +refrangible Rays most copiously to the Eye, and thereby strike the +Senses with the deepest violet Colour in that Region. And in like +manner the Angle SFO being equal to the Angle POF, or 42 Degr. 2 Min. +shall be the greatest in which the least refrangible Rays after one +Reflexion can emerge out of the Drops, and therefore those Rays shall +come most copiously to the Eye from the Drops in the Line OF, and strike +the Senses with the deepest red Colour in that Region. And by the same +Argument, the Rays which have intermediate Degrees of Refrangibility +shall come most copiously from Drops between E and F, and strike the +Senses with the intermediate Colours, in the Order which their Degrees +of Refrangibility require, that is in the Progress from E to F, or from +the inside of the Bow to the outside in this order, violet, indigo, +blue, green, yellow, orange, red. But the violet, by the mixture of the +white Light of the Clouds, will appear faint and incline to purple. + +[Illustration: FIG. 15.] + +Again, the Angle SGO being equal to the Angle POG, or 50 Gr. 57 Min. +shall be the least Angle in which the least refrangible Rays can after +two Reflexions emerge out of the Drops, and therefore the least +refrangible Rays shall come most copiously to the Eye from the Drops in +the Line OG, and strike the Sense with the deepest red in that Region. 
+ +And the Angle SHO being equal to the Angle POH, or 54 Gr. 7 Min. shall +be the least Angle, in which the most refrangible Rays after two +Reflexions can emerge out of the Drops; and therefore those Rays shall +come most copiously to the Eye from the Drops in the Line OH, and strike +the Senses with the deepest violet in that Region. And by the same +Argument, the Drops in the Regions between G and H shall strike the +Sense with the intermediate Colours in the Order which their Degrees of +Refrangibility require, that is, in the Progress from G to H, or from +the inside of the Bow to the outside in this order, red, orange, yellow, +green, blue, indigo, violet. And since these four Lines OE, OF, OG, OH, +may be situated any where in the above-mention'd conical Superficies; +what is said of the Drops and Colours in these Lines is to be understood +of the Drops and Colours every where in those Superficies. + +Thus shall there be made two Bows of Colours, an interior and stronger, +by one Reflexion in the Drops, and an exterior and fainter by two; for +the Light becomes fainter by every Reflexion. And their Colours shall +lie in a contrary Order to one another, the red of both Bows bordering +upon the Space GF, which is between the Bows. The Breadth of the +interior Bow EOF measured cross the Colours shall be 1 Degr. 45 Min. and +the Breadth of the exterior GOH shall be 3 Degr. 10 Min. and the +distance between them GOF shall be 8 Gr. 55 Min. the greatest +Semi-diameter of the innermost, that is, the Angle POF being 42 Gr. 2 +Min. and the least Semi-diameter of the outermost POG, being 50 Gr. 57 +Min. These are the Measures of the Bows, as they would be were the Sun +but a Point; for by the Breadth of his Body, the Breadth of the Bows +will be increased, and their Distance decreased by half a Degree, and so +the breadth of the interior Iris will be 2 Degr. 15 Min. that of the +exterior 3 Degr. 40 Min. their distance 8 Degr. 25 Min. 
the greatest +Semi-diameter of the interior Bow 42 Degr. 17 Min. and the least of the +exterior 50 Degr. 42 Min. And such are the Dimensions of the Bows in the +Heavens found to be very nearly, when their Colours appear strong and +perfect. For once, by such means as I then had, I measured the greatest +Semi-diameter of the interior Iris about 42 Degrees, and the breadth of +the red, yellow and green in that Iris 63 or 64 Minutes, besides the +outmost faint red obscured by the brightness of the Clouds, for which we +may allow 3 or 4 Minutes more. The breadth of the blue was about 40 +Minutes more besides the violet, which was so much obscured by the +brightness of the Clouds, that I could not measure its breadth. But +supposing the breadth of the blue and violet together to equal that of +the red, yellow and green together, the whole breadth of this Iris will +be about 2-1/4 Degrees, as above. The least distance between this Iris +and the exterior Iris was about 8 Degrees and 30 Minutes. The exterior +Iris was broader than the interior, but so faint, especially on the blue +side, that I could not measure its breadth distinctly. At another time +when both Bows appeared more distinct, I measured the breadth of the +interior Iris 2 Gr. 10´, and the breadth of the red, yellow and green in +the exterior Iris, was to the breadth of the same Colours in the +interior as 3 to 2. + +This Explication of the Rain-bow is yet farther confirmed by the known +Experiment (made by _Antonius de Dominis_ and _Des-Cartes_) of hanging +up any where in the Sun-shine a Glass Globe filled with Water, and +viewing it in such a posture, that the Rays which come from the Globe to +the Eye may contain with the Sun's Rays an Angle of either 42 or 50 +Degrees. 
For if the Angle be about 42 or 43 Degrees, the Spectator +(suppose at O) shall see a full red Colour in that side of the Globe +opposed to the Sun as 'tis represented at F, and if that Angle become +less (suppose by depressing the Globe to E) there will appear other +Colours, yellow, green and blue successive in the same side of the +Globe. But if the Angle be made about 50 Degrees (suppose by lifting up +the Globe to G) there will appear a red Colour in that side of the Globe +towards the Sun, and if the Angle be made greater (suppose by lifting +up the Globe to H) the red will turn successively to the other Colours, +yellow, green and blue. The same thing I have tried, by letting a Globe +rest, and raising or depressing the Eye, or otherwise moving it to make +the Angle of a just magnitude. + +I have heard it represented, that if the Light of a Candle be refracted +by a Prism to the Eye; when the blue Colour falls upon the Eye, the +Spectator shall see red in the Prism, and when the red falls upon the +Eye he shall see blue; and if this were certain, the Colours of the +Globe and Rain-bow ought to appear in a contrary order to what we find. +But the Colours of the Candle being very faint, the mistake seems to +arise from the difficulty of discerning what Colours fall on the Eye. +For, on the contrary, I have sometimes had occasion to observe in the +Sun's Light refracted by a Prism, that the Spectator always sees that +Colour in the Prism which falls upon his Eye. And the same I have found +true also in Candle-light. For when the Prism is moved slowly from the +Line which is drawn directly from the Candle to the Eye, the red appears +first in the Prism and then the blue, and therefore each of them is seen +when it falls upon the Eye. For the red passes over the Eye first, and +then the blue. 
+ +The Light which comes through drops of Rain by two Refractions without +any Reflexion, ought to appear strongest at the distance of about 26 +Degrees from the Sun, and to decay gradually both ways as the distance +from him increases and decreases. And the same is to be understood of +Light transmitted through spherical Hail-stones. And if the Hail be a +little flatted, as it often is, the Light transmitted may grow so strong +at a little less distance than that of 26 Degrees, as to form a Halo +about the Sun or Moon; which Halo, as often as the Hail-stones are duly +figured may be colour'd, and then it must be red within by the least +refrangible Rays, and blue without by the most refrangible ones, +especially if the Hail-stones have opake Globules of Snow in their +center to intercept the Light within the Halo (as _Hugenius_ has +observ'd) and make the inside thereof more distinctly defined than it +would otherwise be. For such Hail-stones, though spherical, by +terminating the Light by the Snow, may make a Halo red within and +colourless without, and darker in the red than without, as Halos used to +be. For of those Rays which pass close by the Snow the Rubriform will be +least refracted, and so come to the Eye in the directest Lines. + +The Light which passes through a drop of Rain after two Refractions, and +three or more Reflexions, is scarce strong enough to cause a sensible +Bow; but in those Cylinders of Ice by which _Hugenius_ explains the +_Parhelia_, it may perhaps be sensible. + + +_PROP._ X. PROB. V. + +_By the discovered Properties of Light to explain the permanent Colours +of Natural Bodies._ + +These Colours arise from hence, that some natural Bodies reflect some +sorts of Rays, others other sorts more copiously than the rest. Minium +reflects the least refrangible or red-making Rays most copiously, and +thence appears red. Violets reflect the most refrangible most copiously, +and thence have their Colour, and so of other Bodies. 
Every Body +reflects the Rays of its own Colour more copiously than the rest, and +from their excess and predominance in the reflected Light has its +Colour. + +_Exper._ 17. For if in the homogeneal Lights obtained by the solution of +the Problem proposed in the fourth Proposition of the first Part of this +Book, you place Bodies of several Colours, you will find, as I have +done, that every Body looks most splendid and luminous in the Light of +its own Colour. Cinnaber in the homogeneal red Light is most +resplendent, in the green Light it is manifestly less resplendent, and +in the blue Light still less. Indigo in the violet blue Light is most +resplendent, and its splendor is gradually diminish'd, as it is removed +thence by degrees through the green and yellow Light to the red. By a +Leek the green Light, and next that the blue and yellow which compound +green, are more strongly reflected than the other Colours red and +violet, and so of the rest. But to make these Experiments the more +manifest, such Bodies ought to be chosen as have the fullest and most +vivid Colours, and two of those Bodies are to be compared together. +Thus, for instance, if Cinnaber and _ultra_-marine blue, or some other +full blue be held together in the red homogeneal Light, they will both +appear red, but the Cinnaber will appear of a strongly luminous and +resplendent red, and the _ultra_-marine blue of a faint obscure and dark +red; and if they be held together in the blue homogeneal Light, they +will both appear blue, but the _ultra_-marine will appear of a strongly +luminous and resplendent blue, and the Cinnaber of a faint and dark +blue. Which puts it out of dispute that the Cinnaber reflects the red +Light much more copiously than the _ultra_-marine doth, and the +_ultra_-marine reflects the blue Light much more copiously than the +Cinnaber doth. 
The same Experiment may be tried successfully with red +Lead and Indigo, or with any other two colour'd Bodies, if due allowance +be made for the different strength or weakness of their Colour and +Light. + +And as the reason of the Colours of natural Bodies is evident by these +Experiments, so it is farther confirmed and put past dispute by the two +first Experiments of the first Part, whereby 'twas proved in such Bodies +that the reflected Lights which differ in Colours do differ also in +degrees of Refrangibility. For thence it's certain, that some Bodies +reflect the more refrangible, others the less refrangible Rays more +copiously. + +And that this is not only a true reason of these Colours, but even the +only reason, may appear farther from this Consideration, that the Colour +of homogeneal Light cannot be changed by the Reflexion of natural +Bodies. + +For if Bodies by Reflexion cannot in the least change the Colour of any +one sort of Rays, they cannot appear colour'd by any other means than by +reflecting those which either are of their own Colour, or which by +mixture must produce it. + +But in trying Experiments of this kind care must be had that the Light +be sufficiently homogeneal. For if Bodies be illuminated by the ordinary +prismatick Colours, they will appear neither of their own Day-light +Colours, nor of the Colour of the Light cast on them, but of some middle +Colour between both, as I have found by Experience. Thus red Lead (for +instance) illuminated with the ordinary prismatick green will not appear +either red or green, but orange or yellow, or between yellow and green, +accordingly as the green Light by which 'tis illuminated is more or less +compounded. 
For because red Lead appears red when illuminated with white +Light, wherein all sorts of Rays are equally mix'd, and in the green +Light all sorts of Rays are not equally mix'd, the Excess of the +yellow-making, green-making and blue-making Rays in the incident green +Light, will cause those Rays to abound so much in the reflected Light, +as to draw the Colour from red towards their Colour. And because the red +Lead reflects the red-making Rays most copiously in proportion to their +number, and next after them the orange-making and yellow-making Rays; +these Rays in the reflected Light will be more in proportion to the +Light than they were in the incident green Light, and thereby will draw +the reflected Light from green towards their Colour. And therefore the +red Lead will appear neither red nor green, but of a Colour between +both. + +In transparently colour'd Liquors 'tis observable, that their Colour +uses to vary with their thickness. Thus, for instance, a red Liquor in a +conical Glass held between the Light and the Eye, looks of a pale and +dilute yellow at the bottom where 'tis thin, and a little higher where +'tis thicker grows orange, and where 'tis still thicker becomes red, and +where 'tis thickest the red is deepest and darkest. For it is to be +conceiv'd that such a Liquor stops the indigo-making and violet-making +Rays most easily, the blue-making Rays more difficultly, the +green-making Rays still more difficultly, and the red-making most +difficultly: And that if the thickness of the Liquor be only so much as +suffices to stop a competent number of the violet-making and +indigo-making Rays, without diminishing much the number of the rest, the +rest must (by _Prop._ 6. _Part_ 2.) compound a pale yellow. 
But if the +Liquor be so much thicker as to stop also a great number of the +blue-making Rays, and some of the green-making, the rest must compound +an orange; and where it is so thick as to stop also a great number of +the green-making and a considerable number of the yellow-making, the +rest must begin to compound a red, and this red must grow deeper and +darker as the yellow-making and orange-making Rays are more and more +stopp'd by increasing the thickness of the Liquor, so that few Rays +besides the red-making can get through. + +Of this kind is an Experiment lately related to me by Mr. _Halley_, who, +in diving deep into the Sea in a diving Vessel, found in a clear +Sun-shine Day, that when he was sunk many Fathoms deep into the Water +the upper part of his Hand on which the Sun shone directly through the +Water and through a small Glass Window in the Vessel appeared of a red +Colour, like that of a Damask Rose, and the Water below and the under +part of his Hand illuminated by Light reflected from the Water below +look'd green. For thence it may be gather'd, that the Sea-Water reflects +back the violet and blue-making Rays most easily, and lets the +red-making Rays pass most freely and copiously to great Depths. For +thereby the Sun's direct Light at all great Depths, by reason of the +predominating red-making Rays, must appear red; and the greater the +Depth is, the fuller and intenser must that red be. And at such Depths +as the violet-making Rays scarce penetrate unto, the blue-making, +green-making, and yellow-making Rays being reflected from below more +copiously than the red-making ones, must compound a green. + +Now, if there be two Liquors of full Colours, suppose a red and blue, +and both of them so thick as suffices to make their Colours sufficiently +full; though either Liquor be sufficiently transparent apart, yet will +you not be able to see through both together. 
For, if only the +red-making Rays pass through one Liquor, and only the blue-making +through the other, no Rays can pass through both. This Mr. _Hook_ tried +casually with Glass Wedges filled with red and blue Liquors, and was +surprized at the unexpected Event, the reason of it being then unknown; +which makes me trust the more to his Experiment, though I have not tried +it my self. But he that would repeat it, must take care the Liquors be +of very good and full Colours. + +Now, whilst Bodies become coloured by reflecting or transmitting this or +that sort of Rays more copiously than the rest, it is to be conceived +that they stop and stifle in themselves the Rays which they do not +reflect or transmit. For, if Gold be foliated and held between your Eye +and the Light, the Light looks of a greenish blue, and therefore massy +Gold lets into its Body the blue-making Rays to be reflected to and fro +within it till they be stopp'd and stifled, whilst it reflects the +yellow-making outwards, and thereby looks yellow. And much after the +same manner that Leaf Gold is yellow by reflected, and blue by +transmitted Light, and massy Gold is yellow in all Positions of the Eye; +there are some Liquors, as the Tincture of _Lignum Nephriticum_, and +some sorts of Glass which transmit one sort of Light most copiously, and +reflect another sort, and thereby look of several Colours, according to +the Position of the Eye to the Light. But, if these Liquors or Glasses +were so thick and massy that no Light could get through them, I question +not but they would like all other opake Bodies appear of one and the +same Colour in all Positions of the Eye, though this I cannot yet affirm +by Experience. 
For all colour'd Bodies, so far as my Observation +reaches, may be seen through if made sufficiently thin, and therefore +are in some measure transparent, and differ only in degrees of +Transparency from tinged transparent Liquors; these Liquors, as well as +those Bodies, by a sufficient Thickness becoming opake. A transparent +Body which looks of any Colour by transmitted Light, may also look of +the same Colour by reflected Light, the Light of that Colour being +reflected by the farther Surface of the Body, or by the Air beyond it. +And then the reflected Colour will be diminished, and perhaps cease, by +making the Body very thick, and pitching it on the backside to diminish +the Reflexion of its farther Surface, so that the Light reflected from +the tinging Particles may predominate. In such Cases, the Colour of the +reflected Light will be apt to vary from that of the Light transmitted. +But whence it is that tinged Bodies and Liquors reflect some sort of +Rays, and intromit or transmit other sorts, shall be said in the next +Book. In this Proposition I content my self to have put it past dispute, +that Bodies have such Properties, and thence appear colour'd. + + +_PROP._ XI. PROB. VI. + +_By mixing colour'd Lights to compound a beam of Light of the same +Colour and Nature with a beam of the Sun's direct Light, and therein to +experience the Truth of the foregoing Propositions._ + +[Illustration: FIG. 16.] + +Let ABC _abc_ [in _Fig._ 16.] represent a Prism, by which the Sun's +Light let into a dark Chamber through the Hole F, may be refracted +towards the Lens MN, and paint upon it at _p_, _q_, _r_, _s_, and _t_, +the usual Colours violet, blue, green, yellow, and red, and let the +diverging Rays by the Refraction of this Lens converge again towards X, +and there, by the mixture of all those their Colours, compound a white +according to what was shewn above. 
Then let another Prism DEG _deg_, +parallel to the former, be placed at X, to refract that white Light +upwards towards Y. Let the refracting Angles of the Prisms, and their +distances from the Lens be equal, so that the Rays which converged from +the Lens towards X, and without Refraction, would there have crossed and +diverged again, may by the Refraction of the second Prism be reduced +into Parallelism and diverge no more. For then those Rays will recompose +a beam of white Light XY. If the refracting Angle of either Prism be the +bigger, that Prism must be so much the nearer to the Lens. You will know +when the Prisms and the Lens are well set together, by observing if the +beam of Light XY, which comes out of the second Prism be perfectly white +to the very edges of the Light, and at all distances from the Prism +continue perfectly and totally white like a beam of the Sun's Light. For +till this happens, the Position of the Prisms and Lens to one another +must be corrected; and then if by the help of a long beam of Wood, as is +represented in the Figure, or by a Tube, or some other such Instrument, +made for that Purpose, they be made fast in that Situation, you may try +all the same Experiments in this compounded beam of Light XY, which have +been made in the Sun's direct Light. For this compounded beam of Light +has the same appearance, and is endow'd with all the same Properties +with a direct beam of the Sun's Light, so far as my Observation reaches. +And in trying Experiments in this beam you may by stopping any of the +Colours, _p_, _q_, _r_, _s_, and _t_, at the Lens, see how the Colours +produced in the Experiments are no other than those which the Rays had +at the Lens before they entered the Composition of this Beam: And by +consequence, that they arise not from any new Modifications of the Light +by Refractions and Reflexions, but from the various Separations and +Mixtures of the Rays originally endow'd with their colour-making +Qualities. 
+ +So, for instance, having with a Lens 4-1/4 Inches broad, and two Prisms +on either hand 6-1/4 Feet distant from the Lens, made such a beam of +compounded Light; to examine the reason of the Colours made by Prisms, I +refracted this compounded beam of Light XY with another Prism HIK _kh_, +and thereby cast the usual Prismatick Colours PQRST upon the Paper LV +placed behind. And then by stopping any of the Colours _p_, _q_, _r_, +_s_, _t_, at the Lens, I found that the same Colour would vanish at the +Paper. So if the Purple _p_ was stopp'd at the Lens, the Purple P upon +the Paper would vanish, and the rest of the Colours would remain +unalter'd, unless perhaps the blue, so far as some purple latent in it +at the Lens might be separated from it by the following Refractions. And +so by intercepting the green upon the Lens, the green R upon the Paper +would vanish, and so of the rest; which plainly shews, that as the white +beam of Light XY was compounded of several Lights variously colour'd at +the Lens, so the Colours which afterwards emerge out of it by new +Refractions are no other than those of which its Whiteness was +compounded. The Refraction of the Prism HIK _kh_ generates the Colours +PQRST upon the Paper, not by changing the colorific Qualities of the +Rays, but by separating the Rays which had the very same colorific +Qualities before they enter'd the Composition of the refracted beam of +white Light XY. For otherwise the Rays which were of one Colour at the +Lens might be of another upon the Paper, contrary to what we find. + +So again, to examine the reason of the Colours of natural Bodies, I +placed such Bodies in the Beam of Light XY, and found that they all +appeared there of those their own Colours which they have in Day-light, +and that those Colours depend upon the Rays which had the same Colours +at the Lens before they enter'd the Composition of that beam. 
Thus, for +instance, Cinnaber illuminated by this beam appears of the same red +Colour as in Day-light; and if at the Lens you intercept the +green-making and blue-making Rays, its redness will become more full and +lively: But if you there intercept the red-making Rays, it will not any +longer appear red, but become yellow or green, or of some other Colour, +according to the sorts of Rays which you do not intercept. So Gold in +this Light XY appears of the same yellow Colour as in Day-light, but by +intercepting at the Lens a due Quantity of the yellow-making Rays it +will appear white like Silver (as I have tried) which shews that its +yellowness arises from the Excess of the intercepted Rays tinging that +Whiteness with their Colour when they are let pass. So the Infusion of +_Lignum Nephriticum_ (as I have also tried) when held in this beam of +Light XY, looks blue by the reflected Part of the Light, and red by the +transmitted Part of it, as when 'tis view'd in Day-light; but if you +intercept the blue at the Lens the Infusion will lose its reflected blue +Colour, whilst its transmitted red remains perfect, and by the loss of +some blue-making Rays, wherewith it was allay'd, becomes more intense +and full. And, on the contrary, if the red and orange-making Rays be +intercepted at the Lens, the Infusion will lose its transmitted red, +whilst its blue will remain and become more full and perfect. Which +shews, that the Infusion does not tinge the Rays with blue and red, but +only transmits those most copiously which were red-making before, and +reflects those most copiously which were blue-making before. And after +the same manner may the Reasons of other Phænomena be examined, by +trying them in this artificial beam of Light XY. + +FOOTNOTES: + +[I] See p. 59. + +[J] _See our_ Author's Lect. Optic. _Part_ II. _Sect._ II. _p._ 239. + +[K] _As is done in our_ Author's Lect. Optic. _Part_ I. _Sect._ III. +_and_ IV. _and Part_ II. _Sect._ II. 
+ +[L] _See our_ Author's Lect. Optic. _Part_ II. _Sect._ II. _pag._ 269, +&c. + +[M] _This is demonstrated in our_ Author's Lect. Optic. _Part_ I. +_Sect._ IV. _Prop._ 35 _and_ 36. + + + + +THE + +SECOND BOOK + +OF + +OPTICKS + + + + +_PART I._ + +_Observations concerning the Reflexions, Refractions, and Colours of +thin transparent Bodies._ + + +It has been observed by others, that transparent Substances, as Glass, +Water, Air, &c. when made very thin by being blown into Bubbles, or +otherwise formed into Plates, do exhibit various Colours according to +their various thinness, altho' at a greater thickness they appear very +clear and colourless. In the former Book I forbore to treat of these +Colours, because they seemed of a more difficult Consideration, and were +not necessary for establishing the Properties of Light there discoursed +of. But because they may conduce to farther Discoveries for compleating +the Theory of Light, especially as to the constitution of the parts of +natural Bodies, on which their Colours or Transparency depend; I have +here set down an account of them. To render this Discourse short and +distinct, I have first described the principal of my Observations, and +then consider'd and made use of them. The Observations are these. + +_Obs._ 1. Compressing two Prisms hard together that their sides (which +by chance were a very little convex) might somewhere touch one another: +I found the place in which they touched to become absolutely +transparent, as if they had there been one continued piece of Glass. 
For +when the Light fell so obliquely on the Air, which in other places was +between them, as to be all reflected; it seemed in that place of contact +to be wholly transmitted, insomuch that when look'd upon, it appeared +like a black or dark spot, by reason that little or no sensible Light +was reflected from thence, as from other places; and when looked through +it seemed (as it were) a hole in that Air which was formed into a thin +Plate, by being compress'd between the Glasses. And through this hole +Objects that were beyond might be seen distinctly, which could not at +all be seen through other parts of the Glasses where the Air was +interjacent. Although the Glasses were a little convex, yet this +transparent spot was of a considerable breadth, which breadth seemed +principally to proceed from the yielding inwards of the parts of the +Glasses, by reason of their mutual pressure. For by pressing them very +hard together it would become much broader than otherwise. + +_Obs._ 2. When the Plate of Air, by turning the Prisms about their +common Axis, became so little inclined to the incident Rays, that some +of them began to be transmitted, there arose in it many slender Arcs of +Colours which at first were shaped almost like the Conchoid, as you see +them delineated in the first Figure. And by continuing the Motion of the +Prisms, these Arcs increased and bended more and more about the said +transparent spot, till they were compleated into Circles or Rings +incompassing it, and afterwards continually grew more and more +contracted. + +[Illustration: FIG. 1.] + +These Arcs at their first appearance were of a violet and blue Colour, +and between them were white Arcs of Circles, which presently by +continuing the Motion of the Prisms became a little tinged in their +inward Limbs with red and yellow, and to their outward Limbs the blue +was adjacent. 
So that the order of these Colours from the central dark +spot, was at that time white, blue, violet; black, red, orange, yellow, +white, blue, violet, &c. But the yellow and red were much fainter than +the blue and violet. + +The Motion of the Prisms about their Axis being continued, these Colours +contracted more and more, shrinking towards the whiteness on either +side of it, until they totally vanished into it. And then the Circles in +those parts appear'd black and white, without any other Colours +intermix'd. But by farther moving the Prisms about, the Colours again +emerged out of the whiteness, the violet and blue at its inward Limb, +and at its outward Limb the red and yellow. So that now their order from +the central Spot was white, yellow, red; black; violet, blue, white, +yellow, red, &c. contrary to what it was before. + +_Obs._ 3. When the Rings or some parts of them appeared only black and +white, they were very distinct and well defined, and the blackness +seemed as intense as that of the central Spot. Also in the Borders of +the Rings, where the Colours began to emerge out of the whiteness, they +were pretty distinct, which made them visible to a very great multitude. +I have sometimes number'd above thirty Successions (reckoning every +black and white Ring for one Succession) and seen more of them, which by +reason of their smalness I could not number. But in other Positions of +the Prisms, at which the Rings appeared of many Colours, I could not +distinguish above eight or nine of them, and the Exterior of those were +very confused and dilute. + +In these two Observations to see the Rings distinct, and without any +other Colour than Black and white, I found it necessary to hold my Eye +at a good distance from them. 
For by approaching nearer, although in the +same inclination of my Eye to the Plane of the Rings, there emerged a +bluish Colour out of the white, which by dilating it self more and more +into the black, render'd the Circles less distinct, and left the white a +little tinged with red and yellow. I found also by looking through a +slit or oblong hole, which was narrower than the pupil of my Eye, and +held close to it parallel to the Prisms, I could see the Circles much +distincter and visible to a far greater number than otherwise. + +_Obs._ 4. To observe more nicely the order of the Colours which arose +out of the white Circles as the Rays became less and less inclined to +the Plate of Air; I took two Object-glasses, the one a Plano-convex for +a fourteen Foot Telescope, and the other a large double Convex for one +of about fifty Foot; and upon this, laying the other with its plane side +downwards, I pressed them slowly together, to make the Colours +successively emerge in the middle of the Circles, and then slowly lifted +the upper Glass from the lower to make them successively vanish again in +the same place. The Colour, which by pressing the Glasses together, +emerged last in the middle of the other Colours, would upon its first +appearance look like a Circle of a Colour almost uniform from the +circumference to the center and by compressing the Glasses still more, +grow continually broader until a new Colour emerged in its center, and +thereby it became a Ring encompassing that new Colour. And by +compressing the Glasses still more, the diameter of this Ring would +increase, and the breadth of its Orbit or Perimeter decrease until +another new Colour emerged in the center of the last: And so on until a +third, a fourth, a fifth, and other following new Colours successively +emerged there, and became Rings encompassing the innermost Colour, the +last of which was the black Spot. 
And, on the contrary, by lifting up +the upper Glass from the lower, the diameter of the Rings would +decrease, and the breadth of their Orbit increase, until their Colours +reached successively to the center; and then they being of a +considerable breadth, I could more easily discern and distinguish their +Species than before. And by this means I observ'd their Succession and +Quantity to be as followeth. + +Next to the pellucid central Spot made by the contact of the Glasses +succeeded blue, white, yellow, and red. The blue was so little in +quantity, that I could not discern it in the Circles made by the Prisms, +nor could I well distinguish any violet in it, but the yellow and red +were pretty copious, and seemed about as much in extent as the white, +and four or five times more than the blue. The next Circuit in order of +Colours immediately encompassing these were violet, blue, green, yellow, +and red: and these were all of them copious and vivid, excepting the +green, which was very little in quantity, and seemed much more faint and +dilute than the other Colours. Of the other four, the violet was the +least in extent, and the blue less than the yellow or red. The third +Circuit or Order was purple, blue, green, yellow, and red; in which the +purple seemed more reddish than the violet in the former Circuit, and +the green was much more conspicuous, being as brisk and copious as any +of the other Colours, except the yellow, but the red began to be a +little faded, inclining very much to purple. After this succeeded the +fourth Circuit of green and red. The green was very copious and lively, +inclining on the one side to blue, and on the other side to yellow. But +in this fourth Circuit there was neither violet, blue, nor yellow, and +the red was very imperfect and dirty. Also the succeeding Colours became +more and more imperfect and dilute, till after three or four revolutions +they ended in perfect whiteness. 
Their form, when the Glasses were most +compress'd so as to make the black Spot appear in the center, is +delineated in the second Figure; where _a_, _b_, _c_, _d_, _e_: _f_, +_g_, _h_, _i_, _k_: _l_, _m_, _n_, _o_, _p_: _q_, _r_: _s_, _t_: _v_, +_x_: _y_, _z_, denote the Colours reckon'd in order from the center, +black, blue, white, yellow, red: violet, blue, green, yellow, red: +purple, blue, green, yellow, red: green, red: greenish blue, red: +greenish blue, pale red: greenish blue, reddish white. + +[Illustration: FIG. 2.] + +_Obs._ 5. To determine the interval of the Glasses, or thickness of the +interjacent Air, by which each Colour was produced, I measured the +Diameters of the first six Rings at the most lucid part of their Orbits, +and squaring them, I found their Squares to be in the arithmetical +Progression of the odd Numbers, 1, 3, 5, 7, 9, 11. And since one of +these Glasses was plane, and the other spherical, their Intervals at +those Rings must be in the same Progression. I measured also the +Diameters of the dark or faint Rings between the more lucid Colours, and +found their Squares to be in the arithmetical Progression of the even +Numbers, 2, 4, 6, 8, 10, 12. And it being very nice and difficult to +take these measures exactly; I repeated them divers times at divers +parts of the Glasses, that by their Agreement I might be confirmed in +them. And the same method I used in determining some others of the +following Observations. + +_Obs._ 6. The Diameter of the sixth Ring at the most lucid part of its +Orbit was 58/100 parts of an Inch, and the Diameter of the Sphere on +which the double convex Object-glass was ground was about 102 Feet, and +hence I gathered the thickness of the Air or Aereal Interval of the +Glasses at that Ring. 
But some time after, suspecting that in making
+this Observation I had not determined the Diameter of the Sphere with
+sufficient accurateness, and being uncertain whether the Plano-convex
+Glass was truly plane, and not something concave or convex on that side
+which I accounted plane; and whether I had not pressed the Glasses
+together, as I often did, to make them touch; (For by pressing such
+Glasses together their parts easily yield inwards, and the Rings thereby
+become sensibly broader than they would be, did the Glasses keep their
+Figures.) I repeated the Experiment, and found the Diameter of the sixth
+lucid Ring about 55/100 parts of an Inch. I repeated the Experiment also
+with such an Object-glass of another Telescope as I had at hand. This
+was a double Convex ground on both sides to one and the same Sphere, and
+its Focus was distant from it 83-2/5 Inches. And thence, if the Sines of
+Incidence and Refraction of the bright yellow Light be assumed in
+proportion as 11 to 17, the Diameter of the Sphere to which the Glass
+was figured will by computation be found 182 Inches. This Glass I laid
+upon a flat one, so that the black Spot appeared in the middle of the
+Rings of Colours without any other Pressure than that of the weight of
+the Glass. And now measuring the Diameter of the fifth dark Circle as
+accurately as I could, I found it the fifth part of an Inch precisely.
+This Measure was taken with the points of a pair of Compasses on the
+upper Surface of the upper Glass, and my Eye was about eight or nine
+Inches distance from the Glass, almost perpendicularly over it, and the
+Glass was 1/6 of an Inch thick, and thence it is easy to collect that
+the true Diameter of the Ring between the Glasses was greater than its
+measur'd Diameter above the Glasses in the Proportion of 80 to 79, or
+thereabouts, and by consequence equal to 16/79 parts of an Inch, and its
+true Semi-diameter equal to 8/79 parts. 
Now as the Diameter of the
+Sphere (182 Inches) is to the Semi-diameter of this fifth dark Ring
+(8/79 parts of an Inch) so is this Semi-diameter to the thickness of the
+Air at this fifth dark Ring; which is therefore 32/567931 or
+100/1774784 Parts of an Inch; and the fifth Part thereof, _viz._ the
+1/88739 Part of an Inch, is the Thickness of the Air at the first of
+these dark Rings.
+
+The same Experiment I repeated with another double convex Object-glass
+ground on both sides to one and the same Sphere. Its Focus was distant
+from it 168-1/2 Inches, and therefore the Diameter of that Sphere was
+184 Inches. This Glass being laid upon the same plain Glass, the
+Diameter of the fifth of the dark Rings, when the black Spot in their
+Center appear'd plainly without pressing the Glasses, was by the measure
+of the Compasses upon the upper Glass 121/600 Parts of an Inch, and by
+consequence between the Glasses it was 1222/6000: For the upper Glass
+was 1/8 of an Inch thick, and my Eye was distant from it 8 Inches. And a
+third proportional to half this from the Diameter of the Sphere is
+5/88850 Parts of an Inch. This is therefore the Thickness of the Air at
+this Ring, and a fifth Part thereof, _viz._ the 1/88850th Part of an
+Inch is the Thickness thereof at the first of the Rings, as above.
+
+I tried the same Thing, by laying these Object-glasses upon flat Pieces
+of a broken Looking-glass, and found the same Measures of the Rings:
+Which makes me rely upon them till they can be determin'd more
+accurately by Glasses ground to larger Spheres, though in such Glasses
+greater care must be taken of a true Plane.
+
+These Dimensions were taken, when my Eye was placed almost
+perpendicularly over the Glasses, being about an Inch, or an Inch and a
+quarter, distant from the incident Rays, and eight Inches distant from
+the Glass; so that the Rays were inclined to the Glass in an Angle of
+about four Degrees. 
Whence by the following Observation you will +understand, that had the Rays been perpendicular to the Glasses, the +Thickness of the Air at these Rings would have been less in the +Proportion of the Radius to the Secant of four Degrees, that is, of +10000 to 10024. Let the Thicknesses found be therefore diminish'd in +this Proportion, and they will become 1/88952 and 1/89063, or (to use +the nearest round Number) the 1/89000th Part of an Inch. This is the +Thickness of the Air at the darkest Part of the first dark Ring made by +perpendicular Rays; and half this Thickness multiplied by the +Progression, 1, 3, 5, 7, 9, 11, &c. gives the Thicknesses of the Air at +the most luminous Parts of all the brightest Rings, _viz._ 1/178000, +3/178000, 5/178000, 7/178000, &c. their arithmetical Means 2/178000, +4/178000, 6/178000, &c. being its Thicknesses at the darkest Parts of +all the dark ones. + +_Obs._ 7. The Rings were least, when my Eye was placed perpendicularly +over the Glasses in the Axis of the Rings: And when I view'd them +obliquely they became bigger, continually swelling as I removed my Eye +farther from the Axis. And partly by measuring the Diameter of the same +Circle at several Obliquities of my Eye, partly by other Means, as also +by making use of the two Prisms for very great Obliquities, I found its +Diameter, and consequently the Thickness of the Air at its Perimeter in +all those Obliquities to be very nearly in the Proportions express'd in +this Table. + +-------------------+--------------------+----------+---------- +Angle of Incidence |Angle of Refraction |Diameter |Thickness + on | into | of the | of the + the Air. | the Air. | Ring. | Air. +-------------------+--------------------+----------+---------- + Deg. Min. 
| | | + | | | + 00 00 | 00 00 | 10 | 10 + | | | + 06 26 | 10 00 | 10-1/13 | 10-2/13 + | | | + 12 45 | 20 00 | 10-1/3 | 10-2/3 + | | | + 18 49 | 30 00 | 10-3/4 | 11-1/2 + | | | + 24 30 | 40 00 | 11-2/5 | 13 + | | | + 29 37 | 50 00 | 12-1/2 | 15-1/2 + | | | + 33 58 | 60 00 | 14 | 20 + | | | + 35 47 | 65 00 | 15-1/4 | 23-1/4 + | | | + 37 19 | 70 00 | 16-4/5 | 28-1/4 + | | | + 38 33 | 75 00 | 19-1/4 | 37 + | | | + 39 27 | 80 00 | 22-6/7 | 52-1/4 + | | | + 40 00 | 85 00 | 29 | 84-1/12 + | | | + 40 11 | 90 00 | 35 | 122-1/2 +-------------------+--------------------+----------+---------- + +In the two first Columns are express'd the Obliquities of the incident +and emergent Rays to the Plate of the Air, that is, their Angles of +Incidence and Refraction. In the third Column the Diameter of any +colour'd Ring at those Obliquities is expressed in Parts, of which ten +constitute that Diameter when the Rays are perpendicular. And in the +fourth Column the Thickness of the Air at the Circumference of that Ring +is expressed in Parts, of which also ten constitute its Thickness when +the Rays are perpendicular. + +And from these Measures I seem to gather this Rule: That the Thickness +of the Air is proportional to the Secant of an Angle, whose Sine is a +certain mean Proportional between the Sines of Incidence and Refraction. +And that mean Proportional, so far as by these Measures I can determine +it, is the first of an hundred and six arithmetical mean Proportionals +between those Sines counted from the bigger Sine, that is, from the Sine +of Refraction when the Refraction is made out of the Glass into the +Plate of Air, or from the Sine of Incidence when the Refraction is made +out of the Plate of Air into the Glass. + +_Obs._ 8. The dark Spot in the middle of the Rings increased also by the +Obliquation of the Eye, although almost insensibly. 
But, if instead of +the Object-glasses the Prisms were made use of, its Increase was more +manifest when viewed so obliquely that no Colours appear'd about it. It +was least when the Rays were incident most obliquely on the interjacent +Air, and as the obliquity decreased it increased more and more until the +colour'd Rings appear'd, and then decreased again, but not so much as it +increased before. And hence it is evident, that the Transparency was +not only at the absolute Contact of the Glasses, but also where they had +some little Interval. I have sometimes observed the Diameter of that +Spot to be between half and two fifth parts of the Diameter of the +exterior Circumference of the red in the first Circuit or Revolution of +Colours when view'd almost perpendicularly; whereas when view'd +obliquely it hath wholly vanish'd and become opake and white like the +other parts of the Glass; whence it may be collected that the Glasses +did then scarcely, or not at all, touch one another, and that their +Interval at the perimeter of that Spot when view'd perpendicularly was +about a fifth or sixth part of their Interval at the circumference of +the said red. + +_Obs._ 9. By looking through the two contiguous Object-glasses, I found +that the interjacent Air exhibited Rings of Colours, as well by +transmitting Light as by reflecting it. The central Spot was now white, +and from it the order of the Colours were yellowish red; black, violet, +blue, white, yellow, red; violet, blue, green, yellow, red, &c. But +these Colours were very faint and dilute, unless when the Light was +trajected very obliquely through the Glasses: For by that means they +became pretty vivid. Only the first yellowish red, like the blue in the +fourth Observation, was so little and faint as scarcely to be discern'd. 
+Comparing the colour'd Rings made by Reflexion, with these made by +transmission of the Light; I found that white was opposite to black, red +to blue, yellow to violet, and green to a Compound of red and violet. +That is, those parts of the Glass were black when looked through, which +when looked upon appeared white, and on the contrary. And so those which +in one case exhibited blue, did in the other case exhibit red. And the +like of the other Colours. The manner you have represented in the third +Figure, where AB, CD, are the Surfaces of the Glasses contiguous at E, +and the black Lines between them are their Distances in arithmetical +Progression, and the Colours written above are seen by reflected Light, +and those below by Light transmitted (p. 209). + +_Obs._ 10. Wetting the Object-glasses a little at their edges, the Water +crept in slowly between them, and the Circles thereby became less and +the Colours more faint: Insomuch that as the Water crept along, one half +of them at which it first arrived would appear broken off from the other +half, and contracted into a less Room. By measuring them I found the +Proportions of their Diameters to the Diameters of the like Circles made +by Air to be about seven to eight, and consequently the Intervals of the +Glasses at like Circles, caused by those two Mediums Water and Air, are +as about three to four. Perhaps it may be a general Rule, That if any +other Medium more or less dense than Water be compress'd between the +Glasses, their Intervals at the Rings caused thereby will be to their +Intervals caused by interjacent Air, as the Sines are which measure the +Refraction made out of that Medium into Air. + +_Obs._ 11. 
When the Water was between the Glasses, if I pressed the +upper Glass variously at its edges to make the Rings move nimbly from +one place to another, a little white Spot would immediately follow the +center of them, which upon creeping in of the ambient Water into that +place would presently vanish. Its appearance was such as interjacent Air +would have caused, and it exhibited the same Colours. But it was not +air, for where any Bubbles of Air were in the Water they would not +vanish. The Reflexion must have rather been caused by a subtiler Medium, +which could recede through the Glasses at the creeping in of the Water. + +_Obs._ 12. These Observations were made in the open Air. But farther to +examine the Effects of colour'd Light falling on the Glasses, I darken'd +the Room, and view'd them by Reflexion of the Colours of a Prism cast on +a Sheet of white Paper, my Eye being so placed that I could see the +colour'd Paper by Reflexion in the Glasses, as in a Looking-glass. And +by this means the Rings became distincter and visible to a far greater +number than in the open Air. I have sometimes seen more than twenty of +them, whereas in the open Air I could not discern above eight or nine. + +[Illustration: FIG. 3.] + +_Obs._ 13. Appointing an Assistant to move the Prism to and fro about +its Axis, that all the Colours might successively fall on that part of +the Paper which I saw by Reflexion from that part of the Glasses, where +the Circles appear'd, so that all the Colours might be successively +reflected from the Circles to my Eye, whilst I held it immovable, I +found the Circles which the red Light made to be manifestly bigger than +those which were made by the blue and violet. And it was very pleasant +to see them gradually swell or contract accordingly as the Colour of the +Light was changed. 
The Interval of the Glasses at any of the Rings when +they were made by the utmost red Light, was to their Interval at the +same Ring when made by the utmost violet, greater than as 3 to 2, and +less than as 13 to 8. By the most of my Observations it was as 14 to 9. +And this Proportion seem'd very nearly the same in all Obliquities of my +Eye; unless when two Prisms were made use of instead of the +Object-glasses. For then at a certain great obliquity of my Eye, the +Rings made by the several Colours seem'd equal, and at a greater +obliquity those made by the violet would be greater than the same Rings +made by the red: the Refraction of the Prism in this case causing the +most refrangible Rays to fall more obliquely on that plate of the Air +than the least refrangible ones. Thus the Experiment succeeded in the +colour'd Light, which was sufficiently strong and copious to make the +Rings sensible. And thence it may be gather'd, that if the most +refrangible and least refrangible Rays had been copious enough to make +the Rings sensible without the mixture of other Rays, the Proportion +which here was 14 to 9 would have been a little greater, suppose 14-1/4 +or 14-1/3 to 9. + +_Obs._ 14. Whilst the Prism was turn'd about its Axis with an uniform +Motion, to make all the several Colours fall successively upon the +Object-glasses, and thereby to make the Rings contract and dilate: The +Contraction or Dilatation of each Ring thus made by the variation of its +Colour was swiftest in the red, and slowest in the violet, and in the +intermediate Colours it had intermediate degrees of Celerity. Comparing +the quantity of Contraction and Dilatation made by all the degrees of +each Colour, I found that it was greatest in the red; less in the +yellow, still less in the blue, and least in the violet. 
And to make as
+just an Estimation as I could of the Proportions of their Contractions
+or Dilatations, I observ'd that the whole Contraction or Dilatation of
+the Diameter of any Ring made by all the degrees of red, was to that of
+the Diameter of the same Ring made by all the degrees of violet, as
+about four to three, or five to four, and that when the Light was of the
+middle Colour between yellow and green, the Diameter of the Ring was
+very nearly an arithmetical Mean between the greatest Diameter of the
+same Ring made by the outmost red, and the least Diameter thereof made
+by the outmost violet: Contrary to what happens in the Colours of the
+oblong Spectrum made by the Refraction of a Prism, where the red is most
+contracted, the violet most expanded, and in the midst of all the
+Colours is the Confine of green and blue. And hence I seem to collect
+that the thicknesses of the Air between the Glasses there, where the
+Ring is successively made by the limits of the five principal Colours
+(red, yellow, green, blue, violet) in order (that is, by the extreme
+red, by the limit of red and yellow in the middle of the orange, by the
+limit of yellow and green, by the limit of green and blue, by the limit
+of blue and violet in the middle of the indigo, and by the extreme
+violet) are to one another very nearly as the sixth lengths of a Chord
+which sound the Notes in a sixth Major, _sol_, _la_, _mi_, _fa_, _sol_,
+_la_. But it agrees something better with the Observation to say, that
+the thicknesses of the Air between the Glasses there, where the Rings
+are successively made by the limits of the seven Colours, red, orange,
+yellow, green, blue, indigo, violet in order, are to one another as the
+Cube Roots of the Squares of the eight lengths of a Chord, which sound
+the Notes in an eighth, _sol_, _la_, _fa_, _sol_, _la_, _mi_, _fa_,
+_sol_; that is, as the Cube Roots of the Squares of the Numbers, 1, 8/9,
+5/6, 3/4, 2/3, 3/5, 9/16, 1/2.
+
+_Obs._ 15. 
These Rings were not of various Colours like those made in +the open Air, but appeared all over of that prismatick Colour only with +which they were illuminated. And by projecting the prismatick Colours +immediately upon the Glasses, I found that the Light which fell on the +dark Spaces which were between the Colour'd Rings was transmitted +through the Glasses without any variation of Colour. For on a white +Paper placed behind, it would paint Rings of the same Colour with those +which were reflected, and of the bigness of their immediate Spaces. And +from thence the origin of these Rings is manifest; namely, that the Air +between the Glasses, according to its various thickness, is disposed in +some places to reflect, and in others to transmit the Light of any one +Colour (as you may see represented in the fourth Figure) and in the same +place to reflect that of one Colour where it transmits that of another. + +[Illustration: FIG. 4.] + +_Obs._ 16. The Squares of the Diameters of these Rings made by any +prismatick Colour were in arithmetical Progression, as in the fifth +Observation. And the Diameter of the sixth Circle, when made by the +citrine yellow, and viewed almost perpendicularly was about 58/100 parts +of an Inch, or a little less, agreeable to the sixth Observation. + +The precedent Observations were made with a rarer thin Medium, +terminated by a denser, such as was Air or Water compress'd between two +Glasses. In those that follow are set down the Appearances of a denser +Medium thin'd within a rarer, such as are Plates of Muscovy Glass, +Bubbles of Water, and some other thin Substances terminated on all sides +with air. + +_Obs._ 17. If a Bubble be blown with Water first made tenacious by +dissolving a little Soap in it, 'tis a common Observation, that after a +while it will appear tinged with a great variety of Colours. 
To defend +these Bubbles from being agitated by the external Air (whereby their +Colours are irregularly moved one among another, so that no accurate +Observation can be made of them,) as soon as I had blown any of them I +cover'd it with a clear Glass, and by that means its Colours emerged in +a very regular order, like so many concentrick Rings encompassing the +top of the Bubble. And as the Bubble grew thinner by the continual +subsiding of the Water, these Rings dilated slowly and overspread the +whole Bubble, descending in order to the bottom of it, where they +vanish'd successively. In the mean while, after all the Colours were +emerged at the top, there grew in the center of the Rings a small round +black Spot, like that in the first Observation, which continually +dilated it self till it became sometimes more than 1/2 or 3/4 of an Inch +in breadth before the Bubble broke. At first I thought there had been no +Light reflected from the Water in that place, but observing it more +curiously, I saw within it several smaller round Spots, which appeared +much blacker and darker than the rest, whereby I knew that there was +some Reflexion at the other places which were not so dark as those +Spots. And by farther Tryal I found that I could see the Images of some +things (as of a Candle or the Sun) very faintly reflected, not only from +the great black Spot, but also from the little darker Spots which were +within it. + +Besides the aforesaid colour'd Rings there would often appear small +Spots of Colours, ascending and descending up and down the sides of the +Bubble, by reason of some Inequalities in the subsiding of the Water. +And sometimes small black Spots generated at the sides would ascend up +to the larger black Spot at the top of the Bubble, and unite with it. + +_Obs._ 18. 
Because the Colours of these Bubbles were more extended and +lively than those of the Air thinn'd between two Glasses, and so more +easy to be distinguish'd, I shall here give you a farther description of +their order, as they were observ'd in viewing them by Reflexion of the +Skies when of a white Colour, whilst a black substance was placed +behind the Bubble. And they were these, red, blue; red, blue; red, blue; +red, green; red, yellow, green, blue, purple; red, yellow, green, blue, +violet; red, yellow, white, blue, black. + +The three first Successions of red and blue were very dilute and dirty, +especially the first, where the red seem'd in a manner to be white. +Among these there was scarce any other Colour sensible besides red and +blue, only the blues (and principally the second blue) inclined a little +to green. + +The fourth red was also dilute and dirty, but not so much as the former +three; after that succeeded little or no yellow, but a copious green, +which at first inclined a little to yellow, and then became a pretty +brisk and good willow green, and afterwards changed to a bluish Colour; +but there succeeded neither blue nor violet. + +The fifth red at first inclined very much to purple, and afterwards +became more bright and brisk, but yet not very pure. This was succeeded +with a very bright and intense yellow, which was but little in quantity, +and soon chang'd to green: But that green was copious and something more +pure, deep and lively, than the former green. After that follow'd an +excellent blue of a bright Sky-colour, and then a purple, which was less +in quantity than the blue, and much inclined to red. + +The sixth red was at first of a very fair and lively scarlet, and soon +after of a brighter Colour, being very pure and brisk, and the best of +all the reds. 
Then after a lively orange follow'd an intense bright and +copious yellow, which was also the best of all the yellows, and this +changed first to a greenish yellow, and then to a greenish blue; but the +green between the yellow and the blue, was very little and dilute, +seeming rather a greenish white than a green. The blue which succeeded +became very good, and of a very bright Sky-colour, but yet something +inferior to the former blue; and the violet was intense and deep with +little or no redness in it. And less in quantity than the blue. + +In the last red appeared a tincture of scarlet next to violet, which +soon changed to a brighter Colour, inclining to an orange; and the +yellow which follow'd was at first pretty good and lively, but +afterwards it grew more dilute until by degrees it ended in perfect +whiteness. And this whiteness, if the Water was very tenacious and +well-temper'd, would slowly spread and dilate it self over the greater +part of the Bubble; continually growing paler at the top, where at +length it would crack in many places, and those cracks, as they dilated, +would appear of a pretty good, but yet obscure and dark Sky-colour; the +white between the blue Spots diminishing, until it resembled the Threds +of an irregular Net-work, and soon after vanish'd, and left all the +upper part of the Bubble of the said dark blue Colour. And this Colour, +after the aforesaid manner, dilated it self downwards, until sometimes +it hath overspread the whole Bubble. In the mean while at the top, which +was of a darker blue than the bottom, and appear'd also full of many +round blue Spots, something darker than the rest, there would emerge +one or more very black Spots, and within those, other Spots of an +intenser blackness, which I mention'd in the former Observation; and +these continually dilated themselves until the Bubble broke. 
+ +If the Water was not very tenacious, the black Spots would break forth +in the white, without any sensible intervention of the blue. And +sometimes they would break forth within the precedent yellow, or red, or +perhaps within the blue of the second order, before the intermediate +Colours had time to display themselves. + +By this description you may perceive how great an affinity these Colours +have with those of Air described in the fourth Observation, although set +down in a contrary order, by reason that they begin to appear when the +Bubble is thickest, and are most conveniently reckon'd from the lowest +and thickest part of the Bubble upwards. + +_Obs._ 19. Viewing in several oblique Positions of my Eye the Rings of +Colours emerging on the top of the Bubble, I found that they were +sensibly dilated by increasing the obliquity, but yet not so much by far +as those made by thinn'd Air in the seventh Observation. For there they +were dilated so much as, when view'd most obliquely, to arrive at a part +of the Plate more than twelve times thicker than that where they +appear'd when viewed perpendicularly; whereas in this case the thickness +of the Water, at which they arrived when viewed most obliquely, was to +that thickness which exhibited them by perpendicular Rays, something +less than as 8 to 5. By the best of my Observations it was between 15 +and 15-1/2 to 10; an increase about 24 times less than in the other +case. + +Sometimes the Bubble would become of an uniform thickness all over, +except at the top of it near the black Spot, as I knew, because it would +exhibit the same appearance of Colours in all Positions of the Eye. And +then the Colours which were seen at its apparent circumference by the +obliquest Rays, would be different from those that were seen in other +places, by Rays less oblique to it. And divers Spectators might see the +same part of it of differing Colours, by viewing it at very differing +Obliquities. 
Now observing how much the Colours at the same places of +the Bubble, or at divers places of equal thickness, were varied by the +several Obliquities of the Rays; by the assistance of the 4th, 14th, +16th and 18th Observations, as they are hereafter explain'd, I collect +the thickness of the Water requisite to exhibit any one and the same +Colour, at several Obliquities, to be very nearly in the Proportion +expressed in this Table. + +-----------------+------------------+---------------- + Incidence on | Refraction into | Thickness of + the Water. | the Water. | the Water. +-----------------+------------------+---------------- + Deg. Min. | Deg. Min. | + | | + 00 00 | 00 00 | 10 + | | + 15 00 | 11 11 | 10-1/4 + | | + 30 00 | 22 1 | 10-4/5 + | | + 45 00 | 32 2 | 11-4/5 + | | + 60 00 | 40 30 | 13 + | | + 75 00 | 46 25 | 14-1/2 + | | + 90 00 | 48 35 | 15-1/5 +-----------------+------------------+---------------- + +In the two first Columns are express'd the Obliquities of the Rays to +the Superficies of the Water, that is, their Angles of Incidence and +Refraction. Where I suppose, that the Sines which measure them are in +round Numbers, as 3 to 4, though probably the Dissolution of Soap in the +Water, may a little alter its refractive Virtue. In the third Column, +the Thickness of the Bubble, at which any one Colour is exhibited in +those several Obliquities, is express'd in Parts, of which ten +constitute its Thickness when the Rays are perpendicular. 
And the Rule +found by the seventh Observation agrees well with these Measures, if +duly apply'd; namely, that the Thickness of a Plate of Water requisite +to exhibit one and the same Colour at several Obliquities of the Eye, is +proportional to the Secant of an Angle, whose Sine is the first of an +hundred and six arithmetical mean Proportionals between the Sines of +Incidence and Refraction counted from the lesser Sine, that is, from the +Sine of Refraction when the Refraction is made out of Air into Water, +otherwise from the Sine of Incidence. + +I have sometimes observ'd, that the Colours which arise on polish'd +Steel by heating it, or on Bell-metal, and some other metalline +Substances, when melted and pour'd on the Ground, where they may cool in +the open Air, have, like the Colours of Water-bubbles, been a little +changed by viewing them at divers Obliquities, and particularly that a +deep blue, or violet, when view'd very obliquely, hath been changed to a +deep red. But the Changes of these Colours are not so great and +sensible as of those made by Water. For the Scoria, or vitrified Part of +the Metal, which most Metals when heated or melted do continually +protrude, and send out to their Surface, and which by covering the +Metals in form of a thin glassy Skin, causes these Colours, is much +denser than Water; and I find that the Change made by the Obliquation of +the Eye is least in Colours of the densest thin Substances. + +_Obs._ 20. As in the ninth Observation, so here, the Bubble, by +transmitted Light, appear'd of a contrary Colour to that, which it +exhibited by Reflexion. Thus when the Bubble being look'd on by the +Light of the Clouds reflected from it, seemed red at its apparent +Circumference, if the Clouds at the same time, or immediately after, +were view'd through it, the Colour at its Circumference would be blue. +And, on the contrary, when by reflected Light it appeared blue, it would +appear red by transmitted Light. + +_Obs._ 21. 
By wetting very thin Plates of _Muscovy_ Glass, whose +thinness made the like Colours appear, the Colours became more faint and +languid, especially by wetting the Plates on that side opposite to the +Eye: But I could not perceive any variation of their Species. So then +the thickness of a Plate requisite to produce any Colour, depends only +on the density of the Plate, and not on that of the ambient Medium. And +hence, by the 10th and 16th Observations, may be known the thickness +which Bubbles of Water, or Plates of _Muscovy_ Glass, or other +Substances, have at any Colour produced by them. + +_Obs._ 22. A thin transparent Body, which is denser than its ambient +Medium, exhibits more brisk and vivid Colours than that which is so much +rarer; as I have particularly observed in the Air and Glass. For blowing +Glass very thin at a Lamp Furnace, those Plates encompassed with Air did +exhibit Colours much more vivid than those of Air made thin between two +Glasses. + +_Obs._ 23. Comparing the quantity of Light reflected from the several +Rings, I found that it was most copious from the first or inmost, and in +the exterior Rings became gradually less and less. Also the whiteness of +the first Ring was stronger than that reflected from those parts of the +thin Medium or Plate which were without the Rings; as I could manifestly +perceive by viewing at a distance the Rings made by the two +Object-glasses; or by comparing two Bubbles of Water blown at distant +Times, in the first of which the Whiteness appear'd, which succeeded all +the Colours, and in the other, the Whiteness which preceded them all. + +_Obs._ 24. 
When the two Object-glasses were lay'd upon one another, so +as to make the Rings of the Colours appear, though with my naked Eye I +could not discern above eight or nine of those Rings, yet by viewing +them through a Prism I have seen a far greater Multitude, insomuch that +I could number more than forty, besides many others, that were so very +small and close together, that I could not keep my Eye steady on them +severally so as to number them, but by their Extent I have sometimes +estimated them to be more than an hundred. And I believe the Experiment +may be improved to the Discovery of far greater Numbers. For they seem +to be really unlimited, though visible only so far as they can be +separated by the Refraction of the Prism, as I shall hereafter explain. + +[Illustration: FIG. 5.] + +But it was but one side of these Rings, namely, that towards which the +Refraction was made, which by that Refraction was render'd distinct, and +the other side became more confused than when view'd by the naked Eye, +insomuch that there I could not discern above one or two, and sometimes +none of those Rings, of which I could discern eight or nine with my +naked Eye. And their Segments or Arcs, which on the other side appear'd +so numerous, for the most part exceeded not the third Part of a Circle. +If the Refraction was very great, or the Prism very distant from the +Object-glasses, the middle Part of those Arcs became also confused, so +as to disappear and constitute an even Whiteness, whilst on either side +their Ends, as also the whole Arcs farthest from the Center, became +distincter than before, appearing in the Form as you see them design'd +in the fifth Figure. + +The Arcs, where they seem'd distinctest, were only white and black +successively, without any other Colours intermix'd. 
But in other Places +there appeared Colours, whose Order was inverted by the refraction in +such manner, that if I first held the Prism very near the +Object-glasses, and then gradually removed it farther off towards my +Eye, the Colours of the 2d, 3d, 4th, and following Rings, shrunk towards +the white that emerged between them, until they wholly vanish'd into it +at the middle of the Arcs, and afterwards emerged again in a contrary +Order. But at the Ends of the Arcs they retain'd their Order unchanged. + +I have sometimes so lay'd one Object-glass upon the other, that to the +naked Eye they have all over seem'd uniformly white, without the least +Appearance of any of the colour'd Rings; and yet by viewing them through +a Prism, great Multitudes of those Rings have discover'd themselves. And +in like manner Plates of _Muscovy_ Glass, and Bubbles of Glass blown at +a Lamp-Furnace, which were not so thin as to exhibit any Colours to the +naked Eye, have through the Prism exhibited a great Variety of them +ranged irregularly up and down in the Form of Waves. And so Bubbles of +Water, before they began to exhibit their Colours to the naked Eye of a +Bystander, have appeared through a Prism, girded about with many +parallel and horizontal Rings; to produce which Effect, it was necessary +to hold the Prism parallel, or very nearly parallel to the Horizon, and +to dispose it so that the Rays might be refracted upwards. + + + + +THE + +SECOND BOOK + +OF + +OPTICKS + + +_PART II._ + +_Remarks upon the foregoing Observations._ + + +Having given my Observations of these Colours, before I make use of them +to unfold the Causes of the Colours of natural Bodies, it is convenient +that by the simplest of them, such as are the 2d, 3d, 4th, 9th, 12th, +18th, 20th, and 24th, I first explain the more compounded. And first to +shew how the Colours in the fourth and eighteenth Observations are +produced, let there be taken in any Right Line from the Point Y, [in +_Fig._ 6.] 
the Lengths YA, YB, YC, YD, YE, YF, YG, YH, in proportion to +one another, as the Cube-Roots of the Squares of the Numbers, 1/2, 9/16, +3/5, 2/3, 3/4, 5/6, 8/9, 1, whereby the Lengths of a Musical Chord to +sound all the Notes in an eighth are represented; that is, in the +Proportion of the Numbers 6300, 6814, 7114, 7631, 8255, 8855, 9243, +10000. And at the Points A, B, C, D, E, F, G, H, let Perpendiculars +A[Greek: a], B[Greek: b], &c. be erected, by whose Intervals the Extent +of the several Colours set underneath against them, is to be +represented. Then divide the Line _A[Greek: a]_ in such Proportion as +the Numbers 1, 2, 3, 5, 6, 7, 9, 10, 11, &c. set at the Points of +Division denote. And through those Divisions from Y draw Lines 1I, 2K, +3L, 5M, 6N, 7O, &c. + +Now, if A2 be supposed to represent the Thickness of any thin +transparent Body, at which the outmost Violet is most copiously +reflected in the first Ring, or Series of Colours, then by the 13th +Observation, HK will represent its Thickness, at which the utmost Red is +most copiously reflected in the same Series. Also by the 5th and 16th +Observations, A6 and HN will denote the Thicknesses at which those +extreme Colours are most copiously reflected in the second Series, and +A10 and HQ the Thicknesses at which they are most copiously reflected in +the third Series, and so on. And the Thickness at which any of the +intermediate Colours are reflected most copiously, will, according to +the 14th Observation, be defined by the distance of the Line AH from the +intermediate parts of the Lines 2K, 6N, 10Q, &c. against which the Names +of those Colours are written below. + +[Illustration: FIG. 6.] 
+ +But farther, to define the Latitude of these Colours in each Ring or +Series, let A1 design the least thickness, and A3 the greatest +thickness, at which the extreme violet in the first Series is reflected, +and let HI, and HL, design the like limits for the extreme red, and let +the intermediate Colours be limited by the intermediate parts of the +Lines 1I, and 3L, against which the Names of those Colours are written, +and so on: But yet with this caution, that the Reflexions be supposed +strongest at the intermediate Spaces, 2K, 6N, 10Q, &c. and from thence +to decrease gradually towards these limits, 1I, 3L, 5M, 7O, &c. on +either side; where you must not conceive them to be precisely limited, +but to decay indefinitely. And whereas I have assign'd the same Latitude +to every Series, I did it, because although the Colours in the first +Series seem to be a little broader than the rest, by reason of a +stronger Reflexion there, yet that inequality is so insensible as +scarcely to be determin'd by Observation. + +Now according to this Description, conceiving that the Rays originally +of several Colours are by turns reflected at the Spaces 1I, L3, 5M, O7, +9PR11, &c. and transmitted at the Spaces AHI1, 3LM5, 7OP9, &c. it is +easy to know what Colour must in the open Air be exhibited at any +thickness of a transparent thin Body. For if a Ruler be applied parallel +to AH, at that distance from it by which the thickness of the Body is +represented, the alternate Spaces 1IL3, 5MO7, &c. which it crosseth will +denote the reflected original Colours, of which the Colour exhibited in +the open Air is compounded. 
Thus if the constitution of the green in the +third Series of Colours be desired, apply the Ruler as you see at +[Greek: prsph], and by its passing through some of the blue at [Greek: +p] and yellow at [Greek: s], as well as through the green at [Greek: r], +you may conclude that the green exhibited at that thickness of the Body +is principally constituted of original green, but not without a mixture +of some blue and yellow. + +By this means you may know how the Colours from the center of the Rings +outward ought to succeed in order as they were described in the 4th and +18th Observations. For if you move the Ruler gradually from AH through +all distances, having pass'd over the first Space which denotes little +or no Reflexion to be made by thinnest Substances, it will first arrive +at 1 the violet, and then very quickly at the blue and green, which +together with that violet compound blue, and then at the yellow and red, +by whose farther addition that blue is converted into whiteness, which +whiteness continues during the transit of the edge of the Ruler from I +to 3, and after that by the successive deficience of its component +Colours, turns first to compound yellow, and then to red, and last of +all the red ceaseth at L. Then begin the Colours of the second Series, +which succeed in order during the transit of the edge of the Ruler from +5 to O, and are more lively than before, because more expanded and +severed. And for the same reason instead of the former white there +intercedes between the blue and yellow a mixture of orange, yellow, +green, blue and indigo, all which together ought to exhibit a dilute and +imperfect green. 
So the Colours of the third Series all succeed in +order; first, the violet, which a little interferes with the red of the +second order, and is thereby inclined to a reddish purple; then the blue +and green, which are less mix'd with other Colours, and consequently +more lively than before, especially the green: Then follows the yellow, +some of which towards the green is distinct and good, but that part of +it towards the succeeding red, as also that red is mix'd with the violet +and blue of the fourth Series, whereby various degrees of red very much +inclining to purple are compounded. This violet and blue, which should +succeed this red, being mixed with, and hidden in it, there succeeds a +green. And this at first is much inclined to blue, but soon becomes a +good green, the only unmix'd and lively Colour in this fourth Series. +For as it verges towards the yellow, it begins to interfere with the +Colours of the fifth Series, by whose mixture the succeeding yellow and +red are very much diluted and made dirty, especially the yellow, which +being the weaker Colour is scarce able to shew it self. After this the +several Series interfere more and more, and their Colours become more +and more intermix'd, till after three or four more revolutions (in which +the red and blue predominate by turns) all sorts of Colours are in all +places pretty equally blended, and compound an even whiteness. + +And since by the 15th Observation the Rays endued with one Colour are +transmitted, where those of another Colour are reflected, the reason of +the Colours made by the transmitted Light in the 9th and 20th +Observations is from hence evident. + +If not only the Order and Species of these Colours, but also the precise +thickness of the Plate, or thin Body at which they are exhibited, be +desired in parts of an Inch, that may be also obtained by assistance of +the 6th or 16th Observations. 
For according to those Observations the +thickness of the thinned Air, which between two Glasses exhibited the +most luminous parts of the first six Rings were 1/178000, 3/178000, +5/178000, 7/178000, 9/178000, 11/178000 parts of an Inch. Suppose the +Light reflected most copiously at these thicknesses be the bright +citrine yellow, or confine of yellow and orange, and these thicknesses +will be F[Greek: l], F[Greek: m], F[Greek: u], F[Greek: x], F[Greek: o], +F[Greek: t]. And this being known, it is easy to determine what +thickness of Air is represented by G[Greek: ph], or by any other +distance of the Ruler from AH. + +But farther, since by the 10th Observation the thickness of Air was to +the thickness of Water, which between the same Glasses exhibited the +same Colour, as 4 to 3, and by the 21st Observation the Colours of thin +Bodies are not varied by varying the ambient Medium; the thickness of a +Bubble of Water, exhibiting any Colour, will be 3/4 of the thickness of +Air producing the same Colour. And so according to the same 10th and +21st Observations, the thickness of a Plate of Glass, whose Refraction +of the mean refrangible Ray, is measured by the proportion of the Sines +31 to 20, may be 20/31 of the thickness of Air producing the same +Colours; and the like of other Mediums. I do not affirm, that this +proportion of 20 to 31, holds in all the Rays; for the Sines of other +sorts of Rays have other Proportions. But the differences of those +Proportions are so little that I do not here consider them. On these +Grounds I have composed the following Table, wherein the thickness of +Air, Water, and Glass, at which each Colour is most intense and +specifick, is expressed in parts of an Inch divided into ten hundred +thousand equal parts. 
+ +Now if this Table be compared with the 6th Scheme, you will there see +the constitution of each Colour, as to its Ingredients, or the original +Colours of which it is compounded, and thence be enabled to judge of its +Intenseness or Imperfection; which may suffice in explication of the 4th +and 18th Observations, unless it be farther desired to delineate the +manner how the Colours appear, when the two Object-glasses are laid upon +one another. To do which, let there be described a large Arc of a +Circle, and a streight Line which may touch that Arc, and parallel to +that Tangent several occult Lines, at such distances from it, as the +Numbers set against the several Colours in the Table denote. For the +Arc, and its Tangent, will represent the Superficies of the Glasses +terminating the interjacent Air; and the places where the occult Lines +cut the Arc will show at what distances from the center, or Point of +contact, each Colour is reflected. + +_The thickness of colour'd Plates and Particles of_ + _____________|_______________ + / \ + Air. Water. Glass. 
+ |---------+----------+----------+ + {Very black | 1/2 | 3/8 | 10/31 | + {Black | 1 | 3/4 | 20/31 | + {Beginning of | | | | + { Black | 2 | 1-1/2 | 1-2/7 | +Their Colours of the {Blue | 2-2/5 | 1-4/5 | 1-11/22 | +first Order, {White | 5-1/4 | 3-7/8 | 3-2/5 | + {Yellow | 7-1/9 | 5-1/3 | 4-3/5 | + {Orange | 8 | 6 | 5-1/6 | + {Red | 9 | 6-3/4 | 5-4/5 | + |---------+----------+----------| + {Violet | 11-1/6 | 8-3/8 | 7-1/5 | + {Indigo | 12-5/6 | 9-5/8 | 8-2/11 | + {Blue | 14 | 10-1/2 | 9 | + {Green | 15-1/8 | 11-2/3 | 9-5/7 | +Of the second order, {Yellow | 16-2/7 | 12-1/5 | 10-2/5 | + {Orange | 17-2/9 | 13 | 11-1/9 | + {Bright red | 18-1/3 | 13-3/4 | 11-5/6 | + {Scarlet | 19-2/3 | 14-3/4 | 12-2/3 | + |---------+----------+----------| + {Purple | 21 | 15-3/4 | 13-11/20 | + {Indigo | 22-1/10 | 16-4/7 | 14-1/4 | + {Blue | 23-2/5 | 17-11/20 | 15-1/10 | +Of the third Order, {Green | 25-1/5 | 18-9/10 | 16-1/4 | + {Yellow | 27-1/7 | 20-1/3 | 17-1/2 | + {Red | 29 | 21-3/4 | 18-5/7 | + {Bluish red | 32 | 24 | 20-2/3 | + |---------+----------+----------| + {Bluish green | 34 | 25-1/2 | 22 | + {Green | 35-2/7 | 26-1/2 | 22-3/4 | +Of the fourth Order, {Yellowish green | 36 | 27 | 23-2/9 | + {Red | 40-1/3 | 30-1/4 | 26 | + |---------+----------+----------| + {Greenish blue | 46 | 34-1/2 | 29-2/3 | +Of the fifth Order, {Red | 52-1/2 | 39-3/8 | 34 | + |---------+----------+----------| + {Greenish blue | 58-3/4 | 44 | 38 | +Of the sixth Order, {Red | 65 | 48-3/4 | 42 | + |---------+----------+----------| +Of the seventh Order, {Greenish blue | 71 | 53-1/4 | 45-4/5 | + {Ruddy White | 77 | 57-3/4 | 49-2/3 | + |---------+----------+----------| + +There are also other Uses of this Table: For by its assistance the +thickness of the Bubble in the 19th Observation was determin'd by the +Colours which it exhibited. And so the bigness of the parts of natural +Bodies may be conjectured by their Colours, as shall be hereafter shewn. 
+Also, if two or more very thin Plates be laid one upon another, so as to +compose one Plate equalling them all in thickness, the resulting Colour +may be hereby determin'd. For instance, Mr. _Hook_ observed, as is +mentioned in his _Micrographia_, that a faint yellow Plate of _Muscovy_ +Glass laid upon a blue one, constituted a very deep purple. The yellow +of the first Order is a faint one, and the thickness of the Plate +exhibiting it, according to the Table is 4-3/5, to which add 9, the +thickness exhibiting blue of the second Order, and the Sum will be +13-3/5, which is the thickness exhibiting the purple of the third Order. + +To explain, in the next place, the circumstances of the 2d and 3d +Observations; that is, how the Rings of the Colours may (by turning the +Prisms about their common Axis the contrary way to that expressed in +those Observations) be converted into white and black Rings, and +afterwards into Rings of Colours again, the Colours of each Ring lying +now in an inverted order; it must be remember'd, that those Rings of +Colours are dilated by the obliquation of the Rays to the Air which +intercedes the Glasses, and that according to the Table in the 7th +Observation, their Dilatation or Increase of their Diameter is most +manifest and speedy when they are obliquest. Now the Rays of yellow +being more refracted by the first Superficies of the said Air than those +of red, are thereby made more oblique to the second Superficies, at +which they are reflected to produce the colour'd Rings, and consequently +the yellow Circle in each Ring will be more dilated than the red; and +the Excess of its Dilatation will be so much the greater, by how much +the greater is the obliquity of the Rays, until at last it become of +equal extent with the red of the same Ring. 
And for the same reason the +green, blue and violet, will be also so much dilated by the still +greater obliquity of their Rays, as to become all very nearly of equal +extent with the red, that is, equally distant from the center of the +Rings. And then all the Colours of the same Ring must be co-incident, +and by their mixture exhibit a white Ring. And these white Rings must +have black and dark Rings between them, because they do not spread and +interfere with one another, as before. And for that reason also they +must become distincter, and visible to far greater numbers. But yet the +violet being obliquest will be something more dilated, in proportion to +its extent, than the other Colours, and so very apt to appear at the +exterior Verges of the white. + +Afterwards, by a greater obliquity of the Rays, the violet and blue +become more sensibly dilated than the red and yellow, and so being +farther removed from the center of the Rings, the Colours must emerge +out of the white in an order contrary to that which they had before; the +violet and blue at the exterior Limbs of each Ring, and the red and +yellow at the interior. And the violet, by reason of the greatest +obliquity of its Rays, being in proportion most of all expanded, will +soonest appear at the exterior Limb of each white Ring, and become more +conspicuous than the rest. And the several Series of Colours belonging +to the several Rings, will, by their unfolding and spreading, begin +again to interfere, and thereby render the Rings less distinct, and not +visible to so great numbers. 
+ +If instead of the Prisms the Object-glasses be made use of, the Rings +which they exhibit become not white and distinct by the obliquity of the +Eye, by reason that the Rays in their passage through that Air which +intercedes the Glasses are very nearly parallel to those Lines in which +they were first incident on the Glasses, and consequently the Rays +endued with several Colours are not inclined one more than another to +that Air, as it happens in the Prisms. + +There is yet another circumstance of these Experiments to be consider'd, +and that is why the black and white Rings which when view'd at a +distance appear distinct, should not only become confused by viewing +them near at hand, but also yield a violet Colour at both the edges of +every white Ring. And the reason is, that the Rays which enter the Eye +at several parts of the Pupil, have several Obliquities to the Glasses, +and those which are most oblique, if consider'd apart, would represent +the Rings bigger than those which are the least oblique. Whence the +breadth of the Perimeter of every white Ring is expanded outwards by the +obliquest Rays, and inwards by the least oblique. And this Expansion is +so much the greater by how much the greater is the difference of the +Obliquity; that is, by how much the Pupil is wider, or the Eye nearer to +the Glasses. And the breadth of the violet must be most expanded, +because the Rays apt to excite a Sensation of that Colour are most +oblique to a second or farther Superficies of the thinn'd Air at which +they are reflected, and have also the greatest variation of Obliquity, +which makes that Colour soonest emerge out of the edges of the white. +And as the breadth of every Ring is thus augmented, the dark Intervals +must be diminish'd, until the neighbouring Rings become continuous, and +are blended, the exterior first, and then those nearer the center; so +that they can no longer be distinguish'd apart, but seem to constitute +an even and uniform whiteness. 
+ +Among all the Observations there is none accompanied with so odd +circumstances as the twenty-fourth. Of those the principal are, that in +thin Plates, which to the naked Eye seem of an even and uniform +transparent whiteness, without any terminations of Shadows, the +Refraction of a Prism should make Rings of Colours appear, whereas it +usually makes Objects appear colour'd only there where they are +terminated with Shadows, or have parts unequally luminous; and that it +should make those Rings exceedingly distinct and white, although it +usually renders Objects confused and coloured. The Cause of these things +you will understand by considering, that all the Rings of Colours are +really in the Plate, when view'd with the naked Eye, although by reason +of the great breadth of their Circumferences they so much interfere and +are blended together, that they seem to constitute an uniform whiteness. +But when the Rays pass through the Prism to the Eye, the Orbits of the +several Colours in every Ring are refracted, some more than others, +according to their degrees of Refrangibility: By which means the Colours +on one side of the Ring (that is in the circumference on one side of its +center), become more unfolded and dilated, and those on the other side +more complicated and contracted. And where by a due Refraction they are +so much contracted, that the several Rings become narrower than to +interfere with one another, they must appear distinct, and also white, +if the constituent Colours be so much contracted as to be wholly +co-incident. But on the other side, where the Orbit of every Ring is +made broader by the farther unfolding of its Colours, it must interfere +more with other Rings than before, and so become less distinct. + +[Illustration: FIG. 7.] + +To explain this a little farther, suppose the concentrick Circles AV, +and BX, [in _Fig._ 7.] 
represent the red and violet of any Order, which, +together with the intermediate Colours, constitute any one of these +Rings. Now these being view'd through a Prism, the violet Circle BX, +will, by a greater Refraction, be farther translated from its place than +the red AV, and so approach nearer to it on that side of the Circles, +towards which the Refractions are made. For instance, if the red be +translated to _av_, the violet may be translated to _bx_, so as to +approach nearer to it at _x_ than before; and if the red be farther +translated to av, the violet may be so much farther translated to bx as +to convene with it at x; and if the red be yet farther translated to +[Greek: aY], the violet may be still so much farther translated to +[Greek: bx] as to pass beyond it at [Greek: x], and convene with it at +_e_ and _f_. And this being understood not only of the red and violet, +but of all the other intermediate Colours, and also of every revolution +of those Colours, you will easily perceive how those of the same +revolution or order, by their nearness at _xv_ and [Greek: Yx], and +their coincidence at xv, _e_ and _f_, ought to constitute pretty +distinct Arcs of Circles, especially at xv, or at _e_ and _f_; and that +they will appear severally at _x_[Greek: u] and at xv exhibit whiteness +by their coincidence, and again appear severally at [Greek: Yx], but yet +in a contrary order to that which they had before, and still retain +beyond _e_ and _f_. But on the other side, at _ab_, ab, or [Greek: ab], +these Colours must become much more confused by being dilated and spread +so as to interfere with those of other Orders. 
And the same confusion +will happen at [Greek: Ux] between _e_ and _f_, if the Refraction be +very great, or the Prism very distant from the Object-glasses: In which +case no parts of the Rings will be seen, save only two little Arcs at +_e_ and _f_, whose distance from one another will be augmented by +removing the Prism still farther from the Object-glasses: And these +little Arcs must be distinctest and whitest at their middle, and at +their ends, where they begin to grow confused, they must be colour'd. +And the Colours at one end of every Arc must be in a contrary order to +those at the other end, by reason that they cross in the intermediate +white; namely, their ends, which verge towards [Greek: Ux], will be red +and yellow on that side next the center, and blue and violet on the +other side. But their other ends which verge from [Greek: Ux], will on +the contrary be blue and violet on that side towards the center, and on +the other side red and yellow. + +Now as all these things follow from the properties of Light by a +mathematical way of reasoning, so the truth of them may be manifested by +Experiments. For in a dark Room, by viewing these Rings through a Prism, +by reflexion of the several prismatick Colours, which an assistant +causes to move to and fro upon a Wall or Paper from whence they are +reflected, whilst the Spectator's Eye, the Prism, and the +Object-glasses, (as in the 13th Observation,) are placed steady; the +Position of the Circles made successively by the several Colours, will +be found such, in respect of one another, as I have described in the +Figures _abxv_, or abxv, or _[Greek: abxU]_. And by the same method the +truth of the Explications of other Observations may be examined. + +By what hath been said, the like Phænomena of Water and thin Plates of +Glass may be understood. 
But in small fragments of those Plates there is +this farther observable, that where they lie flat upon a Table, and are +turned about their centers whilst they are view'd through a Prism, they +will in some postures exhibit Waves of various Colours; and some of them +exhibit these Waves in one or two Positions only, but the most of them +do in all Positions exhibit them, and make them for the most part appear +almost all over the Plates. The reason is, that the Superficies of such +Plates are not even, but have many Cavities and Swellings, which, how +shallow soever, do a little vary the thickness of the Plate. For at the +several sides of those Cavities, for the Reasons newly described, there +ought to be produced Waves in several postures of the Prism. Now though +it be but some very small and narrower parts of the Glass, by which +these Waves for the most part are caused, yet they may seem to extend +themselves over the whole Glass, because from the narrowest of those +parts there are Colours of several Orders, that is, of several Rings, +confusedly reflected, which by Refraction of the Prism are unfolded, +separated, and, according to their degrees of Refraction, dispersed to +several places, so as to constitute so many several Waves, as there were +divers orders of Colours promiscuously reflected from that part of the +Glass. + +These are the principal Phænomena of thin Plates or Bubbles, whose +Explications depend on the properties of Light, which I have heretofore +deliver'd. And these you see do necessarily follow from them, and agree +with them, even to their very least circumstances; and not only so, but +do very much tend to their proof. 
Thus, by the 24th Observation it +appears, that the Rays of several Colours, made as well by thin Plates +or Bubbles, as by Refractions of a Prism, have several degrees of +Refrangibility; whereby those of each order, which at the reflexion from +the Plate or Bubble are intermix'd with those of other orders, are +separated from them by Refraction, and associated together so as to +become visible by themselves like Arcs of Circles. For if the Rays were +all alike refrangible, 'tis impossible that the whiteness, which to the +naked Sense appears uniform, should by Refraction have its parts +transposed and ranged into those black and white Arcs. + +It appears also that the unequal Refractions of difform Rays proceed not +from any contingent irregularities; such as are Veins, an uneven Polish, +or fortuitous Position of the Pores of Glass; unequal and casual Motions +in the Air or Æther, the spreading, breaking, or dividing the same Ray +into many diverging parts; or the like. For, admitting any such +irregularities, it would be impossible for Refractions to render those +Rings so very distinct, and well defined, as they do in the 24th +Observation. It is necessary therefore that every Ray have its proper +and constant degree of Refrangibility connate with it, according to +which its refraction is ever justly and regularly perform'd; and that +several Rays have several of those degrees. + +And what is said of their Refrangibility may be also understood of their +Reflexibility, that is, of their Dispositions to be reflected, some at a +greater, and others at a less thickness of thin Plates or Bubbles; +namely, that those Dispositions are also connate with the Rays, and +immutable; as may appear by the 13th, 14th, and 15th Observations, +compared with the fourth and eighteenth. + +By the Precedent Observations it appears also, that whiteness is a +dissimilar mixture of all Colours, and that Light is a mixture of Rays +endued with all those Colours. 
For, considering the multitude of the +Rings of Colours in the 3d, 12th, and 24th Observations, it is manifest, +that although in the 4th and 18th Observations there appear no more than +eight or nine of those Rings, yet there are really a far greater number, +which so much interfere and mingle with one another, as after those +eight or nine revolutions to dilute one another wholly, and constitute +an even and sensibly uniform whiteness. And consequently that whiteness +must be allow'd a mixture of all Colours, and the Light which conveys it +to the Eye must be a mixture of Rays endued with all those Colours. + +But farther; by the 24th Observation it appears, that there is a +constant relation between Colours and Refrangibility; the most +refrangible Rays being violet, the least refrangible red, and those of +intermediate Colours having proportionably intermediate degrees of +Refrangibility. And by the 13th, 14th, and 15th Observations, compared +with the 4th or 18th there appears to be the same constant relation +between Colour and Reflexibility; the violet being in like circumstances +reflected at least thicknesses of any thin Plate or Bubble, the red at +greatest thicknesses, and the intermediate Colours at intermediate +thicknesses. Whence it follows, that the colorifick Dispositions of +Rays are also connate with them, and immutable; and by consequence, that +all the Productions and Appearances of Colours in the World are derived, +not from any physical Change caused in Light by Refraction or Reflexion, +but only from the various Mixtures or Separations of Rays, by virtue of +their different Refrangibility or Reflexibility. And in this respect the +Science of Colours becomes a Speculation as truly mathematical as any +other part of Opticks. I mean, so far as they depend on the Nature of +Light, and are not produced or alter'd by the Power of Imagination, or +by striking or pressing the Eye. 
+ + + + +THE + +SECOND BOOK + +OF + +OPTICKS + + +_PART III._ + +_Of the permanent Colours of natural Bodies, and the Analogy between +them and the Colours of thin transparent Plates._ + +I am now come to another part of this Design, which is to consider how +the Phænomena of thin transparent Plates stand related to those of all +other natural Bodies. Of these Bodies I have already told you that they +appear of divers Colours, accordingly as they are disposed to reflect +most copiously the Rays originally endued with those Colours. But their +Constitutions, whereby they reflect some Rays more copiously than +others, remain to be discover'd; and these I shall endeavour to manifest +in the following Propositions. + + +PROP. I. + +_Those Superficies of transparent Bodies reflect the greatest quantity +of Light, which have the greatest refracting Power; that is, which +intercede Mediums that differ most in their refractive Densities. And in +the Confines of equally refracting Mediums there is no Reflexion._ + +The Analogy between Reflexion and Refraction will appear by considering, +that when Light passeth obliquely out of one Medium into another which +refracts from the perpendicular, the greater is the difference of their +refractive Density, the less Obliquity of Incidence is requisite to +cause a total Reflexion. For as the Sines are which measure the +Refraction, so is the Sine of Incidence at which the total Reflexion +begins, to the Radius of the Circle; and consequently that Angle of +Incidence is least where there is the greatest difference of the Sines. +Thus in the passing of Light out of Water into Air, where the Refraction +is measured by the Ratio of the Sines 3 to 4, the total Reflexion begins +when the Angle of Incidence is about 48 Degrees 35 Minutes. 
In passing +out of Glass into Air, where the Refraction is measured by the Ratio of +the Sines 20 to 31, the total Reflexion begins when the Angle of +Incidence is 40 Degrees 10 Minutes; and so in passing out of Crystal, or +more strongly refracting Mediums into Air, there is still a less +obliquity requisite to cause a total reflexion. Superficies therefore +which refract most do soonest reflect all the Light which is incident on +them, and so must be allowed most strongly reflexive. + +But the truth of this Proposition will farther appear by observing, that +in the Superficies interceding two transparent Mediums, (such as are +Air, Water, Oil, common Glass, Crystal, metalline Glasses, Island +Glasses, white transparent Arsenick, Diamonds, &c.) the Reflexion is +stronger or weaker accordingly, as the Superficies hath a greater or +less refracting Power. For in the Confine of Air and Sal-gem 'tis +stronger than in the Confine of Air and Water, and still stronger in the +Confine of Air and common Glass or Crystal, and stronger in the Confine +of Air and a Diamond. If any of these, and such like transparent Solids, +be immerged in Water, its Reflexion becomes, much weaker than before; +and still weaker if they be immerged in the more strongly refracting +Liquors of well rectified Oil of Vitriol or Spirit of Turpentine. If +Water be distinguish'd into two parts by any imaginary Surface, the +Reflexion in the Confine of those two parts is none at all. In the +Confine of Water and Ice 'tis very little; in that of Water and Oil 'tis +something greater; in that of Water and Sal-gem still greater; and in +that of Water and Glass, or Crystal or other denser Substances still +greater, accordingly as those Mediums differ more or less in their +refracting Powers. Hence in the Confine of common Glass and Crystal, +there ought to be a weak Reflexion, and a stronger Reflexion in the +Confine of common and metalline Glass; though I have not yet tried +this. 
But in the Confine of two Glasses of equal density, there is not +any sensible Reflexion; as was shewn in the first Observation. And the +same may be understood of the Superficies interceding two Crystals, or +two Liquors, or any other Substances in which no Refraction is caused. +So then the reason why uniform pellucid Mediums (such as Water, Glass, +or Crystal,) have no sensible Reflexion but in their external +Superficies, where they are adjacent to other Mediums of a different +density, is because all their contiguous parts have one and the same +degree of density. + + +PROP. II. + +_The least parts of almost all natural Bodies are in some measure +transparent: And the Opacity of those Bodies ariseth from the multitude +of Reflexions caused in their internal Parts._ + +That this is so has been observed by others, and will easily be granted +by them that have been conversant with Microscopes. And it may be also +tried by applying any substance to a hole through which some Light is +immitted into a dark Room. For how opake soever that Substance may seem +in the open Air, it will by that means appear very manifestly +transparent, if it be of a sufficient thinness. Only white metalline +Bodies must be excepted, which by reason of their excessive density seem +to reflect almost all the Light incident on their first Superficies; +unless by solution in Menstruums they be reduced into very small +Particles, and then they become transparent. + + +PROP. III. 
+ +_Between the parts of opake and colour'd Bodies are many Spaces, either +empty, or replenish'd with Mediums of other Densities; as Water between +the tinging Corpuscles wherewith any Liquor is impregnated, Air between +the aqueous Globules that constitute Clouds or Mists; and for the most +part Spaces void of both Air and Water, but yet perhaps not wholly void +of all Substance, between the parts of hard Bodies._ + +The truth of this is evinced by the two precedent Propositions: For by +the second Proposition there are many Reflexions made by the internal +parts of Bodies, which, by the first Proposition, would not happen if +the parts of those Bodies were continued without any such Interstices +between them; because Reflexions are caused only in Superficies, which +intercede Mediums of a differing density, by _Prop._ 1. + +But farther, that this discontinuity of parts is the principal Cause of +the opacity of Bodies, will appear by considering, that opake Substances +become transparent by filling their Pores with any Substance of equal or +almost equal density with their parts. Thus Paper dipped in Water or +Oil, the _Oculus Mundi_ Stone steep'd in Water, Linnen Cloth oiled or +varnish'd, and many other Substances soaked in such Liquors as will +intimately pervade their little Pores, become by that means more +transparent than otherwise; so, on the contrary, the most transparent +Substances, may, by evacuating their Pores, or separating their parts, +be render'd sufficiently opake; as Salts or wet Paper, or the _Oculus +Mundi_ Stone by being dried, Horn by being scraped, Glass by being +reduced to Powder, or otherwise flawed; Turpentine by being stirred +about with Water till they mix imperfectly, and Water by being form'd +into many small Bubbles, either alone in the form of Froth, or by +shaking it together with Oil of Turpentine, or Oil Olive, or with some +other convenient Liquor, with which it will not perfectly incorporate. 
+And to the increase of the opacity of these Bodies, it conduces +something, that by the 23d Observation the Reflexions of very thin +transparent Substances are considerably stronger than those made by the +same Substances of a greater thickness. + + +PROP. IV. + +_The Parts of Bodies and their Interstices must not be less than of some +definite bigness, to render them opake and colour'd._ + +For the opakest Bodies, if their parts be subtilly divided, (as Metals, +by being dissolved in acid Menstruums, &c.) become perfectly +transparent. And you may also remember, that in the eighth Observation +there was no sensible reflexion at the Superficies of the +Object-glasses, where they were very near one another, though they did +not absolutely touch. And in the 17th Observation the Reflexion of the +Water-bubble where it became thinnest was almost insensible, so as to +cause very black Spots to appear on the top of the Bubble, by the want +of reflected Light. + +On these grounds I perceive it is that Water, Salt, Glass, Stones, and +such like Substances, are transparent. For, upon divers Considerations, +they seem to be as full of Pores or Interstices between their parts as +other Bodies are, but yet their Parts and Interstices to be too small to +cause Reflexions in their common Surfaces. + + +PROP. V. + +_The transparent parts of Bodies, according to their several sizes, +reflect Rays of one Colour, and transmit those of another, on the same +grounds that thin Plates or Bubbles do reflect or transmit those Rays. 
+And this I take to be the ground of all their Colours._ + +For if a thinn'd or plated Body, which being of an even thickness, +appears all over of one uniform Colour, should be slit into Threads, or +broken into Fragments, of the same thickness with the Plate; I see no +reason why every Thread or Fragment should not keep its Colour, and by +consequence why a heap of those Threads or Fragments should not +constitute a Mass or Powder of the same Colour, which the Plate +exhibited before it was broken. And the parts of all natural Bodies +being like so many Fragments of a Plate, must on the same grounds +exhibit the same Colours. + +Now, that they do so will appear by the affinity of their Properties. +The finely colour'd Feathers of some Birds, and particularly those of +Peacocks Tails, do, in the very same part of the Feather, appear of +several Colours in several Positions of the Eye, after the very same +manner that thin Plates were found to do in the 7th and 19th +Observations, and therefore their Colours arise from the thinness of the +transparent parts of the Feathers; that is, from the slenderness of the +very fine Hairs, or _Capillamenta_, which grow out of the sides of the +grosser lateral Branches or Fibres of those Feathers. And to the same +purpose it is, that the Webs of some Spiders, by being spun very fine, +have appeared colour'd, as some have observ'd, and that the colour'd +Fibres of some Silks, by varying the Position of the Eye, do vary their +Colour. Also the Colours of Silks, Cloths, and other Substances, which +Water or Oil can intimately penetrate, become more faint and obscure by +being immerged in those Liquors, and recover their Vigor again by being +dried; much after the manner declared of thin Bodies in the 10th and +21st Observations. Leaf-Gold, some sorts of painted Glass, the Infusion +of _Lignum Nephriticum_, and some other Substances, reflect one Colour, +and transmit another; like thin Bodies in the 9th and 20th Observations. 
+And some of those colour'd Powders which Painters use, may have their +Colours a little changed, by being very elaborately and finely ground. +Where I see not what can be justly pretended for those changes, besides +the breaking of their parts into less parts by that contrition, after +the same manner that the Colour of a thin Plate is changed by varying +its thickness. For which reason also it is that the colour'd Flowers of +Plants and Vegetables, by being bruised, usually become more transparent +than before, or at least in some degree or other change their Colours. +Nor is it much less to my purpose, that, by mixing divers Liquors, very +odd and remarkable Productions and Changes of Colours may be effected, +of which no cause can be more obvious and rational than that the saline +Corpuscles of one Liquor do variously act upon or unite with the tinging +Corpuscles of another, so as to make them swell, or shrink, (whereby not +only their bulk but their density also may be changed,) or to divide +them into smaller Corpuscles, (whereby a colour'd Liquor may become +transparent,) or to make many of them associate into one cluster, +whereby two transparent Liquors may compose a colour'd one. For we see +how apt those saline Menstruums are to penetrate and dissolve Substances +to which they are applied, and some of them to precipitate what others +dissolve. In like manner, if we consider the various Phænomena of the +Atmosphere, we may observe, that when Vapours are first raised, they +hinder not the transparency of the Air, being divided into parts too +small to cause any Reflexion in their Superficies. But when in order to +compose drops of Rain they begin to coalesce and constitute Globules of +all intermediate sizes, those Globules, when they become of convenient +size to reflect some Colours and transmit others, may constitute Clouds +of various Colours according to their sizes. 
And I see not what can be +rationally conceived in so transparent a Substance as Water for the +production of these Colours, besides the various sizes of its fluid and +globular Parcels. + + +PROP. VI. + +_The parts of Bodies on which their Colours depend, are denser than the +Medium which pervades their Interstices._ + +This will appear by considering, that the Colour of a Body depends not +only on the Rays which are incident perpendicularly on its parts, but on +those also which are incident at all other Angles. And that according to +the 7th Observation, a very little variation of obliquity will change +the reflected Colour, where the thin Body or small Particles is rarer +than the ambient Medium, insomuch that such a small Particle will at +diversly oblique Incidences reflect all sorts of Colours, in so great a +variety that the Colour resulting from them all, confusedly reflected +from a heap of such Particles, must rather be a white or grey than any +other Colour, or at best it must be but a very imperfect and dirty +Colour. Whereas if the thin Body or small Particle be much denser than +the ambient Medium, the Colours, according to the 19th Observation, are +so little changed by the variation of obliquity, that the Rays which +are reflected least obliquely may predominate over the rest, so much as +to cause a heap of such Particles to appear very intensely of their +Colour. + +It conduces also something to the confirmation of this Proposition, +that, according to the 22d Observation, the Colours exhibited by the +denser thin Body within the rarer, are more brisk than those exhibited +by the rarer within the denser. + + +PROP. VII. + +_The bigness of the component parts of natural Bodies may be conjectured +by their Colours._ + +For since the parts of these Bodies, by _Prop._ 5. 
do most probably +exhibit the same Colours with a Plate of equal thickness, provided they +have the same refractive density; and since their parts seem for the +most part to have much the same density with Water or Glass, as by many +circumstances is obvious to collect; to determine the sizes of those +parts, you need only have recourse to the precedent Tables, in which the +thickness of Water or Glass exhibiting any Colour is expressed. Thus if +it be desired to know the diameter of a Corpuscle, which being of equal +density with Glass shall reflect green of the third Order; the Number +16-1/4 shews it to be (16-1/4)/10000 parts of an Inch. + +The greatest difficulty is here to know of what Order the Colour of any +Body is. And for this end we must have recourse to the 4th and 18th +Observations; from whence may be collected these particulars. + +_Scarlets_, and other _reds_, _oranges_, and _yellows_, if they be pure +and intense, are most probably of the second order. Those of the first +and third order also may be pretty good; only the yellow of the first +order is faint, and the orange and red of the third Order have a great +Mixture of violet and blue. + +There may be good _Greens_ of the fourth Order, but the purest are of +the third. And of this Order the green of all Vegetables seems to be, +partly by reason of the Intenseness of their Colours, and partly because +when they wither some of them turn to a greenish yellow, and others to a +more perfect yellow or orange, or perhaps to red, passing first through +all the aforesaid intermediate Colours. Which Changes seem to be +effected by the exhaling of the Moisture which may leave the tinging +Corpuscles more dense, and something augmented by the Accretion of the +oily and earthy Part of that Moisture. 
Now the green, without doubt, is +of the same Order with those Colours into which it changeth, because the +Changes are gradual, and those Colours, though usually not very full, +yet are often too full and lively to be of the fourth Order. + +_Blues_ and _Purples_ may be either of the second or third Order, but +the best are of the third. Thus the Colour of Violets seems to be of +that Order, because their Syrup by acid Liquors turns red, and by +urinous and alcalizate turns green. For since it is of the Nature of +Acids to dissolve or attenuate, and of Alcalies to precipitate or +incrassate, if the Purple Colour of the Syrup was of the second Order, +an acid Liquor by attenuating its tinging Corpuscles would change it to +a red of the first Order, and an Alcali by incrassating them would +change it to a green of the second Order; which red and green, +especially the green, seem too imperfect to be the Colours produced by +these Changes. But if the said Purple be supposed of the third Order, +its Change to red of the second, and green of the third, may without any +Inconvenience be allow'd. + +If there be found any Body of a deeper and less reddish Purple than that +of the Violets, its Colour most probably is of the second Order. But yet +there being no Body commonly known whose Colour is constantly more deep +than theirs, I have made use of their Name to denote the deepest and +least reddish Purples, such as manifestly transcend their Colour in +purity. + +The _blue_ of the first Order, though very faint and little, may +possibly be the Colour of some Substances; and particularly the azure +Colour of the Skies seems to be of this Order. For all Vapours when they +begin to condense and coalesce into small Parcels, become first of that +Bigness, whereby such an Azure must be reflected before they can +constitute Clouds of other Colours. 
And so this being the first Colour +which Vapours begin to reflect, it ought to be the Colour of the finest +and most transparent Skies, in which Vapours are not arrived to that +Grossness requisite to reflect other Colours, as we find it is by +Experience. + +_Whiteness_, if most intense and luminous, is that of the first Order, +if less strong and luminous, a Mixture of the Colours of several Orders. +Of this last kind is the Whiteness of Froth, Paper, Linnen, and most +white Substances; of the former I reckon that of white Metals to be. For +whilst the densest of Metals, Gold, if foliated, is transparent, and all +Metals become transparent if dissolved in Menstruums or vitrified, the +Opacity of white Metals ariseth not from their Density alone. They being +less dense than Gold would be more transparent than it, did not some +other Cause concur with their Density to make them opake. And this Cause +I take to be such a Bigness of their Particles as fits them to reflect +the white of the first order. For, if they be of other Thicknesses they +may reflect other Colours, as is manifest by the Colours which appear +upon hot Steel in tempering it, and sometimes upon the Surface of melted +Metals in the Skin or Scoria which arises upon them in their cooling. +And as the white of the first order is the strongest which can be made +by Plates of transparent Substances, so it ought to be stronger in the +denser Substances of Metals than in the rarer of Air, Water, and Glass. +Nor do I see but that metallick Substances of such a Thickness as may +fit them to reflect the white of the first order, may, by reason of +their great Density (according to the Tenor of the first of these +Propositions) reflect all the Light incident upon them, and so be as +opake and splendent as it's possible for any Body to be. 
Gold, or Copper +mix'd with less than half their Weight of Silver, or Tin, or Regulus of +Antimony, in fusion, or amalgamed with a very little Mercury, become +white; which shews both that the Particles of white Metals have much +more Superficies, and so are smaller, than those of Gold and Copper, and +also that they are so opake as not to suffer the Particles of Gold or +Copper to shine through them. Now it is scarce to be doubted but that +the Colours of Gold and Copper are of the second and third order, and +therefore the Particles of white Metals cannot be much bigger than is +requisite to make them reflect the white of the first order. The +Volatility of Mercury argues that they are not much bigger, nor may they +be much less, lest they lose their Opacity, and become either +transparent as they do when attenuated by Vitrification, or by Solution +in Menstruums, or black as they do when ground smaller, by rubbing +Silver, or Tin, or Lead, upon other Substances to draw black Lines. The +first and only Colour which white Metals take by grinding their +Particles smaller, is black, and therefore their white ought to be that +which borders upon the black Spot in the Center of the Rings of Colours, +that is, the white of the first order. But, if you would hence gather +the Bigness of metallick Particles, you must allow for their Density. +For were Mercury transparent, its Density is such that the Sine of +Incidence upon it (by my Computation) would be to the Sine of its +Refraction, as 71 to 20, or 7 to 2. And therefore the Thickness of its +Particles, that they may exhibit the same Colours with those of Bubbles +of Water, ought to be less than the Thickness of the Skin of those +Bubbles in the Proportion of 2 to 7. Whence it's possible, that the +Particles of Mercury may be as little as the Particles of some +transparent and volatile Fluids, and yet reflect the white of the first +order. 
+ +Lastly, for the production of _black_, the Corpuscles must be less than +any of those which exhibit Colours. For at all greater sizes there is +too much Light reflected to constitute this Colour. But if they be +supposed a little less than is requisite to reflect the white and very +faint blue of the first order, they will, according to the 4th, 8th, +17th and 18th Observations, reflect so very little Light as to appear +intensely black, and yet may perhaps variously refract it to and fro +within themselves so long, until it happen to be stifled and lost, by +which means they will appear black in all positions of the Eye without +any transparency. And from hence may be understood why Fire, and the +more subtile dissolver Putrefaction, by dividing the Particles of +Substances, turn them to black, why small quantities of black Substances +impart their Colour very freely and intensely to other Substances to +which they are applied; the minute Particles of these, by reason of +their very great number, easily overspreading the gross Particles of +others; why Glass ground very elaborately with Sand on a Copper Plate, +'till it be well polish'd, makes the Sand, together with what is worn +off from the Glass and Copper, become very black: why black Substances +do soonest of all others become hot in the Sun's Light and burn, (which +Effect may proceed partly from the multitude of Refractions in a little +room, and partly from the easy Commotion of so very small Corpuscles;) +and why blacks are usually a little inclined to a bluish Colour. For +that they are so may be seen by illuminating white Paper by Light +reflected from black Substances. For the Paper will usually appear of a +bluish white; and the reason is, that black borders in the obscure blue +of the order described in the 18th Observation, and therefore reflects +more Rays of that Colour than of any other. 
+ +In these Descriptions I have been the more particular, because it is not +impossible but that Microscopes may at length be improved to the +discovery of the Particles of Bodies on which their Colours depend, if +they are not already in some measure arrived to that degree of +perfection. For if those Instruments are or can be so far improved as +with sufficient distinctness to represent Objects five or six hundred +times bigger than at a Foot distance they appear to our naked Eyes, I +should hope that we might be able to discover some of the greatest of +those Corpuscles. And by one that would magnify three or four thousand +times perhaps they might all be discover'd, but those which produce +blackness. In the mean while I see nothing material in this Discourse +that may rationally be doubted of, excepting this Position: That +transparent Corpuscles of the same thickness and density with a Plate, +do exhibit the same Colour. And this I would have understood not without +some Latitude, as well because those Corpuscles may be of irregular +Figures, and many Rays must be obliquely incident on them, and so have +a shorter way through them than the length of their Diameters, as +because the straitness of the Medium put in on all sides within such +Corpuscles may a little alter its Motions or other qualities on which +the Reflexion depends. But yet I cannot much suspect the last, because I +have observed of some small Plates of Muscovy Glass which were of an +even thickness, that through a Microscope they have appeared of the same +Colour at their edges and corners where the included Medium was +terminated, which they appeared of in other places. However it will add +much to our Satisfaction, if those Corpuscles can be discover'd with +Microscopes; which if we shall at length attain to, I fear it will be +the utmost improvement of this Sense. 
For it seems impossible to see the +more secret and noble Works of Nature within the Corpuscles by reason of +their transparency. + + +PROP. VIII. + +_The Cause of Reflexion is not the impinging of Light on the solid or +impervious parts of Bodies, as is commonly believed._ + +This will appear by the following Considerations. First, That in the +passage of Light out of Glass into Air there is a Reflexion as strong as +in its passage out of Air into Glass, or rather a little stronger, and +by many degrees stronger than in its passage out of Glass into Water. +And it seems not probable that Air should have more strongly reflecting +parts than Water or Glass. But if that should possibly be supposed, yet +it will avail nothing; for the Reflexion is as strong or stronger when +the Air is drawn away from the Glass, (suppose by the Air-Pump invented +by _Otto Gueriet_, and improved and made useful by Mr. _Boyle_) as when +it is adjacent to it. Secondly, If Light in its passage out of Glass +into Air be incident more obliquely than at an Angle of 40 or 41 Degrees +it is wholly reflected, if less obliquely it is in great measure +transmitted. Now it is not to be imagined that Light at one degree of +obliquity should meet with Pores enough in the Air to transmit the +greater part of it, and at another degree of obliquity should meet with +nothing but parts to reflect it wholly, especially considering that in +its passage out of Air into Glass, how oblique soever be its Incidence, +it finds Pores enough in the Glass to transmit a great part of it. If +any Man suppose that it is not reflected by the Air, but by the outmost +superficial parts of the Glass, there is still the same difficulty: +Besides, that such a Supposition is unintelligible, and will also appear +to be false by applying Water behind some part of the Glass instead of +Air. 
For so in a convenient obliquity of the Rays, suppose of 45 or 46 +Degrees, at which they are all reflected where the Air is adjacent to +the Glass, they shall be in great measure transmitted where the Water is +adjacent to it; which argues, that their Reflexion or Transmission +depends on the constitution of the Air and Water behind the Glass, and +not on the striking of the Rays upon the parts of the Glass. Thirdly, +If the Colours made by a Prism placed at the entrance of a Beam of Light +into a darken'd Room be successively cast on a second Prism placed at a +greater distance from the former, in such manner that they are all alike +incident upon it, the second Prism may be so inclined to the incident +Rays, that those which are of a blue Colour shall be all reflected by +it, and yet those of a red Colour pretty copiously transmitted. Now if +the Reflexion be caused by the parts of Air or Glass, I would ask, why +at the same Obliquity of Incidence the blue should wholly impinge on +those parts, so as to be all reflected, and yet the red find Pores +enough to be in a great measure transmitted. Fourthly, Where two Glasses +touch one another, there is no sensible Reflexion, as was declared in +the first Observation; and yet I see no reason why the Rays should not +impinge on the parts of Glass, as much when contiguous to other Glass as +when contiguous to Air. Fifthly, When the top of a Water-Bubble (in the +17th Observation,) by the continual subsiding and exhaling of the Water +grew very thin, there was such a little and almost insensible quantity +of Light reflected from it, that it appeared intensely black; whereas +round about that black Spot, where the Water was thicker, the Reflexion +was so strong as to make the Water seem very white. Nor is it only at +the least thickness of thin Plates or Bubbles, that there is no manifest +Reflexion, but at many other thicknesses continually greater and +greater. 
For in the 15th Observation the Rays of the same Colour were by +turns transmitted at one thickness, and reflected at another thickness, +for an indeterminate number of Successions. And yet in the Superficies +of the thinned Body, where it is of any one thickness, there are as many +parts for the Rays to impinge on, as where it is of any other thickness. +Sixthly, If Reflexion were caused by the parts of reflecting Bodies, it +would be impossible for thin Plates or Bubbles, at one and the same +place, to reflect the Rays of one Colour, and transmit those of another, +as they do according to the 13th and 15th Observations. For it is not to +be imagined that at one place the Rays which, for instance, exhibit a +blue Colour, should have the fortune to dash upon the parts, and those +which exhibit a red to hit upon the Pores of the Body; and then at +another place, where the Body is either a little thicker or a little +thinner, that on the contrary the blue should hit upon its pores, and +the red upon its parts. Lastly, Were the Rays of Light reflected by +impinging on the solid parts of Bodies, their Reflexions from polish'd +Bodies could not be so regular as they are. For in polishing Glass with +Sand, Putty, or Tripoly, it is not to be imagined that those Substances +can, by grating and fretting the Glass, bring all its least Particles to +an accurate Polish; so that all their Surfaces shall be truly plain or +truly spherical, and look all the same way, so as together to compose +one even Surface. The smaller the Particles of those Substances are, the +smaller will be the Scratches by which they continually fret and wear +away the Glass until it be polish'd; but be they never so small they can +wear away the Glass no otherwise than by grating and scratching it, and +breaking the Protuberances; and therefore polish it no otherwise than by +bringing its roughness to a very fine Grain, so that the Scratches and +Frettings of the Surface become too small to be visible. 
And therefore +if Light were reflected by impinging upon the solid parts of the Glass, +it would be scatter'd as much by the most polish'd Glass as by the +roughest. So then it remains a Problem, how Glass polish'd by fretting +Substances can reflect Light so regularly as it does. And this Problem +is scarce otherwise to be solved, than by saying, that the Reflexion of +a Ray is effected, not by a single point of the reflecting Body, but by +some power of the Body which is evenly diffused all over its Surface, +and by which it acts upon the Ray without immediate Contact. For that +the parts of Bodies do act upon Light at a distance shall be shewn +hereafter. + +Now if Light be reflected, not by impinging on the solid parts of +Bodies, but by some other principle; it's probable that as many of its +Rays as impinge on the solid parts of Bodies are not reflected but +stifled and lost in the Bodies. For otherwise we must allow two sorts of +Reflexions. Should all the Rays be reflected which impinge on the +internal parts of clear Water or Crystal, those Substances would rather +have a cloudy Colour than a clear Transparency. To make Bodies look +black, it's necessary that many Rays be stopp'd, retained, and lost in +them; and it seems not probable that any Rays can be stopp'd and +stifled in them which do not impinge on their parts. + +And hence we may understand that Bodies are much more rare and porous +than is commonly believed. Water is nineteen times lighter, and by +consequence nineteen times rarer than Gold; and Gold is so rare as very +readily and without the least opposition to transmit the magnetick +Effluvia, and easily to admit Quicksilver into its Pores, and to let +Water pass through it. 
For a concave Sphere of Gold filled with Water,
+and solder'd up, has, upon pressing the Sphere with great force, let the
+Water squeeze through it, and stand all over its outside in multitudes
+of small Drops, like Dew, without bursting or cracking the Body of the
+Gold, as I have been inform'd by an Eye witness. From all which we may
+conclude, that Gold has more Pores than solid parts, and by consequence
+that Water has above forty times more Pores than Parts. And he that
+shall find out an Hypothesis, by which Water may be so rare, and yet not
+be capable of compression by force, may doubtless by the same Hypothesis
+make Gold, and Water, and all other Bodies, as much rarer as he pleases;
+so that Light may find a ready passage through transparent Substances.
+
+The Magnet acts upon Iron through all dense Bodies not magnetick nor red
+hot, without any diminution of its Virtue; as for instance, through
+Gold, Silver, Lead, Glass, Water. The gravitating Power of the Sun is
+transmitted through the vast Bodies of the Planets without any
+diminution, so as to act upon all their parts to their very centers
+with the same Force and according to the same Laws, as if the part upon
+which it acts were not surrounded with the Body of the Planet. The Rays
+of Light, whether they be very small Bodies projected, or only Motion or
+Force propagated, are moved in right Lines; and whenever a Ray of Light
+is by any Obstacle turned out of its rectilinear way, it will never
+return into the same rectilinear way, unless perhaps by very great
+accident. And yet Light is transmitted through pellucid solid Bodies in
+right Lines to very great distances. How Bodies can have a sufficient
+quantity of Pores for producing these Effects is very difficult to
+conceive, but perhaps not altogether impossible. For the Colours of
+Bodies arise from the Magnitudes of the Particles which reflect them, as
+was explained above. 
Now if we conceive these Particles of Bodies to be
+so disposed amongst themselves, that the Intervals or empty Spaces
+between them may be equal in magnitude to them all; and that these
+Particles may be composed of other Particles much smaller, which have as
+much empty Space between them as equals all the Magnitudes of these
+smaller Particles: And that in like manner these smaller Particles are
+again composed of others much smaller, all which together are equal to
+all the Pores or empty Spaces between them; and so on perpetually till
+you come to solid Particles, such as have no Pores or empty Spaces
+within them: And if in any gross Body there be, for instance, three such
+degrees of Particles, the least of which are solid; this Body will have
+seven times more Pores than solid Parts. But if there be four such
+degrees of Particles, the least of which are solid, the Body will have
+fifteen times more Pores than solid Parts. If there be five degrees, the
+Body will have one and thirty times more Pores than solid Parts. If six
+degrees, the Body will have sixty and three times more Pores than solid
+Parts. And so on perpetually. And there are other ways of conceiving how
+Bodies may be exceeding porous. But what is really their inward Frame is
+not yet known to us.
+
+
+PROP. IX.
+
+_Bodies reflect and refract Light by one and the same power, variously
+exercised in various Circumstances._
+
+This appears by several Considerations. First, Because when Light goes
+out of Glass into Air, as obliquely as it can possibly do, if its
+Incidence be made still more oblique, it becomes totally reflected. For
+the power of the Glass after it has refracted the Light as obliquely as
+is possible, if the Incidence be still made more oblique, becomes too
+strong to let any of its Rays go through, and by consequence causes
+total Reflexions. 
Secondly, Because Light is alternately reflected and +transmitted by thin Plates of Glass for many Successions, accordingly as +the thickness of the Plate increases in an arithmetical Progression. For +here the thickness of the Glass determines whether that Power by which +Glass acts upon Light shall cause it to be reflected, or suffer it to +be transmitted. And, Thirdly, because those Surfaces of transparent +Bodies which have the greatest refracting power, reflect the greatest +quantity of Light, as was shewn in the first Proposition. + + +PROP. X. + +_If Light be swifter in Bodies than in Vacuo, in the proportion of the +Sines which measure the Refraction of the Bodies, the Forces of the +Bodies to reflect and refract Light, are very nearly proportional to the +densities of the same Bodies; excepting that unctuous and sulphureous +Bodies refract more than others of this same density._ + +[Illustration: FIG. 8.] + +Let AB represent the refracting plane Surface of any Body, and IC a Ray +incident very obliquely upon the Body in C, so that the Angle ACI may be +infinitely little, and let CR be the refracted Ray. From a given Point B +perpendicular to the refracting Surface erect BR meeting with the +refracting Ray CR in R, and if CR represent the Motion of the refracted +Ray, and this Motion be distinguish'd into two Motions CB and BR, +whereof CB is parallel to the refracting Plane, and BR perpendicular to +it: CB shall represent the Motion of the incident Ray, and BR the +Motion generated by the Refraction, as Opticians have of late explain'd. 
+ +Now if any Body or Thing, in moving through any Space of a given breadth +terminated on both sides by two parallel Planes, be urged forward in all +parts of that Space by Forces tending directly forwards towards the last +Plane, and before its Incidence on the first Plane, had no Motion +towards it, or but an infinitely little one; and if the Forces in all +parts of that Space, between the Planes, be at equal distances from the +Planes equal to one another, but at several distances be bigger or less +in any given Proportion, the Motion generated by the Forces in the whole +passage of the Body or thing through that Space shall be in a +subduplicate Proportion of the Forces, as Mathematicians will easily +understand. And therefore, if the Space of activity of the refracting +Superficies of the Body be consider'd as such a Space, the Motion of the +Ray generated by the refracting Force of the Body, during its passage +through that Space, that is, the Motion BR, must be in subduplicate +Proportion of that refracting Force. I say therefore, that the Square of +the Line BR, and by consequence the refracting Force of the Body, is +very nearly as the density of the same Body. For this will appear by the +following Table, wherein the Proportion of the Sines which measure the +Refractions of several Bodies, the Square of BR, supposing CB an unite, +the Densities of the Bodies estimated by their Specifick Gravities, and +their Refractive Power in respect of their Densities are set down in +several Columns. + +---------------------+----------------+----------------+----------+----------- + | | | | + | | The Square | The | The + | | of BR, to | density | refractive + | The Proportion | which the | and | Power of + | of the Sines of| refracting | specifick| the Body + | Incidence and | force of the | gravity | in respect + The refracting | Refraction of | Body is | of the | of its + Bodies. | yellow Light. | proportionate. | Body. | density. 
+---------------------+----------------+----------------+----------+----------- +A Pseudo-Topazius, | | | | + being a natural, | | | | + pellucid, brittle, | 23 to 14 | 1'699 | 4'27 | 3979 + hairy Stone, of a | | | | + yellow Colour. | | | | +Air. | 3201 to 3200 | 0'000625 | 0'0012 | 5208 +Glass of Antimony. | 17 to 9 | 2'568 | 5'28 | 4864 +A Selenitis. | 61 to 41 | 1'213 | 2'252 | 5386 +Glass vulgar. | 31 to 20 | 1'4025 | 2'58 | 5436 +Crystal of the Rock. | 25 to 16 | 1'445 | 2'65 | 5450 +Island Crystal. | 5 to 3 | 1'778 | 2'72 | 6536 +Sal Gemmæ. | 17 to 11 | 1'388 | 2'143 | 6477 +Alume. | 35 to 24 | 1'1267 | 1'714 | 6570 +Borax. | 22 to 15 | 1'1511 | 1'714 | 6716 +Niter. | 32 to 21 | 1'345 | 1'9 | 7079 +Dantzick Vitriol. | 303 to 200 | 1'295 | 1'715 | 7551 +Oil of Vitriol. | 10 to 7 | 1'041 | 1'7 | 6124 +Rain Water. | 529 to 396 | 0'7845 | 1' | 7845 +Gum Arabick. | 31 to 21 | 1'179 | 1'375 | 8574 +Spirit of Wine well | | | | + rectified. | 100 to 73 | 0'8765 | 0'866 | 10121 +Camphire. | 3 to 2 | 1'25 | 0'996 | 12551 +Oil Olive. | 22 to 15 | 1'1511 | 0'913 | 12607 +Linseed Oil. | 40 to 27 | 1'1948 | 0'932 | 12819 +Spirit of Turpentine.| 25 to 17 | 1'1626 | 0'874 | 13222 +Amber. | 14 to 9 | 1'42 | 1'04 | 13654 +A Diamond. | 100 to 41 | 4'949 | 3'4 | 14556 +---------------------+----------------+----------------+----------+----------- + +The Refraction of the Air in this Table is determin'd by that of the +Atmosphere observed by Astronomers. For, if Light pass through many +refracting Substances or Mediums gradually denser and denser, and +terminated with parallel Surfaces, the Sum of all the Refractions will +be equal to the single Refraction which it would have suffer'd in +passing immediately out of the first Medium into the last. 
And this +holds true, though the Number of the refracting Substances be increased +to Infinity, and the Distances from one another as much decreased, so +that the Light may be refracted in every Point of its Passage, and by +continual Refractions bent into a Curve-Line. And therefore the whole +Refraction of Light in passing through the Atmosphere from the highest +and rarest Part thereof down to the lowest and densest Part, must be +equal to the Refraction which it would suffer in passing at like +Obliquity out of a Vacuum immediately into Air of equal Density with +that in the lowest Part of the Atmosphere. + +Now, although a Pseudo-Topaz, a Selenitis, Rock Crystal, Island Crystal, +Vulgar Glass (that is, Sand melted together) and Glass of Antimony, +which are terrestrial stony alcalizate Concretes, and Air which probably +arises from such Substances by Fermentation, be Substances very +differing from one another in Density, yet by this Table, they have +their refractive Powers almost in the same Proportion to one another as +their Densities are, excepting that the Refraction of that strange +Substance, Island Crystal is a little bigger than the rest. And +particularly Air, which is 3500 Times rarer than the Pseudo-Topaz, and +4400 Times rarer than Glass of Antimony, and 2000 Times rarer than the +Selenitis, Glass vulgar, or Crystal of the Rock, has notwithstanding its +rarity the same refractive Power in respect of its Density which those +very dense Substances have in respect of theirs, excepting so far as +those differ from one another. + +Again, the Refraction of Camphire, Oil Olive, Linseed Oil, Spirit of +Turpentine and Amber, which are fat sulphureous unctuous Bodies, and a +Diamond, which probably is an unctuous Substance coagulated, have their +refractive Powers in Proportion to one another as their Densities +without any considerable Variation. 
But the refractive Powers of these +unctuous Substances are two or three Times greater in respect of their +Densities than the refractive Powers of the former Substances in respect +of theirs. + +Water has a refractive Power in a middle degree between those two sorts +of Substances, and probably is of a middle nature. For out of it grow +all vegetable and animal Substances, which consist as well of +sulphureous fat and inflamable Parts, as of earthy lean and alcalizate +ones. + +Salts and Vitriols have refractive Powers in a middle degree between +those of earthy Substances and Water, and accordingly are composed of +those two sorts of Substances. For by distillation and rectification of +their Spirits a great Part of them goes into Water, and a great Part +remains behind in the form of a dry fix'd Earth capable of +Vitrification. + +Spirit of Wine has a refractive Power in a middle degree between those +of Water and oily Substances, and accordingly seems to be composed of +both, united by Fermentation; the Water, by means of some saline Spirits +with which 'tis impregnated, dissolving the Oil, and volatizing it by +the Action. For Spirit of Wine is inflamable by means of its oily Parts, +and being distilled often from Salt of Tartar, grow by every +distillation more and more aqueous and phlegmatick. And Chymists +observe, that Vegetables (as Lavender, Rue, Marjoram, &c.) distilled +_per se_, before fermentation yield Oils without any burning Spirits, +but after fermentation yield ardent Spirits without Oils: Which shews, +that their Oil is by fermentation converted into Spirit. They find also, +that if Oils be poured in a small quantity upon fermentating Vegetables, +they distil over after fermentation in the form of Spirits. 
+ +So then, by the foregoing Table, all Bodies seem to have their +refractive Powers proportional to their Densities, (or very nearly;) +excepting so far as they partake more or less of sulphureous oily +Particles, and thereby have their refractive Power made greater or less. +Whence it seems rational to attribute the refractive Power of all Bodies +chiefly, if not wholly, to the sulphureous Parts with which they abound. +For it's probable that all Bodies abound more or less with Sulphurs. And +as Light congregated by a Burning-glass acts most upon sulphureous +Bodies, to turn them into Fire and Flame; so, since all Action is +mutual, Sulphurs ought to act most upon Light. For that the action +between Light and Bodies is mutual, may appear from this Consideration; +That the densest Bodies which refract and reflect Light most strongly, +grow hottest in the Summer Sun, by the action of the refracted or +reflected Light. + +I have hitherto explain'd the power of Bodies to reflect and refract, +and shew'd, that thin transparent Plates, Fibres, and Particles, do, +according to their several thicknesses and densities, reflect several +sorts of Rays, and thereby appear of several Colours; and by consequence +that nothing more is requisite for producing all the Colours of natural +Bodies, than the several sizes and densities of their transparent +Particles. But whence it is that these Plates, Fibres, and Particles, +do, according to their several thicknesses and densities, reflect +several sorts of Rays, I have not yet explain'd. To give some insight +into this matter, and make way for understanding the next part of this +Book, I shall conclude this part with a few more Propositions. Those +which preceded respect the nature of Bodies, these the nature of Light: +For both must be understood, before the reason of their Actions upon one +another can be known. And because the last Proposition depended upon the +velocity of Light, I will begin with a Proposition of that kind. 
+ + +PROP. XI. + +_Light is propagated from luminous Bodies in time, and spends about +seven or eight Minutes of an Hour in passing from the Sun to the Earth._ + +This was observed first by _Roemer_, and then by others, by means of the +Eclipses of the Satellites of _Jupiter_. For these Eclipses, when the +Earth is between the Sun and _Jupiter_, happen about seven or eight +Minutes sooner than they ought to do by the Tables, and when the Earth +is beyond the Sun they happen about seven or eight Minutes later than +they ought to do; the reason being, that the Light of the Satellites has +farther to go in the latter case than in the former by the Diameter of +the Earth's Orbit. Some inequalities of time may arise from the +Excentricities of the Orbs of the Satellites; but those cannot answer in +all the Satellites, and at all times to the Position and Distance of the +Earth from the Sun. The mean motions of _Jupiter_'s Satellites is also +swifter in his descent from his Aphelium to his Perihelium, than in his +ascent in the other half of his Orb. But this inequality has no respect +to the position of the Earth, and in the three interior Satellites is +insensible, as I find by computation from the Theory of their Gravity. + + +PROP. XII. + +_Every Ray of Light in its passage through any refracting Surface is put +into a certain transient Constitution or State, which in the progress of +the Ray returns at equal Intervals, and disposes the Ray at every return +to be easily transmitted through the next refracting Surface, and +between the returns to be easily reflected by it._ + +This is manifest by the 5th, 9th, 12th, and 15th Observations. For by +those Observations it appears, that one and the same sort of Rays at +equal Angles of Incidence on any thin transparent Plate, is alternately +reflected and transmitted for many Successions accordingly as the +thickness of the Plate increases in arithmetical Progression of the +Numbers, 0, 1, 2, 3, 4, 5, 6, 7, 8, &c. 
so that if the first Reflexion +(that which makes the first or innermost of the Rings of Colours there +described) be made at the thickness 1, the Rays shall be transmitted at +the thicknesses 0, 2, 4, 6, 8, 10, 12, &c. and thereby make the central +Spot and Rings of Light, which appear by transmission, and be reflected +at the thickness 1, 3, 5, 7, 9, 11, &c. and thereby make the Rings which +appear by Reflexion. And this alternate Reflexion and Transmission, as I +gather by the 24th Observation, continues for above an hundred +vicissitudes, and by the Observations in the next part of this Book, for +many thousands, being propagated from one Surface of a Glass Plate to +the other, though the thickness of the Plate be a quarter of an Inch or +above: So that this alternation seems to be propagated from every +refracting Surface to all distances without end or limitation. + +This alternate Reflexion and Refraction depends on both the Surfaces of +every thin Plate, because it depends on their distance. By the 21st +Observation, if either Surface of a thin Plate of _Muscovy_ Glass be +wetted, the Colours caused by the alternate Reflexion and Refraction +grow faint, and therefore it depends on them both. + +It is therefore perform'd at the second Surface; for if it were +perform'd at the first, before the Rays arrive at the second, it would +not depend on the second. + +It is also influenced by some action or disposition, propagated from the +first to the second, because otherwise at the second it would not depend +on the first. And this action or disposition, in its propagation, +intermits and returns by equal Intervals, because in all its progress it +inclines the Ray at one distance from the first Surface to be reflected +by the second, at another to be transmitted by it, and that by equal +Intervals for innumerable vicissitudes. And because the Ray is disposed +to Reflexion at the distances 1, 3, 5, 7, 9, &c. 
and to Transmission at +the distances 0, 2, 4, 6, 8, 10, &c. (for its transmission through the +first Surface, is at the distance 0, and it is transmitted through both +together, if their distance be infinitely little or much less than 1) +the disposition to be transmitted at the distances 2, 4, 6, 8, 10, &c. +is to be accounted a return of the same disposition which the Ray first +had at the distance 0, that is at its transmission through the first +refracting Surface. All which is the thing I would prove. + +What kind of action or disposition this is; Whether it consists in a +circulating or a vibrating motion of the Ray, or of the Medium, or +something else, I do not here enquire. Those that are averse from +assenting to any new Discoveries, but such as they can explain by an +Hypothesis, may for the present suppose, that as Stones by falling upon +Water put the Water into an undulating Motion, and all Bodies by +percussion excite vibrations in the Air; so the Rays of Light, by +impinging on any refracting or reflecting Surface, excite vibrations in +the refracting or reflecting Medium or Substance, and by exciting them +agitate the solid parts of the refracting or reflecting Body, and by +agitating them cause the Body to grow warm or hot; that the vibrations +thus excited are propagated in the refracting or reflecting Medium or +Substance, much after the manner that vibrations are propagated in the +Air for causing Sound, and move faster than the Rays so as to overtake +them; and that when any Ray is in that part of the vibration which +conspires with its Motion, it easily breaks through a refracting +Surface, but when it is in the contrary part of the vibration which +impedes its Motion, it is easily reflected; and, by consequence, that +every Ray is successively disposed to be easily reflected, or easily +transmitted, by every vibration which overtakes it. But whether this +Hypothesis be true or false I do not here consider. 
I content my self +with the bare Discovery, that the Rays of Light are by some cause or +other alternately disposed to be reflected or refracted for many +vicissitudes. + + +DEFINITION. + +_The returns of the disposition of any Ray to be reflected I will call +its_ Fits of easy Reflexion, _and those of its disposition to be +transmitted its_ Fits of easy Transmission, _and the space it passes +between every return and the next return, the_ Interval of its Fits. + + +PROP. XIII. + +_The reason why the Surfaces of all thick transparent Bodies reflect +part of the Light incident on them, and refract the rest, is, that some +Rays at their Incidence are in Fits of easy Reflexion, and others in +Fits of easy Transmission._ + +This may be gather'd from the 24th Observation, where the Light +reflected by thin Plates of Air and Glass, which to the naked Eye +appear'd evenly white all over the Plate, did through a Prism appear +waved with many Successions of Light and Darkness made by alternate Fits +of easy Reflexion and easy Transmission, the Prism severing and +distinguishing the Waves of which the white reflected Light was +composed, as was explain'd above. + +And hence Light is in Fits of easy Reflexion and easy Transmission, +before its Incidence on transparent Bodies. And probably it is put into +such fits at its first emission from luminous Bodies, and continues in +them during all its progress. For these Fits are of a lasting nature, as +will appear by the next part of this Book. + +In this Proposition I suppose the transparent Bodies to be thick; +because if the thickness of the Body be much less than the Interval of +the Fits of easy Reflexion and Transmission of the Rays, the Body loseth +its reflecting power. For if the Rays, which at their entering into the +Body are put into Fits of easy Transmission, arrive at the farthest +Surface of the Body before they be out of those Fits, they must be +transmitted. 
And this is the reason why Bubbles of Water lose their
+reflecting power when they grow very thin; and why all opake Bodies,
+when reduced into very small parts, become transparent.
+
+
+PROP. XIV.
+
+_Those Surfaces of transparent Bodies, which if the Ray be in a Fit of
+Refraction do refract it most strongly, if the Ray be in a Fit of
+Reflexion do reflect it most easily._
+
+For we shewed above, in _Prop._ 8. that the cause of Reflexion is not
+the impinging of Light on the solid impervious parts of Bodies, but some
+other power by which those solid parts act on Light at a distance. We
+shewed also in _Prop._ 9. that Bodies reflect and refract Light by one
+and the same power, variously exercised in various circumstances; and in
+_Prop._ 1. that the most strongly refracting Surfaces reflect the most
+Light: All which compared together evince and ratify both this and the
+last Proposition.
+
+
+PROP. XV.
+
+_In any one and the same sort of Rays, emerging in any Angle out of any
+refracting Surface into one and the same Medium, the Interval of the
+following Fits of easy Reflexion and Transmission are either accurately
+or very nearly, as the Rectangle of the Secant of the Angle of
+Refraction, and of the Secant of another Angle, whose Sine is the first
+of 106 arithmetical mean Proportionals, between the Sines of Incidence
+and Refraction, counted from the Sine of Refraction._
+
+This is manifest by the 7th and 19th Observations.
+
+
+PROP. XVI. 
+ +_In several sorts of Rays emerging in equal Angles out of any refracting +Surface into the same Medium, the Intervals of the following Fits of +easy Reflexion and easy Transmission are either accurately, or very +nearly, as the Cube-Roots of the Squares of the lengths of a Chord, +which found the Notes in an Eight_, sol, la, fa, sol, la, mi, fa, sol, +_with all their intermediate degrees answering to the Colours of those +Rays, according to the Analogy described in the seventh Experiment of +the second Part of the first Book._ + +This is manifest by the 13th and 14th Observations. + + +PROP. XVII. + +_If Rays of any sort pass perpendicularly into several Mediums, the +Intervals of the Fits of easy Reflexion and Transmission in any one +Medium, are to those Intervals in any other, as the Sine of Incidence to +the Sine of Refraction, when the Rays pass out of the first of those two +Mediums into the second._ + +This is manifest by the 10th Observation. + + +PROP. XVIII. + +_If the Rays which paint the Colour in the Confine of yellow and orange +pass perpendicularly out of any Medium into Air, the Intervals of their +Fits of easy Reflexion are the 1/89000th part of an Inch. And of the +same length are the Intervals of their Fits of easy Transmission._ + +This is manifest by the 6th Observation. From these Propositions it is +easy to collect the Intervals of the Fits of easy Reflexion and easy +Transmission of any sort of Rays refracted in any angle into any Medium; +and thence to know, whether the Rays shall be reflected or transmitted +at their subsequent Incidence upon any other pellucid Medium. Which +thing, being useful for understanding the next part of this Book, was +here to be set down. And for the same reason I add the two following +Propositions. + + +PROP. XIX. 
+ +_If any sort of Rays falling on the polite Surface of any pellucid +Medium be reflected back, the Fits of easy Reflexion, which they have at +the point of Reflexion, shall still continue to return; and the Returns +shall be at distances from the point of Reflexion in the arithmetical +progression of the Numbers 2, 4, 6, 8, 10, 12, &c. and between these +Fits the Rays shall be in Fits of easy Transmission._ + +For since the Fits of easy Reflexion and easy Transmission are of a +returning nature, there is no reason why these Fits, which continued +till the Ray arrived at the reflecting Medium, and there inclined the +Ray to Reflexion, should there cease. And if the Ray at the point of +Reflexion was in a Fit of easy Reflexion, the progression of the +distances of these Fits from that point must begin from 0, and so be of +the Numbers 0, 2, 4, 6, 8, &c. And therefore the progression of the +distances of the intermediate Fits of easy Transmission, reckon'd from +the same point, must be in the progression of the odd Numbers 1, 3, 5, +7, 9, &c. contrary to what happens when the Fits are propagated from +points of Refraction. + + +PROP. XX. + +_The Intervals of the Fits of easy Reflexion and easy Transmission, +propagated from points of Reflexion into any Medium, are equal to the +Intervals of the like Fits, which the same Rays would have, if refracted +into the same Medium in Angles of Refraction equal to their Angles of +Reflexion._ + +For when Light is reflected by the second Surface of thin Plates, it +goes out afterwards freely at the first Surface to make the Rings of +Colours which appear by Reflexion; and, by the freedom of its egress, +makes the Colours of these Rings more vivid and strong than those which +appear on the other side of the Plates by the transmitted Light. 
The +reflected Rays are therefore in Fits of easy Transmission at their +egress; which would not always happen, if the Intervals of the Fits +within the Plate after Reflexion were not equal, both in length and +number, to their Intervals before it. And this confirms also the +proportions set down in the former Proposition. For if the Rays both in +going in and out at the first Surface be in Fits of easy Transmission, +and the Intervals and Numbers of those Fits between the first and second +Surface, before and after Reflexion, be equal, the distances of the Fits +of easy Transmission from either Surface, must be in the same +progression after Reflexion as before; that is, from the first Surface +which transmitted them in the progression of the even Numbers 0, 2, 4, +6, 8, &c. and from the second which reflected them, in that of the odd +Numbers 1, 3, 5, 7, &c. But these two Propositions will become much more +evident by the Observations in the following part of this Book. + + + + +THE + +SECOND BOOK + +OF + +OPTICKS + + +_PART IV._ + +_Observations concerning the Reflexions and Colours of thick transparent +polish'd Plates._ + +There is no Glass or Speculum how well soever polished, but, besides the +Light which it refracts or reflects regularly, scatters every way +irregularly a faint Light, by means of which the polish'd Surface, when +illuminated in a dark room by a beam of the Sun's Light, may be easily +seen in all positions of the Eye. There are certain Phænomena of this +scatter'd Light, which when I first observed them, seem'd very strange +and surprizing to me. My Observations were as follows. + +_Obs._ 1. The Sun shining into my darken'd Chamber through a hole one +third of an Inch wide, I let the intromitted beam of Light fall +perpendicularly upon a Glass Speculum ground concave on one side and +convex on the other, to a Sphere of five Feet and eleven Inches Radius, +and Quick-silver'd over on the convex side. 
And holding a white opake +Chart, or a Quire of Paper at the center of the Spheres to which the +Speculum was ground, that is, at the distance of about five Feet and +eleven Inches from the Speculum, in such manner, that the beam of Light +might pass through a little hole made in the middle of the Chart to the +Speculum, and thence be reflected back to the same hole: I observed upon +the Chart four or five concentric Irises or Rings of Colours, like +Rain-bows, encompassing the hole much after the manner that those, which +in the fourth and following Observations of the first part of this Book +appear'd between the Object-glasses, encompassed the black Spot, but yet +larger and fainter than those. These Rings as they grew larger and +larger became diluter and fainter, so that the fifth was scarce visible. +Yet sometimes, when the Sun shone very clear, there appear'd faint +Lineaments of a sixth and seventh. If the distance of the Chart from the +Speculum was much greater or much less than that of six Feet, the Rings +became dilute and vanish'd. And if the distance of the Speculum from the +Window was much greater than that of six Feet, the reflected beam of +Light would be so broad at the distance of six Feet from the Speculum +where the Rings appear'd, as to obscure one or two of the innermost +Rings. And therefore I usually placed the Speculum at about six Feet +from the Window; so that its Focus might there fall in with the center +of its concavity at the Rings upon the Chart. And this Posture is always +to be understood in the following Observations where no other is +express'd. + +_Obs._ 2. The Colours of these Rain-bows succeeded one another from the +center outwards, in the same form and order with those which were made +in the ninth Observation of the first Part of this Book by Light not +reflected, but transmitted through the two Object-glasses. 
For, first, +there was in their common center a white round Spot of faint Light, +something broader than the reflected beam of Light, which beam sometimes +fell upon the middle of the Spot, and sometimes by a little inclination +of the Speculum receded from the middle, and left the Spot white to the +center. + +This white Spot was immediately encompassed with a dark grey or russet, +and that dark grey with the Colours of the first Iris; which Colours on +the inside next the dark grey were a little violet and indigo, and next +to that a blue, which on the outside grew pale, and then succeeded a +little greenish yellow, and after that a brighter yellow, and then on +the outward edge of the Iris a red which on the outside inclined to +purple. + +This Iris was immediately encompassed with a second, whose Colours were +in order from the inside outwards, purple, blue, green, yellow, light +red, a red mix'd with purple. + +Then immediately follow'd the Colours of the third Iris, which were in +order outwards a green inclining to purple, a good green, and a red more +bright than that of the former Iris. + +The fourth and fifth Iris seem'd of a bluish green within, and red +without, but so faintly that it was difficult to discern the Colours. + +_Obs._ 3. Measuring the Diameters of these Rings upon the Chart as +accurately as I could, I found them also in the same proportion to one +another with the Rings made by Light transmitted through the two +Object-glasses. For the Diameters of the four first of the bright Rings +measured between the brightest parts of their Orbits, at the distance of +six Feet from the Speculum were 1-11/16, 2-3/8, 2-11/12, 3-3/8 Inches, +whose Squares are in arithmetical progression of the numbers 1, 2, 3, 4. 
+If the white circular Spot in the middle be reckon'd amongst the Rings, +and its central Light, where it seems to be most luminous, be put +equipollent to an infinitely little Ring; the Squares of the Diameters +of the Rings will be in the progression 0, 1, 2, 3, 4, &c. I measured +also the Diameters of the dark Circles between these luminous ones, and +found their Squares in the progression of the numbers 1/2, 1-1/2, 2-1/2, +3-1/2, &c. the Diameters of the first four at the distance of six Feet +from the Speculum, being 1-3/16, 2-1/16, 2-2/3, 3-3/20 Inches. If the +distance of the Chart from the Speculum was increased or diminished, the +Diameters of the Circles were increased or diminished proportionally. + +_Obs._ 4. By the analogy between these Rings and those described in the +Observations of the first Part of this Book, I suspected that there +were many more of them which spread into one another, and by interfering +mix'd their Colours, and diluted one another so that they could not be +seen apart. I viewed them therefore through a Prism, as I did those in +the 24th Observation of the first Part of this Book. And when the Prism +was so placed as by refracting the Light of their mix'd Colours to +separate them, and distinguish the Rings from one another, as it did +those in that Observation, I could then see them distincter than before, +and easily number eight or nine of them, and sometimes twelve or +thirteen. And had not their Light been so very faint, I question not but +that I might have seen many more. + +_Obs._ 5. Placing a Prism at the Window to refract the intromitted beam +of Light, and cast the oblong Spectrum of Colours on the Speculum: I +covered the Speculum with a black Paper which had in the middle of it a +hole to let any one of the Colours pass through to the Speculum, whilst +the rest were intercepted by the Paper. And now I found Rings of that +Colour only which fell upon the Speculum. 
If the Speculum was +illuminated with red, the Rings were totally red with dark Intervals, if +with blue they were totally blue, and so of the other Colours. And when +they were illuminated with any one Colour, the Squares of their +Diameters measured between their most luminous Parts, were in the +arithmetical Progression of the Numbers, 0, 1, 2, 3, 4 and the Squares +of the Diameters of their dark Intervals in the Progression of the +intermediate Numbers 1/2, 1-1/2, 2-1/2, 3-1/2. But if the Colour was +varied, they varied their Magnitude. In the red they were largest, in +the indigo and violet least, and in the intermediate Colours yellow, +green, and blue, they were of several intermediate Bignesses answering +to the Colour, that is, greater in yellow than in green, and greater in +green than in blue. And hence I knew, that when the Speculum was +illuminated with white Light, the red and yellow on the outside of the +Rings were produced by the least refrangible Rays, and the blue and +violet by the most refrangible, and that the Colours of each Ring spread +into the Colours of the neighbouring Rings on either side, after the +manner explain'd in the first and second Part of this Book, and by +mixing diluted one another so that they could not be distinguish'd, +unless near the Center where they were least mix'd. For in this +Observation I could see the Rings more distinctly, and to a greater +Number than before, being able in the yellow Light to number eight or +nine of them, besides a faint shadow of a tenth. To satisfy my self how +much the Colours of the several Rings spread into one another, I +measured the Diameters of the second and third Rings, and found them +when made by the Confine of the red and orange to be to the same +Diameters when made by the Confine of blue and indigo, as 9 to 8, or +thereabouts. For it was hard to determine this Proportion accurately. 
+Also the Circles made successively by the red, yellow, and green, +differ'd more from one another than those made successively by the +green, blue, and indigo. For the Circle made by the violet was too dark +to be seen. To carry on the Computation, let us therefore suppose that +the Differences of the Diameters of the Circles made by the outmost red, +the Confine of red and orange, the Confine of orange and yellow, the +Confine of yellow and green, the Confine of green and blue, the Confine +of blue and indigo, the Confine of indigo and violet, and outmost +violet, are in proportion as the Differences of the Lengths of a +Monochord which sound the Tones in an Eight; _sol_, _la_, _fa_, _sol_, +_la_, _mi_, _fa_, _sol_, that is, as the Numbers 1/9, 1/18, 1/12, 1/12, +2/27, 1/27, 1/18. And if the Diameter of the Circle made by the Confine +of red and orange be 9A, and that of the Circle made by the Confine of +blue and indigo be 8A as above; their difference 9A-8A will be to the +difference of the Diameters of the Circles made by the outmost red, and +by the Confine of red and orange, as 1/18 + 1/12 + 1/12 + 2/27 to 1/9, +that is as 8/27 to 1/9, or 8 to 3, and to the difference of the Circles +made by the outmost violet, and by the Confine of blue and indigo, as +1/18 + 1/12 + 1/12 + 2/27 to 1/27 + 1/18, that is, as 8/27 to 5/54, or +as 16 to 5. And therefore these differences will be 3/8A and 5/16A. Add +the first to 9A and subduct the last from 8A, and you will have the +Diameters of the Circles made by the least and most refrangible Rays +75/8A and ((61-1/2)/8)A. These diameters are therefore to one another as +75 to 61-1/2 or 50 to 41, and their Squares as 2500 to 1681, that is, as +3 to 2 very nearly. Which proportion differs not much from the +proportion of the Diameters of the Circles made by the outmost red and +outmost violet, in the 13th Observation of the first part of this Book. + +_Obs._ 6. 
Placing my Eye where these Rings appear'd plainest, I saw the +Speculum tinged all over with Waves of Colours, (red, yellow, green, +blue;) like those which in the Observations of the first part of this +Book appeared between the Object-glasses, and upon Bubbles of Water, but +much larger. And after the manner of those, they were of various +magnitudes in various Positions of the Eye, swelling and shrinking as I +moved my Eye this way and that way. They were formed like Arcs of +concentrick Circles, as those were; and when my Eye was over against the +center of the concavity of the Speculum, (that is, 5 Feet and 10 Inches +distant from the Speculum,) their common center was in a right Line with +that center of concavity, and with the hole in the Window. But in other +postures of my Eye their center had other positions. They appear'd by +the Light of the Clouds propagated to the Speculum through the hole in +the Window; and when the Sun shone through that hole upon the Speculum, +his Light upon it was of the Colour of the Ring whereon it fell, but by +its splendor obscured the Rings made by the Light of the Clouds, unless +when the Speculum was removed to a great distance from the Window, so +that his Light upon it might be broad and faint. By varying the position +of my Eye, and moving it nearer to or farther from the direct beam of +the Sun's Light, the Colour of the Sun's reflected Light constantly +varied upon the Speculum, as it did upon my Eye, the same Colour always +appearing to a Bystander upon my Eye which to me appear'd upon the +Speculum. And thence I knew that the Rings of Colours upon the Chart +were made by these reflected Colours, propagated thither from the +Speculum in several Angles, and that their production depended not upon +the termination of Light and Shadow. + +_Obs._ 7. 
By the Analogy of all these Phænomena with those of the like +Rings of Colours described in the first part of this Book, it seemed to +me that these Colours were produced by this thick Plate of Glass, much +after the manner that those were produced by very thin Plates. For, upon +trial, I found that if the Quick-silver were rubb'd off from the +backside of the Speculum, the Glass alone would cause the same Rings of +Colours, but much more faint than before; and therefore the Phænomenon +depends not upon the Quick-silver, unless so far as the Quick-silver by +increasing the Reflexion of the backside of the Glass increases the +Light of the Rings of Colours. I found also that a Speculum of Metal +without Glass made some Years since for optical uses, and very well +wrought, produced none of those Rings; and thence I understood that +these Rings arise not from one specular Surface alone, but depend upon +the two Surfaces of the Plate of Glass whereof the Speculum was made, +and upon the thickness of the Glass between them. For as in the 7th and +19th Observations of the first part of this Book a thin Plate of Air, +Water, or Glass of an even thickness appeared of one Colour when the +Rays were perpendicular to it, of another when they were a little +oblique, of another when more oblique, of another when still more +oblique, and so on; so here, in the sixth Observation, the Light which +emerged out of the Glass in several Obliquities, made the Glass appear +of several Colours, and being propagated in those Obliquities to the +Chart, there painted Rings of those Colours. 
And as the reason why a +thin Plate appeared of several Colours in several Obliquities of the +Rays, was, that the Rays of one and the same sort are reflected by the +thin Plate at one obliquity and transmitted at another, and those of +other sorts transmitted where these are reflected, and reflected where +these are transmitted: So the reason why the thick Plate of Glass +whereof the Speculum was made did appear of various Colours in various +Obliquities, and in those Obliquities propagated those Colours to the +Chart, was, that the Rays of one and the same sort did at one Obliquity +emerge out of the Glass, at another did not emerge, but were reflected +back towards the Quick-silver by the hither Surface of the Glass, and +accordingly as the Obliquity became greater and greater, emerged and +were reflected alternately for many Successions; and that in one and the +same Obliquity the Rays of one sort were reflected, and those of another +transmitted. This is manifest by the fifth Observation of this part of +this Book. For in that Observation, when the Speculum was illuminated by +any one of the prismatick Colours, that Light made many Rings of the +same Colour upon the Chart with dark Intervals, and therefore at its +emergence out of the Speculum was alternately transmitted and not +transmitted from the Speculum to the Chart for many Successions, +according to the various Obliquities of its Emergence. And when the +Colour cast on the Speculum by the Prism was varied, the Rings became of +the Colour cast on it, and varied their bigness with their Colour, and +therefore the Light was now alternately transmitted and not transmitted +from the Speculum to the Chart at other Obliquities than before. 
It +seemed to me therefore that these Rings were of one and the same +original with those of thin Plates, but yet with this difference, that +those of thin Plates are made by the alternate Reflexions and +Transmissions of the Rays at the second Surface of the Plate, after one +passage through it; but here the Rays go twice through the Plate before +they are alternately reflected and transmitted. First, they go through +it from the first Surface to the Quick-silver, and then return through +it from the Quick-silver to the first Surface, and there are either +transmitted to the Chart or reflected back to the Quick-silver, +accordingly as they are in their Fits of easy Reflexion or Transmission +when they arrive at that Surface. For the Intervals of the Fits of the +Rays which fall perpendicularly on the Speculum, and are reflected back +in the same perpendicular Lines, by reason of the equality of these +Angles and Lines, are of the same length and number within the Glass +after Reflexion as before, by the 19th Proposition of the third part of +this Book. And therefore since all the Rays that enter through the +first Surface are in their Fits of easy Transmission at their entrance, +and as many of these as are reflected by the second are in their Fits of +easy Reflexion there, all these must be again in their Fits of easy +Transmission at their return to the first, and by consequence there go +out of the Glass to the Chart, and form upon it the white Spot of Light +in the center of the Rings. For the reason holds good in all sorts of +Rays, and therefore all sorts must go out promiscuously to that Spot, +and by their mixture cause it to be white. But the Intervals of the Fits +of those Rays which are reflected more obliquely than they enter, must +be greater after Reflexion than before, by the 15th and 20th +Propositions. 
And thence it may happen that the Rays at their return to +the first Surface, may in certain Obliquities be in Fits of easy +Reflexion, and return back to the Quick-silver, and in other +intermediate Obliquities be again in Fits of easy Transmission, and so +go out to the Chart, and paint on it the Rings of Colours about the +white Spot. And because the Intervals of the Fits at equal obliquities +are greater and fewer in the less refrangible Rays, and less and more +numerous in the more refrangible, therefore the less refrangible at +equal obliquities shall make fewer Rings than the more refrangible, and +the Rings made by those shall be larger than the like number of Rings +made by these; that is, the red Rings shall be larger than the yellow, +the yellow than the green, the green than the blue, and the blue than +the violet, as they were really found to be in the fifth Observation. +And therefore the first Ring of all Colours encompassing the white Spot +of Light shall be red without any violet within, and yellow, and green, +and blue in the middle, as it was found in the second Observation; and +these Colours in the second Ring, and those that follow, shall be more +expanded, till they spread into one another, and blend one another by +interfering. + +These seem to be the reasons of these Rings in general; and this put me +upon observing the thickness of the Glass, and considering whether the +dimensions and proportions of the Rings may be truly derived from it by +computation. + +_Obs._ 8. I measured therefore the thickness of this concavo-convex +Plate of Glass, and found it every where 1/4 of an Inch precisely. 
Now, +by the sixth Observation of the first Part of this Book, a thin Plate of +Air transmits the brightest Light of the first Ring, that is, the bright +yellow, when its thickness is the 1/89000th part of an Inch; and by the +tenth Observation of the same Part, a thin Plate of Glass transmits the +same Light of the same Ring, when its thickness is less in proportion of +the Sine of Refraction to the Sine of Incidence, that is, when its +thickness is the 11/1513000th or 1/137545th part of an Inch, supposing +the Sines are as 11 to 17. And if this thickness be doubled, it +transmits the same bright Light of the second Ring; if tripled, it +transmits that of the third, and so on; the bright yellow Light in all +these cases being in its Fits of Transmission. And therefore if its +thickness be multiplied 34386 times, so as to become 1/4 of an Inch, it +transmits the same bright Light of the 34386th Ring. Suppose this be the +bright yellow Light transmitted perpendicularly from the reflecting +convex side of the Glass through the concave side to the white Spot in +the center of the Rings of Colours on the Chart: And by a Rule in the +7th and 19th Observations in the first Part of this Book, and by the +15th and 20th Propositions of the third Part of this Book, if the Rays +be made oblique to the Glass, the thickness of the Glass requisite to +transmit the same bright Light of the same Ring in any obliquity, is to +this thickness of 1/4 of an Inch, as the Secant of a certain Angle to +the Radius, the Sine of which Angle is the first of an hundred and six +arithmetical Means between the Sines of Incidence and Refraction, +counted from the Sine of Incidence when the Refraction is made out of +any plated Body into any Medium encompassing it; that is, in this case, +out of Glass into Air. 
Now if the thickness of the Glass be increased by +degrees, so as to bear to its first thickness, (_viz._ that of a quarter +of an Inch,) the Proportions which 34386 (the number of Fits of the +perpendicular Rays in going through the Glass towards the white Spot in +the center of the Rings,) hath to 34385, 34384, 34383, and 34382, (the +numbers of the Fits of the oblique Rays in going through the Glass +towards the first, second, third, and fourth Rings of Colours,) and if +the first thickness be divided into 100000000 equal parts, the increased +thicknesses will be 100002908, 100005816, 100008725, and 100011633, and +the Angles of which these thicknesses are Secants will be 26´ 13´´, 37´ +5´´, 45´ 6´´, and 52´ 26´´, the Radius being 100000000; and the Sines of +these Angles are 762, 1079, 1321, and 1525, and the proportional Sines +of Refraction 1172, 1659, 2031, and 2345, the Radius being 100000. For +since the Sines of Incidence out of Glass into Air are to the Sines of +Refraction as 11 to 17, and to the above-mentioned Secants as 11 to the +first of 106 arithmetical Means between 11 and 17, that is, as 11 to +11-6/106, those Secants will be to the Sines of Refraction as 11-6/106, +to 17, and by this Analogy will give these Sines. So then, if the +obliquities of the Rays to the concave Surface of the Glass be such that +the Sines of their Refraction in passing out of the Glass through that +Surface into the Air be 1172, 1659, 2031, 2345, the bright Light of the +34386th Ring shall emerge at the thicknesses of the Glass, which are to +1/4 of an Inch as 34386 to 34385, 34384, 34383, 34382, respectively. And +therefore, if the thickness in all these Cases be 1/4 of an Inch (as it +is in the Glass of which the Speculum was made) the bright Light of the +34385th Ring shall emerge where the Sine of Refraction is 1172, and that +of the 34384th, 34383th, and 34382th Ring where the Sine is 1659, 2031, +and 2345 respectively. 
And in these Angles of Refraction the Light of +these Rings shall be propagated from the Speculum to the Chart, and +there paint Rings about the white central round Spot of Light which we +said was the Light of the 34386th Ring. And the Semidiameters of these +Rings shall subtend the Angles of Refraction made at the +Concave-Surface of the Speculum, and by consequence their Diameters +shall be to the distance of the Chart from the Speculum as those Sines +of Refraction doubled are to the Radius, that is, as 1172, 1659, 2031, +and 2345, doubled are to 100000. And therefore, if the distance of the +Chart from the Concave-Surface of the Speculum be six Feet (as it was in +the third of these Observations) the Diameters of the Rings of this +bright yellow Light upon the Chart shall be 1'688, 2'389, 2'925, 3'375 +Inches: For these Diameters are to six Feet, as the above-mention'd +Sines doubled are to the Radius. Now, these Diameters of the bright +yellow Rings, thus found by Computation are the very same with those +found in the third of these Observations by measuring them, _viz._ with +1-11/16, 2-3/8, 2-11/12, and 3-3/8 Inches, and therefore the Theory of +deriving these Rings from the thickness of the Plate of Glass of which +the Speculum was made, and from the Obliquity of the emerging Rays +agrees with the Observation. In this Computation I have equalled the +Diameters of the bright Rings made by Light of all Colours, to the +Diameters of the Rings made by the bright yellow. For this yellow makes +the brightest Part of the Rings of all Colours. 
If you desire the +Diameters of the Rings made by the Light of any other unmix'd Colour, +you may find them readily by putting them to the Diameters of the bright +yellow ones in a subduplicate Proportion of the Intervals of the Fits of +the Rays of those Colours when equally inclined to the refracting or +reflecting Surface which caused those Fits, that is, by putting the +Diameters of the Rings made by the Rays in the Extremities and Limits of +the seven Colours, red, orange, yellow, green, blue, indigo, violet, +proportional to the Cube-roots of the Numbers, 1, 8/9, 5/6, 3/4, 2/3, +3/5, 9/16, 1/2, which express the Lengths of a Monochord sounding the +Notes in an Eighth: For by this means the Diameters of the Rings of +these Colours will be found pretty nearly in the same Proportion to one +another, which they ought to have by the fifth of these Observations. + +And thus I satisfy'd my self, that these Rings were of the same kind and +Original with those of thin Plates, and by consequence that the Fits or +alternate Dispositions of the Rays to be reflected and transmitted are +propagated to great distances from every reflecting and refracting +Surface. But yet to put the matter out of doubt, I added the following +Observation. + +_Obs._ 9. If these Rings thus depend on the thickness of the Plate of +Glass, their Diameters at equal distances from several Speculums made of +such concavo-convex Plates of Glass as are ground on the same Sphere, +ought to be reciprocally in a subduplicate Proportion of the thicknesses +of the Plates of Glass. And if this Proportion be found true by +experience it will amount to a demonstration that these Rings (like +those formed in thin Plates) do depend on the thickness of the Glass. I +procured therefore another concavo-convex Plate of Glass ground on both +sides to the same Sphere with the former Plate. 
Its thickness was 5/62 +Parts of an Inch; and the Diameters of the three first bright Rings +measured between the brightest Parts of their Orbits at the distance of +six Feet from the Glass were 3·4-1/6·5-1/8· Inches. Now, the thickness +of the other Glass being 1/4 of an Inch was to the thickness of this +Glass as 1/4 to 5/62, that is as 31 to 10, or 310000000 to 100000000, +and the Roots of these Numbers are 17607 and 10000, and in the +Proportion of the first of these Roots to the second are the Diameters +of the bright Rings made in this Observation by the thinner Glass, +3·4-1/6·5-1/8, to the Diameters of the same Rings made in the third of +these Observations by the thicker Glass 1-11/16, 2-3/8. 2-11/12, that +is, the Diameters of the Rings are reciprocally in a subduplicate +Proportion of the thicknesses of the Plates of Glass. + +So then in Plates of Glass which are alike concave on one side, and +alike convex on the other side, and alike quick-silver'd on the convex +sides, and differ in nothing but their thickness, the Diameters of the +Rings are reciprocally in a subduplicate Proportion of the thicknesses +of the Plates. And this shews sufficiently that the Rings depend on both +the Surfaces of the Glass. They depend on the convex Surface, because +they are more luminous when that Surface is quick-silver'd over than +when it is without Quick-silver. They depend also upon the concave +Surface, because without that Surface a Speculum makes them not. They +depend on both Surfaces, and on the distances between them, because +their bigness is varied by varying only that distance. 
And this +dependence is of the same kind with that which the Colours of thin +Plates have on the distance of the Surfaces of those Plates, because the +bigness of the Rings, and their Proportion to one another, and the +variation of their bigness arising from the variation of the thickness +of the Glass, and the Orders of their Colours, is such as ought to +result from the Propositions in the end of the third Part of this Book, +derived from the Phænomena of the Colours of thin Plates set down in the +first Part. + +There are yet other Phænomena of these Rings of Colours, but such as +follow from the same Propositions, and therefore confirm both the Truth +of those Propositions, and the Analogy between these Rings and the Rings +of Colours made by very thin Plates. I shall subjoin some of them. + +_Obs._ 10. When the beam of the Sun's Light was reflected back from the +Speculum not directly to the hole in the Window, but to a place a little +distant from it, the common center of that Spot, and of all the Rings of +Colours fell in the middle way between the beam of the incident Light, +and the beam of the reflected Light, and by consequence in the center of +the spherical concavity of the Speculum, whenever the Chart on which the +Rings of Colours fell was placed at that center. And as the beam of +reflected Light by inclining the Speculum receded more and more from the +beam of incident Light and from the common center of the colour'd Rings +between them, those Rings grew bigger and bigger, and so also did the +white round Spot, and new Rings of Colours emerged successively out of +their common center, and the white Spot became a white Ring +encompassing them; and the incident and reflected beams of Light always +fell upon the opposite parts of this white Ring, illuminating its +Perimeter like two mock Suns in the opposite parts of an Iris. 
So then +the Diameter of this Ring, measured from the middle of its Light on one +side to the middle of its Light on the other side, was always equal to +the distance between the middle of the incident beam of Light, and the +middle of the reflected beam measured at the Chart on which the Rings +appeared: And the Rays which form'd this Ring were reflected by the +Speculum in Angles equal to their Angles of Incidence, and by +consequence to their Angles of Refraction at their entrance into the +Glass, but yet their Angles of Reflexion were not in the same Planes +with their Angles of Incidence. + +_Obs._ 11. The Colours of the new Rings were in a contrary order to +those of the former, and arose after this manner. The white round Spot +of Light in the middle of the Rings continued white to the center till +the distance of the incident and reflected beams at the Chart was about +7/8 parts of an Inch, and then it began to grow dark in the middle. And +when that distance was about 1-3/16 of an Inch, the white Spot was +become a Ring encompassing a dark round Spot which in the middle +inclined to violet and indigo. And the luminous Rings encompassing it +were grown equal to those dark ones which in the four first Observations +encompassed them, that is to say, the white Spot was grown a white Ring +equal to the first of those dark Rings, and the first of those luminous +Rings was now grown equal to the second of those dark ones, and the +second of those luminous ones to the third of those dark ones, and so +on. For the Diameters of the luminous Rings were now 1-3/16, 2-1/16, +2-2/3, 3-3/20, &c. Inches. + +When the distance between the incident and reflected beams of Light +became a little bigger, there emerged out of the middle of the dark Spot +after the indigo a blue, and then out of that blue a pale green, and +soon after a yellow and red. 
And when the Colour at the center was +brightest, being between yellow and red, the bright Rings were grown +equal to those Rings which in the four first Observations next +encompassed them; that is to say, the white Spot in the middle of those +Rings was now become a white Ring equal to the first of those bright +Rings, and the first of those bright ones was now become equal to the +second of those, and so on. For the Diameters of the white Ring, and of +the other luminous Rings encompassing it, were now 1-11/16, 2-3/8, +2-11/12, 3-3/8, &c. or thereabouts. + +When the distance of the two beams of Light at the Chart was a little +more increased, there emerged out of the middle in order after the red, +a purple, a blue, a green, a yellow, and a red inclining much to purple, +and when the Colour was brightest being between yellow and red, the +former indigo, blue, green, yellow and red, were become an Iris or Ring +of Colours equal to the first of those luminous Rings which appeared in +the four first Observations, and the white Ring which was now become +the second of the luminous Rings was grown equal to the second of those, +and the first of those which was now become the third Ring was become +equal to the third of those, and so on. For their Diameters were +1-11/16, 2-3/8, 2-11/12, 3-3/8 Inches, the distance of the two beams of +Light, and the Diameter of the white Ring being 2-3/8 Inches. + +When these two beams became more distant there emerged out of the middle +of the purplish red, first a darker round Spot, and then out of the +middle of that Spot a brighter. And now the former Colours (purple, +blue, green, yellow, and purplish red) were become a Ring equal to the +first of the bright Rings mentioned in the four first Observations, and +the Rings about this Ring were grown equal to the Rings about that +respectively; the distance between the two beams of Light and the +Diameter of the white Ring (which was now become the third Ring) being +about 3 Inches. 
+ +The Colours of the Rings in the middle began now to grow very dilute, +and if the distance between the two Beams was increased half an Inch, or +an Inch more, they vanish'd whilst the white Ring, with one or two of +the Rings next it on either side, continued still visible. But if the +distance of the two beams of Light was still more increased, these also +vanished: For the Light which coming from several parts of the hole in +the Window fell upon the Speculum in several Angles of Incidence, made +Rings of several bignesses, which diluted and blotted out one another, +as I knew by intercepting some part of that Light. For if I intercepted +that part which was nearest to the Axis of the Speculum the Rings would +be less, if the other part which was remotest from it they would be +bigger. + +_Obs._ 12. When the Colours of the Prism were cast successively on the +Speculum, that Ring which in the two last Observations was white, was of +the same bigness in all the Colours, but the Rings without it were +greater in the green than in the blue, and still greater in the yellow, +and greatest in the red. And, on the contrary, the Rings within that +white Circle were less in the green than in the blue, and still less in +the yellow, and least in the red. For the Angles of Reflexion of those +Rays which made this Ring, being equal to their Angles of Incidence, the +Fits of every reflected Ray within the Glass after Reflexion are equal +in length and number to the Fits of the same Ray within the Glass before +its Incidence on the reflecting Surface. And therefore since all the +Rays of all sorts at their entrance into the Glass were in a Fit of +Transmission, they were also in a Fit of Transmission at their returning +to the same Surface after Reflexion; and by consequence were +transmitted, and went out to the white Ring on the Chart. This is the +reason why that Ring was of the same bigness in all the Colours, and why +in a mixture of all it appears white. 
But in Rays which are reflected in +other Angles, the Intervals of the Fits of the least refrangible being +greatest, make the Rings of their Colour in their progress from this +white Ring, either outwards or inwards, increase or decrease by the +greatest steps; so that the Rings of this Colour without are greatest, +and within least. And this is the reason why in the last Observation, +when the Speculum was illuminated with white Light, the exterior Rings +made by all Colours appeared red without and blue within, and the +interior blue without and red within. + +These are the Phænomena of thick convexo-concave Plates of Glass, which +are every where of the same thickness. There are yet other Phænomena +when these Plates are a little thicker on one side than on the other, +and others when the Plates are more or less concave than convex, or +plano-convex, or double-convex. For in all these cases the Plates make +Rings of Colours, but after various manners; all which, so far as I have +yet observed, follow from the Propositions in the end of the third part +of this Book, and so conspire to confirm the truth of those +Propositions. But the Phænomena are too various, and the Calculations +whereby they follow from those Propositions too intricate to be here +prosecuted. I content my self with having prosecuted this kind of +Phænomena so far as to discover their Cause, and by discovering it to +ratify the Propositions in the third Part of this Book. + +_Obs._ 13. As Light reflected by a Lens quick-silver'd on the backside +makes the Rings of Colours above described, so it ought to make the like +Rings of Colours in passing through a drop of Water. At the first +Reflexion of the Rays within the drop, some Colours ought to be +transmitted, as in the case of a Lens, and others to be reflected back +to the Eye. 
For instance, if the Diameter of a small drop or globule of +Water be about the 500th part of an Inch, so that a red-making Ray in +passing through the middle of this globule has 250 Fits of easy +Transmission within the globule, and that all the red-making Rays which +are at a certain distance from this middle Ray round about it have 249 +Fits within the globule, and all the like Rays at a certain farther +distance round about it have 248 Fits, and all those at a certain +farther distance 247 Fits, and so on; these concentrick Circles of Rays +after their transmission, falling on a white Paper, will make +concentrick Rings of red upon the Paper, supposing the Light which +passes through one single globule, strong enough to be sensible. And, in +like manner, the Rays of other Colours will make Rings of other Colours. +Suppose now that in a fair Day the Sun shines through a thin Cloud of +such globules of Water or Hail, and that the globules are all of the +same bigness; and the Sun seen through this Cloud shall appear +encompassed with the like concentrick Rings of Colours, and the Diameter +of the first Ring of red shall be 7-1/4 Degrees, that of the second +10-1/4 Degrees, that of the third 12 Degrees 33 Minutes. And accordingly +as the Globules of Water are bigger or less, the Rings shall be less or +bigger. This is the Theory, and Experience answers it. For in _June_ +1692, I saw by reflexion in a Vessel of stagnating Water three Halos, +Crowns, or Rings of Colours about the Sun, like three little Rain-bows, +concentrick to his Body. The Colours of the first or innermost Crown +were blue next the Sun, red without, and white in the middle between the +blue and red. Those of the second Crown were purple and blue within, and +pale red without, and green in the middle. 
And those of the third were +pale blue within, and pale red without; these Crowns enclosed one +another immediately, so that their Colours proceeded in this continual +order from the Sun outward: blue, white, red; purple, blue, green, pale +yellow and red; pale blue, pale red. The Diameter of the second Crown +measured from the middle of the yellow and red on one side of the Sun, +to the middle of the same Colour on the other side was 9-1/3 Degrees, or +thereabouts. The Diameters of the first and third I had not time to +measure, but that of the first seemed to be about five or six Degrees, +and that of the third about twelve. The like Crowns appear sometimes +about the Moon; for in the beginning of the Year 1664, _Febr._ 19th at +Night, I saw two such Crowns about her. The Diameter of the first or +innermost was about three Degrees, and that of the second about five +Degrees and an half. Next about the Moon was a Circle of white, and next +about that the inner Crown, which was of a bluish green within next the +white, and of a yellow and red without, and next about these Colours +were blue and green on the inside of the outward Crown, and red on the +outside of it. At the same time there appear'd a Halo about 22 Degrees +35´ distant from the center of the Moon. It was elliptical, and its long +Diameter was perpendicular to the Horizon, verging below farthest from +the Moon. I am told that the Moon has sometimes three or more +concentrick Crowns of Colours encompassing one another next about her +Body. The more equal the globules of Water or Ice are to one another, +the more Crowns of Colours will appear, and the Colours will be the more +lively. The Halo at the distance of 22-1/2 Degrees from the Moon is of +another sort. By its being oval and remoter from the Moon below than +above, I conclude, that it was made by Refraction in some sort of Hail +or Snow floating in the Air in an horizontal posture, the refracting +Angle being about 58 or 60 Degrees. 
+ + + + +THE + +THIRD BOOK + +OF + +OPTICKS + + +_PART I._ + +_Observations concerning the Inflexions of the Rays of Light, and the +Colours made thereby._ + +Grimaldo has inform'd us, that if a beam of the Sun's Light be let into +a dark Room through a very small hole, the Shadows of things in this +Light will be larger than they ought to be if the Rays went on by the +Bodies in straight Lines, and that these Shadows have three parallel +Fringes, Bands or Ranks of colour'd Light adjacent to them. But if the +Hole be enlarged the Fringes grow broad and run into one another, so +that they cannot be distinguish'd. These broad Shadows and Fringes have +been reckon'd by some to proceed from the ordinary refraction of the +Air, but without due examination of the Matter. For the circumstances of +the Phænomenon, so far as I have observed them, are as follows. + +_Obs._ 1. I made in a piece of Lead a small Hole with a Pin, whose +breadth was the 42d part of an Inch. For 21 of those Pins laid together +took up the breadth of half an Inch. Through this Hole I let into my +darken'd Chamber a beam of the Sun's Light, and found that the Shadows +of Hairs, Thred, Pins, Straws, and such like slender Substances placed +in this beam of Light, were considerably broader than they ought to be, +if the Rays of Light passed on by these Bodies in right Lines. And +particularly a Hair of a Man's Head, whose breadth was but the 280th +part of an Inch, being held in this Light, at the distance of about +twelve Feet from the Hole, did cast a Shadow which at the distance of +four Inches from the Hair was the sixtieth part of an Inch broad, that +is, above four times broader than the Hair, and at the distance of two +Feet from the Hair was about the eight and twentieth part of an Inch +broad, that is, ten times broader than the Hair, and at the distance of +ten Feet was the eighth part of an Inch broad, that is 35 times broader. 
+ +Nor is it material whether the Hair be encompassed with Air, or with any +other pellucid Substance. For I wetted a polish'd Plate of Glass, and +laid the Hair in the Water upon the Glass, and then laying another +polish'd Plate of Glass upon it, so that the Water might fill up the +space between the Glasses, I held them in the aforesaid beam of Light, +so that the Light might pass through them perpendicularly, and the +Shadow of the Hair was at the same distances as big as before. The +Shadows of Scratches made in polish'd Plates of Glass were also much +broader than they ought to be, and the Veins in polish'd Plates of Glass +did also cast the like broad Shadows. And therefore the great breadth of +these Shadows proceeds from some other cause than the Refraction of the +Air. + +Let the Circle X [in _Fig._ 1.] represent the middle of the Hair; ADG, +BEH, CFI, three Rays passing by one side of the Hair at several +distances; KNQ, LOR, MPS, three other Rays passing by the other side of +the Hair at the like distances; D, E, F, and N, O, P, the places where +the Rays are bent in their passage by the Hair; G, H, I, and Q, R, S, +the places where the Rays fall on a Paper GQ; IS the breadth of the +Shadow of the Hair cast on the Paper, and TI, VS, two Rays passing to +the Points I and S without bending when the Hair is taken away. And it's +manifest that all the Light between these two Rays TI and VS is bent in +passing by the Hair, and turned aside from the Shadow IS, because if any +part of this Light were not bent it would fall on the Paper within the +Shadow, and there illuminate the Paper, contrary to experience. And +because when the Paper is at a great distance from the Hair, the Shadow +is broad, and therefore the Rays TI and VS are at a great distance from +one another, it follows that the Hair acts upon the Rays of Light at a +good distance in their passing by it. 
But the Action is strongest on the +Rays which pass by at least distances, and grows weaker and weaker +accordingly as the Rays pass by at distances greater and greater, as is +represented in the Scheme: For thence it comes to pass, that the Shadow +of the Hair is much broader in proportion to the distance of the Paper +from the Hair, when the Paper is nearer the Hair, than when it is at a +great distance from it. + +_Obs._ 2. The Shadows of all Bodies (Metals, Stones, Glass, Wood, Horn, +Ice, &c.) in this Light were border'd with three Parallel Fringes or +Bands of colour'd Light, whereof that which was contiguous to the Shadow +was broadest and most luminous, and that which was remotest from it was +narrowest, and so faint, as not easily to be visible. It was difficult +to distinguish the Colours, unless when the Light fell very obliquely +upon a smooth Paper, or some other smooth white Body, so as to make them +appear much broader than they would otherwise do. And then the Colours +were plainly visible in this Order: The first or innermost Fringe was +violet and deep blue next the Shadow, and then light blue, green, and +yellow in the middle, and red without. The second Fringe was almost +contiguous to the first, and the third to the second, and both were blue +within, and yellow and red without, but their Colours were very faint, +especially those of the third. The Colours therefore proceeded in this +order from the Shadow; violet, indigo, pale blue, green, yellow, red; +blue, yellow, red; pale blue, pale yellow and red. The Shadows made by +Scratches and Bubbles in polish'd Plates of Glass were border'd with the +like Fringes of colour'd Light. 
And if Plates of Looking-glass sloop'd +off near the edges with a Diamond-cut, be held in the same beam of +Light, the Light which passes through the parallel Planes of the Glass +will be border'd with the like Fringes of Colours where those Planes +meet with the Diamond-cut, and by this means there will sometimes appear +four or five Fringes of Colours. Let AB, CD [in _Fig._ 2.] represent the +parallel Planes of a Looking-glass, and BD the Plane of the Diamond-cut, +making at B a very obtuse Angle with the Plane AB. And let all the Light +between the Rays ENI and FBM pass directly through the parallel Planes +of the Glass, and fall upon the Paper between I and M, and all the Light +between the Rays GO and HD be refracted by the oblique Plane of the +Diamond-cut BD, and fall upon the Paper between K and L; and the Light +which passes directly through the parallel Planes of the Glass, and +falls upon the Paper between I and M, will be border'd with three or +more Fringes at M. + +[Illustration: FIG. 1.] + +[Illustration: FIG. 2.] + +So by looking on the Sun through a Feather or black Ribband held close +to the Eye, several Rain-bows will appear; the Shadows which the Fibres +or Threds cast on the _Tunica Retina_, being border'd with the like +Fringes of Colours. + +_Obs._ 3. When the Hair was twelve Feet distant from this Hole, and its +Shadow fell obliquely upon a flat white Scale of Inches and Parts of an +Inch placed half a Foot beyond it, and also when the Shadow fell +perpendicularly upon the same Scale placed nine Feet beyond it; I +measured the breadth of the Shadow and Fringes as accurately as I could, +and found them in Parts of an Inch as follows. 
+ +-------------------------------------------+-----------+-------- + | half a | Nine + At the Distance of | Foot | Feet +-------------------------------------------+-----------+-------- +The breadth of the Shadow | 1/54 | 1/9 +-------------------------------------------+-----------+-------- +The breadth between the Middles of the | 1/38 | + brightest Light of the innermost Fringes | or | + on either side the Shadow | 1/39 | 7/50 +-------------------------------------------+-----------+-------- +The breadth between the Middles of the | | + brightest Light of the middlemost Fringes| | + on either side the Shadow | 1/23-1/2 | 4/17 +-------------------------------------------+-----------+-------- +The breadth between the Middles of the | 1/18 | + brightest Light of the outmost Fringes | or | + on either side the Shadow | 1/18-1/2 | 3/10 +-------------------------------------------+-----------+-------- +The distance between the Middles of the | | + brightest Light of the first and second | | + Fringes | 1/120 | 1/21 +-------------------------------------------+-----------+-------- +The distance between the Middles of the | | + brightest Light of the second and third | | + Fringes | 1/170 | 1/31 +-------------------------------------------+-----------+-------- +The breadth of the luminous Part (green, | | + white, yellow, and red) of the first | | + Fringe | 1/170 | 1/32 +-------------------------------------------+-----------+-------- +The breadth of the darker Space between | | + the first and second Fringes | 1/240 | 1/45 +-------------------------------------------+-----------+-------- +The breadth of the luminous Part of the | | + second Fringe | 1/290 | 1/55 +-------------------------------------------+-----------+-------- +The breadth of the darker Space between | | + the second and third Fringes | 1/340 | 1/63 +-------------------------------------------+-----------+-------- + +These Measures I took by letting the Shadow of the Hair, at half a Foot +distance, 
fall so obliquely on the Scale, as to appear twelve times +broader than when it fell perpendicularly on it at the same distance, +and setting down in this Table the twelfth part of the Measures I then +took. + +_Obs._ 4. When the Shadow and Fringes were cast obliquely upon a smooth +white Body, and that Body was removed farther and farther from the Hair, +the first Fringe began to appear and look brighter than the rest of the +Light at the distance of less than a quarter of an Inch from the Hair, +and the dark Line or Shadow between that and the second Fringe began to +appear at a less distance from the Hair than that of the third part of +an Inch. The second Fringe began to appear at a distance from the Hair +of less than half an Inch, and the Shadow between that and the third +Fringe at a distance less than an inch, and the third Fringe at a +distance less than three Inches. At greater distances they became much +more sensible, but kept very nearly the same proportion of their +breadths and intervals which they had at their first appearing. For the +distance between the middle of the first, and middle of the second +Fringe, was to the distance between the middle of the second and middle +of the third Fringe, as three to two, or ten to seven. And the last of +these two distances was equal to the breadth of the bright Light or +luminous part of the first Fringe. And this breadth was to the breadth +of the bright Light of the second Fringe as seven to four, and to the +dark Interval of the first and second Fringe as three to two, and to +the like dark Interval between the second and third as two to one. For +the breadths of the Fringes seem'd to be in the progression of the +Numbers 1, sqrt(1/3), sqrt(1/5), and their Intervals to be in the +same progression with them; that is, the Fringes and their Intervals +together to be in the continual progression of the Numbers 1, +sqrt(1/2), sqrt(1/3), sqrt(1/4), sqrt(1/5), or thereabouts. 
And +these Proportions held the same very nearly at all distances from the +Hair; the dark Intervals of the Fringes being as broad in proportion to +the breadth of the Fringes at their first appearance as afterwards at +great distances from the Hair, though not so dark and distinct. + +_Obs._ 5. The Sun shining into my darken'd Chamber through a hole a +quarter of an Inch broad, I placed at the distance of two or three Feet +from the Hole a Sheet of Pasteboard, which was black'd all over on both +sides, and in the middle of it had a hole about three quarters of an +Inch square for the Light to pass through. And behind the hole I +fasten'd to the Pasteboard with Pitch the blade of a sharp Knife, to +intercept some part of the Light which passed through the hole. The +Planes of the Pasteboard and blade of the Knife were parallel to one +another, and perpendicular to the Rays. And when they were so placed +that none of the Sun's Light fell on the Pasteboard, but all of it +passed through the hole to the Knife, and there part of it fell upon the +blade of the Knife, and part of it passed by its edge; I let this part +of the Light which passed by, fall on a white Paper two or three Feet +beyond the Knife, and there saw two streams of faint Light shoot out +both ways from the beam of Light into the shadow, like the Tails of +Comets. But because the Sun's direct Light by its brightness upon the +Paper obscured these faint streams, so that I could scarce see them, I +made a little hole in the midst of the Paper for that Light to pass +through and fall on a black Cloth behind it; and then I saw the two +streams plainly. They were like one another, and pretty nearly equal in +length, and breadth, and quantity of Light. Their Light at that end next +the Sun's direct Light was pretty strong for the space of about a +quarter of an Inch, or half an Inch, and in all its progress from that +direct Light decreased gradually till it became insensible. 
The whole +length of either of these streams measured upon the paper at the +distance of three Feet from the Knife was about six or eight Inches; so +that it subtended an Angle at the edge of the Knife of about 10 or 12, +or at most 14 Degrees. Yet sometimes I thought I saw it shoot three or +four Degrees farther, but with a Light so very faint that I could scarce +perceive it, and suspected it might (in some measure at least) arise +from some other cause than the two streams did. For placing my Eye in +that Light beyond the end of that stream which was behind the Knife, and +looking towards the Knife, I could see a line of Light upon its edge, +and that not only when my Eye was in the line of the Streams, but also +when it was without that line either towards the point of the Knife, or +towards the handle. This line of Light appear'd contiguous to the edge +of the Knife, and was narrower than the Light of the innermost Fringe, +and narrowest when my Eye was farthest from the direct Light, and +therefore seem'd to pass between the Light of that Fringe and the edge +of the Knife, and that which passed nearest the edge to be most bent, +though not all of it. + +_Obs._ 6. I placed another Knife by this, so that their edges might be +parallel, and look towards one another, and that the beam of Light might +fall upon both the Knives, and some part of it pass between their edges. +And when the distance of their edges was about the 400th part of an +Inch, the stream parted in the middle, and left a Shadow between the two +parts. This Shadow was so black and dark that all the Light which passed +between the Knives seem'd to be bent, and turn'd aside to the one hand +or to the other. And as the Knives still approach'd one another the +Shadow grew broader, and the streams shorter at their inward ends which +were next the Shadow, until upon the contact of the Knives the whole +Light vanish'd, leaving its place to the Shadow. 
+ +And hence I gather that the Light which is least bent, and goes to the +inward ends of the streams, passes by the edges of the Knives at the +greatest distance, and this distance when the Shadow begins to appear +between the streams, is about the 800th part of an Inch. And the Light +which passes by the edges of the Knives at distances still less and +less, is more and more bent, and goes to those parts of the streams +which are farther and farther from the direct Light; because when the +Knives approach one another till they touch, those parts of the streams +vanish last which are farthest from the direct Light. + +_Obs._ 7. In the fifth Observation the Fringes did not appear, but by +reason of the breadth of the hole in the Window became so broad as to +run into one another, and by joining, to make one continued Light in the +beginning of the streams. But in the sixth, as the Knives approached one +another, a little before the Shadow appeared between the two streams, +the Fringes began to appear on the inner ends of the Streams on either +side of the direct Light; three on one side made by the edge of one +Knife, and three on the other side made by the edge of the other Knife. +They were distinctest when the Knives were placed at the greatest +distance from the hole in the Window, and still became more distinct by +making the hole less, insomuch that I could sometimes see a faint +lineament of a fourth Fringe beyond the three above mention'd. And as +the Knives continually approach'd one another, the Fringes grew +distincter and larger, until they vanish'd. The outmost Fringe vanish'd +first, and the middlemost next, and the innermost last. 
And after they +were all vanish'd, and the line of Light which was in the middle between +them was grown very broad, enlarging it self on both sides into the +streams of Light described in the fifth Observation, the above-mention'd +Shadow began to appear in the middle of this line, and divide it along +the middle into two lines of Light, and increased until the whole Light +vanish'd. This enlargement of the Fringes was so great that the Rays +which go to the innermost Fringe seem'd to be bent above twenty times +more when this Fringe was ready to vanish, than when one of the Knives +was taken away. + +And from this and the former Observation compared, I gather, that the +Light of the first Fringe passed by the edge of the Knife at a distance +greater than the 800th part of an Inch, and the Light of the second +Fringe passed by the edge of the Knife at a greater distance than the +Light of the first Fringe did, and that of the third at a greater +distance than that of the second, and that of the streams of Light +described in the fifth and sixth Observations passed by the edges of the +Knives at less distances than that of any of the Fringes. + +_Obs._ 8. I caused the edges of two Knives to be ground truly strait, +and pricking their points into a Board so that their edges might look +towards one another, and meeting near their points contain a rectilinear +Angle, I fasten'd their Handles together with Pitch to make this Angle +invariable. 
The distance of the edges of the Knives from one another at
+the distance of four Inches from the angular Point, where the edges of
+the Knives met, was the eighth part of an Inch; and therefore the Angle
+contain'd by the edges was about one Degree 54´. The Knives thus fix'd
+together I placed in a beam of the Sun's Light, let into my darken'd
+Chamber through a Hole the 42d Part of an Inch wide, at the distance of
+10 or 15 Feet from the Hole, and let the Light which passed between
+their edges fall very obliquely upon a smooth white Ruler at the
+distance of half an Inch, or an Inch from the Knives, and there saw the
+Fringes by the two edges of the Knives run along the edges of the
+Shadows of the Knives in Lines parallel to those edges without growing
+sensibly broader, till they met in Angles equal to the Angle contained
+by the edges of the Knives, and where they met and joined they ended
+without crossing one another. But if the Ruler was held at a much
+greater distance from the Knives, the Fringes where they were farther
+from the Place of their Meeting, were a little narrower, and became
+something broader and broader as they approach'd nearer and nearer to
+one another, and after they met they cross'd one another, and then
+became much broader than before.
+
+Whence I gather that the distances at which the Fringes pass by the
+Knives are not increased nor alter'd by the approach of the Knives, but
+the Angles in which the Rays are there bent are much increased by that
+approach; and that the Knife which is nearest any Ray determines which
+way the Ray shall be bent, and the other Knife increases the bent.
+
+_Obs._ 9. 
When the Rays fell very obliquely upon the Ruler at the +distance of the third Part of an Inch from the Knives, the dark Line +between the first and second Fringe of the Shadow of one Knife, and the +dark Line between the first and second Fringe of the Shadow of the other +knife met with one another, at the distance of the fifth Part of an Inch +from the end of the Light which passed between the Knives at the +concourse of their edges. And therefore the distance of the edges of the +Knives at the meeting of these dark Lines was the 160th Part of an Inch. +For as four Inches to the eighth Part of an Inch, so is any Length of +the edges of the Knives measured from the point of their concourse to +the distance of the edges of the Knives at the end of that Length, and +so is the fifth Part of an Inch to the 160th Part. So then the dark +Lines above-mention'd meet in the middle of the Light which passes +between the Knives where they are distant the 160th Part of an Inch, and +the one half of that Light passes by the edge of one Knife at a distance +not greater than the 320th Part of an Inch, and falling upon the Paper +makes the Fringes of the Shadow of that Knife, and the other half passes +by the edge of the other Knife, at a distance not greater than the 320th +Part of an Inch, and falling upon the Paper makes the Fringes of the +Shadow of the other Knife. But if the Paper be held at a distance from +the Knives greater than the third Part of an Inch, the dark Lines +above-mention'd meet at a greater distance than the fifth Part of an +Inch from the end of the Light which passed between the Knives at the +concourse of their edges; and therefore the Light which falls upon the +Paper where those dark Lines meet passes between the Knives where the +edges are distant above the 160th part of an Inch. 
+ +For at another time, when the two Knives were distant eight Feet and +five Inches from the little hole in the Window, made with a small Pin as +above, the Light which fell upon the Paper where the aforesaid dark +lines met, passed between the Knives, where the distance between their +edges was as in the following Table, when the distance of the Paper from +the Knives was also as follows. + +-----------------------------+------------------------------ + | Distances between the edges + Distances of the Paper | of the Knives in millesimal + from the Knives in Inches. | parts of an Inch. +-----------------------------+------------------------------ + 1-1/2. | 0'012 + 3-1/3. | 0'020 + 8-3/5. | 0'034 + 32. | 0'057 + 96. | 0'081 + 131. | 0'087 +_____________________________|______________________________ + +And hence I gather, that the Light which makes the Fringes upon the +Paper is not the same Light at all distances of the Paper from the +Knives, but when the Paper is held near the Knives, the Fringes are made +by Light which passes by the edges of the Knives at a less distance, and +is more bent than when the Paper is held at a greater distance from the +Knives. + +[Illustration: FIG. 3.] + +_Obs._ 10. When the Fringes of the Shadows of the Knives fell +perpendicularly upon a Paper at a great distance from the Knives, they +were in the form of Hyperbola's, and their Dimensions were as follows. +Let CA, CB [in _Fig._ 3.] 
represent Lines drawn upon the Paper parallel +to the edges of the Knives, and between which all the Light would fall, +if it passed between the edges of the Knives without inflexion; DE a +Right Line drawn through C making the Angles ACD, BCE, equal to one +another, and terminating all the Light which falls upon the Paper from +the point where the edges of the Knives meet; _eis_, _fkt_, and _glv_, +three hyperbolical Lines representing the Terminus of the Shadow of one +of the Knives, the dark Line between the first and second Fringes of +that Shadow, and the dark Line between the second and third Fringes of +the same Shadow; _xip_, _ykq_, and _zlr_, three other hyperbolical Lines +representing the Terminus of the Shadow of the other Knife, the dark +Line between the first and second Fringes of that Shadow, and the dark +line between the second and third Fringes of the same Shadow. And +conceive that these three Hyperbola's are like and equal to the former +three, and cross them in the points _i_, _k_, and _l_, and that the +Shadows of the Knives are terminated and distinguish'd from the first +luminous Fringes by the lines _eis_ and _xip_, until the meeting and +crossing of the Fringes, and then those lines cross the Fringes in the +form of dark lines, terminating the first luminous Fringes within side, +and distinguishing them from another Light which begins to appear at +_i_, and illuminates all the triangular space _ip_DE_s_ comprehended by +these dark lines, and the right line DE. Of these Hyperbola's one +Asymptote is the line DE, and their other Asymptotes are parallel to the +lines CA and CB. 
Let _rv_ represent a line drawn any where upon the
+Paper parallel to the Asymptote DE, and let this line cross the right
+lines AC in _m_, and BC in _n_, and the six dark hyperbolical lines in
+_p_, _q_, _r_; _s_, _t_, _v_; and by measuring the distances _ps_, _qt_,
+_rv_, and thence collecting the lengths of the Ordinates _np_, _nq_,
+_nr_ or _ms_, _mt_, _mv_, and doing this at several distances of the
+line _rv_ from the Asymptote DE, you may find as many points of these
+Hyperbola's as you please, and thereby know that these curve lines are
+Hyperbola's differing little from the conical Hyperbola. And by
+measuring the lines C_i_, C_k_, C_l_, you may find other points of these
+Curves.
+
+For instance; when the Knives were distant from the hole in the Window
+ten Feet, and the Paper from the Knives nine Feet, and the Angle
+contained by the edges of the Knives to which the Angle ACB is equal,
+was subtended by a Chord which was to the Radius as 1 to 32, and the
+distance of the line _rv_ from the Asymptote DE was half an Inch: I
+measured the lines _ps_, _qt_, _rv_, and found them 0'35, 0'65, 0'98
+Inches respectively; and by adding to their halfs the line 1/2 _mn_,
+(which here was the 128th part of an Inch, or 0'0078 Inches,) the Sums
+_np_, _nq_, _nr_, were 0'1828, 0'3328, 0'4978 Inches. I measured also
+the distances of the brightest parts of the Fringes which run between
+_pq_ and _st_, _qr_ and _tv_, and next beyond _r_ and _v_, and found
+them 0'5, 0'8, and 1'17 Inches.
+
+_Obs._ 11. The Sun shining into my darken'd Room through a small round
+hole made in a Plate of Lead with a slender Pin, as above; I placed at
+the hole a Prism to refract the Light, and form on the opposite Wall the
+Spectrum of Colours, described in the third Experiment of the first
+Book. And then I found that the Shadows of all Bodies held in the
+colour'd Light between the Prism and the Wall, were border'd with
+Fringes of the Colour of that Light in which they were held. 
In the full
+red Light they were totally red without any sensible blue or violet, and
+in the deep blue Light they were totally blue without any sensible red
+or yellow; and so in the green Light they were totally green, excepting
+a little yellow and blue, which were mixed in the green Light of the
+Prism. And comparing the Fringes made in the several colour'd Lights, I
+found that those made in the red Light were largest, those made in the
+violet were least, and those made in the green were of a middle bigness.
+For the Fringes with which the Shadow of a Man's Hair were bordered,
+being measured cross the Shadow at the distance of six Inches from the
+Hair, the distance between the middle and most luminous part of the
+first or innermost Fringe on one side of the Shadow, and that of the
+like Fringe on the other side of the Shadow, was in the full red Light
+1/37-1/4 of an Inch, and in the full violet 1/46. And the like distance
+between the middle and most luminous parts of the second Fringes on
+either side the Shadow was in the full red Light 1/22, and in the violet
+1/27 of an Inch. And these distances of the Fringes held the same
+proportion at all distances from the Hair without any sensible
+variation.
+
+So then the Rays which made these Fringes in the red Light passed by the
+Hair at a greater distance than those did which made the like Fringes in
+the violet; and therefore the Hair in causing these Fringes acted alike
+upon the red Light or least refrangible Rays at a greater distance, and
+upon the violet or most refrangible Rays at a less distance, and by
+those actions disposed the red Light into Larger Fringes, and the violet
+into smaller, and the Lights of intermediate Colours into Fringes of
+intermediate bignesses without changing the Colour of any sort of Light. 
+ +When therefore the Hair in the first and second of these Observations +was held in the white beam of the Sun's Light, and cast a Shadow which +was border'd with three Fringes of coloured Light, those Colours arose +not from any new modifications impress'd upon the Rays of Light by the +Hair, but only from the various inflexions whereby the several Sorts of +Rays were separated from one another, which before separation, by the +mixture of all their Colours, composed the white beam of the Sun's +Light, but whenever separated compose Lights of the several Colours +which they are originally disposed to exhibit. In this 11th Observation, +where the Colours are separated before the Light passes by the Hair, the +least refrangible Rays, which when separated from the rest make red, +were inflected at a greater distance from the Hair, so as to make three +red Fringes at a greater distance from the middle of the Shadow of the +Hair; and the most refrangible Rays which when separated make violet, +were inflected at a less distance from the Hair, so as to make three +violet Fringes at a less distance from the middle of the Shadow of the +Hair. And other Rays of intermediate degrees of Refrangibility were +inflected at intermediate distances from the Hair, so as to make Fringes +of intermediate Colours at intermediate distances from the middle of the +Shadow of the Hair. And in the second Observation, where all the Colours +are mix'd in the white Light which passes by the Hair, these Colours are +separated by the various inflexions of the Rays, and the Fringes which +they make appear all together, and the innermost Fringes being +contiguous make one broad Fringe composed of all the Colours in due +order, the violet lying on the inside of the Fringe next the Shadow, the +red on the outside farthest from the Shadow, and the blue, green, and +yellow, in the middle. 
And, in like manner, the middlemost Fringes of
+all the Colours lying in order, and being contiguous, make another broad
+Fringe composed of all the Colours; and the outmost Fringes of all the
+Colours lying in order, and being contiguous, make a third broad Fringe
+composed of all the Colours. These are the three Fringes of colour'd
+Light with which the Shadows of all Bodies are border'd in the second
+Observation.
+
+When I made the foregoing Observations, I design'd to repeat most of
+them with more care and exactness, and to make some new ones for
+determining the manner how the Rays of Light are bent in their passage
+by Bodies, for making the Fringes of Colours with the dark lines between
+them. But I was then interrupted, and cannot now think of taking these
+things into farther Consideration. And since I have not finish'd this
+part of my Design, I shall conclude with proposing only some Queries, in
+order to a farther search to be made by others.
+
+_Query_ 1. Do not Bodies act upon Light at a distance, and by their
+action bend its Rays; and is not this action (_cæteris paribus_)
+strongest at the least distance?
+
+_Qu._ 2. Do not the Rays which differ in Refrangibility differ also in
+Flexibility; and are they not by their different Inflexions separated from
+one another, so as after separation to make the Colours in the three
+Fringes above described? And after what manner are they inflected to
+make those Fringes?
+
+_Qu._ 3. Are not the Rays of Light in passing by the edges and sides of
+Bodies, bent several times backwards and forwards, with a motion like
+that of an Eel? And do not the three Fringes of colour'd Light
+above-mention'd arise from three such bendings?
+
+_Qu._ 4. Do not the Rays of Light which fall upon Bodies, and are
+reflected or refracted, begin to bend before they arrive at the Bodies;
+and are they not reflected, refracted, and inflected, by one and the
+same Principle, acting variously in various Circumstances?
+
+_Qu._ 5. 
Do not Bodies and Light act mutually upon one another; that is +to say, Bodies upon Light in emitting, reflecting, refracting and +inflecting it, and Light upon Bodies for heating them, and putting their +parts into a vibrating motion wherein heat consists? + +_Qu._ 6. Do not black Bodies conceive heat more easily from Light than +those of other Colours do, by reason that the Light falling on them is +not reflected outwards, but enters the Bodies, and is often reflected +and refracted within them, until it be stifled and lost? + +_Qu._ 7. Is not the strength and vigor of the action between Light and +sulphureous Bodies observed above, one reason why sulphureous Bodies +take fire more readily, and burn more vehemently than other Bodies do? + +_Qu._ 8. Do not all fix'd Bodies, when heated beyond a certain degree, +emit Light and shine; and is not this Emission perform'd by the +vibrating motions of their parts? And do not all Bodies which abound +with terrestrial parts, and especially with sulphureous ones, emit Light +as often as those parts are sufficiently agitated; whether that +agitation be made by Heat, or by Friction, or Percussion, or +Putrefaction, or by any vital Motion, or any other Cause? 
As for
+instance; Sea-Water in a raging Storm; Quick-silver agitated in _vacuo_;
+the Back of a Cat, or Neck of a Horse, obliquely struck or rubbed in a
+dark place; Wood, Flesh and Fish while they putrefy; Vapours arising
+from putrefy'd Waters, usually call'd _Ignes Fatui_; Stacks of moist Hay
+or Corn growing hot by fermentation; Glow-worms and the Eyes of some
+Animals by vital Motions; the vulgar _Phosphorus_ agitated by the
+attrition of any Body, or by the acid Particles of the Air; Amber and
+some Diamonds by striking, pressing or rubbing them; Scrapings of Steel
+struck off with a Flint; Iron hammer'd very nimbly till it become so hot
+as to kindle Sulphur thrown upon it; the Axletrees of Chariots taking
+fire by the rapid rotation of the Wheels; and some Liquors mix'd with
+one another whose Particles come together with an Impetus, as Oil of
+Vitriol distilled from its weight of Nitre, and then mix'd with twice
+its weight of Oil of Anniseeds. So also a Globe of Glass about 8 or 10
+Inches in diameter, being put into a Frame where it may be swiftly
+turn'd round its Axis, will in turning shine where it rubs against the
+palm of ones Hand apply'd to it: And if at the same time a piece of
+white Paper or white Cloth, or the end of ones Finger be held at the
+distance of about a quarter of an Inch or half an Inch from that part of
+the Glass where it is most in motion, the electrick Vapour which is
+excited by the friction of the Glass against the Hand, will by dashing
+against the white Paper, Cloth or Finger, be put into such an agitation
+as to emit Light, and make the white Paper, Cloth or Finger, appear
+lucid like a Glowworm; and in rushing out of the Glass will sometimes
+push against the finger so as to be felt. And the same things have been
+found by rubbing a long and large Cylinder of Glass or Amber with a
+Paper held in ones hand, and continuing the friction till the Glass grew
+warm.
+
+_Qu._ 9. 
Is not Fire a Body heated so hot as to emit Light copiously? +For what else is a red hot Iron than Fire? And what else is a burning +Coal than red hot Wood? + +_Qu._ 10. Is not Flame a Vapour, Fume or Exhalation heated red hot, that +is, so hot as to shine? For Bodies do not flame without emitting a +copious Fume, and this Fume burns in the Flame. The _Ignis Fatuus_ is a +Vapour shining without heat, and is there not the same difference +between this Vapour and Flame, as between rotten Wood shining without +heat and burning Coals of Fire? In distilling hot Spirits, if the Head +of the Still be taken off, the Vapour which ascends out of the Still +will take fire at the Flame of a Candle, and turn into Flame, and the +Flame will run along the Vapour from the Candle to the Still. Some +Bodies heated by Motion, or Fermentation, if the heat grow intense, fume +copiously, and if the heat be great enough the Fumes will shine and +become Flame. Metals in fusion do not flame for want of a copious Fume, +except Spelter, which fumes copiously, and thereby flames. All flaming +Bodies, as Oil, Tallow, Wax, Wood, fossil Coals, Pitch, Sulphur, by +flaming waste and vanish into burning Smoke, which Smoke, if the Flame +be put out, is very thick and visible, and sometimes smells strongly, +but in the Flame loses its smell by burning, and according to the nature +of the Smoke the Flame is of several Colours, as that of Sulphur blue, +that of Copper open'd with sublimate green, that of Tallow yellow, that +of Camphire white. Smoke passing through Flame cannot but grow red hot, +and red hot Smoke can have no other appearance than that of Flame. When +Gun-powder takes fire, it goes away into Flaming Smoke. 
For the Charcoal +and Sulphur easily take fire, and set fire to the Nitre, and the Spirit +of the Nitre being thereby rarified into Vapour, rushes out with +Explosion much after the manner that the Vapour of Water rushes out of +an Æolipile; the Sulphur also being volatile is converted into Vapour, +and augments the Explosion. And the acid Vapour of the Sulphur (namely +that which distils under a Bell into Oil of Sulphur,) entring violently +into the fix'd Body of the Nitre, sets loose the Spirit of the Nitre, +and excites a great Fermentation, whereby the Heat is farther augmented, +and the fix'd Body of the Nitre is also rarified into Fume, and the +Explosion is thereby made more vehement and quick. For if Salt of Tartar +be mix'd with Gun-powder, and that Mixture be warm'd till it takes fire, +the Explosion will be more violent and quick than that of Gun-powder +alone; which cannot proceed from any other cause than the action of the +Vapour of the Gun-powder upon the Salt of Tartar, whereby that Salt is +rarified. The Explosion of Gun-powder arises therefore from the violent +action whereby all the Mixture being quickly and vehemently heated, is +rarified and converted into Fume and Vapour: which Vapour, by the +violence of that action, becoming so hot as to shine, appears in the +form of Flame. + +_Qu._ 11. Do not great Bodies conserve their heat the longest, their +parts heating one another, and may not great dense and fix'd Bodies, +when heated beyond a certain degree, emit Light so copiously, as by the +Emission and Re-action of its Light, and the Reflexions and Refractions +of its Rays within its Pores to grow still hotter, till it comes to a +certain period of heat, such as is that of the Sun? 
And are not the Sun +and fix'd Stars great Earths vehemently hot, whose heat is conserved by +the greatness of the Bodies, and the mutual Action and Reaction between +them, and the Light which they emit, and whose parts are kept from +fuming away, not only by their fixity, but also by the vast weight and +density of the Atmospheres incumbent upon them; and very strongly +compressing them, and condensing the Vapours and Exhalations which arise +from them? For if Water be made warm in any pellucid Vessel emptied of +Air, that Water in the _Vacuum_ will bubble and boil as vehemently as it +would in the open Air in a Vessel set upon the Fire till it conceives a +much greater heat. For the weight of the incumbent Atmosphere keeps down +the Vapours, and hinders the Water from boiling, until it grow much +hotter than is requisite to make it boil _in vacuo_. Also a mixture of +Tin and Lead being put upon a red hot Iron _in vacuo_ emits a Fume and +Flame, but the same Mixture in the open Air, by reason of the incumbent +Atmosphere, does not so much as emit any Fume which can be perceived by +Sight. In like manner the great weight of the Atmosphere which lies upon +the Globe of the Sun may hinder Bodies there from rising up and going +away from the Sun in the form of Vapours and Fumes, unless by means of a +far greater heat than that which on the Surface of our Earth would very +easily turn them into Vapours and Fumes. And the same great weight may +condense those Vapours and Exhalations as soon as they shall at any time +begin to ascend from the Sun, and make them presently fall back again +into him, and by that action increase his Heat much after the manner +that in our Earth the Air increases the Heat of a culinary Fire. And the +same weight may hinder the Globe of the Sun from being diminish'd, +unless by the Emission of Light, and a very small quantity of Vapours +and Exhalations. + +_Qu._ 12. 
Do not the Rays of Light in falling upon the bottom of the Eye +excite Vibrations in the _Tunica Retina_? Which Vibrations, being +propagated along the solid Fibres of the optick Nerves into the Brain, +cause the Sense of seeing. For because dense Bodies conserve their Heat +a long time, and the densest Bodies conserve their Heat the longest, the +Vibrations of their parts are of a lasting nature, and therefore may be +propagated along solid Fibres of uniform dense Matter to a great +distance, for conveying into the Brain the impressions made upon all the +Organs of Sense. For that Motion which can continue long in one and the +same part of a Body, can be propagated a long way from one part to +another, supposing the Body homogeneal, so that the Motion may not be +reflected, refracted, interrupted or disorder'd by any unevenness of the +Body. + +_Qu._ 13. Do not several sorts of Rays make Vibrations of several +bignesses, which according to their bignesses excite Sensations of +several Colours, much after the manner that the Vibrations of the Air, +according to their several bignesses excite Sensations of several +Sounds? And particularly do not the most refrangible Rays excite the +shortest Vibrations for making a Sensation of deep violet, the least +refrangible the largest for making a Sensation of deep red, and the +several intermediate sorts of Rays, Vibrations of several intermediate +bignesses to make Sensations of the several intermediate Colours? + +_Qu._ 14. May not the harmony and discord of Colours arise from the +proportions of the Vibrations propagated through the Fibres of the +optick Nerves into the Brain, as the harmony and discord of Sounds arise +from the proportions of the Vibrations of the Air? For some Colours, if +they be view'd together, are agreeable to one another, as those of Gold +and Indigo, and others disagree. + +_Qu._ 15. 
Are not the Species of Objects seen with both Eyes united +where the optick Nerves meet before they come into the Brain, the Fibres +on the right side of both Nerves uniting there, and after union going +thence into the Brain in the Nerve which is on the right side of the +Head, and the Fibres on the left side of both Nerves uniting in the same +place, and after union going into the Brain in the Nerve which is on the +left side of the Head, and these two Nerves meeting in the Brain in such +a manner that their Fibres make but one entire Species or Picture, half +of which on the right side of the Sensorium comes from the right side of +both Eyes through the right side of both optick Nerves to the place +where the Nerves meet, and from thence on the right side of the Head +into the Brain, and the other half on the left side of the Sensorium +comes in like manner from the left side of both Eyes. For the optick +Nerves of such Animals as look the same way with both Eyes (as of Men, +Dogs, Sheep, Oxen, &c.) meet before they come into the Brain, but the +optick Nerves of such Animals as do not look the same way with both Eyes +(as of Fishes, and of the Chameleon,) do not meet, if I am rightly +inform'd. + +_Qu._ 16. When a Man in the dark presses either corner of his Eye with +his Finger, and turns his Eye away from his Finger, he will see a Circle +of Colours like those in the Feather of a Peacock's Tail. If the Eye and +the Finger remain quiet these Colours vanish in a second Minute of Time, +but if the Finger be moved with a quavering Motion they appear again. Do +not these Colours arise from such Motions excited in the bottom of the +Eye by the Pressure and Motion of the Finger, as, at other times are +excited there by Light for causing Vision? And do not the Motions once +excited continue about a Second of Time before they cease? And when a +Man by a stroke upon his Eye sees a flash of Light, are not the like +Motions excited in the _Retina_ by the stroke? 
And when a Coal of Fire +moved nimbly in the circumference of a Circle, makes the whole +circumference appear like a Circle of Fire; is it not because the +Motions excited in the bottom of the Eye by the Rays of Light are of a +lasting nature, and continue till the Coal of Fire in going round +returns to its former place? And considering the lastingness of the +Motions excited in the bottom of the Eye by Light, are they not of a +vibrating nature? + +_Qu._ 17. If a stone be thrown into stagnating Water, the Waves excited +thereby continue some time to arise in the place where the Stone fell +into the Water, and are propagated from thence in concentrick Circles +upon the Surface of the Water to great distances. And the Vibrations or +Tremors excited in the Air by percussion, continue a little time to move +from the place of percussion in concentrick Spheres to great distances. +And in like manner, when a Ray of Light falls upon the Surface of any +pellucid Body, and is there refracted or reflected, may not Waves of +Vibrations, or Tremors, be thereby excited in the refracting or +reflecting Medium at the point of Incidence, and continue to arise +there, and to be propagated from thence as long as they continue to +arise and be propagated, when they are excited in the bottom of the Eye +by the Pressure or Motion of the Finger, or by the Light which comes +from the Coal of Fire in the Experiments above-mention'd? and are not +these Vibrations propagated from the point of Incidence to great +distances? And do they not overtake the Rays of Light, and by overtaking +them successively, do they not put them into the Fits of easy Reflexion +and easy Transmission described above? For if the Rays endeavour to +recede from the densest part of the Vibration, they may be alternately +accelerated and retarded by the Vibrations overtaking them. + +_Qu._ 18. 
If in two large tall cylindrical Vessels of Glass inverted, +two little Thermometers be suspended so as not to touch the Vessels, and +the Air be drawn out of one of these Vessels, and these Vessels thus +prepared be carried out of a cold place into a warm one; the Thermometer +_in vacuo_ will grow warm as much, and almost as soon as the Thermometer +which is not _in vacuo_. And when the Vessels are carried back into the +cold place, the Thermometer _in vacuo_ will grow cold almost as soon as +the other Thermometer. Is not the Heat of the warm Room convey'd through +the _Vacuum_ by the Vibrations of a much subtiler Medium than Air, which +after the Air was drawn out remained in the _Vacuum_? And is not this +Medium the same with that Medium by which Light is refracted and +reflected, and by whose Vibrations Light communicates Heat to Bodies, +and is put into Fits of easy Reflexion and easy Transmission? And do not +the Vibrations of this Medium in hot Bodies contribute to the +intenseness and duration of their Heat? And do not hot Bodies +communicate their Heat to contiguous cold ones, by the Vibrations of +this Medium propagated from them into the cold ones? And is not this +Medium exceedingly more rare and subtile than the Air, and exceedingly +more elastick and active? And doth it not readily pervade all Bodies? +And is it not (by its elastick force) expanded through all the Heavens? + +_Qu._ 19. Doth not the Refraction of Light proceed from the different +density of this Æthereal Medium in different places, the Light receding +always from the denser parts of the Medium? And is not the density +thereof greater in free and open Spaces void of Air and other grosser +Bodies, than within the Pores of Water, Glass, Crystal, Gems, and other +compact Bodies? 
For when Light passes through Glass or Crystal, and +falling very obliquely upon the farther Surface thereof is totally +reflected, the total Reflexion ought to proceed rather from the density +and vigour of the Medium without and beyond the Glass, than from the +rarity and weakness thereof. + +_Qu._ 20. Doth not this Æthereal Medium in passing out of Water, Glass, +Crystal, and other compact and dense Bodies into empty Spaces, grow +denser and denser by degrees, and by that means refract the Rays of +Light not in a point, but by bending them gradually in curve Lines? And +doth not the gradual condensation of this Medium extend to some distance +from the Bodies, and thereby cause the Inflexions of the Rays of Light, +which pass by the edges of dense Bodies, at some distance from the +Bodies? + +_Qu._ 21. Is not this Medium much rarer within the dense Bodies of the +Sun, Stars, Planets and Comets, than in the empty celestial Spaces +between them? And in passing from them to great distances, doth it not +grow denser and denser perpetually, and thereby cause the gravity of +those great Bodies towards one another, and of their parts towards the +Bodies; every Body endeavouring to go from the denser parts of the +Medium towards the rarer? For if this Medium be rarer within the Sun's +Body than at its Surface, and rarer there than at the hundredth part of +an Inch from its Body, and rarer there than at the fiftieth part of an +Inch from its Body, and rarer there than at the Orb of _Saturn_; I see +no reason why the Increase of density should stop any where, and not +rather be continued through all distances from the Sun to _Saturn_, and +beyond. And though this Increase of density may at great distances be +exceeding slow, yet if the elastick force of this Medium be exceeding +great, it may suffice to impel Bodies from the denser parts of the +Medium towards the rarer, with all that power which we call Gravity. 
And +that the elastick force of this Medium is exceeding great, may be +gather'd from the swiftness of its Vibrations. Sounds move about 1140 +_English_ Feet in a second Minute of Time, and in seven or eight Minutes +of Time they move about one hundred _English_ Miles. Light moves from +the Sun to us in about seven or eight Minutes of Time, which distance is +about 70,000,000 _English_ Miles, supposing the horizontal Parallax of +the Sun to be about 12´´. And the Vibrations or Pulses of this Medium, +that they may cause the alternate Fits of easy Transmission and easy +Reflexion, must be swifter than Light, and by consequence above 700,000 +times swifter than Sounds. And therefore the elastick force of this +Medium, in proportion to its density, must be above 700000 x 700000 +(that is, above 490,000,000,000) times greater than the elastick force +of the Air is in proportion to its density. For the Velocities of the +Pulses of elastick Mediums are in a subduplicate _Ratio_ of the +Elasticities and the Rarities of the Mediums taken together. + +As Attraction is stronger in small Magnets than in great ones in +proportion to their Bulk, and Gravity is greater in the Surfaces of +small Planets than in those of great ones in proportion to their bulk, +and small Bodies are agitated much more by electric attraction than +great ones; so the smallness of the Rays of Light may contribute very +much to the power of the Agent by which they are refracted. 
And so if +any one should suppose that _Æther_ (like our Air) may contain Particles +which endeavour to recede from one another (for I do not know what this +_Æther_ is) and that its Particles are exceedingly smaller than those of +Air, or even than those of Light: The exceeding smallness of its +Particles may contribute to the greatness of the force by which those +Particles may recede from one another, and thereby make that Medium +exceedingly more rare and elastick than Air, and by consequence +exceedingly less able to resist the motions of Projectiles, and +exceedingly more able to press upon gross Bodies, by endeavouring to +expand it self. + +_Qu._ 22. May not Planets and Comets, and all gross Bodies, perform +their Motions more freely, and with less resistance in this Æthereal +Medium than in any Fluid, which fills all Space adequately without +leaving any Pores, and by consequence is much denser than Quick-silver +or Gold? And may not its resistance be so small, as to be +inconsiderable? For instance; If this _Æther_ (for so I will call it) +should be supposed 700000 times more elastick than our Air, and above +700000 times more rare; its resistance would be above 600,000,000 times +less than that of Water. And so small a resistance would scarce make any +sensible alteration in the Motions of the Planets in ten thousand +Years. If any one would ask how a Medium can be so rare, let him tell me +how the Air, in the upper parts of the Atmosphere, can be above an +hundred thousand thousand times rarer than Gold. Let him also tell me, +how an electrick Body can by Friction emit an Exhalation so rare and +subtile, and yet so potent, as by its Emission to cause no sensible +Diminution of the weight of the electrick Body, and to be expanded +through a Sphere, whose Diameter is above two Feet, and yet to be able +to agitate and carry up Leaf Copper, or Leaf Gold, at the distance of +above a Foot from the electrick Body? 
And how the Effluvia of a Magnet +can be so rare and subtile, as to pass through a Plate of Glass without +any Resistance or Diminution of their Force, and yet so potent as to +turn a magnetick Needle beyond the Glass? + +_Qu._ 23. Is not Vision perform'd chiefly by the Vibrations of this +Medium, excited in the bottom of the Eye by the Rays of Light, and +propagated through the solid, pellucid and uniform Capillamenta of the +optick Nerves into the place of Sensation? And is not Hearing perform'd +by the Vibrations either of this or some other Medium, excited in the +auditory Nerves by the Tremors of the Air, and propagated through the +solid, pellucid and uniform Capillamenta of those Nerves into the place +of Sensation? And so of the other Senses. + +_Qu._ 24. Is not Animal Motion perform'd by the Vibrations of this +Medium, excited in the Brain by the power of the Will, and propagated +from thence through the solid, pellucid and uniform Capillamenta of the +Nerves into the Muscles, for contracting and dilating them? I suppose +that the Capillamenta of the Nerves are each of them solid and uniform, +that the vibrating Motion of the Æthereal Medium may be propagated along +them from one end to the other uniformly, and without interruption: For +Obstructions in the Nerves create Palsies. And that they may be +sufficiently uniform, I suppose them to be pellucid when view'd singly, +tho' the Reflexions in their cylindrical Surfaces may make the whole +Nerve (composed of many Capillamenta) appear opake and white. For +opacity arises from reflecting Surfaces, such as may disturb and +interrupt the Motions of this Medium. + +[Sidenote: _See the following Scheme, p. 356._] + +_Qu._ 25. Are there not other original Properties of the Rays of Light, +besides those already described? 
An instance of another original +Property we have in the Refraction of Island Crystal, described first by +_Erasmus Bartholine_, and afterwards more exactly by _Hugenius_, in his +Book _De la Lumiere_. This Crystal is a pellucid fissile Stone, clear as +Water or Crystal of the Rock, and without Colour; enduring a red Heat +without losing its transparency, and in a very strong Heat calcining +without Fusion. Steep'd a Day or two in Water, it loses its natural +Polish. Being rubb'd on Cloth, it attracts pieces of Straws and other +light things, like Ambar or Glass; and with _Aqua fortis_ it makes an +Ebullition. It seems to be a sort of Talk, and is found in form of an +oblique Parallelopiped, with six parallelogram Sides and eight solid +Angles. The obtuse Angles of the Parallelograms are each of them 101 +Degrees and 52 Minutes; the acute ones 78 Degrees and 8 Minutes. Two of +the solid Angles opposite to one another, as C and E, are compassed each +of them with three of these obtuse Angles, and each of the other six +with one obtuse and two acute ones. It cleaves easily in planes parallel +to any of its Sides, and not in any other Planes. It cleaves with a +glossy polite Surface not perfectly plane, but with some little +unevenness. It is easily scratch'd, and by reason of its softness it +takes a Polish very difficultly. It polishes better upon polish'd +Looking-glass than upon Metal, and perhaps better upon Pitch, Leather or +Parchment. Afterwards it must be rubb'd with a little Oil or white of an +Egg, to fill up its Scratches; whereby it will become very transparent +and polite. But for several Experiments, it is not necessary to polish +it. If a piece of this crystalline Stone be laid upon a Book, every +Letter of the Book seen through it will appear double, by means of a +double Refraction. 
And if any beam of Light falls either +perpendicularly, or in any oblique Angle upon any Surface of this +Crystal, it becomes divided into two beams by means of the same double +Refraction. Which beams are of the same Colour with the incident beam of +Light, and seem equal to one another in the quantity of their Light, or +very nearly equal. One of these Refractions is perform'd by the usual +Rule of Opticks, the Sine of Incidence out of Air into this Crystal +being to the Sine of Refraction, as five to three. The other +Refraction, which may be called the unusual Refraction, is perform'd by +the following Rule. + +[Illustration: FIG. 4.] + +Let ADBC represent the refracting Surface of the Crystal, C the biggest +solid Angle at that Surface, GEHF the opposite Surface, and CK a +perpendicular on that Surface. This perpendicular makes with the edge of +the Crystal CF, an Angle of 19 Degr. 3'. Join KF, and in it take KL, so +that the Angle KCL be 6 Degr. 40'. and the Angle LCF 12 Degr. 23'. And +if ST represent any beam of Light incident at T in any Angle upon the +refracting Surface ADBC, let TV be the refracted beam determin'd by the +given Proportion of the Sines 5 to 3, according to the usual Rule of +Opticks. Draw VX parallel and equal to KL. Draw it the same way from V +in which L lieth from K; and joining TX, this line TX shall be the other +refracted beam carried from T to X, by the unusual Refraction. + +If therefore the incident beam ST be perpendicular to the refracting +Surface, the two beams TV and TX, into which it shall become divided, +shall be parallel to the lines CK and CL; one of those beams going +through the Crystal perpendicularly, as it ought to do by the usual Laws +of Opticks, and the other TX by an unusual Refraction diverging from the +perpendicular, and making with it an Angle VTX of about 6-2/3 Degrees, +as is found by Experience. 
And hence, the Plane VTX, and such like +Planes which are parallel to the Plane CFK, may be called the Planes of +perpendicular Refraction. And the Coast towards which the lines KL and +VX are drawn, may be call'd the Coast of unusual Refraction. + +In like manner Crystal of the Rock has a double Refraction: But the +difference of the two Refractions is not so great and manifest as in +Island Crystal. + +When the beam ST incident on Island Crystal is divided into two beams TV +and TX, and these two beams arrive at the farther Surface of the Glass; +the beam TV, which was refracted at the first Surface after the usual +manner, shall be again refracted entirely after the usual manner at the +second Surface; and the beam TX, which was refracted after the unusual +manner in the first Surface, shall be again refracted entirely after the +unusual manner in the second Surface; so that both these beams shall +emerge out of the second Surface in lines parallel to the first incident +beam ST. + +And if two pieces of Island Crystal be placed one after another, in such +manner that all the Surfaces of the latter be parallel to all the +corresponding Surfaces of the former: The Rays which are refracted after +the usual manner in the first Surface of the first Crystal, shall be +refracted after the usual manner in all the following Surfaces; and the +Rays which are refracted after the unusual manner in the first Surface, +shall be refracted after the unusual manner in all the following +Surfaces. And the same thing happens, though the Surfaces of the +Crystals be any ways inclined to one another, provided that their Planes +of perpendicular Refraction be parallel to one another. 
+ +And therefore there is an original difference in the Rays of Light, by +means of which some Rays are in this Experiment constantly refracted +after the usual manner, and others constantly after the unusual manner: +For if the difference be not original, but arises from new Modifications +impress'd on the Rays at their first Refraction, it would be alter'd by +new Modifications in the three following Refractions; whereas it suffers +no alteration, but is constant, and has the same effect upon the Rays in +all the Refractions. The unusual Refraction is therefore perform'd by an +original property of the Rays. And it remains to be enquired, whether +the Rays have not more original Properties than are yet discover'd. + +_Qu._ 26. Have not the Rays of Light several sides, endued with several +original Properties? For if the Planes of perpendicular Refraction of +the second Crystal be at right Angles with the Planes of perpendicular +Refraction of the first Crystal, the Rays which are refracted after the +usual manner in passing through the first Crystal, will be all of them +refracted after the unusual manner in passing through the second +Crystal; and the Rays which are refracted after the unusual manner in +passing through the first Crystal, will be all of them refracted after +the usual manner in passing through the second Crystal. And therefore +there are not two sorts of Rays differing in their nature from one +another, one of which is constantly and in all Positions refracted after +the usual manner, and the other constantly and in all Positions after +the unusual manner. The difference between the two sorts of Rays in the +Experiment mention'd in the 25th Question, was only in the Positions of +the Sides of the Rays to the Planes of perpendicular Refraction. For one +and the same Ray is here refracted sometimes after the usual, and +sometimes after the unusual manner, according to the Position which its +Sides have to the Crystals. 
If the Sides of the Ray are posited the same +way to both Crystals, it is refracted after the same manner in them +both: But if that side of the Ray which looks towards the Coast of the +unusual Refraction of the first Crystal, be 90 Degrees from that side of +the same Ray which looks toward the Coast of the unusual Refraction of +the second Crystal, (which may be effected by varying the Position of +the second Crystal to the first, and by consequence to the Rays of +Light,) the Ray shall be refracted after several manners in the several +Crystals. There is nothing more required to determine whether the Rays +of Light which fall upon the second Crystal shall be refracted after +the usual or after the unusual manner, but to turn about this Crystal, +so that the Coast of this Crystal's unusual Refraction may be on this or +on that side of the Ray. And therefore every Ray may be consider'd as +having four Sides or Quarters, two of which opposite to one another +incline the Ray to be refracted after the unusual manner, as often as +either of them are turn'd towards the Coast of unusual Refraction; and +the other two, whenever either of them are turn'd towards the Coast of +unusual Refraction, do not incline it to be otherwise refracted than +after the usual manner. The two first may therefore be call'd the Sides +of unusual Refraction. 
And since these Dispositions were in the Rays +before their Incidence on the second, third, and fourth Surfaces of the +two Crystals, and suffered no alteration (so far as appears,) by the +Refraction of the Rays in their passage through those Surfaces, and the +Rays were refracted by the same Laws in all the four Surfaces; it +appears that those Dispositions were in the Rays originally, and +suffer'd no alteration by the first Refraction, and that by means of +those Dispositions the Rays were refracted at their Incidence on the +first Surface of the first Crystal, some of them after the usual, and +some of them after the unusual manner, accordingly as their Sides of +unusual Refraction were then turn'd towards the Coast of the unusual +Refraction of that Crystal, or sideways from it. + +Every Ray of Light has therefore two opposite Sides, originally endued +with a Property on which the unusual Refraction depends, and the other +two opposite Sides not endued with that Property. And it remains to be +enquired, whether there are not more Properties of Light by which the +Sides of the Rays differ, and are distinguished from one another. + +In explaining the difference of the Sides of the Rays above mention'd, I +have supposed that the Rays fall perpendicularly on the first Crystal. +But if they fall obliquely on it, the Success is the same. Those Rays +which are refracted after the usual manner in the first Crystal, will be +refracted after the unusual manner in the second Crystal, supposing the +Planes of perpendicular Refraction to be at right Angles with one +another, as above; and on the contrary. + +If the Planes of the perpendicular Refraction of the two Crystals be +neither parallel nor perpendicular to one another, but contain an acute +Angle: The two beams of Light which emerge out of the first Crystal, +will be each of them divided into two more at their Incidence on the +second Crystal. 
For in this case the Rays in each of the two beams will +some of them have their Sides of unusual Refraction, and some of them +their other Sides turn'd towards the Coast of the unusual Refraction of +the second Crystal. + +_Qu._ 27. Are not all Hypotheses erroneous which have hitherto been +invented for explaining the Phænomena of Light, by new Modifications of +the Rays? For those Phænomena depend not upon new Modifications, as has +been supposed, but upon the original and unchangeable Properties of the +Rays. + +_Qu._ 28. Are not all Hypotheses erroneous, in which Light is supposed +to consist in Pression or Motion, propagated through a fluid Medium? For +in all these Hypotheses the Phænomena of Light have been hitherto +explain'd by supposing that they arise from new Modifications of the +Rays; which is an erroneous Supposition. + +If Light consisted only in Pression propagated without actual Motion, it +would not be able to agitate and heat the Bodies which refract and +reflect it. If it consisted in Motion propagated to all distances in an +instant, it would require an infinite force every moment, in every +shining Particle, to generate that Motion. And if it consisted in +Pression or Motion, propagated either in an instant or in time, it would +bend into the Shadow. For Pression or Motion cannot be propagated in a +Fluid in right Lines, beyond an Obstacle which stops part of the Motion, +but will bend and spread every way into the quiescent Medium which lies +beyond the Obstacle. Gravity tends downwards, but the Pressure of Water +arising from Gravity tends every way with equal Force, and is propagated +as readily, and with as much force sideways as downwards, and through +crooked passages as through strait ones. The Waves on the Surface of +stagnating Water, passing by the sides of a broad Obstacle which stops +part of them, bend afterwards and dilate themselves gradually into the +quiet Water behind the Obstacle. 
The Waves, Pulses or Vibrations of the +Air, wherein Sounds consist, bend manifestly, though not so much as the +Waves of Water. For a Bell or a Cannon may be heard beyond a Hill which +intercepts the sight of the sounding Body, and Sounds are propagated as +readily through crooked Pipes as through streight ones. But Light is +never known to follow crooked Passages nor to bend into the Shadow. For +the fix'd Stars by the Interposition of any of the Planets cease to be +seen. And so do the Parts of the Sun by the Interposition of the Moon, +_Mercury_ or _Venus_. The Rays which pass very near to the edges of any +Body, are bent a little by the action of the Body, as we shew'd above; +but this bending is not towards but from the Shadow, and is perform'd +only in the passage of the Ray by the Body, and at a very small distance +from it. So soon as the Ray is past the Body, it goes right on. + +[Sidenote: _Mais pour dire comment cela se fait, je n'ay rien trove +jusqu' ici qui me satisfasse._ C. H. de la lumiere, c. 5, p. 91.] + +To explain the unusual Refraction of Island Crystal by Pression or +Motion propagated, has not hitherto been attempted (to my knowledge) +except by _Huygens_, who for that end supposed two several vibrating +Mediums within that Crystal. But when he tried the Refractions in two +successive pieces of that Crystal, and found them such as is mention'd +above; he confessed himself at a loss for explaining them. For Pressions +or Motions, propagated from a shining Body through an uniform Medium, +must be on all sides alike; whereas by those Experiments it appears, +that the Rays of Light have different Properties in their different +Sides. He suspected that the Pulses of _Æther_ in passing through the +first Crystal might receive certain new Modifications, which might +determine them to be propagated in this or that Medium within the +second Crystal, according to the Position of that Crystal. 
But what +Modifications those might be he could not say, nor think of any thing +satisfactory in that Point. And if he had known that the unusual +Refraction depends not on new Modifications, but on the original and +unchangeable Dispositions of the Rays, he would have found it as +difficult to explain how those Dispositions which he supposed to be +impress'd on the Rays by the first Crystal, could be in them before +their Incidence on that Crystal, and in general, how all Rays emitted by +shining Bodies, can have those Dispositions in them from the beginning. +To me, at least, this seems inexplicable, if Light be nothing else than +Pression or Motion propagated through _Æther_. + +And it is as difficult to explain by these Hypotheses, how Rays can be +alternately in Fits of easy Reflexion and easy Transmission; unless +perhaps one might suppose that there are in all Space two Æthereal +vibrating Mediums, and that the Vibrations of one of them constitute +Light, and the Vibrations of the other are swifter, and as often as they +overtake the Vibrations of the first, put them into those Fits. But how +two _Æthers_ can be diffused through all Space, one of which acts upon +the other, and by consequence is re-acted upon, without retarding, +shattering, dispersing and confounding one anothers Motions, is +inconceivable. And against filling the Heavens with fluid Mediums, +unless they be exceeding rare, a great Objection arises from the regular +and very lasting Motions of the Planets and Comets in all manner of +Courses through the Heavens. For thence it is manifest, that the Heavens +are void of all sensible Resistance, and by consequence of all sensible +Matter. + +For the resisting Power of fluid Mediums arises partly from the +Attrition of the Parts of the Medium, and partly from the _Vis inertiæ_ +of the Matter. 
That part of the Resistance of a spherical Body which +arises from the Attrition of the Parts of the Medium is very nearly as +the Diameter, or, at the most, as the _Factum_ of the Diameter, and the +Velocity of the spherical Body together. And that part of the Resistance +which arises from the _Vis inertiæ_ of the Matter, is as the Square of +that _Factum_. And by this difference the two sorts of Resistance may be +distinguish'd from one another in any Medium; and these being +distinguish'd, it will be found that almost all the Resistance of Bodies +of a competent Magnitude moving in Air, Water, Quick-silver, and such +like Fluids with a competent Velocity, arises from the _Vis inertiæ_ of +the Parts of the Fluid. + +Now that part of the resisting Power of any Medium which arises from the +Tenacity, Friction or Attrition of the Parts of the Medium, may be +diminish'd by dividing the Matter into smaller Parts, and making the +Parts more smooth and slippery: But that part of the Resistance which +arises from the _Vis inertiæ_, is proportional to the Density of the +Matter, and cannot be diminish'd by dividing the Matter into smaller +Parts, nor by any other means than by decreasing the Density of the +Medium. And for these Reasons the Density of fluid Mediums is very +nearly proportional to their Resistance. Liquors which differ not much +in Density, as Water, Spirit of Wine, Spirit of Turpentine, hot Oil, +differ not much in Resistance. Water is thirteen or fourteen times +lighter than Quick-silver and by consequence thirteen or fourteen times +rarer, and its Resistance is less than that of Quick-silver in the same +Proportion, or thereabouts, as I have found by Experiments made with +Pendulums. 
The open Air in which we breathe is eight or nine hundred +times lighter than Water, and by consequence eight or nine hundred times +rarer, and accordingly its Resistance is less than that of Water in the +same Proportion, or thereabouts; as I have also found by Experiments +made with Pendulums. And in thinner Air the Resistance is still less, +and at length, by rarifying the Air, becomes insensible. For small +Feathers falling in the open Air meet with great Resistance, but in a +tall Glass well emptied of Air, they fall as fast as Lead or Gold, as I +have seen tried several times. Whence the Resistance seems still to +decrease in proportion to the Density of the Fluid. For I do not find by +any Experiments, that Bodies moving in Quick-silver, Water or Air, meet +with any other sensible Resistance than what arises from the Density and +Tenacity of those sensible Fluids, as they would do if the Pores of +those Fluids, and all other Spaces, were filled with a dense and +subtile Fluid. Now if the Resistance in a Vessel well emptied of Air, +was but an hundred times less than in the open Air, it would be about a +million of times less than in Quick-silver. But it seems to be much less +in such a Vessel, and still much less in the Heavens, at the height of +three or four hundred Miles from the Earth, or above. For Mr. _Boyle_ +has shew'd that Air may be rarified above ten thousand times in Vessels +of Glass; and the Heavens are much emptier of Air than any _Vacuum_ we +can make below. 
For since the Air is compress'd by the Weight of the +incumbent Atmosphere, and the Density of Air is proportional to the +Force compressing it, it follows by Computation, that at the height of +about seven and a half _English_ Miles from the Earth, the Air is four +times rarer than at the Surface of the Earth; and at the height of 15 +Miles it is sixteen times rarer than that at the Surface of the Earth; +and at the height of 22-1/2, 30, or 38 Miles, it is respectively 64, +256, or 1024 times rarer, or thereabouts; and at the height of 76, 152, +228 Miles, it is about 1000000, 1000000000000, or 1000000000000000000 +times rarer; and so on. + +Heat promotes Fluidity very much by diminishing the Tenacity of Bodies. +It makes many Bodies fluid which are not fluid in cold, and increases +the Fluidity of tenacious Liquids, as of Oil, Balsam, and Honey, and +thereby decreases their Resistance. But it decreases not the Resistance +of Water considerably, as it would do if any considerable part of the +Resistance of Water arose from the Attrition or Tenacity of its Parts. +And therefore the Resistance of Water arises principally and almost +entirely from the _Vis inertiæ_ of its Matter; and by consequence, if +the Heavens were as dense as Water, they would not have much less +Resistance than Water; if as dense as Quick-silver, they would not have +much less Resistance than Quick-silver; if absolutely dense, or full of +Matter without any _Vacuum_, let the Matter be never so subtil and +fluid, they would have a greater Resistance than Quick-silver. A solid +Globe in such a Medium would lose above half its Motion in moving three +times the length of its Diameter, and a Globe not solid (such as are the +Planets,) would be retarded sooner. 
And therefore to make way for the +regular and lasting Motions of the Planets and Comets, it's necessary to +empty the Heavens of all Matter, except perhaps some very thin Vapours, +Steams, or Effluvia, arising from the Atmospheres of the Earth, Planets, +and Comets, and from such an exceedingly rare Æthereal Medium as we +described above. A dense Fluid can be of no use for explaining the +Phænomena of Nature, the Motions of the Planets and Comets being better +explain'd without it. It serves only to disturb and retard the Motions +of those great Bodies, and make the Frame of Nature languish: And in the +Pores of Bodies, it serves only to stop the vibrating Motions of their +Parts, wherein their Heat and Activity consists. And as it is of no use, +and hinders the Operations of Nature, and makes her languish, so there +is no evidence for its Existence, and therefore it ought to be rejected. +And if it be rejected, the Hypotheses that Light consists in Pression +or Motion, propagated through such a Medium, are rejected with it. + +And for rejecting such a Medium, we have the Authority of those the +oldest and most celebrated Philosophers of _Greece_ and _Phoenicia_, +who made a _Vacuum_, and Atoms, and the Gravity of Atoms, the first +Principles of their Philosophy; tacitly attributing Gravity to some +other Cause than dense Matter. Later Philosophers banish the +Consideration of such a Cause out of natural Philosophy, feigning +Hypotheses for explaining all things mechanically, and referring other +Causes to Metaphysicks: Whereas the main Business of natural Philosophy +is to argue from Phænomena without feigning Hypotheses, and to deduce +Causes from Effects, till we come to the very first Cause, which +certainly is not mechanical; and not only to unfold the Mechanism of the +World, but chiefly to resolve these and such like Questions. 
What is +there in places almost empty of Matter, and whence is it that the Sun +and Planets gravitate towards one another, without dense Matter between +them? Whence is it that Nature doth nothing in vain; and whence arises +all that Order and Beauty which we see in the World? To what end are +Comets, and whence is it that Planets move all one and the same way in +Orbs concentrick, while Comets move all manner of ways in Orbs very +excentrick; and what hinders the fix'd Stars from falling upon one +another? How came the Bodies of Animals to be contrived with so much +Art, and for what ends were their several Parts? Was the Eye contrived +without Skill in Opticks, and the Ear without Knowledge of Sounds? How +do the Motions of the Body follow from the Will, and whence is the +Instinct in Animals? Is not the Sensory of Animals that place to which +the sensitive Substance is present, and into which the sensible Species +of Things are carried through the Nerves and Brain, that there they may +be perceived by their immediate presence to that Substance? And these +things being rightly dispatch'd, does it not appear from Phænomena that +there is a Being incorporeal, living, intelligent, omnipresent, who in +infinite Space, as it were in his Sensory, sees the things themselves +intimately, and throughly perceives them, and comprehends them wholly by +their immediate presence to himself: Of which things the Images only +carried through the Organs of Sense into our little Sensoriums, are +there seen and beheld by that which in us perceives and thinks. And +though every true Step made in this Philosophy brings us not immediately +to the Knowledge of the first Cause, yet it brings us nearer to it, and +on that account is to be highly valued. + +_Qu._ 29. Are not the Rays of Light very small Bodies emitted from +shining Substances? For such Bodies will pass through uniform Mediums in +right Lines without bending into the Shadow, which is the Nature of the +Rays of Light. 
They will also be capable of several Properties, and be +able to conserve their Properties unchanged in passing through several +Mediums, which is another Condition of the Rays of Light. Pellucid +Substances act upon the Rays of Light at a distance in refracting, +reflecting, and inflecting them, and the Rays mutually agitate the Parts +of those Substances at a distance for heating them; and this Action and +Re-action at a distance very much resembles an attractive Force between +Bodies. If Refraction be perform'd by Attraction of the Rays, the Sines +of Incidence must be to the Sines of Refraction in a given Proportion, +as we shew'd in our Principles of Philosophy: And this Rule is true by +Experience. The Rays of Light in going out of Glass into a _Vacuum_, are +bent towards the Glass; and if they fall too obliquely on the _Vacuum_, +they are bent backwards into the Glass, and totally reflected; and this +Reflexion cannot be ascribed to the Resistance of an absolute _Vacuum_, +but must be caused by the Power of the Glass attracting the Rays at +their going out of it into the _Vacuum_, and bringing them back. For if +the farther Surface of the Glass be moisten'd with Water or clear Oil, +or liquid and clear Honey, the Rays which would otherwise be reflected +will go into the Water, Oil, or Honey; and therefore are not reflected +before they arrive at the farther Surface of the Glass, and begin to go +out of it. If they go out of it into the Water, Oil, or Honey, they go +on, because the Attraction of the Glass is almost balanced and rendered +ineffectual by the contrary Attraction of the Liquor. But if they go out +of it into a _Vacuum_ which has no Attraction to balance that of the +Glass, the Attraction of the Glass either bends and refracts them, or +brings them back and reflects them. 
And this is still more evident by +laying together two Prisms of Glass, or two Object-glasses of very long +Telescopes, the one plane, the other a little convex, and so compressing +them that they do not fully touch, nor are too far asunder. For the +Light which falls upon the farther Surface of the first Glass where the +Interval between the Glasses is not above the ten hundred thousandth +Part of an Inch, will go through that Surface, and through the Air or +_Vacuum_ between the Glasses, and enter into the second Glass, as was +explain'd in the first, fourth, and eighth Observations of the first +Part of the second Book. But, if the second Glass be taken away, the +Light which goes out of the second Surface of the first Glass into the +Air or _Vacuum_, will not go on forwards, but turns back into the first +Glass, and is reflected; and therefore it is drawn back by the Power of +the first Glass, there being nothing else to turn it back. Nothing more +is requisite for producing all the variety of Colours, and degrees of +Refrangibility, than that the Rays of Light be Bodies of different +Sizes, the least of which may take violet the weakest and darkest of the +Colours, and be more easily diverted by refracting Surfaces from the +right Course; and the rest as they are bigger and bigger, may make the +stronger and more lucid Colours, blue, green, yellow, and red, and be +more and more difficultly diverted. Nothing more is requisite for +putting the Rays of Light into Fits of easy Reflexion and easy +Transmission, than that they be small Bodies which by their attractive +Powers, or some other Force, stir up Vibrations in what they act upon, +which Vibrations being swifter than the Rays, overtake them +successively, and agitate them so as by turns to increase and decrease +their Velocities, and thereby put them into those Fits. 
And lastly, the +unusual Refraction of Island-Crystal looks very much as if it were +perform'd by some kind of attractive virtue lodged in certain Sides both +of the Rays, and of the Particles of the Crystal. For were it not for +some kind of Disposition or Virtue lodged in some Sides of the Particles +of the Crystal, and not in their other Sides, and which inclines and +bends the Rays towards the Coast of unusual Refraction, the Rays which +fall perpendicularly on the Crystal, would not be refracted towards that +Coast rather than towards any other Coast, both at their Incidence and +at their Emergence, so as to emerge perpendicularly by a contrary +Situation of the Coast of unusual Refraction at the second Surface; the +Crystal acting upon the Rays after they have pass'd through it, and are +emerging into the Air; or, if you please, into a _Vacuum_. And since the +Crystal by this Disposition or Virtue does not act upon the Rays, unless +when one of their Sides of unusual Refraction looks towards that Coast, +this argues a Virtue or Disposition in those Sides of the Rays, which +answers to, and sympathizes with that Virtue or Disposition of the +Crystal, as the Poles of two Magnets answer to one another. And as +Magnetism may be intended and remitted, and is found only in the Magnet +and in Iron: So this Virtue of refracting the perpendicular Rays is +greater in Island-Crystal, less in Crystal of the Rock, and is not yet +found in other Bodies. I do not say that this Virtue is magnetical: It +seems to be of another kind. I only say, that whatever it be, it's +difficult to conceive how the Rays of Light, unless they be Bodies, can +have a permanent Virtue in two of their Sides which is not in their +other Sides, and this without any regard to their Position to the Space +or Medium through which they pass. 
+ +What I mean in this Question by a _Vacuum_, and by the Attractions of +the Rays of Light towards Glass or Crystal, may be understood by what +was said in the 18th, 19th, and 20th Questions. + +_Quest._ 30. Are not gross Bodies and Light convertible into one +another, and may not Bodies receive much of their Activity from the +Particles of Light which enter their Composition? For all fix'd Bodies +being heated emit Light so long as they continue sufficiently hot, and +Light mutually stops in Bodies as often as its Rays strike upon their +Parts, as we shew'd above. I know no Body less apt to shine than Water; +and yet Water by frequent Distillations changes into fix'd Earth, as Mr. +_Boyle_ has try'd; and then this Earth being enabled to endure a +sufficient Heat, shines by Heat like other Bodies. + +The changing of Bodies into Light, and Light into Bodies, is very +conformable to the Course of Nature, which seems delighted with +Transmutations. Water, which is a very fluid tasteless Salt, she changes +by Heat into Vapour, which is a sort of Air, and by Cold into Ice, which +is a hard, pellucid, brittle, fusible Stone; and this Stone returns into +Water by Heat, and Vapour returns into Water by Cold. Earth by Heat +becomes Fire, and by Cold returns into Earth. Dense Bodies by +Fermentation rarify into several sorts of Air, and this Air by +Fermentation, and sometimes without it, returns into dense Bodies. +Mercury appears sometimes in the form of a fluid Metal, sometimes in the +form of a hard brittle Metal, sometimes in the form of a corrosive +pellucid Salt call'd Sublimate, sometimes in the form of a tasteless, +pellucid, volatile white Earth, call'd _Mercurius Dulcis_; or in that of +a red opake volatile Earth, call'd Cinnaber; or in that of a red or +white Precipitate, or in that of a fluid Salt; and in Distillation it +turns into Vapour, and being agitated _in Vacuo_, it shines like Fire. 
+And after all these Changes it returns again into its first form of +Mercury. Eggs grow from insensible Magnitudes, and change into Animals; +Tadpoles into Frogs; and Worms into Flies. All Birds, Beasts and Fishes, +Insects, Trees, and other Vegetables, with their several Parts, grow out +of Water and watry Tinctures and Salts, and by Putrefaction return again +into watry Substances. And Water standing a few Days in the open Air, +yields a Tincture, which (like that of Malt) by standing longer yields a +Sediment and a Spirit, but before Putrefaction is fit Nourishment for +Animals and Vegetables. And among such various and strange +Transmutations, why may not Nature change Bodies into Light, and Light +into Bodies? + +_Quest._ 31. Have not the small Particles of Bodies certain Powers, +Virtues, or Forces, by which they act at a distance, not only upon the +Rays of Light for reflecting, refracting, and inflecting them, but also +upon one another for producing a great Part of the Phænomena of Nature? +For it's well known, that Bodies act one upon another by the Attractions +of Gravity, Magnetism, and Electricity; and these Instances shew the +Tenor and Course of Nature, and make it not improbable but that there +may be more attractive Powers than these. For Nature is very consonant +and conformable to her self. How these Attractions may be perform'd, I +do not here consider. What I call Attraction may be perform'd by +impulse, or by some other means unknown to me. I use that Word here to +signify only in general any Force by which Bodies tend towards one +another, whatsoever be the Cause. For we must learn from the Phænomena +of Nature what Bodies attract one another, and what are the Laws and +Properties of the Attraction, before we enquire the Cause by which the +Attraction is perform'd. 
The Attractions of Gravity, Magnetism, and +Electricity, reach to very sensible distances, and so have been observed +by vulgar Eyes, and there may be others which reach to so small +distances as hitherto escape Observation; and perhaps electrical +Attraction may reach to such small distances, even without being excited +by Friction. + +For when Salt of Tartar runs _per Deliquium_, is not this done by an +Attraction between the Particles of the Salt of Tartar, and the +Particles of the Water which float in the Air in the form of Vapours? +And why does not common Salt, or Salt-petre, or Vitriol, run _per +Deliquium_, but for want of such an Attraction? Or why does not Salt of +Tartar draw more Water out of the Air than in a certain Proportion to +its quantity, but for want of an attractive Force after it is satiated +with Water? And whence is it but from this attractive Power that Water +which alone distils with a gentle luke-warm Heat, will not distil from +Salt of Tartar without a great Heat? And is it not from the like +attractive Power between the Particles of Oil of Vitriol and the +Particles of Water, that Oil of Vitriol draws to it a good quantity of +Water out of the Air, and after it is satiated draws no more, and in +Distillation lets go the Water very difficultly? And when Water and Oil +of Vitriol poured successively into the same Vessel grow very hot in the +mixing, does not this Heat argue a great Motion in the Parts of the +Liquors? And does not this Motion argue, that the Parts of the two +Liquors in mixing coalesce with Violence, and by consequence rush +towards one another with an accelerated Motion? 
And when _Aqua fortis_, +or Spirit of Vitriol poured upon Filings of Iron dissolves the Filings +with a great Heat and Ebullition, is not this Heat and Ebullition +effected by a violent Motion of the Parts, and does not that Motion +argue that the acid Parts of the Liquor rush towards the Parts of the +Metal with violence, and run forcibly into its Pores till they get +between its outmost Particles, and the main Mass of the Metal, and +surrounding those Particles loosen them from the main Mass, and set them +at liberty to float off into the Water? And when the acid Particles, +which alone would distil with an easy Heat, will not separate from the +Particles of the Metal without a very violent Heat, does not this +confirm the Attraction between them? + +When Spirit of Vitriol poured upon common Salt or Salt-petre makes an +Ebullition with the Salt, and unites with it, and in Distillation the +Spirit of the common Salt or Salt-petre comes over much easier than it +would do before, and the acid part of the Spirit of Vitriol stays +behind; does not this argue that the fix'd Alcaly of the Salt attracts +the acid Spirit of the Vitriol more strongly than its own Spirit, and +not being able to hold them both, lets go its own? And when Oil of +Vitriol is drawn off from its weight of Nitre, and from both the +Ingredients a compound Spirit of Nitre is distilled, and two parts of +this Spirit are poured on one part of Oil of Cloves or Carraway Seeds, +or of any ponderous Oil of vegetable or animal Substances, or Oil of +Turpentine thicken'd with a little Balsam of Sulphur, and the Liquors +grow so very hot in mixing, as presently to send up a burning Flame; +does not this very great and sudden Heat argue that the two Liquors mix +with violence, and that their Parts in mixing run towards one another +with an accelerated Motion, and clash with the greatest Force? 
And is it +not for the same reason that well rectified Spirit of Wine poured on the +same compound Spirit flashes; and that the _Pulvis fulminans_, composed +of Sulphur, Nitre, and Salt of Tartar, goes off with a more sudden and +violent Explosion than Gun-powder, the acid Spirits of the Sulphur and +Nitre rushing towards one another, and towards the Salt of Tartar, with +so great a violence, as by the shock to turn the whole at once into +Vapour and Flame? Where the Dissolution is slow, it makes a slow +Ebullition and a gentle Heat; and where it is quicker, it makes a +greater Ebullition with more heat; and where it is done at once, the +Ebullition is contracted into a sudden Blast or violent Explosion, with +a heat equal to that of Fire and Flame. So when a Drachm of the +above-mention'd compound Spirit of Nitre was poured upon half a Drachm +of Oil of Carraway Seeds _in vacuo_, the Mixture immediately made a +flash like Gun-powder, and burst the exhausted Receiver, which was a +Glass six Inches wide, and eight Inches deep. And even the gross Body of +Sulphur powder'd, and with an equal weight of Iron Filings and a little +Water made into Paste, acts upon the Iron, and in five or six hours +grows too hot to be touch'd, and emits a Flame. And by these Experiments +compared with the great quantity of Sulphur with which the Earth +abounds, and the warmth of the interior Parts of the Earth, and hot +Springs, and burning Mountains, and with Damps, mineral Coruscations, +Earthquakes, hot suffocating Exhalations, Hurricanes, and Spouts; we may +learn that sulphureous Steams abound in the Bowels of the Earth and +ferment with Minerals, and sometimes take fire with a sudden Coruscation +and Explosion; and if pent up in subterraneous Caverns, burst the +Caverns with a great shaking of the Earth, as in springing of a Mine. 
+And then the Vapour generated by the Explosion, expiring through the +Pores of the Earth, feels hot and suffocates, and makes Tempests and +Hurricanes, and sometimes causes the Land to slide, or the Sea to boil, +and carries up the Water thereof in Drops, which by their weight fall +down again in Spouts. Also some sulphureous Steams, at all times when +the Earth is dry, ascending into the Air, ferment there with nitrous +Acids, and sometimes taking fire cause Lightning and Thunder, and fiery +Meteors. For the Air abounds with acid Vapours fit to promote +Fermentations, as appears by the rusting of Iron and Copper in it, the +kindling of Fire by blowing, and the beating of the Heart by means of +Respiration. Now the above-mention'd Motions are so great and violent as +to shew that in Fermentations the Particles of Bodies which almost rest, +are put into new Motions by a very potent Principle, which acts upon +them only when they approach one another, and causes them to meet and +clash with great violence, and grow hot with the motion, and dash one +another into pieces, and vanish into Air, and Vapour, and Flame. + +When Salt of Tartar _per deliquium_, being poured into the Solution of +any Metal, precipitates the Metal and makes it fall down to the bottom +of the Liquor in the form of Mud: Does not this argue that the acid +Particles are attracted more strongly by the Salt of Tartar than by the +Metal, and by the stronger Attraction go from the Metal to the Salt of +Tartar? 
And so when a Solution of Iron in _Aqua fortis_ dissolves the +_Lapis Calaminaris_, and lets go the Iron, or a Solution of Copper +dissolves Iron immersed in it and lets go the Copper, or a Solution of +Silver dissolves Copper and lets go the Silver, or a Solution of Mercury +in _Aqua fortis_ being poured upon Iron, Copper, Tin, or Lead, dissolves +the Metal and lets go the Mercury; does not this argue that the acid +Particles of the _Aqua fortis_ are attracted more strongly by the _Lapis +Calaminaris_ than by Iron, and more strongly by Iron than by Copper, and +more strongly by Copper than by Silver, and more strongly by Iron, +Copper, Tin, and Lead, than by Mercury? And is it not for the same +reason that Iron requires more _Aqua fortis_ to dissolve it than Copper, +and Copper more than the other Metals; and that of all Metals, Iron is +dissolved most easily, and is most apt to rust; and next after Iron, +Copper? + +When Oil of Vitriol is mix'd with a little Water, or is run _per +deliquium_, and in Distillation the Water ascends difficultly, and +brings over with it some part of the Oil of Vitriol in the form of +Spirit of Vitriol, and this Spirit being poured upon Iron, Copper, or +Salt of Tartar, unites with the Body and lets go the Water; doth not +this shew that the acid Spirit is attracted by the Water, and more +attracted by the fix'd Body than by the Water, and therefore lets go the +Water to close with the fix'd Body? And is it not for the same reason +that the Water and acid Spirits which are mix'd together in Vinegar, +_Aqua fortis_, and Spirit of Salt, cohere and rise together in +Distillation; but if the _Menstruum_ be poured on Salt of Tartar, or on +Lead, or Iron, or any fix'd Body which it can dissolve, the Acid by a +stronger Attraction adheres to the Body, and lets go the Water? 
And is +it not also from a mutual Attraction that the Spirits of Soot and +Sea-Salt unite and compose the Particles of Sal-armoniac, which are less +volatile than before, because grosser and freer from Water; and that the +Particles of Sal-armoniac in Sublimation carry up the Particles of +Antimony, which will not sublime alone; and that the Particles of +Mercury uniting with the acid Particles of Spirit of Salt compose +Mercury sublimate, and with the Particles of Sulphur, compose Cinnaber; +and that the Particles of Spirit of Wine and Spirit of Urine well +rectified unite, and letting go the Water which dissolved them, compose +a consistent Body; and that in subliming Cinnaber from Salt of Tartar, +or from quick Lime, the Sulphur by a stronger Attraction of the Salt or +Lime lets go the Mercury, and stays with the fix'd Body; and that when +Mercury sublimate is sublimed from Antimony, or from Regulus of +Antimony, the Spirit of Salt lets go the Mercury, and unites with the +antimonial metal which attracts it more strongly, and stays with it till +the Heat be great enough to make them both ascend together, and then +carries up the Metal with it in the form of a very fusible Salt, called +Butter of Antimony, although the Spirit of Salt alone be almost as +volatile as Water, and the Antimony alone as fix'd as Lead? + +When _Aqua fortis_ dissolves Silver and not Gold, and _Aqua regia_ +dissolves Gold and not Silver, may it not be said that _Aqua fortis_ is +subtil enough to penetrate Gold as well as Silver, but wants the +attractive Force to give it Entrance; and that _Aqua regia_ is subtil +enough to penetrate Silver as well as Gold, but wants the attractive +Force to give it Entrance? For _Aqua regia_ is nothing else than _Aqua +fortis_ mix'd with some Spirit of Salt, or with Sal-armoniac; and even +common Salt dissolved in _Aqua fortis_, enables the _Menstruum_ to +dissolve Gold, though the Salt be a gross Body. 
When therefore Spirit of +Salt precipitates Silver out of _Aqua fortis_, is it not done by +attracting and mixing with the _Aqua fortis_, and not attracting, or +perhaps repelling Silver? And when Water precipitates Antimony out of +the Sublimate of Antimony and Sal-armoniac, or out of Butter of +Antimony, is it not done by its dissolving, mixing with, and weakening +the Sal-armoniac or Spirit of Salt, and its not attracting, or perhaps +repelling the Antimony? And is it not for want of an attractive virtue +between the Parts of Water and Oil, of Quick-silver and Antimony, of +Lead and Iron, that these Substances do not mix; and by a weak +Attraction, that Quick-silver and Copper mix difficultly; and from a +strong one, that Quick-silver and Tin, Antimony and Iron, Water and +Salts, mix readily? And in general, is it not from the same Principle +that Heat congregates homogeneal Bodies, and separates heterogeneal +ones? + +When Arsenick with Soap gives a Regulus, and with Mercury sublimate a +volatile fusible Salt, like Butter of Antimony, doth not this shew that +Arsenick, which is a Substance totally volatile, is compounded of fix'd +and volatile Parts, strongly cohering by a mutual Attraction, so that +the volatile will not ascend without carrying up the fixed? And so, when +an equal weight of Spirit of Wine and Oil of Vitriol are digested +together, and in Distillation yield two fragrant and volatile Spirits +which will not mix with one another, and a fix'd black Earth remains +behind; doth not this shew that Oil of Vitriol is composed of volatile +and fix'd Parts strongly united by Attraction, so as to ascend together +in form of a volatile, acid, fluid Salt, until the Spirit of Wine +attracts and separates the volatile Parts from the fixed? 
And therefore, +since Oil of Sulphur _per Campanam_ is of the same Nature with Oil of +Vitriol, may it not be inferred, that Sulphur is also a mixture of +volatile and fix'd Parts so strongly cohering by Attraction, as to +ascend together in Sublimation. By dissolving Flowers of Sulphur in Oil +of Turpentine, and distilling the Solution, it is found that Sulphur is +composed of an inflamable thick Oil or fat Bitumen, an acid Salt, a very +fix'd Earth, and a little Metal. The three first were found not much +unequal to one another, the fourth in so small a quantity as scarce to +be worth considering. The acid Salt dissolved in Water, is the same with +Oil of Sulphur _per Campanam_, and abounding much in the Bowels of the +Earth, and particularly in Markasites, unites it self to the other +Ingredients of the Markasite, which are, Bitumen, Iron, Copper, and +Earth, and with them compounds Allum, Vitriol, and Sulphur. With the +Earth alone it compounds Allum; with the Metal alone, or Metal and +Earth together, it compounds Vitriol; and with the Bitumen and Earth it +compounds Sulphur. Whence it comes to pass that Markasites abound with +those three Minerals. And is it not from the mutual Attraction of the +Ingredients that they stick together for compounding these Minerals, and +that the Bitumen carries up the other Ingredients of the Sulphur, which +without it would not sublime? And the same Question may be put +concerning all, or almost all the gross Bodies in Nature. For all the +Parts of Animals and Vegetables are composed of Substances volatile and +fix'd, fluid and solid, as appears by their Analysis; and so are Salts +and Minerals, so far as Chymists have been hitherto able to examine +their Composition. 
+ +When Mercury sublimate is re-sublimed with fresh Mercury, and becomes +_Mercurius Dulcis_, which is a white tasteless Earth scarce dissolvable +in Water, and _Mercurius Dulcis_ re-sublimed with Spirit of Salt returns +into Mercury sublimate; and when Metals corroded with a little acid turn +into rust, which is an Earth tasteless and indissolvable in Water, and +this Earth imbibed with more acid becomes a metallick Salt; and when +some Stones, as Spar of Lead, dissolved in proper _Menstruums_ become +Salts; do not these things shew that Salts are dry Earth and watry Acid +united by Attraction, and that the Earth will not become a Salt without +so much acid as makes it dissolvable in Water? Do not the sharp and +pungent Tastes of Acids arise from the strong Attraction whereby the +acid Particles rush upon and agitate the Particles of the Tongue? And +when Metals are dissolved in acid _Menstruums_, and the Acids in +conjunction with the Metal act after a different manner, so that the +Compound has a different Taste much milder than before, and sometimes a +sweet one; is it not because the Acids adhere to the metallick +Particles, and thereby lose much of their Activity? And if the Acid be +in too small a Proportion to make the Compound dissolvable in Water, +will it not by adhering strongly to the Metal become unactive and lose +its Taste, and the Compound be a tasteless Earth? For such things as are +not dissolvable by the Moisture of the Tongue, act not upon the Taste. + +As Gravity makes the Sea flow round the denser and weightier Parts of +the Globe of the Earth, so the Attraction may make the watry Acid flow +round the denser and compacter Particles of Earth for composing the +Particles of Salt. For otherwise the Acid would not do the Office of a +Medium between the Earth and common Water, for making Salts dissolvable +in the Water; nor would Salt of Tartar readily draw off the Acid from +dissolved Metals, nor Metals the Acid from Mercury. 
Now, as in the great +Globe of the Earth and Sea, the densest Bodies by their Gravity sink +down in Water, and always endeavour to go towards the Center of the +Globe; so in Particles of Salt, the densest Matter may always endeavour +to approach the Center of the Particle: So that a Particle of Salt may +be compared to a Chaos; being dense, hard, dry, and earthy in the +Center; and rare, soft, moist, and watry in the Circumference. And +hence it seems to be that Salts are of a lasting Nature, being scarce +destroy'd, unless by drawing away their watry Parts by violence, or by +letting them soak into the Pores of the central Earth by a gentle Heat +in Putrefaction, until the Earth be dissolved by the Water, and +separated into smaller Particles, which by reason of their Smallness +make the rotten Compound appear of a black Colour. Hence also it may be, +that the Parts of Animals and Vegetables preserve their several Forms, +and assimilate their Nourishment; the soft and moist Nourishment easily +changing its Texture by a gentle Heat and Motion, till it becomes like +the dense, hard, dry, and durable Earth in the Center of each Particle. +But when the Nourishment grows unfit to be assimilated, or the central +Earth grows too feeble to assimilate it, the Motion ends in Confusion, +Putrefaction, and Death. + +If a very small quantity of any Salt or Vitriol be dissolved in a great +quantity of Water, the Particles of the Salt or Vitriol will not sink to +the bottom, though they be heavier in Specie than the Water, but will +evenly diffuse themselves into all the Water, so as to make it as saline +at the top as at the bottom. And does not this imply that the Parts of +the Salt or Vitriol recede from one another, and endeavour to expand +themselves, and get as far asunder as the quantity of Water in which +they float, will allow? 
And does not this Endeavour imply that they have +a repulsive Force by which they fly from one another, or at least, that +they attract the Water more strongly than they do one another? For as +all things ascend in Water which are less attracted than Water, by the +gravitating Power of the Earth; so all the Particles of Salt which float +in Water, and are less attracted than Water by any one Particle of Salt, +must recede from that Particle, and give way to the more attracted +Water. + +When any saline Liquor is evaporated to a Cuticle and let cool, the Salt +concretes in regular Figures; which argues, that the Particles of the +Salt before they concreted, floated in the Liquor at equal distances in +rank and file, and by consequence that they acted upon one another by +some Power which at equal distances is equal, at unequal distances +unequal. For by such a Power they will range themselves uniformly, and +without it they will float irregularly, and come together as +irregularly. And since the Particles of Island-Crystal act all the same +way upon the Rays of Light for causing the unusual Refraction, may it +not be supposed that in the Formation of this Crystal, the Particles not +only ranged themselves in rank and file for concreting in regular +Figures, but also by some kind of polar Virtue turned their homogeneal +Sides the same way. + +The Parts of all homogeneal hard Bodies which fully touch one another, +stick together very strongly. And for explaining how this may be, some +have invented hooked Atoms, which is begging the Question; and others +tell us that Bodies are glued together by rest, that is, by an occult +Quality, or rather by nothing; and others, that they stick together by +conspiring Motions, that is, by relative rest amongst themselves. 
I had +rather infer from their Cohesion, that their Particles attract one +another by some Force, which in immediate Contact is exceeding strong, +at small distances performs the chymical Operations above-mention'd, and +reaches not far from the Particles with any sensible Effect. + +All Bodies seem to be composed of hard Particles: For otherwise Fluids +would not congeal; as Water, Oils, Vinegar, and Spirit or Oil of Vitriol +do by freezing; Mercury by Fumes of Lead; Spirit of Nitre and Mercury, +by dissolving the Mercury and evaporating the Flegm; Spirit of Wine and +Spirit of Urine, by deflegming and mixing them; and Spirit of Urine and +Spirit of Salt, by subliming them together to make Sal-armoniac. Even +the Rays of Light seem to be hard Bodies; for otherwise they would not +retain different Properties in their different Sides. And therefore +Hardness may be reckon'd the Property of all uncompounded Matter. At +least, this seems to be as evident as the universal Impenetrability of +Matter. For all Bodies, so far as Experience reaches, are either hard, +or may be harden'd; and we have no other Evidence of universal +Impenetrability, besides a large Experience without an experimental +Exception. Now if compound Bodies are so very hard as we find some of +them to be, and yet are very porous, and consist of Parts which are only +laid together; the simple Particles which are void of Pores, and were +never yet divided, must be much harder. For such hard Particles being +heaped up together, can scarce touch one another in more than a few +Points, and therefore must be separable by much less Force than is +requisite to break a solid Particle, whose Parts touch in all the Space +between them, without any Pores or Interstices to weaken their Cohesion. 
+And how such very hard Particles which are only laid together and touch +only in a few Points, can stick together, and that so firmly as they do, +without the assistance of something which causes them to be attracted or +press'd towards one another, is very difficult to conceive. + +The same thing I infer also from the cohering of two polish'd Marbles +_in vacuo_, and from the standing of Quick-silver in the Barometer at +the height of 50, 60 or 70 Inches, or above, when ever it is well-purged +of Air and carefully poured in, so that its Parts be every where +contiguous both to one another and to the Glass. The Atmosphere by its +weight presses the Quick-silver into the Glass, to the height of 29 or +30 Inches. And some other Agent raises it higher, not by pressing it +into the Glass, but by making its Parts stick to the Glass, and to one +another. For upon any discontinuation of Parts, made either by Bubbles +or by shaking the Glass, the whole Mercury falls down to the height of +29 or 30 Inches. + +And of the same kind with these Experiments are those that follow. If +two plane polish'd Plates of Glass (suppose two pieces of a polish'd +Looking-glass) be laid together, so that their sides be parallel and at +a very small distance from one another, and then their lower edges be +dipped into Water, the Water will rise up between them. And the less +the distance of the Glasses is, the greater will be the height to which +the Water will rise. If the distance be about the hundredth part of an +Inch, the Water will rise to the height of about an Inch; and if the +distance be greater or less in any Proportion, the height will be +reciprocally proportional to the distance very nearly. For the +attractive Force of the Glasses is the same, whether the distance +between them be greater or less; and the weight of the Water drawn up is +the same, if the height of it be reciprocally proportional to the +distance of the Glasses. 
And in like manner, Water ascends between two +Marbles polish'd plane, when their polish'd sides are parallel, and at a +very little distance from one another, And if slender Pipes of Glass be +dipped at one end into stagnating Water, the Water will rise up within +the Pipe, and the height to which it rises will be reciprocally +proportional to the Diameter of the Cavity of the Pipe, and will equal +the height to which it rises between two Planes of Glass, if the +Semi-diameter of the Cavity of the Pipe be equal to the distance between +the Planes, or thereabouts. And these Experiments succeed after the same +manner _in vacuo_ as in the open Air, (as hath been tried before the +Royal Society,) and therefore are not influenced by the Weight or +Pressure of the Atmosphere. + +And if a large Pipe of Glass be filled with sifted Ashes well pressed +together in the Glass, and one end of the Pipe be dipped into stagnating +Water, the Water will rise up slowly in the Ashes, so as in the space +of a Week or Fortnight to reach up within the Glass, to the height of 30 +or 40 Inches above the stagnating Water. And the Water rises up to this +height by the Action only of those Particles of the Ashes which are upon +the Surface of the elevated Water; the Particles which are within the +Water, attracting or repelling it as much downwards as upwards. And +therefore the Action of the Particles is very strong. But the Particles +of the Ashes being not so dense and close together as those of Glass, +their Action is not so strong as that of Glass, which keeps Quick-silver +suspended to the height of 60 or 70 Inches, and therefore acts with a +Force which would keep Water suspended to the height of above 60 Feet. + +By the same Principle, a Sponge sucks in Water, and the Glands in the +Bodies of Animals, according to their several Natures and Dispositions, +suck in various Juices from the Blood. 
+ +If two plane polish'd Plates of Glass three or four Inches broad, and +twenty or twenty five long, be laid one of them parallel to the Horizon, +the other upon the first, so as at one of their ends to touch one +another, and contain an Angle of about 10 or 15 Minutes, and the same be +first moisten'd on their inward sides with a clean Cloth dipp'd into Oil +of Oranges or Spirit of Turpentine, and a Drop or two of the Oil or +Spirit be let fall upon the lower Glass at the other; so soon as the +upper Glass is laid down upon the lower, so as to touch it at one end as +above, and to touch the Drop at the other end, making with the lower +Glass an Angle of about 10 or 15 Minutes; the Drop will begin to move +towards the Concourse of the Glasses, and will continue to move with an +accelerated Motion, till it arrives at that Concourse of the Glasses. +For the two Glasses attract the Drop, and make it run that way towards +which the Attractions incline. And if when the Drop is in motion you +lift up that end of the Glasses where they meet, and towards which the +Drop moves, the Drop will ascend between the Glasses, and therefore is +attracted. And as you lift up the Glasses more and more, the Drop will +ascend slower and slower, and at length rest, being then carried +downward by its Weight, as much as upwards by the Attraction. And by +this means you may know the Force by which the Drop is attracted at all +distances from the Concourse of the Glasses. + +Now by some Experiments of this kind, (made by Mr. _Hauksbee_) it has +been found that the Attraction is almost reciprocally in a duplicate +Proportion of the distance of the middle of the Drop from the Concourse +of the Glasses, _viz._ reciprocally in a simple Proportion, by reason of +the spreading of the Drop, and its touching each Glass in a larger +Surface; and again reciprocally in a simple Proportion, by reason of the +Attractions growing stronger within the same quantity of attracting +Surface. 
The Attraction therefore within the same quantity of attracting +Surface, is reciprocally as the distance between the Glasses. And +therefore where the distance is exceeding small, the Attraction must be +exceeding great. By the Table in the second Part of the second Book, +wherein the thicknesses of colour'd Plates of Water between two Glasses +are set down, the thickness of the Plate where it appears very black, is +three eighths of the ten hundred thousandth part of an Inch. And where +the Oil of Oranges between the Glasses is of this thickness, the +Attraction collected by the foregoing Rule, seems to be so strong, as +within a Circle of an Inch in diameter, to suffice to hold up a Weight +equal to that of a Cylinder of Water of an Inch in diameter, and two or +three Furlongs in length. And where it is of a less thickness the +Attraction may be proportionally greater, and continue to increase, +until the thickness do not exceed that of a single Particle of the Oil. +There are therefore Agents in Nature able to make the Particles of +Bodies stick together by very strong Attractions. And it is the Business +of experimental Philosophy to find them out. + +Now the smallest Particles of Matter may cohere by the strongest +Attractions, and compose bigger Particles of weaker Virtue; and many of +these may cohere and compose bigger Particles whose Virtue is still +weaker, and so on for divers Successions, until the Progression end in +the biggest Particles on which the Operations in Chymistry, and the +Colours of natural Bodies depend, and which by cohering compose Bodies +of a sensible Magnitude. If the Body is compact, and bends or yields +inward to Pression without any sliding of its Parts, it is hard and +elastick, returning to its Figure with a Force rising from the mutual +Attraction of its Parts. If the Parts slide upon one another, the Body +is malleable or soft. 
If they slip easily, and are of a fit Size to be +agitated by Heat, and the Heat is big enough to keep them in Agitation, +the Body is fluid; and if it be apt to stick to things, it is humid; and +the Drops of every fluid affect a round Figure by the mutual Attraction +of their Parts, as the Globe of the Earth and Sea affects a round Figure +by the mutual Attraction of its Parts by Gravity. + +Since Metals dissolved in Acids attract but a small quantity of the +Acid, their attractive Force can reach but to a small distance from +them. And as in Algebra, where affirmative Quantities vanish and cease, +there negative ones begin; so in Mechanicks, where Attraction ceases, +there a repulsive Virtue ought to succeed. And that there is such a +Virtue, seems to follow from the Reflexions and Inflexions of the Rays +of Light. For the Rays are repelled by Bodies in both these Cases, +without the immediate Contact of the reflecting or inflecting Body. It +seems also to follow from the Emission of Light; the Ray so soon as it +is shaken off from a shining Body by the vibrating Motion of the Parts +of the Body, and gets beyond the reach of Attraction, being driven away +with exceeding great Velocity. For that Force which is sufficient to +turn it back in Reflexion, may be sufficient to emit it. It seems also +to follow from the Production of Air and Vapour. The Particles when they +are shaken off from Bodies by Heat or Fermentation, so soon as they are +beyond the reach of the Attraction of the Body, receding from it, and +also from one another with great Strength, and keeping at a distance, +so as sometimes to take up above a Million of Times more space than they +did before in the form of a dense Body. Which vast Contraction and +Expansion seems unintelligible, by feigning the Particles of Air to be +springy and ramous, or rolled up like Hoops, or by any other means than +a repulsive Power. 
The Particles of Fluids which do not cohere too +strongly, and are of such a Smallness as renders them most susceptible +of those Agitations which keep Liquors in a Fluor, are most easily +separated and rarified into Vapour, and in the Language of the Chymists, +they are volatile, rarifying with an easy Heat, and condensing with +Cold. But those which are grosser, and so less susceptible of Agitation, +or cohere by a stronger Attraction, are not separated without a stronger +Heat, or perhaps not without Fermentation. And these last are the Bodies +which Chymists call fix'd, and being rarified by Fermentation, become +true permanent Air; those Particles receding from one another with the +greatest Force, and being most difficultly brought together, which upon +Contact cohere most strongly. And because the Particles of permanent Air +are grosser, and arise from denser Substances than those of Vapours, +thence it is that true Air is more ponderous than Vapour, and that a +moist Atmosphere is lighter than a dry one, quantity for quantity. From +the same repelling Power it seems to be that Flies walk upon the Water +without wetting their Feet; and that the Object-glasses of long +Telescopes lie upon one another without touching; and that dry Powders +are difficultly made to touch one another so as to stick together, +unless by melting them, or wetting them with Water, which by exhaling +may bring them together; and that two polish'd Marbles, which by +immediate Contact stick together, are difficultly brought so close +together as to stick. + +And thus Nature will be very conformable to her self and very simple, +performing all the great Motions of the heavenly Bodies by the +Attraction of Gravity which intercedes those Bodies, and almost all the +small ones of their Particles by some other attractive and repelling +Powers which intercede the Particles. 
The _Vis inertiæ_ is a passive +Principle by which Bodies persist in their Motion or Rest, receive +Motion in proportion to the Force impressing it, and resist as much as +they are resisted. By this Principle alone there never could have been +any Motion in the World. Some other Principle was necessary for putting +Bodies into Motion; and now they are in Motion, some other Principle is +necessary for conserving the Motion. For from the various Composition of +two Motions, 'tis very certain that there is not always the same +quantity of Motion in the World. For if two Globes joined by a slender +Rod, revolve about their common Center of Gravity with an uniform +Motion, while that Center moves on uniformly in a right Line drawn in +the Plane of their circular Motion; the Sum of the Motions of the two +Globes, as often as the Globes are in the right Line described by their +common Center of Gravity, will be bigger than the Sum of their Motions, +when they are in a Line perpendicular to that right Line. By this +Instance it appears that Motion may be got or lost. But by reason of the +Tenacity of Fluids, and Attrition of their Parts, and the Weakness of +Elasticity in Solids, Motion is much more apt to be lost than got, and +is always upon the Decay. For Bodies which are either absolutely hard, +or so soft as to be void of Elasticity, will not rebound from one +another. Impenetrability makes them only stop. If two equal Bodies meet +directly _in vacuo_, they will by the Laws of Motion stop where they +meet, and lose all their Motion, and remain in rest, unless they be +elastick, and receive new Motion from their Spring. If they have so much +Elasticity as suffices to make them re-bound with a quarter, or half, or +three quarters of the Force with which they come together, they will +lose three quarters, or half, or a quarter of their Motion. And this may +be try'd, by letting two equal Pendulums fall against one another from +equal heights. 
If the Pendulums be of Lead or soft Clay, they will lose +all or almost all their Motions: If of elastick Bodies they will lose +all but what they recover from their Elasticity. If it be said, that +they can lose no Motion but what they communicate to other Bodies, the +consequence is, that _in vacuo_ they can lose no Motion, but when they +meet they must go on and penetrate one another's Dimensions. If three +equal round Vessels be filled, the one with Water, the other with Oil, +the third with molten Pitch, and the Liquors be stirred about alike to +give them a vortical Motion; the Pitch by its Tenacity will lose its +Motion quickly, the Oil being less tenacious will keep it longer, and +the Water being less tenacious will keep it longest, but yet will lose +it in a short time. Whence it is easy to understand, that if many +contiguous Vortices of molten Pitch were each of them as large as those +which some suppose to revolve about the Sun and fix'd Stars, yet these +and all their Parts would, by their Tenacity and Stiffness, communicate +their Motion to one another till they all rested among themselves. +Vortices of Oil or Water, or some fluider Matter, might continue longer +in Motion; but unless the Matter were void of all Tenacity and Attrition +of Parts, and Communication of Motion, (which is not to be supposed,) +the Motion would constantly decay. 
Seeing therefore the variety of +Motion which we find in the World is always decreasing, there is a +necessity of conserving and recruiting it by active Principles, such as +are the cause of Gravity, by which Planets and Comets keep their Motions +in their Orbs, and Bodies acquire great Motion in falling; and the cause +of Fermentation, by which the Heart and Blood of Animals are kept in +perpetual Motion and Heat; the inward Parts of the Earth are constantly +warm'd, and in some places grow very hot; Bodies burn and shine, +Mountains take fire, the Caverns of the Earth are blown up, and the Sun +continues violently hot and lucid, and warms all things by his Light. +For we meet with very little Motion in the World, besides what is owing +to these active Principles. And if it were not for these Principles, the +Bodies of the Earth, Planets, Comets, Sun, and all things in them, +would grow cold and freeze, and become inactive Masses; and all +Putrefaction, Generation, Vegetation and Life would cease, and the +Planets and Comets would not remain in their Orbs. + +All these things being consider'd, it seems probable to me, that God in +the Beginning form'd Matter in solid, massy, hard, impenetrable, +moveable Particles, of such Sizes and Figures, and with such other +Properties, and in such Proportion to Space, as most conduced to the End +for which he form'd them; and that these primitive Particles being +Solids, are incomparably harder than any porous Bodies compounded of +them; even so very hard, as never to wear or break in pieces; no +ordinary Power being able to divide what God himself made one in the +first Creation. While the Particles continue entire, they may compose +Bodies of one and the same Nature and Texture in all Ages: But should +they wear away, or break in pieces, the Nature of Things depending on +them, would be changed. 
Water and Earth, composed of old worn Particles +and Fragments of Particles, would not be of the same Nature and Texture +now, with Water and Earth composed of entire Particles in the Beginning. +And therefore, that Nature may be lasting, the Changes of corporeal +Things are to be placed only in the various Separations and new +Associations and Motions of these permanent Particles; compound Bodies +being apt to break, not in the midst of solid Particles, but where those +Particles are laid together, and only touch in a few Points. + +It seems to me farther, that these Particles have not only a _Vis +inertiæ_, accompanied with such passive Laws of Motion as naturally +result from that Force, but also that they are moved by certain active +Principles, such as is that of Gravity, and that which causes +Fermentation, and the Cohesion of Bodies. These Principles I consider, +not as occult Qualities, supposed to result from the specifick Forms of +Things, but as general Laws of Nature, by which the Things themselves +are form'd; their Truth appearing to us by Phænomena, though their +Causes be not yet discover'd. For these are manifest Qualities, and +their Causes only are occult. And the _Aristotelians_ gave the Name of +occult Qualities, not to manifest Qualities, but to such Qualities only +as they supposed to lie hid in Bodies, and to be the unknown Causes of +manifest Effects: Such as would be the Causes of Gravity, and of +magnetick and electrick Attractions, and of Fermentations, if we should +suppose that these Forces or Actions arose from Qualities unknown to us, +and uncapable of being discovered and made manifest. Such occult +Qualities put a stop to the Improvement of natural Philosophy, and +therefore of late Years have been rejected. 
To tell us that every +Species of Things is endow'd with an occult specifick Quality by which +it acts and produces manifest Effects, is to tell us nothing: But to +derive two or three general Principles of Motion from Phænomena, and +afterwards to tell us how the Properties and Actions of all corporeal +Things follow from those manifest Principles, would be a very great step +in Philosophy, though the Causes of those Principles were not yet +discover'd: And therefore I scruple not to propose the Principles of +Motion above-mention'd, they being of very general Extent, and leave +their Causes to be found out. + +Now by the help of these Principles, all material Things seem to have +been composed of the hard and solid Particles above-mention'd, variously +associated in the first Creation by the Counsel of an intelligent Agent. +For it became him who created them to set them in order. And if he did +so, it's unphilosophical to seek for any other Origin of the World, or +to pretend that it might arise out of a Chaos by the mere Laws of +Nature; though being once form'd, it may continue by those Laws for many +Ages. For while Comets move in very excentrick Orbs in all manner of +Positions, blind Fate could never make all the Planets move one and the +same way in Orbs concentrick, some inconsiderable Irregularities +excepted, which may have risen from the mutual Actions of Comets and +Planets upon one another, and which will be apt to increase, till this +System wants a Reformation. Such a wonderful Uniformity in the Planetary +System must be allowed the Effect of Choice. 
And so must the Uniformity +in the Bodies of Animals, they having generally a right and a left side +shaped alike, and on either side of their Bodies two Legs behind, and +either two Arms, or two Legs, or two Wings before upon their Shoulders, +and between their Shoulders a Neck running down into a Back-bone, and a +Head upon it; and in the Head two Ears, two Eyes, a Nose, a Mouth, and +a Tongue, alike situated. Also the first Contrivance of those very +artificial Parts of Animals, the Eyes, Ears, Brain, Muscles, Heart, +Lungs, Midriff, Glands, Larynx, Hands, Wings, swimming Bladders, natural +Spectacles, and other Organs of Sense and Motion; and the Instinct of +Brutes and Insects, can be the effect of nothing else than the Wisdom +and Skill of a powerful ever-living Agent, who being in all Places, is +more able by his Will to move the Bodies within his boundless uniform +Sensorium, and thereby to form and reform the Parts of the Universe, +than we are by our Will to move the Parts of our own Bodies. And yet we +are not to consider the World as the Body of God, or the several Parts +thereof, as the Parts of God. He is an uniform Being, void of Organs, +Members or Parts, and they are his Creatures subordinate to him, and +subservient to his Will; and he is no more the Soul of them, than the +Soul of Man is the Soul of the Species of Things carried through the +Organs of Sense into the place of its Sensation, where it perceives them +by means of its immediate Presence, without the Intervention of any +third thing. The Organs of Sense are not for enabling the Soul to +perceive the Species of Things in its Sensorium, but only for conveying +them thither; and God has no need of such Organs, he being every where +present to the Things themselves. 
And since Space is divisible _in +infinitum_, and Matter is not necessarily in all places, it may be also +allow'd that God is able to create Particles of Matter of several Sizes +and Figures, and in several Proportions to Space, and perhaps of +different Densities and Forces, and thereby to vary the Laws of Nature, +and make Worlds of several sorts in several Parts of the Universe. At +least, I see nothing of Contradiction in all this. + +As in Mathematicks, so in Natural Philosophy, the Investigation of +difficult Things by the Method of Analysis, ought ever to precede the +Method of Composition. This Analysis consists in making Experiments and +Observations, and in drawing general Conclusions from them by Induction, +and admitting of no Objections against the Conclusions, but such as are +taken from Experiments, or other certain Truths. For Hypotheses are not +to be regarded in experimental Philosophy. And although the arguing from +Experiments and Observations by Induction be no Demonstration of general +Conclusions; yet it is the best way of arguing which the Nature of +Things admits of, and may be looked upon as so much the stronger, by how +much the Induction is more general. And if no Exception occur from +Phænomena, the Conclusion may be pronounced generally. But if at any +time afterwards any Exception shall occur from Experiments, it may then +begin to be pronounced with such Exceptions as occur. By this way of +Analysis we may proceed from Compounds to Ingredients, and from Motions +to the Forces producing them; and in general, from Effects to their +Causes, and from particular Causes to more general ones, till the +Argument end in the most general. This is the Method of Analysis: And +the Synthesis consists in assuming the Causes discover'd, and +establish'd as Principles, and by them explaining the Phænomena +proceeding from them, and proving the Explanations. 
+ +In the two first Books of these Opticks, I proceeded by this Analysis to +discover and prove the original Differences of the Rays of Light in +respect of Refrangibility, Reflexibility, and Colour, and their +alternate Fits of easy Reflexion and easy Transmission, and the +Properties of Bodies, both opake and pellucid, on which their Reflexions +and Colours depend. And these Discoveries being proved, may be assumed +in the Method of Composition for explaining the Phænomena arising from +them: An Instance of which Method I gave in the End of the first Book. +In this third Book I have only begun the Analysis of what remains to be +discover'd about Light and its Effects upon the Frame of Nature, hinting +several things about it, and leaving the Hints to be examin'd and +improv'd by the farther Experiments and Observations of such as are +inquisitive. And if natural Philosophy in all its Parts, by pursuing +this Method, shall at length be perfected, the Bounds of Moral +Philosophy will be also enlarged. For so far as we can know by natural +Philosophy what is the first Cause, what Power he has over us, and what +Benefits we receive from him, so far our Duty towards him, as well as +that towards one another, will appear to us by the Light of Nature. And +no doubt, if the Worship of false Gods had not blinded the Heathen, +their moral Philosophy would have gone farther than to the four +Cardinal Virtues; and instead of teaching the Transmigration of Souls, +and to worship the Sun and Moon, and dead Heroes, they would have taught +us to worship our true Author and Benefactor, as their Ancestors did +under the Government of _Noah_ and his Sons before they corrupted +themselves. 
\ No newline at end of file diff --git a/src/testing/benchmark.go b/src/testing/benchmark.go index 9c7b1be79e743..8dd8cbc17e267 100644 --- a/src/testing/benchmark.go +++ b/src/testing/benchmark.go @@ -10,15 +10,50 @@ import ( "internal/race" "os" "runtime" + "strconv" + "strings" "sync" "sync/atomic" "time" ) var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`") -var benchTime = flag.Duration("test.benchtime", 1*time.Second, "run each benchmark for duration `d`") +var benchTime = benchTimeFlag{d: 1 * time.Second} var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks") +func init() { + flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`") +} + +type benchTimeFlag struct { + d time.Duration + n int +} + +func (f *benchTimeFlag) String() string { + if f.n > 0 { + return fmt.Sprintf("%dx", f.n) + } + return time.Duration(f.d).String() +} + +func (f *benchTimeFlag) Set(s string) error { + if strings.HasSuffix(s, "x") { + n, err := strconv.ParseInt(s[:len(s)-1], 10, 0) + if err != nil || n <= 0 { + return fmt.Errorf("invalid count") + } + *f = benchTimeFlag{n: int(n)} + return nil + } + d, err := time.ParseDuration(s) + if err != nil || d <= 0 { + return fmt.Errorf("invalid duration") + } + *f = benchTimeFlag{d: d} + return nil +} + // Global lock to ensure only one benchmark runs at a time. var benchmarkLock sync.Mutex @@ -53,7 +88,7 @@ type B struct { previousN int // number of iterations in the previous run previousDuration time.Duration // total duration of the previous run benchFunc func(b *B) - benchTime time.Duration + benchTime benchTimeFlag bytes int64 missingBytes bool // one of the subbenchmarks does not have bytes set. timerOn bool @@ -195,7 +230,7 @@ func roundUp(n int) int { } } -// run1 runs the first iteration of benchFunc. It returns whether more +// run1 runs the first iteration of benchFunc. 
It reports whether more
// iterations of this benchmark should be run.
func (b *B) run1() bool {
 	if ctx := b.context; ctx != nil {
@@ -273,21 +308,25 @@ func (b *B) launch() {
 	}()
 
 	// Run the benchmark for at least the specified amount of time.
-	d := b.benchTime
-	for n := 1; !b.failed && b.duration < d && n < 1e9; {
-		last := n
-		// Predict required iterations.
-		n = int(d.Nanoseconds())
-		if nsop := b.nsPerOp(); nsop != 0 {
-			n /= int(nsop)
+	if b.benchTime.n > 0 {
+		b.runN(b.benchTime.n)
+	} else {
+		d := b.benchTime.d
+		for n := 1; !b.failed && b.duration < d && n < 1e9; {
+			last := n
+			// Predict required iterations.
+			n = int(d.Nanoseconds())
+			if nsop := b.nsPerOp(); nsop != 0 {
+				n /= int(nsop)
+			}
+			// Run more iterations than we think we'll need (1.2x).
+			// Don't grow too fast in case we had timing errors previously.
+			// Be sure to run at least one more than last time.
+			n = max(min(n+n/5, 100*last), last+1)
+			// Round up to something easy to read.
+			n = roundUp(n)
+			b.runN(n)
 		}
-		// Run more iterations than we think we'll need (1.2x).
-		// Don't grow too fast in case we had timing errors previously.
-		// Be sure to run at least one more than last time.
-		n = max(min(n+n/5, 100*last), last+1)
-		// Round up to something easy to read.
- n = roundUp(n) - b.runN(n) } b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes} } @@ -416,7 +455,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e b.Run(Benchmark.Name, Benchmark.F) } }, - benchTime: *benchTime, + benchTime: benchTime, context: ctx, } main.runN(1) @@ -653,7 +692,7 @@ func Benchmark(f func(b *B)) BenchmarkResult { w: discard{}, }, benchFunc: f, - benchTime: *benchTime, + benchTime: benchTime, } if b.run1() { b.run() diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go index 9af3909b3556e..5a6d51be592f9 100644 --- a/src/testing/sub_test.go +++ b/src/testing/sub_test.go @@ -17,7 +17,7 @@ import ( func init() { // Make benchmark tests run 10* faster. - *benchTime = 100 * time.Millisecond + benchTime.d = 100 * time.Millisecond } func TestTestContext(t *T) { @@ -411,6 +411,29 @@ func TestTRun(t *T) { ch <- true <-ch }, + }, { + desc: "log in finished sub test logs to parent", + ok: false, + output: ` + --- FAIL: log in finished sub test logs to parent (N.NNs) + sub_test.go:NNN: message2 + sub_test.go:NNN: message1 + sub_test.go:NNN: error`, + maxPar: 1, + f: func(t *T) { + ch := make(chan bool) + t.Run("sub", func(t2 *T) { + go func() { + <-ch + t2.Log("message1") + ch <- true + }() + }) + t.Log("message2") + ch <- true + <-ch + t.Errorf("error") + }, }} for _, tc := range testCases { ctx := newTestContext(tc.maxPar, newMatcher(regexp.MatchString, "", "")) @@ -570,7 +593,7 @@ func TestBRun(t *T) { chatty: tc.chatty, }, benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure. 
- benchTime: time.Microsecond, + benchTime: benchTimeFlag{d: 1 * time.Microsecond}, } root.runN(1) if ok != !tc.failed { @@ -594,8 +617,8 @@ func TestBRun(t *T) { func makeRegexp(s string) string { s = regexp.QuoteMeta(s) - s = strings.Replace(s, ":NNN:", `:\d\d\d:`, -1) - s = strings.Replace(s, "N\\.NNs", `\d*\.\d*s`, -1) + s = strings.ReplaceAll(s, ":NNN:", `:\d\d\d:`) + s = strings.ReplaceAll(s, "N\\.NNs", `\d*\.\d*s`) return s } @@ -683,6 +706,55 @@ func TestRacyOutput(t *T) { } } +// The late log message did not include the test name. Issue 29388. +func TestLogAfterComplete(t *T) { + ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "")) + var buf bytes.Buffer + t1 := &T{ + common: common{ + // Use a buffered channel so that tRunner can write + // to it although nothing is reading from it. + signal: make(chan bool, 1), + w: &buf, + }, + context: ctx, + } + + c1 := make(chan bool) + c2 := make(chan string) + tRunner(t1, func(t *T) { + t.Run("TestLateLog", func(t *T) { + go func() { + defer close(c2) + defer func() { + p := recover() + if p == nil { + c2 <- "subtest did not panic" + return + } + s, ok := p.(string) + if !ok { + c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p) + return + } + const want = "Log in goroutine after TestLateLog has completed" + if !strings.Contains(s, want) { + c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want) + } + }() + + <-c1 + t.Log("log after test") + }() + }) + }) + close(c1) + + if s := <-c2; s != "" { + t.Error(s) + } +} + func TestBenchmark(t *T) { res := Benchmark(func(b *B) { for i := 0; i < 5; i++ { diff --git a/src/testing/testing.go b/src/testing/testing.go index a552b363617c6..3068630e8a976 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -17,13 +17,13 @@ // package builds but will be included when the ``go test'' command is run. // For more detail, run ``go help test'' and ``go help testflag''. 
// -// Tests and benchmarks may be skipped if not applicable with a call to -// the Skip method of *T and *B: -// func TestTimeConsuming(t *testing.T) { -// if testing.Short() { -// t.Skip("skipping test in short mode.") +// A simple test function looks like this: +// +// func TestAbs(t *testing.T) { +// got := Abs(-1) +// if got != 1 { +// t.Errorf("Abs(-1) = %d; want 1", got) // } -// ... // } // // Benchmarks @@ -132,6 +132,18 @@ // example function, at least one other function, type, variable, or constant // declaration, and no test or benchmark functions. // +// Skipping +// +// Tests or benchmarks may be skipped at run time with a call to +// the Skip method of *T or *B: +// +// func TestTimeConsuming(t *testing.T) { +// if testing.Short() { +// t.Skip("skipping test in short mode.") +// } +// ... +// } +// // Subtests and Sub-benchmarks // // The Run methods of T and B allow defining subtests and sub-benchmarks, @@ -316,6 +328,13 @@ type common struct { // Short reports whether the -test.short flag is set. func Short() bool { + // Catch code that calls this from TestMain without first + // calling flag.Parse. This shouldn't really be a panic + if !flag.Parsed() { + fmt.Fprintf(os.Stderr, "testing: testing.Short called before flag.Parse\n") + os.Exit(2) + } + return *short } @@ -396,8 +415,8 @@ func (c *common) frameSkip(skip int) runtime.Frame { // decorate prefixes the string with the file and line of the call site // and inserts the final newline if needed and indentation spaces for formatting. // This function must be called with c.mu held. -func (c *common) decorate(s string) string { - frame := c.frameSkip(3) // decorate + log + public function. +func (c *common) decorate(s string, skip int) string { + frame := c.frameSkip(skip) file := frame.File line := frame.Line if file != "" { @@ -592,9 +611,28 @@ func (c *common) FailNow() { // log generates the output. It's always at the same stack depth. 
func (c *common) log(s string) { + c.logDepth(s, 3) // logDepth + log + public function +} + +// logDepth generates the output. At an arbitary stack depth +func (c *common) logDepth(s string, depth int) { c.mu.Lock() defer c.mu.Unlock() - c.output = append(c.output, c.decorate(s)...) + if !c.done { + c.output = append(c.output, c.decorate(s, depth+1)...) + } else { + // This test has already finished. Try and log this message + // with our parent. If we don't have a parent, panic. + for parent := c.parent; parent != nil; parent = parent.parent { + parent.mu.Lock() + defer parent.mu.Unlock() + if !parent.done { + parent.output = append(parent.output, parent.decorate(s, depth+1)...) + return + } + } + panic("Log in goroutine after " + c.name + " has completed") + } } // Log formats its arguments using default formatting, analogous to Println, diff --git a/src/text/scanner/scanner.go b/src/text/scanner/scanner.go index 4e76664dc0c81..893a4edbaf92f 100644 --- a/src/text/scanner/scanner.go +++ b/src/text/scanner/scanner.go @@ -384,6 +384,9 @@ func (s *Scanner) scanExponent(ch rune) rune { if ch == '-' || ch == '+' { ch = s.next() } + if !isDecimal(ch) { + s.error("illegal exponent") + } ch = s.scanMantissa(ch) } return ch diff --git a/src/text/scanner/scanner_test.go b/src/text/scanner/scanner_test.go index 9a6b72ef673b0..e26e816f51989 100644 --- a/src/text/scanner/scanner_test.go +++ b/src/text/scanner/scanner_test.go @@ -252,6 +252,14 @@ func checkTok(t *testing.T, s *Scanner, line int, got, want rune, text string) { } } +func checkTokErr(t *testing.T, s *Scanner, line int, want rune, text string) { + prevCount := s.ErrorCount + checkTok(t, s, line, s.Scan(), want, text) + if s.ErrorCount != prevCount+1 { + t.Fatalf("want error for %q", text) + } +} + func countNewlines(s string) int { n := 0 for _, ch := range s { @@ -282,6 +290,21 @@ func TestScan(t *testing.T) { testScan(t, GoTokens&^SkipComments) } +func TestIllegalExponent(t *testing.T) { + const src = "1.5e 1.5E 
1e+ 1e- 1.5z" + s := new(Scanner).Init(strings.NewReader(src)) + checkTokErr(t, s, 1, Float, "1.5e") + checkTokErr(t, s, 1, Float, "1.5E") + checkTokErr(t, s, 1, Float, "1e+") + checkTokErr(t, s, 1, Float, "1e-") + checkTok(t, s, 1, s.Scan(), Float, "1.5") + checkTok(t, s, 1, s.Scan(), Ident, "z") + checkTok(t, s, 1, s.Scan(), EOF, "") + if s.ErrorCount != 4 { + t.Errorf("%d errors, want 4", s.ErrorCount) + } +} + func TestPosition(t *testing.T) { src := makeSource("\t\t\t\t%s\n") s := new(Scanner).Init(src) @@ -475,6 +498,10 @@ func TestError(t *testing.T) { testError(t, `0x`, ":1:3", "illegal hexadecimal number", Int) testError(t, `0xg`, ":1:3", "illegal hexadecimal number", Int) testError(t, `'aa'`, ":1:4", "illegal char literal", Char) + testError(t, `1.5e`, ":1:5", "illegal exponent", Float) + testError(t, `1.5E`, ":1:5", "illegal exponent", Float) + testError(t, `1.5e+`, ":1:6", "illegal exponent", Float) + testError(t, `1.5e-`, ":1:6", "illegal exponent", Float) testError(t, `'`, ":1:2", "literal not terminated", Char) testError(t, `'`+"\n", ":1:2", "literal not terminated", Char) diff --git a/src/text/template/doc.go b/src/text/template/doc.go index 4b243067b0f3b..0179dec5c33de 100644 --- a/src/text/template/doc.go +++ b/src/text/template/doc.go @@ -142,7 +142,9 @@ An argument is a simple value, denoted by one of the following. - A boolean, string, character, integer, floating-point, imaginary or complex constant in Go syntax. These behave like Go's untyped - constants. + constants. Note that, as in Go, whether a large integer constant + overflows when assigned or passed to a function can depend on whether + the host machine's ints are 32 or 64 bits. - The keyword nil, representing an untyped Go nil. - The character '.' (period): . 
diff --git a/src/text/template/exec.go b/src/text/template/exec.go index 214f72d51b32a..c6ce657cf64ef 100644 --- a/src/text/template/exec.go +++ b/src/text/template/exec.go @@ -7,10 +7,10 @@ package template import ( "bytes" "fmt" + "internal/fmtsort" "io" "reflect" "runtime" - "sort" "strings" "text/template/parse" ) @@ -102,7 +102,7 @@ func (s *state) at(node parse.Node) { // doublePercent returns the string with %'s replaced by %%, if necessary, // so it can be used safely inside a Printf format string. func doublePercent(str string) string { - return strings.Replace(str, "%", "%%", -1) + return strings.ReplaceAll(str, "%", "%%") } // TODO: It would be nice if ExecError was more broken down, but @@ -362,8 +362,9 @@ func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { if val.Len() == 0 { break } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) + om := fmtsort.Sort(val) + for i, key := range om.Key { + oneIteration(key, om.Value[i]) } return case reflect.Chan: @@ -692,13 +693,13 @@ func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, a } argv[i] = s.validateType(final, t) } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { + v, err := safeCall(fun, argv) + // If we have an error that is not nil, stop execution and return that + // error to the caller. + if err != nil { s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) + s.errorf("error calling %s: %v", name, err) } - v := result[0] if v.Type() == reflectValueType { v = v.Interface().(reflect.Value) } @@ -958,29 +959,3 @@ func printableValue(v reflect.Value) (interface{}, bool) { } return v.Interface(), true } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
-func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Slice(v, func(i, j int) bool { - return v[i].Float() < v[j].Float() - }) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Slice(v, func(i, j int) bool { - return v[i].Int() < v[j].Int() - }) - case reflect.String: - sort.Slice(v, func(i, j int) bool { - return v[i].String() < v[j].String() - }) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Slice(v, func(i, j int) bool { - return v[i].Uint() < v[j].Uint() - }) - } - return v -} diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go index 6f40d80635fd0..bfd6d38bf42cf 100644 --- a/src/text/template/exec_test.go +++ b/src/text/template/exec_test.go @@ -74,6 +74,7 @@ type T struct { VariadicFuncInt func(int, ...string) string NilOKFunc func(*int) bool ErrFunc func() (string, error) + PanicFunc func() string // Template to test evaluation of templates. Tmpl *Template // Unexported field; cannot be accessed by template. @@ -156,6 +157,7 @@ var tVal = &T{ VariadicFuncInt: func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") }, NilOKFunc: func(s *int) bool { return s == nil }, ErrFunc: func() (string, error) { return "bla", nil }, + PanicFunc: func() string { panic("test panic") }, Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X } @@ -1279,6 +1281,7 @@ func TestBadFuncNames(t *testing.T) { } func testBadFuncName(name string, t *testing.T) { + t.Helper() defer func() { recover() }() @@ -1450,3 +1453,60 @@ func TestInterfaceValues(t *testing.T) { } } } + +// Check that panics during calls are recovered and returned as errors. 
+func TestExecutePanicDuringCall(t *testing.T) { + funcs := map[string]interface{}{ + "doPanic": func() string { + panic("custom panic string") + }, + } + tests := []struct { + name string + input string + data interface{} + wantErr string + }{ + { + "direct func call panics", + "{{doPanic}}", (*T)(nil), + `template: t:1:2: executing "t" at : error calling doPanic: custom panic string`, + }, + { + "indirect func call panics", + "{{call doPanic}}", (*T)(nil), + `template: t:1:7: executing "t" at : error calling doPanic: custom panic string`, + }, + { + "direct method call panics", + "{{.GetU}}", (*T)(nil), + `template: t:1:2: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`, + }, + { + "indirect method call panics", + "{{call .GetU}}", (*T)(nil), + `template: t:1:7: executing "t" at <.GetU>: error calling GetU: runtime error: invalid memory address or nil pointer dereference`, + }, + { + "func field call panics", + "{{call .PanicFunc}}", tVal, + `template: t:1:2: executing "t" at : error calling call: test panic`, + }, + } + for _, tc := range tests { + b := new(bytes.Buffer) + tmpl, err := New("t").Funcs(funcs).Parse(tc.input) + if err != nil { + t.Fatalf("parse error: %s", err) + } + err = tmpl.Execute(b, tc.data) + if err == nil { + t.Errorf("%s: expected error; got none", tc.name) + } else if !strings.Contains(err.Error(), tc.wantErr) { + if *debug { + fmt.Printf("%s: test execute error: %s\n", tc.name, err) + } + t.Errorf("%s: expected error:\n%s\ngot:\n%s", tc.name, tc.wantErr, err) + } + } +} diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go index abddfa1141b1f..72d3f666918d6 100644 --- a/src/text/template/funcs.go +++ b/src/text/template/funcs.go @@ -65,7 +65,7 @@ func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { func addValueFuncs(out map[string]reflect.Value, in FuncMap) { for name, fn := range in { if !goodName(name) { - panic(fmt.Errorf("function name %s is 
not a valid identifier", name)) + panic(fmt.Errorf("function name %q is not a valid identifier", name)) } v := reflect.ValueOf(fn) if v.Kind() != reflect.Func { @@ -275,11 +275,26 @@ func call(fn reflect.Value, args ...reflect.Value) (reflect.Value, error) { return reflect.Value{}, fmt.Errorf("arg %d: %s", i, err) } } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0], result[1].Interface().(error) + return safeCall(v, argv) +} + +// safeCall runs fun.Call(args), and returns the resulting value and error, if +// any. If the call panics, the panic value is returned as an error. +func safeCall(fun reflect.Value, args []reflect.Value) (val reflect.Value, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(error); ok { + err = e + } else { + err = fmt.Errorf("%v", r) + } + } + }() + ret := fun.Call(args) + if len(ret) == 2 && !ret[1].IsNil() { + return ret[0], ret[1].Interface().(error) } - return result[0], nil + return ret[0], nil } // Boolean logic. diff --git a/src/text/template/parse/lex.go b/src/text/template/parse/lex.go index fc259f351ed5f..94a676c579cb4 100644 --- a/src/text/template/parse/lex.go +++ b/src/text/template/parse/lex.go @@ -117,6 +117,7 @@ type lexer struct { items chan item // channel of scanned items parenDepth int // nesting depth of ( ) exprs line int // 1+number of newlines seen + startLine int // start line of this item } // next returns the next rune in the input. @@ -152,19 +153,16 @@ func (l *lexer) backup() { // emit passes an item back to the client. func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos], l.line} - // Some items contain text internally. If so, count their newlines. 
- switch t { - case itemText, itemRawString, itemLeftDelim, itemRightDelim: - l.line += strings.Count(l.input[l.start:l.pos], "\n") - } + l.items <- item{t, l.start, l.input[l.start:l.pos], l.startLine} l.start = l.pos + l.startLine = l.line } // ignore skips over the pending input before this point. func (l *lexer) ignore() { l.line += strings.Count(l.input[l.start:l.pos], "\n") l.start = l.pos + l.startLine = l.line } // accept consumes the next rune if it's from the valid set. @@ -186,7 +184,7 @@ func (l *lexer) acceptRun(valid string) { // errorf returns an error token and terminates the scan by passing // back a nil pointer that will be the next state, terminating l.nextItem. func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.line} + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...), l.startLine} return nil } @@ -218,6 +216,7 @@ func lex(name, input, left, right string) *lexer { rightDelim: right, items: make(chan item), line: 1, + startLine: 1, } go l.run() return l @@ -252,16 +251,17 @@ func lexText(l *lexer) stateFn { } l.pos -= trimLength if l.pos > l.start { + l.line += strings.Count(l.input[l.start:l.pos], "\n") l.emit(itemText) } l.pos += trimLength l.ignore() return lexLeftDelim - } else { - l.pos = Pos(len(l.input)) } + l.pos = Pos(len(l.input)) // Correctly reached EOF. if l.pos > l.start { + l.line += strings.Count(l.input[l.start:l.pos], "\n") l.emit(itemText) } l.emit(itemEOF) @@ -609,14 +609,10 @@ Loop: // lexRawQuote scans a raw quoted string. func lexRawQuote(l *lexer) stateFn { - startLine := l.line Loop: for { switch l.next() { case eof: - // Restore line number to location of opening quote. - // We will error out so it's ok just to overwrite the field. 
- l.line = startLine return l.errorf("unterminated raw quoted string") case '`': break Loop diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go index cb9b44e9da003..7c35b0ff3d891 100644 --- a/src/text/template/parse/parse.go +++ b/src/text/template/parse/parse.go @@ -148,9 +148,6 @@ func (t *Tree) ErrorContext(n Node) (location, context string) { } lineNum := 1 + strings.Count(text, "\n") context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context } @@ -383,46 +380,44 @@ func (t *Tree) action() (n Node) { // Pipeline: // declarations? command ('|' command)* func (t *Tree) pipeline(context string) (pipe *PipeNode) { - decl := false - var vars []*VariableNode token := t.peekNonSpace() - pos := token.pos + pipe = t.newPipeline(token.pos, token.line, nil) // Are there declarations or assignments? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. 
- tokenAfterVariable := t.peek() - next := t.peekNonSpace() - switch { - case next.typ == itemAssign, next.typ == itemDeclare, - next.typ == itemChar && next.val == ",": - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - vars = append(vars, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemDeclare { - decl = true - } - if next.typ == itemChar && next.val == "," { - if context == "range" && len(vars) < 2 { - continue - } - t.errorf("too many declarations in %s", context) +decls: + if v := t.peekNonSpace(); v.typ == itemVariable { + t.next() + // Since space is a token, we need 3-token look-ahead here in the worst case: + // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an + // argument variable rather than a declaration. So remember the token + // adjacent to the variable so we can push it back if necessary. + tokenAfterVariable := t.peek() + next := t.peekNonSpace() + switch { + case next.typ == itemAssign, next.typ == itemDeclare: + pipe.IsAssign = next.typ == itemAssign + t.nextNonSpace() + pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val)) + t.vars = append(t.vars, v.val) + case next.typ == itemChar && next.val == ",": + t.nextNonSpace() + pipe.Decl = append(pipe.Decl, t.newVariable(v.pos, v.val)) + t.vars = append(t.vars, v.val) + if context == "range" && len(pipe.Decl) < 2 { + switch t.peekNonSpace().typ { + case itemVariable, itemRightDelim, itemRightParen: + // second initialized variable in a range pipeline + goto decls + default: + t.errorf("range can only initialize variables") } - case tokenAfterVariable.typ == itemSpace: - t.backup3(v, tokenAfterVariable) - default: - t.backup2(v) } + t.errorf("too many declarations in %s", context) + case tokenAfterVariable.typ == itemSpace: + t.backup3(v, tokenAfterVariable) + default: + t.backup2(v) } - break } - pipe = t.newPipeline(pos, token.line, vars) - pipe.IsAssign = !decl for { switch token := t.nextNonSpace(); token.typ { case itemRightDelim, 
itemRightParen: diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go index c1f80c1326bc3..15cc65670adbb 100644 --- a/src/text/template/parse/parse_test.go +++ b/src/text/template/parse/parse_test.go @@ -447,18 +447,40 @@ var errorTests = []parseTest{ {"emptypipeline", `{{ ( ) }}`, hasError, `missing value for parenthesized pipeline`}, + {"multilinerawstring", + "{{ $v := `\n` }} {{", + hasError, `multilinerawstring:2: unexpected unclosed action`}, + {"rangeundefvar", + "{{range $k}}{{end}}", + hasError, `undefined variable`}, + {"rangeundefvars", + "{{range $k, $v}}{{end}}", + hasError, `undefined variable`}, + {"rangemissingvalue1", + "{{range $k,}}{{end}}", + hasError, `missing value for range`}, + {"rangemissingvalue2", + "{{range $k, $v := }}{{end}}", + hasError, `missing value for range`}, + {"rangenotvariable1", + "{{range $k, .}}{{end}}", + hasError, `range can only initialize variables`}, + {"rangenotvariable2", + "{{range $k, 123 := .}}{{end}}", + hasError, `range can only initialize variables`}, } func TestErrors(t *testing.T) { for _, test := range errorTests { - _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) - if err == nil { - t.Errorf("%q: expected error", test.name) - continue - } - if !strings.Contains(err.Error(), test.result) { - t.Errorf("%q: error %q does not contain %q", test.name, err, test.result) - } + t.Run(test.name, func(t *testing.T) { + _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) + if err == nil { + t.Fatalf("expected error %q, got nil", test.result) + } + if !strings.Contains(err.Error(), test.result) { + t.Fatalf("error %q does not contain %q", err, test.result) + } + }) } } diff --git a/src/time/example_test.go b/src/time/example_test.go index 494a41680253d..0fd325f2e4f02 100644 --- a/src/time/example_test.go +++ b/src/time/example_test.go @@ -132,7 +132,7 @@ func ExampleAfter() { select { case m := <-c: handle(m) - case <-time.After(5 * 
time.Minute): + case <-time.After(10 * time.Second): fmt.Println("timed out") } } @@ -144,7 +144,7 @@ func ExampleSleep() { func statusUpdate() string { return "" } func ExampleTick() { - c := time.Tick(1 * time.Minute) + c := time.Tick(5 * time.Second) for now := range c { fmt.Printf("%v %s\n", now, statusUpdate()) } @@ -429,6 +429,17 @@ func ExampleTime_Truncate() { // t.Truncate(10m0s) = 12:10:00 } +func ExampleLoadLocation() { + location, err := time.LoadLocation("America/Los_Angeles") + if err != nil { + panic(err) + } + + timeInUTC := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC) + fmt.Println(timeInUTC.In(location)) + // Output: 2018-08-30 05:00:00 -0700 PDT +} + func ExampleLocation() { // China doesn't have daylight saving. It uses a fixed 8 hour offset from UTC. secondsEastOfUTC := int((8 * time.Hour).Seconds()) diff --git a/src/time/format.go b/src/time/format.go index 237f28738b5d2..2adbbe07706d8 100644 --- a/src/time/format.go +++ b/src/time/format.go @@ -1120,7 +1120,8 @@ func parseTimeZone(value string) (length int, ok bool) { // Special Case 3: Some time zones are not named, but have +/-00 format if value[0] == '+' || value[0] == '-' { length = parseSignedOffset(value) - return length, true + ok := length > 0 // parseSignedOffset returns 0 in case of bad input + return length, ok } // How many upper-case letters are there? Need at least three, at most five. var nUpper int @@ -1152,7 +1153,7 @@ func parseTimeZone(value string) (length int, ok bool) { // parseGMT parses a GMT time zone. The input string is known to start "GMT". // The function checks whether that is followed by a sign and a number in the -// range -14 through 12 excluding zero. +// range -23 through +23 excluding zero. func parseGMT(value string) int { value = value[3:] if len(value) == 0 { @@ -1163,7 +1164,7 @@ func parseGMT(value string) int { } // parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04"). 
-// The function checks for a signed number in the range -14 through +12 excluding zero. +// The function checks for a signed number in the range -23 through +23 excluding zero. // Returns length of the found offset string or 0 otherwise func parseSignedOffset(value string) int { sign := value[0] @@ -1171,13 +1172,15 @@ func parseSignedOffset(value string) int { return 0 } x, rem, err := leadingInt(value[1:]) - if err != nil { + + // fail if nothing consumed by leadingInt + if err != nil || value[1:] == rem { return 0 } if sign == '-' { x = -x } - if x == 0 || x < -14 || 12 < x { + if x < -23 || 23 < x { return 0 } return len(value) - len(rem) diff --git a/src/time/format_test.go b/src/time/format_test.go index 68a4d3ddb0e89..db9d4f495ac93 100644 --- a/src/time/format_test.go +++ b/src/time/format_test.go @@ -416,7 +416,11 @@ var parseTimeZoneTests = []ParseTimeZoneTest{ {"gmt hi there", 0, false}, {"GMT hi there", 3, true}, {"GMT+12 hi there", 6, true}, - {"GMT+00 hi there", 3, true}, // 0 or 00 is not a legal offset. + {"GMT+00 hi there", 6, true}, + {"GMT+", 3, true}, + {"GMT+3", 5, true}, + {"GMT+a", 3, true}, + {"GMT+3a", 5, true}, {"GMT-5 hi there", 5, true}, {"GMT-51 hi there", 3, true}, {"ChST hi there", 4, true}, @@ -427,8 +431,19 @@ var parseTimeZoneTests = []ParseTimeZoneTest{ {"ESASTT hi", 0, false}, // run of upper-case letters too long. {"ESATY hi", 0, false}, // five letters must end in T. 
{"WITA hi", 4, true}, // Issue #18251 - {"+03 hi", 3, true}, // Issue #24071 - {"-04 hi", 3, true}, // Issue #24071 + // Issue #24071 + {"+03 hi", 3, true}, + {"-04 hi", 3, true}, + // Issue #26032 + {"+00", 3, true}, + {"-11", 3, true}, + {"-12", 3, true}, + {"-23", 3, true}, + {"-24", 0, false}, + {"+13", 3, true}, + {"+14", 3, true}, + {"+23", 3, true}, + {"+24", 0, false}, } func TestParseTimeZone(t *testing.T) { diff --git a/src/time/sleep.go b/src/time/sleep.go index b8c81b437c0c6..10edf6fe0e0dd 100644 --- a/src/time/sleep.go +++ b/src/time/sleep.go @@ -8,9 +8,6 @@ package time // A negative or zero duration causes Sleep to return immediately. func Sleep(d Duration) -// runtimeNano returns the current value of the runtime clock in nanoseconds. -func runtimeNano() int64 - // Interface to timers implemented in package runtime. // Must be in sync with ../runtime/time.go:/^type timer type runtimeTimer struct { diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index a31494d47b145..c97e6df3991ab 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -425,10 +425,6 @@ func TestOverflowSleep(t *testing.T) { // Test that a panic while deleting a timer does not leave // the timers mutex held, deadlocking a ticker.Stop in a defer. func TestIssue5745(t *testing.T) { - if runtime.GOOS == "darwin" && runtime.GOARCH == "arm" { - t.Skipf("skipping on %s/%s, see issue 10043", runtime.GOOS, runtime.GOARCH) - } - ticker := NewTicker(Hour) defer func() { // would deadlock here before the fix due to diff --git a/src/time/sys_unix.go b/src/time/sys_unix.go index e064e0046cb4e..f4756b18a629b 100644 --- a/src/time/sys_unix.go +++ b/src/time/sys_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris package time diff --git a/src/time/time.go b/src/time/time.go index 2374043ea3cc6..d0d780fd6ca83 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -75,7 +75,10 @@ // package time -import "errors" +import ( + "errors" + _ "unsafe" // for go:linkname +) // A Time represents an instant in time with nanosecond precision. // @@ -102,6 +105,10 @@ import "errors" // change the instant in time being denoted and therefore does not affect the // computations described in earlier paragraphs. // +// Representations of a Time value saved by the GobEncode, MarshalBinary, +// MarshalJSON, and MarshalText methods store the Time.Location's offset, but not +// the location name. They therefore lose information about Daylight Saving Time. +// // In addition to the required “wall clock” reading, a Time may contain an optional // reading of the current process's monotonic clock, to provide additional precision // for comparison or subtraction. @@ -908,13 +915,27 @@ func (t Time) Sub(u Time) Duration { // Since returns the time elapsed since t. // It is shorthand for time.Now().Sub(t). func Since(t Time) Duration { - return Now().Sub(t) + var now Time + if t.wall&hasMonotonic != 0 { + // Common case optimization: if t has monotomic time, then Sub will use only it. + now = Time{hasMonotonic, runtimeNano() - startNano, nil} + } else { + now = Now() + } + return now.Sub(t) } // Until returns the duration until t. // It is shorthand for t.Sub(time.Now()). func Until(t Time) Duration { - return t.Sub(Now()) + var now Time + if t.wall&hasMonotonic != 0 { + // Common case optimization: if t has monotomic time, then Sub will use only it. 
+ now = Time{hasMonotonic, runtimeNano() - startNano, nil} + } else { + now = Now() + } + return t.Sub(now) } // AddDate returns the time corresponding to adding the @@ -933,7 +954,7 @@ func (t Time) AddDate(years int, months int, days int) Time { const ( secondsPerMinute = 60 - secondsPerHour = 60 * 60 + secondsPerHour = 60 * secondsPerMinute secondsPerDay = 24 * secondsPerHour secondsPerWeek = 7 * secondsPerDay daysPer400Years = 365*400 + 97 @@ -1050,9 +1071,22 @@ func daysIn(m Month, year int) int { // Provided by package runtime. func now() (sec int64, nsec int32, mono int64) +// runtimeNano returns the current value of the runtime clock in nanoseconds. +//go:linkname runtimeNano runtime.nanotime +func runtimeNano() int64 + +// Monotonic times are reported as offsets from startNano. +// We initialize startNano to runtimeNano() - 1 so that on systems where +// monotonic time resolution is fairly low (e.g. Windows 2008 +// which appears to have a default resolution of 15ms), +// we avoid ever reporting a monotonic time of 0. +// (Callers may want to use 0 as "time not set".) +var startNano int64 = runtimeNano() - 1 + // Now returns the current local time. func Now() Time { sec, nsec, mono := now() + mono -= startNano sec += unixToInternal - minWall if uint64(sec)>>33 != 0 { return Time{uint64(nsec), sec + minWall, Local} @@ -1076,7 +1110,7 @@ func (t Time) Local() Time { return t } -// In returns a copy of t representating the same time instant, but +// In returns a copy of t representing the same time instant, but // with the copy's location information set to loc for display // purposes. // diff --git a/src/time/zoneinfo.go b/src/time/zoneinfo.go index d2bc642d81312..7dffbfad5e47d 100644 --- a/src/time/zoneinfo.go +++ b/src/time/zoneinfo.go @@ -205,7 +205,7 @@ func (l *Location) lookupFirstZone() int { return 0 } -// firstZoneUsed returns whether the first zone is used by some +// firstZoneUsed reports whether the first zone is used by some // transition. 
func (l *Location) firstZoneUsed() bool { for _, tx := range l.tx { @@ -288,14 +288,23 @@ func LoadLocation(name string) (*Location, error) { env, _ := syscall.Getenv("ZONEINFO") zoneinfo = &env }) + var firstErr error if *zoneinfo != "" { if zoneData, err := loadTzinfoFromDirOrZip(*zoneinfo, name); err == nil { if z, err := LoadLocationFromTZData(name, zoneData); err == nil { return z, nil } + firstErr = err + } else if err != syscall.ENOENT { + firstErr = err } } - return loadLocation(name, zoneSources) + if z, err := loadLocation(name, zoneSources); err == nil { + return z, nil + } else if firstErr == nil { + firstErr = err + } + return nil, firstErr } // containsDotDot reports whether s contains "..". diff --git a/src/time/zoneinfo_android.go b/src/time/zoneinfo_android.go index 65e0975ab02a1..237ff202f913f 100644 --- a/src/time/zoneinfo_android.go +++ b/src/time/zoneinfo_android.go @@ -11,6 +11,7 @@ package time import ( "errors" "runtime" + "syscall" ) var zoneSources = []string{ @@ -75,5 +76,5 @@ func androidLoadTzinfoFromTzdata(file, name string) ([]byte, error) { } return buf, nil } - return nil, errors.New("cannot find " + name + " in tzdata file " + file) + return nil, syscall.ENOENT } diff --git a/src/time/zoneinfo_js.go b/src/time/zoneinfo_js.go new file mode 100644 index 0000000000000..2d76a571f2178 --- /dev/null +++ b/src/time/zoneinfo_js.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build js,wasm + +package time + +import ( + "runtime" + "syscall/js" +) + +var zoneSources = []string{ + "/usr/share/zoneinfo/", + "/usr/share/lib/zoneinfo/", + "/usr/lib/locale/TZ/", + runtime.GOROOT() + "/lib/time/zoneinfo.zip", +} + +func initLocal() { + localLoc.name = "Local" + + z := zone{} + d := js.Global().Get("Date").New() + offset := d.Call("getTimezoneOffset").Int() * -1 + z.offset = offset * 60 + // According to https://tc39.github.io/ecma262/#sec-timezoneestring, + // the timezone name from (new Date()).toTimeString() is an implementation-dependent + // result, and in Google Chrome, it gives the fully expanded name rather than + // the abbreviation. + // Hence, we construct the name from the offset. + z.name = "UTC" + if offset < 0 { + z.name += "-" + offset *= -1 + } else { + z.name += "+" + } + z.name += itoa(offset / 60) + min := offset % 60 + if min != 0 { + z.name += ":" + itoa(min) + } + localLoc.zone = []zone{z} +} + +// itoa is like strconv.Itoa but only works for values of i in range [0,99]. +// It panics if i is out of range. 
+func itoa(i int) string { + if i < 10 { + return digits[i : i+1] + } + return smallsString[i*2 : i*2+2] +} + +const smallsString = "00010203040506070809" + + "10111213141516171819" + + "20212223242526272829" + + "30313233343536373839" + + "40414243444546474849" + + "50515253545556575859" + + "60616263646566676869" + + "70717273747576777879" + + "80818283848586878889" + + "90919293949596979899" +const digits = "0123456789" diff --git a/src/time/zoneinfo_read.go b/src/time/zoneinfo_read.go index 20f84f00671bf..d8d4070d5b51a 100644 --- a/src/time/zoneinfo_read.go +++ b/src/time/zoneinfo_read.go @@ -11,6 +11,7 @@ package time import ( "errors" + "runtime" "syscall" ) @@ -55,7 +56,7 @@ func (d *dataIO) big4() (n uint32, ok bool) { d.error = true return 0, false } - return uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3]), true + return uint32(p[3]) | uint32(p[2])<<8 | uint32(p[1])<<16 | uint32(p[0])<<24, true } func (d *dataIO) byte() (n byte, ok bool) { @@ -172,6 +173,14 @@ func LoadLocationFromTZData(name string, data []byte) (*Location, error) { return nil, badData } zone[i].name = byteString(abbrev[b:]) + if runtime.GOOS == "aix" && len(name) > 8 && (name[:8] == "Etc/GMT+" || name[:8] == "Etc/GMT-") { + // There is a bug with AIX 7.2 TL 0 with files in Etc, + // GMT+1 will return GMT-1 instead of GMT+1 or -01. + if name != "Etc/GMT+0" { + // GMT+0 is OK + zone[i].name = name[4:] + } + } } // Now the transition time info. 
@@ -262,7 +271,7 @@ func get2(b []byte) int { func loadTzinfoFromZip(zipfile, name string) ([]byte, error) { fd, err := open(zipfile) if err != nil { - return nil, errors.New("open " + zipfile + ": " + err.Error()) + return nil, err } defer closefd(fd) @@ -364,7 +373,7 @@ func loadTzinfoFromZip(zipfile, name string) ([]byte, error) { return buf, nil } - return nil, errors.New("cannot find " + name + " in zip file " + zipfile) + return nil, syscall.ENOENT } // loadTzinfoFromTzdata returns the time zone information of the time zone diff --git a/src/time/zoneinfo_test.go b/src/time/zoneinfo_test.go index 450f5aa114aaa..4458ba8e26e1a 100644 --- a/src/time/zoneinfo_test.go +++ b/src/time/zoneinfo_test.go @@ -5,6 +5,7 @@ package time_test import ( + "errors" "fmt" "os" "reflect" @@ -36,6 +37,16 @@ func TestEnvVarUsage(t *testing.T) { } } +func TestBadLocationErrMsg(t *testing.T) { + time.ResetZoneinfoForTesting() + loc := "Asia/SomethingNotExist" + want := errors.New("unknown time zone " + loc) + _, err := time.LoadLocation(loc) + if err.Error() != want.Error() { + t.Errorf("LoadLocation(%q) error = %v; want %v", loc, err, want) + } +} + func TestLoadLocationValidatesNames(t *testing.T) { time.ResetZoneinfoForTesting() const env = "ZONEINFO" diff --git a/src/time/zoneinfo_unix.go b/src/time/zoneinfo_unix.go index 682e24b56607a..d6bcabfb8083b 100644 --- a/src/time/zoneinfo_unix.go +++ b/src/time/zoneinfo_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,386 darwin,amd64 dragonfly freebsd js,wasm linux,!android nacl netbsd openbsd solaris +// +build aix darwin,386 darwin,amd64 dragonfly freebsd linux,!android nacl netbsd openbsd solaris // Parse "zoneinfo" time zone file. // This is a fairly standard file format used on OS X, Linux, BSD, Sun, and others. 
diff --git a/src/time/zoneinfo_windows_test.go b/src/time/zoneinfo_windows_test.go index d0f2a444fe8f4..f23d9dcecb83a 100644 --- a/src/time/zoneinfo_windows_test.go +++ b/src/time/zoneinfo_windows_test.go @@ -15,13 +15,6 @@ func testZoneAbbr(t *testing.T) { // discard nsec t1 = Date(t1.Year(), t1.Month(), t1.Day(), t1.Hour(), t1.Minute(), t1.Second(), 0, t1.Location()) - // Skip the test if we're in a timezone with no abbreviation. - // Format will fallback to the numeric abbreviation, and - // Parse(RFC1123, ..) will fail (see Issue 21183). - if tz := t1.Format("MST"); tz[0] == '-' || tz[0] == '+' { - t.Skip("No zone abbreviation") - } - t2, err := Parse(RFC1123, t1.Format(RFC1123)) if err != nil { t.Fatalf("Parse failed: %v", err) diff --git a/src/unicode/maketables.go b/src/unicode/maketables.go index b11b77c634418..a1f15869ea8d3 100644 --- a/src/unicode/maketables.go +++ b/src/unicode/maketables.go @@ -458,6 +458,39 @@ package unicode ` +var categoryMapping = map[string]string{ + "Lu": "Letter, uppercase", + "Ll": "Letter, lowercase", + "Lt": "Letter, titlecase", + "Lm": "Letter, modifier", + "Lo": "Letter, other", + "Mn": "Mark, nonspacing", + "Mc": "Mark, spacing combining", + "Me": "Mark, enclosing", + "Nd": "Number, decimal digit", + "Nl": "Number, letter", + "No": "Number, other", + "Pc": "Punctuation, connector", + "Pd": "Punctuation, dash", + "Ps": "Punctuation, open", + "Pe": "Punctuation, close", + "Pi": "Punctuation, initial quote", + "Pf": "Punctuation, final quote", + "Po": "Punctuation, other", + "Sm": "Symbol, math", + "Sc": "Symbol, currency", + "Sk": "Symbol, modifier", + "So": "Symbol, other", + "Zs": "Separator, space", + "Zl": "Separator, line", + "Zp": "Separator, paragraph", + "Cc": "Other, control", + "Cf": "Other, format", + "Cs": "Other, surrogate", + "Co": "Other, private use", + "Cn": "Other, not assigned", +} + func printCategories() { if *tablelist == "" { return @@ -528,9 +561,16 @@ func printCategories() { varDecl = "\tTitle = _Lt; 
// Title is the set of Unicode title case letters.\n" } if len(name) > 1 { - varDecl += fmt.Sprintf( - "\t%s = _%s; // %s is the set of Unicode characters in category %s.\n", - name, name, name, name) + desc, ok := categoryMapping[name] + if ok { + varDecl += fmt.Sprintf( + "\t%s = _%s; // %s is the set of Unicode characters in category %s (%s).\n", + name, name, name, name, desc) + } else { + varDecl += fmt.Sprintf( + "\t%s = _%s; // %s is the set of Unicode characters in category %s.\n", + name, name, name, name) + } } decl[ndecl] = varDecl ndecl++ diff --git a/src/unicode/tables.go b/src/unicode/tables.go index dd2f70b651958..ce85b128ca4dc 100644 --- a/src/unicode/tables.go +++ b/src/unicode/tables.go @@ -3380,53 +3380,53 @@ var _Zs = &RangeTable{ // These variables have type *RangeTable. var ( - Cc = _Cc // Cc is the set of Unicode characters in category Cc. - Cf = _Cf // Cf is the set of Unicode characters in category Cf. - Co = _Co // Co is the set of Unicode characters in category Co. - Cs = _Cs // Cs is the set of Unicode characters in category Cs. + Cc = _Cc // Cc is the set of Unicode characters in category Cc (Other, control). + Cf = _Cf // Cf is the set of Unicode characters in category Cf (Other, format). + Co = _Co // Co is the set of Unicode characters in category Co (Other, private use). + Cs = _Cs // Cs is the set of Unicode characters in category Cs (Other, surrogate). Digit = _Nd // Digit is the set of Unicode characters with the "decimal digit" property. - Nd = _Nd // Nd is the set of Unicode characters in category Nd. + Nd = _Nd // Nd is the set of Unicode characters in category Nd (Number, decimal digit). Letter = _L // Letter/L is the set of Unicode letters, category L. L = _L - Lm = _Lm // Lm is the set of Unicode characters in category Lm. - Lo = _Lo // Lo is the set of Unicode characters in category Lo. + Lm = _Lm // Lm is the set of Unicode characters in category Lm (Letter, modifier). 
+ Lo = _Lo // Lo is the set of Unicode characters in category Lo (Letter, other). Lower = _Ll // Lower is the set of Unicode lower case letters. - Ll = _Ll // Ll is the set of Unicode characters in category Ll. + Ll = _Ll // Ll is the set of Unicode characters in category Ll (Letter, lowercase). Mark = _M // Mark/M is the set of Unicode mark characters, category M. M = _M - Mc = _Mc // Mc is the set of Unicode characters in category Mc. - Me = _Me // Me is the set of Unicode characters in category Me. - Mn = _Mn // Mn is the set of Unicode characters in category Mn. - Nl = _Nl // Nl is the set of Unicode characters in category Nl. - No = _No // No is the set of Unicode characters in category No. + Mc = _Mc // Mc is the set of Unicode characters in category Mc (Mark, spacing combining). + Me = _Me // Me is the set of Unicode characters in category Me (Mark, enclosing). + Mn = _Mn // Mn is the set of Unicode characters in category Mn (Mark, nonspacing). + Nl = _Nl // Nl is the set of Unicode characters in category Nl (Number, letter). + No = _No // No is the set of Unicode characters in category No (Number, other). Number = _N // Number/N is the set of Unicode number characters, category N. N = _N Other = _C // Other/C is the set of Unicode control and special characters, category C. C = _C - Pc = _Pc // Pc is the set of Unicode characters in category Pc. - Pd = _Pd // Pd is the set of Unicode characters in category Pd. - Pe = _Pe // Pe is the set of Unicode characters in category Pe. - Pf = _Pf // Pf is the set of Unicode characters in category Pf. - Pi = _Pi // Pi is the set of Unicode characters in category Pi. - Po = _Po // Po is the set of Unicode characters in category Po. - Ps = _Ps // Ps is the set of Unicode characters in category Ps. + Pc = _Pc // Pc is the set of Unicode characters in category Pc (Punctuation, connector). + Pd = _Pd // Pd is the set of Unicode characters in category Pd (Punctuation, dash). 
+ Pe = _Pe // Pe is the set of Unicode characters in category Pe (Punctuation, close). + Pf = _Pf // Pf is the set of Unicode characters in category Pf (Punctuation, final quote). + Pi = _Pi // Pi is the set of Unicode characters in category Pi (Punctuation, initial quote). + Po = _Po // Po is the set of Unicode characters in category Po (Punctuation, other). + Ps = _Ps // Ps is the set of Unicode characters in category Ps (Punctuation, open). Punct = _P // Punct/P is the set of Unicode punctuation characters, category P. P = _P - Sc = _Sc // Sc is the set of Unicode characters in category Sc. - Sk = _Sk // Sk is the set of Unicode characters in category Sk. - Sm = _Sm // Sm is the set of Unicode characters in category Sm. - So = _So // So is the set of Unicode characters in category So. + Sc = _Sc // Sc is the set of Unicode characters in category Sc (Symbol, currency). + Sk = _Sk // Sk is the set of Unicode characters in category Sk (Symbol, modifier). + Sm = _Sm // Sm is the set of Unicode characters in category Sm (Symbol, math). + So = _So // So is the set of Unicode characters in category So (Symbol, other). Space = _Z // Space/Z is the set of Unicode space characters, category Z. Z = _Z Symbol = _S // Symbol/S is the set of Unicode symbol characters, category S. S = _S Title = _Lt // Title is the set of Unicode title case letters. - Lt = _Lt // Lt is the set of Unicode characters in category Lt. + Lt = _Lt // Lt is the set of Unicode characters in category Lt (Letter, titlecase). Upper = _Lu // Upper is the set of Unicode upper case letters. - Lu = _Lu // Lu is the set of Unicode characters in category Lu. - Zl = _Zl // Zl is the set of Unicode characters in category Zl. - Zp = _Zp // Zp is the set of Unicode characters in category Zp. - Zs = _Zs // Zs is the set of Unicode characters in category Zs. + Lu = _Lu // Lu is the set of Unicode characters in category Lu (Letter, uppercase). 
+ Zl = _Zl // Zl is the set of Unicode characters in category Zl (Separator, line). + Zp = _Zp // Zp is the set of Unicode characters in category Zp (Separator, paragraph). + Zs = _Zs // Zs is the set of Unicode characters in category Zs (Separator, space). ) // Generated by running diff --git a/src/unsafe/unsafe.go b/src/unsafe/unsafe.go index 00961cffa336a..272761d9363a4 100644 --- a/src/unsafe/unsafe.go +++ b/src/unsafe/unsafe.go @@ -99,6 +99,12 @@ type ArbitraryType int // u := uintptr(p) // p = unsafe.Pointer(u + offset) // +// Note that the pointer must point into an allocated object, so it may not be nil. +// +// // INVALID: conversion of nil pointer +// u := unsafe.Pointer(nil) +// p := unsafe.Pointer(uintptr(u) + offset) +// // (4) Conversion of a Pointer to a uintptr when calling syscall.Syscall. // // The Syscall functions in package syscall pass their uintptr arguments directly @@ -178,11 +184,13 @@ type Pointer *ArbitraryType // The size does not include any memory possibly referenced by x. // For instance, if x is a slice, Sizeof returns the size of the slice // descriptor, not the size of the memory referenced by the slice. +// The return value of Sizeof is a Go constant. func Sizeof(x ArbitraryType) uintptr // Offsetof returns the offset within the struct of the field represented by x, // which must be of the form structValue.field. In other words, it returns the // number of bytes between the start of the struct and the start of the field. +// The return value of Offsetof is a Go constant. func Offsetof(x ArbitraryType) uintptr // Alignof takes an expression x of any type and returns the required alignment @@ -193,4 +201,5 @@ func Offsetof(x ArbitraryType) uintptr // within that struct, then Alignof(s.f) will return the required alignment // of a field of that type within a struct. This case is the same as the // value returned by reflect.TypeOf(s.f).FieldAlign(). +// The return value of Alignof is a Go constant. 
func Alignof(x ArbitraryType) uintptr diff --git a/test/bench/garbage/tree.go b/test/bench/garbage/tree.go index 0a3ec234db803..524cfebc73f29 100644 --- a/test/bench/garbage/tree.go +++ b/test/bench/garbage/tree.go @@ -28,7 +28,7 @@ POSSIBILITY OF SUCH DAMAGE. */ /* The Computer Language Benchmarks Game - * http://shootout.alioth.debian.org/ + * https://benchmarksgame-team.pages.debian.net/benchmarksgame/ * * contributed by The Go Authors. * based on C program by Kevin Carson diff --git a/test/chan/powser1.go b/test/chan/powser1.go index 93862003fdbe2..5357eec50fd65 100644 --- a/test/chan/powser1.go +++ b/test/chan/powser1.go @@ -11,7 +11,7 @@ // coefficients. A denominator of zero signifies the end. // Original code in Newsqueak by Doug McIlroy. // See Squinting at Power Series by Doug McIlroy, -// http://www.cs.bell-labs.com/who/rsc/thread/squint.pdf +// https://swtch.com/~rsc/thread/squint.pdf package main diff --git a/test/chan/powser2.go b/test/chan/powser2.go index 8fa3b7e11cdf6..fb1fb8518efc7 100644 --- a/test/chan/powser2.go +++ b/test/chan/powser2.go @@ -15,7 +15,7 @@ // coefficients. A denominator of zero signifies the end. // Original code in Newsqueak by Doug McIlroy. 
// See Squinting at Power Series by Doug McIlroy, -// http://www.cs.bell-labs.com/who/rsc/thread/squint.pdf +// https://swtch.com/~rsc/thread/squint.pdf package main diff --git a/test/chancap.go b/test/chancap.go index 9675e38bdb1a7..8dce9247cd45e 100644 --- a/test/chancap.go +++ b/test/chancap.go @@ -42,8 +42,10 @@ func main() { shouldPanic("makechan: size out of range", func() { _ = make(T, n) }) shouldPanic("makechan: size out of range", func() { _ = make(T, int64(n)) }) if ptrSize == 8 { - var n2 int64 = 1 << 50 + // Test mem > maxAlloc + var n2 int64 = 1 << 59 shouldPanic("makechan: size out of range", func() { _ = make(T, int(n2)) }) + // Test elem.size*cap overflow n2 = 1<<63 - 1 shouldPanic("makechan: size out of range", func() { _ = make(T, int(n2)) }) } else { diff --git a/test/checkbce.go b/test/checkbce.go index 430dcf9cbc285..a8f060aa72eb6 100644 --- a/test/checkbce.go +++ b/test/checkbce.go @@ -1,4 +1,4 @@ -// +build amd64 +// +build amd64,!gcflags_noopt // errorcheck -0 -d=ssa/check_bce/debug=3 // Copyright 2016 The Go Authors. All rights reserved. @@ -10,6 +10,8 @@ package main +import "encoding/binary" + func f0(a []int) { a[0] = 1 // ERROR "Found IsInBounds$" a[0] = 1 @@ -142,6 +144,33 @@ func g4(a [100]int) { } } +func decode1(data []byte) (x uint64) { + for len(data) >= 32 { + x += binary.BigEndian.Uint64(data[:8]) + x += binary.BigEndian.Uint64(data[8:16]) + x += binary.BigEndian.Uint64(data[16:24]) + x += binary.BigEndian.Uint64(data[24:32]) + data = data[32:] + } + return x +} + +func decode2(data []byte) (x uint64) { + // TODO(rasky): this should behave like decode1 and compile to no + // boundchecks. We're currently not able to remove all of them. 
+ for len(data) >= 32 { + x += binary.BigEndian.Uint64(data) + data = data[8:] + x += binary.BigEndian.Uint64(data) // ERROR "Found IsInBounds$" + data = data[8:] + x += binary.BigEndian.Uint64(data) // ERROR "Found IsInBounds$" + data = data[8:] + x += binary.BigEndian.Uint64(data) // ERROR "Found IsInBounds$" + data = data[8:] + } + return x +} + //go:noinline func useInt(a int) { } diff --git a/test/closure3.dir/main.go b/test/closure3.dir/main.go index e382ad980b9e9..ae4bef79a67cf 100644 --- a/test/closure3.dir/main.go +++ b/test/closure3.dir/main.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Check correctness of various closure corner cases that +// Check correctness of various closure corner cases // that are expected to be inlined package main @@ -238,6 +238,8 @@ func main() { if c != 4 { ppanic("c != 4") } + for i := 0; i < 10; i++ { // prevent inlining + } }() }() if c != 4 { diff --git a/test/closure3.go b/test/closure3.go index 263d8fcb47419..37b548d6dcc69 100644 --- a/test/closure3.go +++ b/test/closure3.go @@ -4,7 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// Check correctness of various closure corner cases that +// Check correctness of various closure corner cases // that are expected to be inlined package ignored diff --git a/test/codegen/arithmetic.go b/test/codegen/arithmetic.go index f358020f5598a..e5671774edfff 100644 --- a/test/codegen/arithmetic.go +++ b/test/codegen/arithmetic.go @@ -14,11 +14,28 @@ package codegen // Subtraction // // ----------------- // -func SubMem(arr []int, b int) int { +var ef int +func SubMem(arr []int, b, c, d int) int { // 386:`SUBL\s[A-Z]+,\s8\([A-Z]+\)` + // amd64:`SUBQ\s[A-Z]+,\s16\([A-Z]+\)` arr[2] -= b // 386:`SUBL\s[A-Z]+,\s12\([A-Z]+\)` + // amd64:`SUBQ\s[A-Z]+,\s24\([A-Z]+\)` arr[3] -= b + // 386:`DECL\s16\([A-Z]+\)` + arr[4]-- + // 386:`ADDL\s[$]-20,\s20\([A-Z]+\)` + arr[5] -= 20 + // 386:`SUBL\s\([A-Z]+\)\([A-Z]+\*4\),\s[A-Z]+` + ef -= arr[b] + // 386:`SUBL\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+\*4\)` + arr[c] -= b + // 386:`ADDL\s[$]-15,\s\([A-Z]+\)\([A-Z]+\*4\)` + arr[d] -= 15 + // 386:`DECL\s\([A-Z]+\)\([A-Z]+\*4\)` + arr[b]-- + // amd64:`DECQ\s64\([A-Z]+\)` + arr[8]-- // 386:"SUBL\t4" // amd64:"SUBQ\t8" return arr[0] - arr[1] @@ -33,22 +50,37 @@ func Pow2Muls(n1, n2 int) (int, int) { // 386:"SHLL\t[$]5",-"IMULL" // arm:"SLL\t[$]5",-"MUL" // arm64:"LSL\t[$]5",-"MUL" + // ppc64:"SLD\t[$]5",-"MUL" + // ppc64le:"SLD\t[$]5",-"MUL" a := n1 * 32 // amd64:"SHLQ\t[$]6",-"IMULQ" // 386:"SHLL\t[$]6",-"IMULL" // arm:"SLL\t[$]6",-"MUL" - // arm64:"LSL\t[$]6",-"MUL" + // arm64:`NEG\sR[0-9]+<<6,\sR[0-9]+`,-`LSL`,-`MUL` + // ppc64:"SLD\t[$]6","NEG\\sR[0-9]+,\\sR[0-9]+",-"MUL" + // ppc64le:"SLD\t[$]6","NEG\\sR[0-9]+,\\sR[0-9]+",-"MUL" b := -64 * n2 return a, b } func Mul_96(n int) int { - // amd64:`SHLQ\t[$]5`,`LEAQ\t\(.*\)\(.*\*2\),` + // amd64:`SHLQ\t[$]5`,`LEAQ\t\(.*\)\(.*\*2\),`,-`IMULQ` + // 386:`SHLL\t[$]5`,`LEAL\t\(.*\)\(.*\*2\),`,-`IMULL` + // arm64:`LSL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` + // arm:`SLL\t[$]5`,`ADD\sR[0-9]+<<1,\sR[0-9]+`,-`MUL` return n * 96 } +func MulMemSrc(a 
[]uint32, b []float32) { + // 386:`IMULL\s4\([A-Z]+\),\s[A-Z]+` + a[0] *= a[1] + // 386/sse2:`MULSS\s4\([A-Z]+\),\sX[0-9]+` + // amd64:`MULSS\s4\([A-Z]+\),\sX[0-9]+` + b[0] *= b[1] +} + // Multiplications merging tests func MergeMuls1(n int) int { @@ -85,17 +117,27 @@ func MergeMuls5(a, n int) int { // Division // // -------------- // +func DivMemSrc(a []float64) { + // 386/sse2:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` + // amd64:`DIVSD\s8\([A-Z]+\),\sX[0-9]+` + a[0] /= a[1] +} + func Pow2Divs(n1 uint, n2 int) (uint, int) { // 386:"SHRL\t[$]5",-"DIVL" // amd64:"SHRQ\t[$]5",-"DIVQ" // arm:"SRL\t[$]5",-".*udiv" // arm64:"LSR\t[$]5",-"UDIV" + // ppc64:"SRD" + // ppc64le:"SRD" a := n1 / 32 // unsigned // amd64:"SARQ\t[$]6",-"IDIVQ" // 386:"SARL\t[$]6",-"IDIVL" // arm:"SRA\t[$]6",-".*udiv" // arm64:"ASR\t[$]6",-"SDIV" + // ppc64:"SRAD" + // ppc64le:"SRAD" b := n2 / 64 // signed return a, b @@ -104,19 +146,33 @@ func Pow2Divs(n1 uint, n2 int) (uint, int) { // Check that constant divisions get turned into MULs func ConstDivs(n1 uint, n2 int) (uint, int) { // amd64:"MOVQ\t[$]-1085102592571150095","MULQ",-"DIVQ" + // 386:"MOVL\t[$]-252645135","MULL",-"DIVL" + // arm64:`MOVD`,`UMULH`,-`DIV` + // arm:`MOVW`,`MUL`,-`.*udiv` a := n1 / 17 // unsigned // amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ" + // 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL" + // arm64:`MOVD`,`SMULH`,-`DIV` + // arm:`MOVW`,`MUL`,-`.*udiv` b := n2 / 17 // signed return a, b } +func FloatDivs(a []float32) float32 { + // amd64:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` + // 386/sse2:`DIVSS\s8\([A-Z]+\),\sX[0-9]+` + return a[1] / a[2] +} + func Pow2Mods(n1 uint, n2 int) (uint, int) { // 386:"ANDL\t[$]31",-"DIVL" // amd64:"ANDQ\t[$]31",-"DIVQ" // arm:"AND\t[$]31",-".*udiv" // arm64:"AND\t[$]31",-"UDIV" + // ppc64:"ANDCC\t[$]31" + // ppc64le:"ANDCC\t[$]31" a := n1 % 32 // unsigned // 386:-"IDIVL" @@ -131,50 +187,164 @@ func Pow2Mods(n1 uint, n2 int) (uint, int) { // Check that constant modulo divs get turned into MULs func 
ConstMods(n1 uint, n2 int) (uint, int) { // amd64:"MOVQ\t[$]-1085102592571150095","MULQ",-"DIVQ" + // 386:"MOVL\t[$]-252645135","MULL",-"DIVL" + // arm64:`MOVD`,`UMULH`,-`DIV` + // arm:`MOVW`,`MUL`,-`.*udiv` a := n1 % 17 // unsigned // amd64:"MOVQ\t[$]-1085102592571150095","IMULQ",-"IDIVQ" + // 386:"MOVL\t[$]-252645135","IMULL",-"IDIVL" + // arm64:`MOVD`,`SMULH`,-`DIV` + // arm:`MOVW`,`MUL`,-`.*udiv` b := n2 % 17 // signed return a, b } +// Check that fix-up code is not generated for divisions where it has been proven that +// that the divisor is not -1 or that the dividend is > MinIntNN. +func NoFix64A(divr int64) (int64, int64) { + var d int64 = 42 + var e int64 = 84 + if divr > 5 { + d /= divr // amd64:-"JMP" + e %= divr // amd64:-"JMP" + } + return d, e +} + +func NoFix64B(divd int64) (int64, int64) { + var d int64 + var e int64 + var divr int64 = -1 + if divd > -9223372036854775808 { + d = divd / divr // amd64:-"JMP" + e = divd % divr // amd64:-"JMP" + } + return d, e +} + +func NoFix32A(divr int32) (int32, int32) { + var d int32 = 42 + var e int32 = 84 + if divr > 5 { + // amd64:-"JMP" + // 386:-"JMP" + d /= divr + // amd64:-"JMP" + // 386:-"JMP" + e %= divr + } + return d, e +} + +func NoFix32B(divd int32) (int32, int32) { + var d int32 + var e int32 + var divr int32 = -1 + if divd > -2147483648 { + // amd64:-"JMP" + // 386:-"JMP" + d = divd / divr + // amd64:-"JMP" + // 386:-"JMP" + e = divd % divr + } + return d, e +} + +func NoFix16A(divr int16) (int16, int16) { + var d int16 = 42 + var e int16 = 84 + if divr > 5 { + // amd64:-"JMP" + // 386:-"JMP" + d /= divr + // amd64:-"JMP" + // 386:-"JMP" + e %= divr + } + return d, e +} + +func NoFix16B(divd int16) (int16, int16) { + var d int16 + var e int16 + var divr int16 = -1 + if divd > -32768 { + // amd64:-"JMP" + // 386:-"JMP" + d = divd / divr + // amd64:-"JMP" + // 386:-"JMP" + e = divd % divr + } + return d, e +} + // Check that len() and cap() calls divided by powers of two are // optimized into shifts 
and ands func LenDiv1(a []int) int { // 386:"SHRL\t[$]10" // amd64:"SHRQ\t[$]10" + // arm64:"LSR\t[$]10",-"SDIV" + // arm:"SRL\t[$]10",-".*udiv" + // ppc64:"SRD"\t[$]10" + // ppc64le:"SRD"\t[$]10" return len(a) / 1024 } func LenDiv2(s string) int { // 386:"SHRL\t[$]11" // amd64:"SHRQ\t[$]11" + // arm64:"LSR\t[$]11",-"SDIV" + // arm:"SRL\t[$]11",-".*udiv" + // ppc64:"SRD\t[$]11" + // ppc64le:"SRD\t[$]11" return len(s) / (4097 >> 1) } func LenMod1(a []int) int { // 386:"ANDL\t[$]1023" // amd64:"ANDQ\t[$]1023" + // arm64:"AND\t[$]1023",-"SDIV" + // arm/6:"AND",-".*udiv" + // arm/7:"BFC",-".*udiv",-"AND" + // ppc64:"ANDCC\t[$]1023" + // ppc64le:"ANDCC\t[$]1023" return len(a) % 1024 } func LenMod2(s string) int { // 386:"ANDL\t[$]2047" // amd64:"ANDQ\t[$]2047" + // arm64:"AND\t[$]2047",-"SDIV" + // arm/6:"AND",-".*udiv" + // arm/7:"BFC",-".*udiv",-"AND" + // ppc64:"ANDCC\t[$]2047" + // ppc64le:"ANDCC\t[$]2047" return len(s) % (4097 >> 1) } func CapDiv(a []int) int { // 386:"SHRL\t[$]12" // amd64:"SHRQ\t[$]12" + // arm64:"LSR\t[$]12",-"SDIV" + // arm:"SRL\t[$]12",-".*udiv" + // ppc64:"SRD\t[$]12" + // ppc64le:"SRD\t[$]12" return cap(a) / ((1 << 11) + 2048) } func CapMod(a []int) int { // 386:"ANDL\t[$]4095" // amd64:"ANDQ\t[$]4095" + // arm64:"AND\t[$]4095",-"SDIV" + // arm/6:"AND",-".*udiv" + // arm/7:"BFC",-".*udiv",-"AND" + // ppc64:"ANDCC\t[$]4095" + // ppc64le:"ANDCC\t[$]4095" return cap(a) % ((1 << 11) + 2048) } @@ -182,3 +352,31 @@ func AddMul(x int) int { // amd64:"LEAQ\t1" return 2*x + 1 } + +func MULA(a, b, c uint32) (uint32, uint32, uint32) { + // arm:`MULA`,-`MUL\s` + // arm64:`MADDW`,-`MULW` + r0 := a*b + c + // arm:`MULA`,-`MUL\s` + // arm64:`MADDW`,-`MULW` + r1 := c*79 + a + // arm:`ADD`,-`MULA`,-`MUL\s` + // arm64:`ADD`,-`MADD`,-`MULW` + r2 := b*64 + c + return r0, r1, r2 +} + +func MULS(a, b, c uint32) (uint32, uint32, uint32) { + // arm/7:`MULS`,-`MUL\s` + // arm/6:`SUB`,`MUL\s`,-`MULS` + // arm64:`MSUBW`,-`MULW` + r0 := c - a*b + // 
arm/7:`MULS`,-`MUL\s` + // arm/6:`SUB`,`MUL\s`,-`MULS` + // arm64:`MSUBW`,-`MULW` + r1 := a - c*79 + // arm/7:`SUB`,-`MULS`,-`MUL\s` + // arm64:`SUB`,-`MSUBW`,-`MULW` + r2 := c - b*64 + return r0, r1, r2 +} diff --git a/test/codegen/bits.go b/test/codegen/bits.go index 9de2201cb1b2a..65d57c8f9f078 100644 --- a/test/codegen/bits.go +++ b/test/codegen/bits.go @@ -262,6 +262,22 @@ func bitcompl32(a, b uint32) (n uint32) { return n } +// check direct operation on memory with constant source +func bitOpOnMem(a []uint32) { + // amd64:`ANDL\s[$]200,\s\([A-Z]+\)` + a[0] &= 200 + // amd64:`ORL\s[$]220,\s4\([A-Z]+\)` + a[1] |= 220 + // amd64:`XORL\s[$]240,\s8\([A-Z]+\)` + a[2] ^= 240 + // amd64:`BTRL\s[$]15,\s12\([A-Z]+\)`,-`ANDL` + a[3] &= 0xffff7fff + // amd64:`BTSL\s[$]14,\s16\([A-Z]+\)`,-`ORL` + a[4] |= 0x4000 + // amd64:`BTCL\s[$]13,\s20\([A-Z]+\)`,-`XORL` + a[5] ^= 0x2000 +} + // Check AND masking on arm64 (Issue #19857) func and_mask_1(a uint64) uint64 { @@ -274,6 +290,14 @@ func and_mask_2(a uint64) uint64 { return a & (1 << 63) } +func and_mask_3(a, b uint32) (uint32, uint32) { + // arm/7:`BIC`,-`AND` + a &= 0xffffaaaa + // arm/7:`BFC`,-`AND`,-`BIC` + b &= 0xffc003ff + return a, b +} + // Check generation of arm64 BIC/EON/ORN instructions func op_bic(x, y uint32) uint32 { diff --git a/test/codegen/comparisons.go b/test/codegen/comparisons.go index 2f010bcbaefc6..f14918e9dfdda 100644 --- a/test/codegen/comparisons.go +++ b/test/codegen/comparisons.go @@ -36,6 +36,7 @@ func CompareString2(s string) bool { func CompareString3(s string) bool { // amd64:`CMPQ\t\(.*\), [A-Z]` // arm64:-`CMPW\t` + // ppc64:-`CMPW\t` // ppc64le:-`CMPW\t` // s390x:-`CMPW\t` return s == "xxxxxxxx" @@ -122,6 +123,16 @@ func CmpMem5(p **int) { *p = nil } +func CmpMem6(a []int) int { + // 386:`CMPL\s8\([A-Z]+\),` + // amd64:`CMPQ\s16\([A-Z]+\),` + if a[1] > a[2] { + return 1 + } else { + return 2 + } +} + // Check tbz/tbnz are generated when comparing against zero on arm64 func CmpZero1(a int32, 
ptr *int) { @@ -147,3 +158,88 @@ func CmpZero4(a int64, ptr *int) { *ptr = 0 } } + +func CmpToZero(a, b, d int32, e, f int64) int32 { + // arm:`TST`,-`AND` + // arm64:`TSTW`,-`AND` + // 386:`TESTL`,-`ANDL` + // amd64:`TESTL`,-`ANDL` + c0 := a&b < 0 + // arm:`CMN`,-`ADD` + // arm64:`CMNW`,-`ADD` + c1 := a+b < 0 + // arm:`TEQ`,-`XOR` + c2 := a^b < 0 + // arm64:`TST`,-`AND` + // amd64:`TESTQ`,-`ANDQ` + c3 := e&f < 0 + // arm64:`CMN`,-`ADD` + c4 := e+f < 0 + // not optimized to single CMNW/CMN due to further use of b+d + // arm64:`ADD`,-`CMNW` + // arm:`ADD`,-`CMN` + c5 := b+d == 0 + // not optimized to single TSTW/TST due to further use of a&d + // arm64:`AND`,-`TSTW` + // arm:`AND`,-`TST` + // 386:`ANDL` + c6 := a&d >= 0 + // arm64:`TST\sR[0-9]+<<3,\sR[0-9]+` + c7 := e&(f<<3) < 0 + // arm64:`CMN\sR[0-9]+<<3,\sR[0-9]+` + c8 := e+(f<<3) < 0 + if c0 { + return 1 + } else if c1 { + return 2 + } else if c2 { + return 3 + } else if c3 { + return 4 + } else if c4 { + return 5 + } else if c5 { + return b + d + } else if c6 { + return a & d + } else if c7 { + return 7 + } else if c8 { + return 8 + } else { + return 0 + } +} + +func CmpLogicalToZero(a, b, c uint32, d, e uint64) uint64 { + + // ppc64:"ANDCC",-"CMPW" + // ppc64le:"ANDCC",-"CMPW" + if a & 63 == 0 { + return 1 + } + + // ppc64:"ANDCC",-"CMP" + // ppc64le:"ANDCC",-"CMP" + if d & 255 == 0 { + return 1 + } + + // ppc64:"ANDCC",-"CMP" + // ppc64le:"ANDCC",-"CMP" + if d & e == 0 { + return 1 + } + // ppc64:"ORCC",-"CMP" + // ppc64le:"ORCC",-"CMP" + if d | e == 0 { + return 1 + } + + // ppc64:"XORCC",-"CMP" + // ppc64le:"XORCC",-"CMP" + if e ^ d == 0 { + return 1 + } + return 0 +} diff --git a/test/codegen/condmove.go b/test/codegen/condmove.go index 32039c16ae3f6..aa82d43f4981e 100644 --- a/test/codegen/condmove.go +++ b/test/codegen/condmove.go @@ -180,3 +180,20 @@ func cmovinvert6(x, y uint64) uint64 { // amd64:"CMOVQLS" return y } + +func cmovload(a []int, i int, b bool) int { + if b { + i++ + } + // See issue 26306 
+ // amd64:-"CMOVQNE" + return a[i] +} + +func cmovstore(a []int, i int, b bool) { + if b { + i++ + } + // amd64:"CMOVQNE" + a[i] = 7 +} diff --git a/test/codegen/copy.go b/test/codegen/copy.go index 5c3837bc7c337..46c2bde9abdcc 100644 --- a/test/codegen/copy.go +++ b/test/codegen/copy.go @@ -16,6 +16,8 @@ func movesmall4() { // amd64:-".*memmove" // arm:-".*memmove" // arm64:-".*memmove" + // ppc64:-".*memmove" + // ppc64le:-".*memmove" copy(x[1:], x[:]) } @@ -24,6 +26,8 @@ func movesmall7() { // 386:-".*memmove" // amd64:-".*memmove" // arm64:-".*memmove" + // ppc64:-".*memmove" + // ppc64le:-".*memmove" copy(x[1:], x[:]) } @@ -40,19 +44,22 @@ var x [256]byte func moveDisjointStack() { var s [256]byte // s390x:-".*memmove" + // amd64:-".*memmove" copy(s[:], x[:]) runtime.KeepAlive(&s) } -func moveDisjointArg(b *[256]byte) { +func moveDisjointArg(b *[256]byte) { var s [256]byte // s390x:-".*memmove" + // amd64:-".*memmove" copy(s[:], b[:]) runtime.KeepAlive(&s) } func moveDisjointNoOverlap(a *[256]byte) { // s390x:-".*memmove" + // amd64:-".*memmove" copy(a[:], a[128:]) } @@ -60,6 +67,7 @@ func moveDisjointNoOverlap(a *[256]byte) { func ptrEqual() { // amd64:-"JEQ",-"JNE" + // ppc64:-"BEQ",-"BNE" // ppc64le:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[:], x[:]) @@ -67,6 +75,7 @@ func ptrEqual() { func ptrOneOffset() { // amd64:-"JEQ",-"JNE" + // ppc64:-"BEQ",-"BNE" // ppc64le:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[1:], x[:]) @@ -74,6 +83,7 @@ func ptrOneOffset() { func ptrBothOffset() { // amd64:-"JEQ",-"JNE" + // ppc64:-"BEQ",-"BNE" // ppc64le:-"BEQ",-"BNE" // s390x:-"BEQ",-"BNE" copy(x[1:], x[2:]) diff --git a/test/codegen/floats.go b/test/codegen/floats.go index e0e4d973a3895..5e1f60b08bade 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -6,6 +6,8 @@ package codegen +import "math" + // This file contains codegen tests related to arithmetic // simplifications and optimizations on float types. 
// For codegen tests on integer types, see arithmetic.go. @@ -20,6 +22,8 @@ func Mul2(f float64) float64 { // amd64:"ADDSD",-"MULSD" // arm/7:"ADDD",-"MULD" // arm64:"FADDD",-"FMULD" + // ppc64:"FADD",-"FMUL" + // ppc64le:"FADD",-"FMUL" return f * 2.0 } @@ -29,6 +33,8 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" + // ppc64:"FMUL",-"FDIV" + // ppc64le:"FMUL",-"FDIV" x := f1 / 16.0 // 386/sse2:"MULSD",-"DIVSD" @@ -36,6 +42,8 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // amd64:"MULSD",-"DIVSD" // arm/7:"MULD",-"DIVD" // arm64:"FMULD",-"FDIVD" + // ppc64:"FMUL",-"FDIVD" + // ppc64le:"FMUL",-"FDIVD" y := f2 / 0.125 // 386/sse2:"ADDSD",-"DIVSD",-"MULSD" @@ -43,39 +51,72 @@ func DivPow2(f1, f2, f3 float64) (float64, float64, float64) { // amd64:"ADDSD",-"DIVSD",-"MULSD" // arm/7:"ADDD",-"MULD",-"DIVD" // arm64:"FADDD",-"FMULD",-"FDIVD" + // ppc64:"FADD",-"FMUL",-"FDIV" + // ppc64le:"FADD",-"FMUL",-"FDIV" z := f3 / 0.5 return x, y, z } +func getPi() float64 { + // 386/387:"FLDPI" + return math.Pi +} + +func indexLoad(b0 []float32, b1 float32, idx int) float32 { + // arm64:`FMOVS\s\(R[0-9]+\)\(R[0-9]+\),\sF[0-9]+` + return b0[idx] * b1 +} + +func indexStore(b0 []float64, b1 float64, idx int) { + // arm64:`FMOVD\sF[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)` + b0[idx] = b1 +} + // ----------- // // Fused // // ----------- // func FusedAdd32(x, y, z float32) float32 { // s390x:"FMADDS\t" + // ppc64:"FMADDS\t" // ppc64le:"FMADDS\t" + // arm64:"FMADDS" return x*y + z } -func FusedSub32(x, y, z float32) float32 { +func FusedSub32_a(x, y, z float32) float32 { // s390x:"FMSUBS\t" + // ppc64:"FMSUBS\t" // ppc64le:"FMSUBS\t" return x*y - z } +func FusedSub32_b(x, y, z float32) float32 { + // arm64:"FMSUBS" + return z - x*y +} + func FusedAdd64(x, y, z float64) float64 { // s390x:"FMADD\t" + // ppc64:"FMADD\t" // ppc64le:"FMADD\t" + // arm64:"FMADDD" return x*y + z } -func 
FusedSub64(x, y, z float64) float64 { +func FusedSub64_a(x, y, z float64) float64 { // s390x:"FMSUB\t" + // ppc64:"FMSUB\t" // ppc64le:"FMSUB\t" return x*y - z } +func FusedSub64_b(x, y, z float64) float64 { + // arm64:"FMSUBD" + return z - x*y +} + // ---------------- // // Non-floats // // ---------------- // diff --git a/test/codegen/issue25378.go b/test/codegen/issue25378.go new file mode 100644 index 0000000000000..14aa2c30f2d83 --- /dev/null +++ b/test/codegen/issue25378.go @@ -0,0 +1,22 @@ +// asmcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +var wsp = [256]bool{ + ' ': true, + '\t': true, + '\n': true, + '\r': true, +} + +func zeroExtArgByte(ch byte) bool { + return wsp[ch] // amd64:-"MOVBLZX\t..,.." +} + +func zeroExtArgUint16(ch uint16) bool { + return wsp[ch] // amd64:-"MOVWLZX\t..,.." +} diff --git a/test/codegen/mapaccess.go b/test/codegen/mapaccess.go index 35620e741cedb..a914a0c766ece 100644 --- a/test/codegen/mapaccess.go +++ b/test/codegen/mapaccess.go @@ -304,6 +304,18 @@ func mapAppendAssignmentInt32() { // arm64:-".*mapaccess" m[k] = append(m[k], a...) + // 386:-".*mapaccess" + // amd64:-".*mapaccess" + // arm:-".*mapaccess" + // arm64:-".*mapaccess" + m[k+1] = append(m[k+1], a...) + + // 386:-".*mapaccess" + // amd64:-".*mapaccess" + // arm:-".*mapaccess" + // arm64:-".*mapaccess" + m[-k] = append(m[-k], a...) + // Exceptions // 386:".*mapaccess" @@ -349,6 +361,18 @@ func mapAppendAssignmentInt64() { // arm64:-".*mapaccess" m[k] = append(m[k], a...) + // 386:-".*mapaccess" + // amd64:-".*mapaccess" + // arm:-".*mapaccess" + // arm64:-".*mapaccess" + m[k+1] = append(m[k+1], a...) + + // 386:-".*mapaccess" + // amd64:-".*mapaccess" + // arm:-".*mapaccess" + // arm64:-".*mapaccess" + m[-k] = append(m[-k], a...) 
+ // Exceptions // 386:".*mapaccess" diff --git a/test/codegen/maps.go b/test/codegen/maps.go index d1677158988f4..8dd22ed5caaa8 100644 --- a/test/codegen/maps.go +++ b/test/codegen/maps.go @@ -37,6 +37,35 @@ func AccessString2(m map[string]int) bool { return ok } +// ------------------- // +// String Conversion // +// ------------------- // + +func LookupStringConversionSimple(m map[string]int, bytes []byte) int { + // amd64:-`.*runtime\.slicebytetostring\(` + return m[string(bytes)] +} + +func LookupStringConversionStructLit(m map[struct{ string }]int, bytes []byte) int { + // amd64:-`.*runtime\.slicebytetostring\(` + return m[struct{ string }{string(bytes)}] +} + +func LookupStringConversionArrayLit(m map[[2]string]int, bytes []byte) int { + // amd64:-`.*runtime\.slicebytetostring\(` + return m[[2]string{string(bytes), string(bytes)}] +} + +func LookupStringConversionNestedLit(m map[[1]struct{ s [1]string }]int, bytes []byte) int { + // amd64:-`.*runtime\.slicebytetostring\(` + return m[[1]struct{ s [1]string }{struct{ s [1]string }{s: [1]string{string(bytes)}}}] +} + +func LookupStringConversionKeyedArrayLit(m map[[2]string]int, bytes []byte) int { + // amd64:-`.*runtime\.slicebytetostring\(` + return m[[2]string{0: string(bytes)}] +} + // ------------------- // // Map Clear // // ------------------- // diff --git a/test/codegen/math.go b/test/codegen/math.go index 1ecba26847a05..aaf6b080ffa4f 100644 --- a/test/codegen/math.go +++ b/test/codegen/math.go @@ -13,25 +13,30 @@ var sink64 [8]float64 func approx(x float64) { // s390x:"FIDBR\t[$]6" // arm64:"FRINTPD" + // ppc64:"FRIP" // ppc64le:"FRIP" sink64[0] = math.Ceil(x) // s390x:"FIDBR\t[$]7" // arm64:"FRINTMD" + // ppc64:"FRIM" // ppc64le:"FRIM" sink64[1] = math.Floor(x) // s390x:"FIDBR\t[$]1" // arm64:"FRINTAD" + // ppc64:"FRIN" // ppc64le:"FRIN" sink64[2] = math.Round(x) // s390x:"FIDBR\t[$]5" // arm64:"FRINTZD" + // ppc64:"FRIZ" // ppc64le:"FRIZ" sink64[3] = math.Trunc(x) // s390x:"FIDBR\t[$]4" + // 
arm64:"FRINTND" sink64[4] = math.RoundToEven(x) } @@ -48,12 +53,15 @@ func sqrt(x float64) float64 { // Check that it's using integer registers func abs(x, y float64) { // amd64:"BTRQ\t[$]63" + // arm64:"FABSD\t" // s390x:"LPDFR\t",-"MOVD\t" (no integer load/store) + // ppc64:"FABS\t" // ppc64le:"FABS\t" sink64[0] = math.Abs(x) // amd64:"BTRQ\t[$]63","PXOR" (TODO: this should be BTSQ) // s390x:"LNDFR\t",-"MOVD\t" (no integer load/store) + // ppc64:"FNABS\t" // ppc64le:"FNABS\t" sink64[1] = -math.Abs(y) } @@ -68,12 +76,15 @@ func abs32(x float32) float32 { func copysign(a, b, c float64) { // amd64:"BTRQ\t[$]63","SHRQ\t[$]63","SHLQ\t[$]63","ORQ" // s390x:"CPSDR",-"MOVD" (no integer load/store) + // ppc64:"FCPSGN" // ppc64le:"FCPSGN" sink64[0] = math.Copysign(a, b) // amd64:"BTSQ\t[$]63" // s390x:"LNDFR\t",-"MOVD\t" (no integer load/store) + // ppc64:"FCPSGN" // ppc64le:"FCPSGN" + // arm64:"ORR", -"AND" sink64[1] = math.Copysign(c, -1) // Like math.Copysign(c, -1), but with integer operations. 
Useful @@ -83,27 +94,36 @@ func copysign(a, b, c float64) { // amd64:-"SHLQ\t[$]1",-"SHRQ\t[$]1","SHRQ\t[$]63","SHLQ\t[$]63","ORQ" // s390x:"CPSDR\t",-"MOVD\t" (no integer load/store) + // ppc64:"FCPSGN" // ppc64le:"FCPSGN" sink64[3] = math.Copysign(-1, c) } func fromFloat64(f64 float64) uint64 { // amd64:"MOVQ\tX.*, [^X].*" + // arm64:"FMOVD\tF.*, R.*" + // ppc64:"MFVSRD" + // ppc64le:"MFVSRD" return math.Float64bits(f64+1) + 1 } func fromFloat32(f32 float32) uint32 { // amd64:"MOVL\tX.*, [^X].*" + // arm64:"FMOVS\tF.*, R.*" return math.Float32bits(f32+1) + 1 } func toFloat64(u64 uint64) float64 { // amd64:"MOVQ\t[^X].*, X.*" + // arm64:"FMOVD\tR.*, F.*" + // ppc64:"MTVSRD" + // ppc64le:"MTVSRD" return math.Float64frombits(u64+1) + 1 } func toFloat32(u32 uint32) float32 { // amd64:"MOVL\t[^X].*, X.*" + // arm64:"FMOVS\tR.*, F.*" return math.Float32frombits(u32+1) + 1 } @@ -128,7 +148,9 @@ func constantCheck32() bool { func constantConvert32(x float32) float32 { // amd64:"MOVSS\t[$]f32.3f800000\\(SB\\)" // s390x:"FMOVS\t[$]f32.3f800000\\(SB\\)" + // ppc64:"FMOVS\t[$]f32.3f800000\\(SB\\)" // ppc64le:"FMOVS\t[$]f32.3f800000\\(SB\\)" + // arm64:"FMOVS\t[$]\\(1.0\\)" if x > math.Float32frombits(0x3f800000) { return -x } @@ -138,7 +160,9 @@ func constantConvert32(x float32) float32 { func constantConvertInt32(x uint32) uint32 { // amd64:-"MOVSS" // s390x:-"FMOVS" + // ppc64:-"FMOVS" // ppc64le:-"FMOVS" + // arm64:-"FMOVS" if x > math.Float32bits(1) { return -x } diff --git a/test/codegen/mathbits.go b/test/codegen/mathbits.go index 85c54ea61b785..44ab2c02b7578 100644 --- a/test/codegen/mathbits.go +++ b/test/codegen/mathbits.go @@ -101,29 +101,48 @@ func Len8(n uint8) int { // -------------------- // func OnesCount(n uint) int { - // amd64:"POPCNTQ",".*support_popcnt" + // amd64:"POPCNTQ",".*x86HasPOPCNT" // arm64:"VCNT","VUADDLV" + // s390x:"POPCNT" + // ppc64:"POPCNTD" + // ppc64le:"POPCNTD" return bits.OnesCount(n) } func OnesCount64(n uint64) int { - // 
amd64:"POPCNTQ",".*support_popcnt" + // amd64:"POPCNTQ",".*x86HasPOPCNT" // arm64:"VCNT","VUADDLV" + // s390x:"POPCNT" + // ppc64:"POPCNTD" + // ppc64le:"POPCNTD" return bits.OnesCount64(n) } func OnesCount32(n uint32) int { - // amd64:"POPCNTL",".*support_popcnt" + // amd64:"POPCNTL",".*x86HasPOPCNT" // arm64:"VCNT","VUADDLV" + // s390x:"POPCNT" + // ppc64:"POPCNTW" + // ppc64le:"POPCNTW" return bits.OnesCount32(n) } func OnesCount16(n uint16) int { - // amd64:"POPCNTL",".*support_popcnt" + // amd64:"POPCNTL",".*x86HasPOPCNT" // arm64:"VCNT","VUADDLV" + // s390x:"POPCNT" + // ppc64:"POPCNTW" + // ppc64le:"POPCNTW" return bits.OnesCount16(n) } +func OnesCount8(n uint8) int { + // s390x:"POPCNT" + // ppc64:"POPCNTB" + // ppc64le:"POPCNTB" + return bits.OnesCount8(n) +} + // ----------------------- // // bits.ReverseBytes // // ----------------------- // @@ -162,6 +181,8 @@ func RotateLeft64(n uint64) uint64 { // amd64:"ROLQ" // arm64:"ROR" // ppc64:"ROTL" + // ppc64le:"ROTL" + // s390x:"RLLG" return bits.RotateLeft64(n, 37) } @@ -169,6 +190,8 @@ func RotateLeft32(n uint32) uint32 { // amd64:"ROLL" 386:"ROLL" // arm64:"RORW" // ppc64:"ROTLW" + // ppc64le:"ROTLW" + // s390x:"RLL" return bits.RotateLeft32(n, 9) } @@ -182,6 +205,33 @@ func RotateLeft8(n uint8) uint8 { return bits.RotateLeft8(n, 5) } +func RotateLeftVariable(n uint, m int) uint { + // amd64:"ROLQ" + // arm64:"ROR" + // ppc64:"ROTL" + // ppc64le:"ROTL" + // s390x:"RLLG" + return bits.RotateLeft(n, m) +} + +func RotateLeftVariable64(n uint64, m int) uint64 { + // amd64:"ROLQ" + // arm64:"ROR" + // ppc64:"ROTL" + // ppc64le:"ROTL" + // s390x:"RLLG" + return bits.RotateLeft64(n, m) +} + +func RotateLeftVariable32(n uint32, m int) uint32 { + // amd64:"ROLL" + // arm64:"RORW" + // ppc64:"ROTLW" + // ppc64le:"ROTLW" + // s390x:"RLL" + return bits.RotateLeft32(n, m) +} + // ------------------------ // // bits.TrailingZeros // // ------------------------ // @@ -189,24 +239,32 @@ func RotateLeft8(n uint8) uint8 { 
func TrailingZeros(n uint) int { // amd64:"BSFQ","MOVL\t\\$64","CMOVQEQ" // s390x:"FLOGR" + // ppc64:"ANDN","POPCNTD" + // ppc64le:"ANDN","POPCNTD" return bits.TrailingZeros(n) } func TrailingZeros64(n uint64) int { // amd64:"BSFQ","MOVL\t\\$64","CMOVQEQ" // s390x:"FLOGR" + // ppc64:"ANDN","POPCNTD" + // ppc64le:"ANDN","POPCNTD" return bits.TrailingZeros64(n) } func TrailingZeros32(n uint32) int { // amd64:"BTSQ\\t\\$32","BSFQ" // s390x:"FLOGR","MOVWZ" + // ppc64:"ANDN","POPCNTW" + // ppc64le:"ANDN","POPCNTW" return bits.TrailingZeros32(n) } func TrailingZeros16(n uint16) int { // amd64:"BSFL","BTSL\\t\\$16" // s390x:"FLOGR","OR\t\\$65536" + // ppc64:"POPCNTD","OR\\t\\$65536" + // ppc64le:"POPCNTD","OR\\t\\$65536" return bits.TrailingZeros16(n) } @@ -267,3 +325,157 @@ func IterateBits8(n uint8) int { } return i } + +// --------------- // +// bits.Add* // +// --------------- // + +func Add(x, y, ci uint) (r, co uint) { + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + return bits.Add(x, y, ci) +} + +func AddC(x, ci uint) (r, co uint) { + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + return bits.Add(x, 7, ci) +} + +func AddZ(x, y uint) (r, co uint) { + // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" + return bits.Add(x, y, 0) +} + +func AddR(x, y, ci uint) uint { + // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" + r, _ := bits.Add(x, y, ci) + return r +} +func AddM(p, q, r *[3]uint) { + var c uint + r[0], c = bits.Add(p[0], q[0], c) + // amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ" + r[1], c = bits.Add(p[1], q[1], c) + r[2], c = bits.Add(p[2], q[2], c) +} + +func Add64(x, y, ci uint64) (r, co uint64) { + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + return bits.Add64(x, y, ci) +} + +func Add64C(x, ci uint64) (r, co uint64) { + // amd64:"NEGL","ADCQ","SBBQ","NEGQ" + return bits.Add64(x, 7, ci) +} + +func Add64Z(x, y uint64) (r, co uint64) { + // amd64:"ADDQ","SBBQ","NEGQ",-"NEGL",-"ADCQ" + return bits.Add64(x, y, 0) +} + +func Add64R(x, y, ci uint64) uint64 { + // amd64:"NEGL","ADCQ",-"SBBQ",-"NEGQ" + r, _ := 
bits.Add64(x, y, ci) + return r +} +func Add64M(p, q, r *[3]uint64) { + var c uint64 + r[0], c = bits.Add64(p[0], q[0], c) + // amd64:"ADCQ",-"NEGL",-"SBBQ",-"NEGQ" + r[1], c = bits.Add64(p[1], q[1], c) + r[2], c = bits.Add64(p[2], q[2], c) +} + +// --------------- // +// bits.Sub* // +// --------------- // + +func Sub(x, y, ci uint) (r, co uint) { + // amd64:"NEGL","SBBQ","NEGQ" + return bits.Sub(x, y, ci) +} + +func SubC(x, ci uint) (r, co uint) { + // amd64:"NEGL","SBBQ","NEGQ" + return bits.Sub(x, 7, ci) +} + +func SubZ(x, y uint) (r, co uint) { + // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" + return bits.Sub(x, y, 0) +} + +func SubR(x, y, ci uint) uint { + // amd64:"NEGL","SBBQ",-"NEGQ" + r, _ := bits.Sub(x, y, ci) + return r +} +func SubM(p, q, r *[3]uint) { + var c uint + r[0], c = bits.Sub(p[0], q[0], c) + // amd64:"SBBQ",-"NEGL",-"NEGQ" + r[1], c = bits.Sub(p[1], q[1], c) + r[2], c = bits.Sub(p[2], q[2], c) +} + +func Sub64(x, y, ci uint64) (r, co uint64) { + // amd64:"NEGL","SBBQ","NEGQ" + return bits.Sub64(x, y, ci) +} + +func Sub64C(x, ci uint64) (r, co uint64) { + // amd64:"NEGL","SBBQ","NEGQ" + return bits.Sub64(x, 7, ci) +} + +func Sub64Z(x, y uint64) (r, co uint64) { + // amd64:"SUBQ","SBBQ","NEGQ",-"NEGL" + return bits.Sub64(x, y, 0) +} + +func Sub64R(x, y, ci uint64) uint64 { + // amd64:"NEGL","SBBQ",-"NEGQ" + r, _ := bits.Sub64(x, y, ci) + return r +} +func Sub64M(p, q, r *[3]uint64) { + var c uint64 + r[0], c = bits.Sub64(p[0], q[0], c) + // amd64:"SBBQ",-"NEGL",-"NEGQ" + r[1], c = bits.Sub64(p[1], q[1], c) + r[2], c = bits.Sub64(p[2], q[2], c) +} + +// --------------- // +// bits.Mul* // +// --------------- // + +func Mul(x, y uint) (hi, lo uint) { + // amd64:"MULQ" + // arm64:"UMULH","MUL" + // ppc64:"MULHDU","MULLD" + // ppc64le:"MULHDU","MULLD" + return bits.Mul(x, y) +} + +func Mul64(x, y uint64) (hi, lo uint64) { + // amd64:"MULQ" + // arm64:"UMULH","MUL" + // ppc64:"MULHDU","MULLD" + // ppc64le:"MULHDU","MULLD" + return bits.Mul64(x, y) +} + +// 
--------------- // +// bits.Div* // +// --------------- // + +func Div(hi, lo, x uint) (q, r uint) { + // amd64:"DIVQ" + return bits.Div(hi, lo, x) +} + +func Div64(hi, lo, x uint64) (q, r uint64) { + // amd64:"DIVQ" + return bits.Div64(hi, lo, x) +} diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go index 0db366250f25b..b3d2cb2067639 100644 --- a/test/codegen/memcombine.go +++ b/test/codegen/memcombine.go @@ -113,16 +113,22 @@ func load_be16_idx(b []byte, idx int) { func load_le_byte2_uint16(s []byte) uint16 { // arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB` + // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` + // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` return uint16(s[0]) | uint16(s[1])<<8 } func load_le_byte2_uint16_inv(s []byte) uint16 { // arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR`,-`MOVB` + // 386:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` + // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` return uint16(s[1])<<8 | uint16(s[0]) } func load_le_byte4_uint32(s []byte) uint32 { // arm64:`MOVWU\t\(R[0-9]+\)`,-`ORR`,-`MOV[BH]` + // 386:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`-`MOVW` + // amd64:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`-`MOVW` return uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24 } @@ -133,21 +139,25 @@ func load_le_byte4_uint32_inv(s []byte) uint32 { func load_le_byte8_uint64(s []byte) uint64 { // arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+` return uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24 | uint64(s[4])<<32 | uint64(s[5])<<40 | uint64(s[6])<<48 | uint64(s[7])<<56 } func load_le_byte8_uint64_inv(s []byte) uint64 { // arm64:`MOVD\t\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+` return uint64(s[7])<<56 | uint64(s[6])<<48 | uint64(s[5])<<40 | uint64(s[4])<<32 | uint64(s[3])<<24 | uint64(s[2])<<16 | uint64(s[1])<<8 | uint64(s[0]) } func load_be_byte2_uint16(s []byte) uint16 { // arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // 
amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` return uint16(s[0])<<8 | uint16(s[1]) } func load_be_byte2_uint16_inv(s []byte) uint16 { // arm64:`MOVHU\t\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // amd64:`MOVWLZX\s\([A-Z]+\)`,-`MOVB`,-`ORL` return uint16(s[1]) | uint16(s[0])<<8 } @@ -158,31 +168,39 @@ func load_be_byte4_uint32(s []byte) uint32 { func load_be_byte4_uint32_inv(s []byte) uint32 { // arm64:`MOVWU\t\(R[0-9]+\)`,`REVW`,-`ORR`,-`REV16W`,-`MOV[BH]` + // amd64:`MOVL\s\([A-Z]+\)`,-`MOVB`,-`OR`,-`MOVW` return uint32(s[3]) | uint32(s[2])<<8 | uint32(s[1])<<16 | uint32(s[0])<<24 } func load_be_byte8_uint64(s []byte) uint64 { // arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+` return uint64(s[0])<<56 | uint64(s[1])<<48 | uint64(s[2])<<40 | uint64(s[3])<<32 | uint64(s[4])<<24 | uint64(s[5])<<16 | uint64(s[6])<<8 | uint64(s[7]) } func load_be_byte8_uint64_inv(s []byte) uint64 { // arm64:`MOVD\t\(R[0-9]+\)`,`REV`,-`ORR`,-`REVW`,-`REV16W`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\),\s[A-Z]+` return uint64(s[7]) | uint64(s[6])<<8 | uint64(s[5])<<16 | uint64(s[4])<<24 | uint64(s[3])<<32 | uint64(s[2])<<40 | uint64(s[1])<<48 | uint64(s[0])<<56 } func load_le_byte2_uint16_idx(s []byte, idx int) uint16 { // arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOVB` + // 386:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`ORL`,-`MOVB` + // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB` return uint16(s[idx]) | uint16(s[idx+1])<<8 } func load_le_byte2_uint16_idx_inv(s []byte, idx int) uint16 { // arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOVB` + // 386:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`ORL`,-`MOVB` + // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB` return uint16(s[idx+1])<<8 | uint16(s[idx]) } func load_le_byte4_uint32_idx(s []byte, idx int) uint32 { // arm64:`MOVWU\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOV[BH]` + // amd64:`MOVL\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB`,-`MOVW` return uint32(s[idx]) | uint32(s[idx+1])<<8 | uint32(s[idx+2])<<16 | 
uint32(s[idx+3])<<24 } @@ -193,6 +211,7 @@ func load_le_byte4_uint32_idx_inv(s []byte, idx int) uint32 { func load_le_byte8_uint64_idx(s []byte, idx int) uint64 { // arm64:`MOVD\s\(R[0-9]+\)\(R[0-9]+\)`,-`ORR`,-`MOV[BHW]` + // amd64:`MOVQ\s\([A-Z]+\)\([A-Z]+` return uint64(s[idx]) | uint64(s[idx+1])<<8 | uint64(s[idx+2])<<16 | uint64(s[idx+3])<<24 | uint64(s[idx+4])<<32 | uint64(s[idx+5])<<40 | uint64(s[idx+6])<<48 | uint64(s[idx+7])<<56 } @@ -203,11 +222,13 @@ func load_le_byte8_uint64_idx_inv(s []byte, idx int) uint64 { func load_be_byte2_uint16_idx(s []byte, idx int) uint16 { // arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB` return uint16(s[idx])<<8 | uint16(s[idx+1]) } func load_be_byte2_uint16_idx_inv(s []byte, idx int) uint16 { // arm64:`MOVHU\s\(R[0-9]+\)\(R[0-9]+\)`,`REV16W`,-`ORR`,-`MOVB` + // amd64:`MOVWLZX\s\([A-Z]+\)\([A-Z]+`,-`OR`,-`MOVB` return uint16(s[idx+1]) | uint16(s[idx])<<8 } @@ -405,45 +426,67 @@ func store_be16_idx(b []byte, idx int) { func store_le_byte_2(b []byte, val uint16) { _ = b[2] // arm64:`MOVH\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB` + // 386:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB` + // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB` b[1], b[2] = byte(val), byte(val>>8) } +func store_le_byte_2_inv(b []byte, val uint16) { + _ = b[2] + // 386:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB` + // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB` + b[2], b[1] = byte(val>>8), byte(val) +} + func store_le_byte_4(b []byte, val uint32) { _ = b[4] // arm64:`MOVW\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH` + // 386:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW` + // amd64:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW` b[1], b[2], b[3], b[4] = byte(val), byte(val>>8), byte(val>>16), byte(val>>24) } func store_le_byte_8(b []byte, val uint64) { _ = b[8] // arm64:`MOVD\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`MOVW` + // amd64:`MOVQ\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`,-`MOVL` b[1], b[2], b[3], b[4], b[5], b[6], 
b[7], b[8] = byte(val), byte(val>>8), byte(val>>16), byte(val>>24), byte(val>>32), byte(val>>40), byte(val>>48), byte(val>>56) } func store_be_byte_2(b []byte, val uint16) { _ = b[2] // arm64:`REV16W`,`MOVH\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB` + // amd64:`MOVW\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB` b[1], b[2] = byte(val>>8), byte(val) } func store_be_byte_4(b []byte, val uint32) { _ = b[4] // arm64:`REVW`,`MOVW\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`REV16W` + // amd64:`MOVL\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW` b[1], b[2], b[3], b[4] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val) } func store_be_byte_8(b []byte, val uint64) { _ = b[8] // arm64:`REV`,`MOVD\sR[0-9]+,\s1\(R[0-9]+\)`,-`MOVB`,-`MOVH`,-`MOVW`,-`REV16W`,-`REVW` + // amd64:`MOVQ\s[A-Z]+,\s1\([A-Z]+\)`,-`MOVB`,-`MOVW`,-`MOVL` b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8] = byte(val>>56), byte(val>>48), byte(val>>40), byte(val>>32), byte(val>>24), byte(val>>16), byte(val>>8), byte(val) } func store_le_byte_2_idx(b []byte, idx int, val uint16) { _, _ = b[idx+0], b[idx+1] // arm64:`MOVH\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOVB` + // 386:`MOVW\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+`,-`MOVB` b[idx+1], b[idx+0] = byte(val>>8), byte(val) } +func store_le_byte_2_idx_inv(b []byte, idx int, val uint16) { + _, _ = b[idx+0], b[idx+1] + // 386:`MOVW\s[A-Z]+,\s\([A-Z]+\)\([A-Z]+`,-`MOVB` + b[idx+0], b[idx+1] = byte(val), byte(val>>8) +} + func store_le_byte_4_idx(b []byte, idx int, val uint32) { _, _, _, _ = b[idx+0], b[idx+1], b[idx+2], b[idx+3] // arm64:`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+\)`,-`MOVB`,-`MOVH` @@ -468,6 +511,24 @@ func store_be_byte_2_idx2(b []byte, idx int, val uint16) { b[(idx<<1)+0], b[(idx<<1)+1] = byte(val>>8), byte(val) } +func store_le_byte_2_idx2(b []byte, idx int, val uint16) { + _, _ = b[(idx<<1)+0], b[(idx<<1)+1] + // arm64:`MOVH\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<1\)`,-`MOVB` + b[(idx<<1)+1], b[(idx<<1)+0] = byte(val>>8), byte(val) +} + +func store_be_byte_4_idx4(b []byte, idx int, val uint32) { + 
_, _, _, _ = b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3] + // arm64:`REVW`,`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOVB`,-`MOVH`,-`REV16W` + b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val) +} + +func store_le_byte_4_idx4_inv(b []byte, idx int, val uint32) { + _, _, _, _ = b[(idx<<2)+0], b[(idx<<2)+1], b[(idx<<2)+2], b[(idx<<2)+3] + // arm64:`MOVW\sR[0-9]+,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOVB`,-`MOVH` + b[(idx<<2)+3], b[(idx<<2)+2], b[(idx<<2)+1], b[(idx<<2)+0] = byte(val>>24), byte(val>>16), byte(val>>8), byte(val) +} + // ------------- // // Zeroing // // ------------- // @@ -477,14 +538,24 @@ func store_be_byte_2_idx2(b []byte, idx int, val uint16) { func zero_byte_2(b1, b2 []byte) { // bounds checks to guarantee safety of writes below _, _ = b1[1], b2[1] - b1[0], b1[1] = 0, 0 // arm64:"MOVH\tZR",-"MOVB" - b2[1], b2[0] = 0, 0 // arm64:"MOVH\tZR",-"MOVB" + // arm64:"MOVH\tZR",-"MOVB" + // amd64:`MOVW\s[$]0,\s\([A-Z]+\)` + // 386:`MOVW\s[$]0,\s\([A-Z]+\)` + b1[0], b1[1] = 0, 0 + // arm64:"MOVH\tZR",-"MOVB" + // 386:`MOVW\s[$]0,\s\([A-Z]+\)` + // amd64:`MOVW\s[$]0,\s\([A-Z]+\)` + b2[1], b2[0] = 0, 0 } func zero_byte_4(b1, b2 []byte) { _, _ = b1[3], b2[3] - b1[0], b1[1], b1[2], b1[3] = 0, 0, 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH" - b2[2], b2[3], b2[1], b2[0] = 0, 0, 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` + // 386:`MOVL\s[$]0,\s\([A-Z]+\)` + b1[0], b1[1], b1[2], b1[3] = 0, 0, 0, 0 + // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + b2[2], b2[3], b2[1], b2[0] = 0, 0, 0, 0 } func zero_byte_8(b []byte) { @@ -501,28 +572,6 @@ func zero_byte_16(b []byte) { b[12], b[13], b[14], b[15] = 0, 0, 0, 0 // arm64:"STP",-"MOVB",-"MOVH",-"MOVW" } -/* TODO: enable them when corresponding optimization are implemented -func zero_byte_4_idx(b []byte, idx int) { - // arm64: `MOVW\sZR,\s\(R[0-9]+\)\(R[0-9]+<<2\)`,-`MOV[BH]` - 
b[(idx<<2)+0] = 0 - b[(idx<<2)+1] = 0 - b[(idx<<2)+2] = 0 - b[(idx<<2)+3] = 0 -} - -func zero_byte_8_idx(b []byte, idx int) { - // arm64: `MOVD\sZR,\s\(R[0-9]+\)\(R[0-9]+<<3\)`,-`MOV[BHW]` - b[(idx<<3)+0] = 0 - b[(idx<<3)+1] = 0 - b[(idx<<3)+2] = 0 - b[(idx<<3)+3] = 0 - b[(idx<<3)+4] = 0 - b[(idx<<3)+5] = 0 - b[(idx<<3)+6] = 0 - b[(idx<<3)+7] = 0 -} -*/ - func zero_byte_30(a *[30]byte) { *a = [30]byte{} // arm64:"STP",-"MOVB",-"MOVH",-"MOVW" } @@ -545,14 +594,23 @@ func zero_byte_2_idx2(b []byte, idx int) { func zero_uint16_2(h1, h2 []uint16) { _, _ = h1[1], h2[1] - h1[0], h1[1] = 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH" - h2[1], h2[0] = 0, 0 // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` + // 386:`MOVL\s[$]0,\s\([A-Z]+\)` + h1[0], h1[1] = 0, 0 + // arm64:"MOVW\tZR",-"MOVB",-"MOVH" + // amd64:`MOVL\s[$]0,\s\([A-Z]+\)` + // 386:`MOVL\s[$]0,\s\([A-Z]+\)` + h2[1], h2[0] = 0, 0 } func zero_uint16_4(h1, h2 []uint16) { _, _ = h1[3], h2[3] - h1[0], h1[1], h1[2], h1[3] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" - h2[2], h2[3], h2[1], h2[0] = 0, 0, 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` + h1[0], h1[1], h1[2], h1[3] = 0, 0, 0, 0 + // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + h2[2], h2[3], h2[1], h2[0] = 0, 0, 0, 0 } func zero_uint16_8(h []uint16) { @@ -563,8 +621,12 @@ func zero_uint16_8(h []uint16) { func zero_uint32_2(w1, w2 []uint32) { _, _ = w1[1], w2[1] - w1[0], w1[1] = 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" - w2[1], w2[0] = 0, 0 // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` + w1[0], w1[1] = 0, 0 + // arm64:"MOVD\tZR",-"MOVB",-"MOVH",-"MOVW" + // amd64:`MOVQ\s[$]0,\s\([A-Z]+\)` + w2[1], w2[0] = 0, 0 } func zero_uint32_4(w1, w2 []uint32) { diff --git a/test/codegen/memops.go b/test/codegen/memops.go new file mode 
100644 index 0000000000000..dcf5863666521 --- /dev/null +++ b/test/codegen/memops.go @@ -0,0 +1,95 @@ +// asmcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +var x [2]bool +var x8 [2]uint8 +var x16 [2]uint16 +var x32 [2]uint32 +var x64 [2]uint64 + +func compMem1() int { + // amd64:`CMPB\t"".x\+1\(SB\), [$]0` + if x[1] { + return 1 + } + // amd64:`CMPB\t"".x8\+1\(SB\), [$]7` + if x8[1] == 7 { + return 1 + } + // amd64:`CMPW\t"".x16\+2\(SB\), [$]7` + if x16[1] == 7 { + return 1 + } + // amd64:`CMPL\t"".x32\+4\(SB\), [$]7` + if x32[1] == 7 { + return 1 + } + // amd64:`CMPQ\t"".x64\+8\(SB\), [$]7` + if x64[1] == 7 { + return 1 + } + return 0 +} + +//go:noinline +func f(x int) bool { + return false +} + +//go:noinline +func f8(x int) int8 { + return 0 +} + +//go:noinline +func f16(x int) int16 { + return 0 +} + +//go:noinline +func f32(x int) int32 { + return 0 +} + +//go:noinline +func f64(x int) int64 { + return 0 +} + +func compMem2() int { + // amd64:`CMPB\t8\(SP\), [$]0` + if f(3) { + return 1 + } + // amd64:`CMPB\t8\(SP\), [$]7` + if f8(3) == 7 { + return 1 + } + // amd64:`CMPW\t8\(SP\), [$]7` + if f16(3) == 7 { + return 1 + } + // amd64:`CMPL\t8\(SP\), [$]7` + if f32(3) == 7 { + return 1 + } + // amd64:`CMPQ\t8\(SP\), [$]7` + if f64(3) == 7 { + return 1 + } + return 0 +} + +func compMem3(x, y *int) (int, bool) { + // We can do comparisons of a register with memory even if + // the register is used subsequently. + r := *x + // amd64:`CMPQ\t\(` + // 386:`CMPL\t\(` + return r, r < *y +} diff --git a/test/codegen/noextend.go b/test/codegen/noextend.go new file mode 100644 index 0000000000000..46bfe3f2f91a1 --- /dev/null +++ b/test/codegen/noextend.go @@ -0,0 +1,255 @@ +// asmcheck + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codegen + +var sval64 [8]int64 +var sval32 [8]int32 +var sval16 [8]int16 +var sval8 [8]int8 +var val64 [8]uint64 +var val32 [8]uint32 +var val16 [8]uint16 +var val8 [8]uint8 + +// ----------------------------- // +// avoid zero/sign extensions // +// ----------------------------- // + +func set16(x8 int8, u8 uint8, y8 int8, z8 uint8) { + // Truncate not needed, load does sign/zero extend + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + sval16[0] = int16(x8) + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + val16[0] = uint16(u8) + + // AND not needed due to size + // ppc64:-"ANDCC" + // ppc64le:-"ANDCC" + sval16[1] = 255 & int16(x8+y8) + + // ppc64:-"ANDCC" + // ppc64le:-"ANDCC" + val16[1] = 255 & uint16(u8+z8) + +} +func shiftidx(x8 int8, u8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) { + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + sval16[0] = int16(val16[x8>>1]) + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + val16[0] = uint16(sval16[u8>>2]) + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + sval16[1] = int16(val16[x16>>1]) + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + val16[1] = uint16(sval16[u16>>2]) + +} + +func setnox(x8 int8, u8 uint8, y8 int8, z8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) { + // Truncate not needed due to sign/zero extension on load + + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + sval16[0] = int16(x8) + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + val16[0] = uint16(u8) + + // AND not needed due to size + // ppc64:-"ANDCC" + // ppc64le:-"ANDCC" + sval16[1] = 255 & int16(x8+y8) + + // ppc64:-"ANDCC" + // ppc64le:-"ANDCC" + val16[1] = 255 & uint16(u8+z8) + + // 
ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + sval32[0] = int32(x8) + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + sval32[1] = int32(x16) + + //ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + //ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + val32[0] = uint32(u8) + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + val32[1] = uint32(u16) + + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + sval64[0] = int64(x8) + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + sval64[1] = int64(x16) + + // ppc64:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVW\tR\\d+,\\sR\\d+" + sval64[2] = int64(x32) + + //ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + //ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + val64[0] = uint64(u8) + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + val64[1] = uint64(u16) + + // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+" + val64[2] = uint64(u32) +} + +func cmp16(x8 int8, u8 uint8, x32 int32, u32 uint32, x64 int64, u64 uint64) bool { + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + if int16(x8) == sval16[0] { + return true + } + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + if uint16(u8) == val16[0] { + return true + } + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if uint16(u32>>16) == val16[0] { + return true + } + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if uint16(u64>>48) == val16[0] { + return true + } + + // Verify the truncates are using the correct sign. 
+ // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if int16(x32) == sval16[0] { + return true + } + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + if uint16(u32) == val16[0] { + return true + } + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if int16(x64) == sval16[0] { + return true + } + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + if uint16(u64) == val16[0] { + return true + } + + return false +} + +func cmp32(x8 int8, u8 uint8, x16 int16, u16 uint16, x64 int64, u64 uint64) bool { + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + if int32(x8) == sval32[0] { + return true + } + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + if uint32(u8) == val32[0] { + return true + } + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + if int32(x16) == sval32[0] { + return true + } + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if uint32(u16) == val32[0] { + return true + } + + // Verify the truncates are using the correct sign. 
+ // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+" + if int32(x64) == sval32[0] { + return true + } + + // ppc64:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVW\tR\\d+,\\sR\\d+" + if uint32(u64) == val32[0] { + return true + } + + return false +} + + +func cmp64(x8 int8, u8 uint8, x16 int16, u16 uint16, x32 int32, u32 uint32) bool { + // ppc64:-"MOVB\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVB\tR\\d+,\\sR\\d+" + if int64(x8) == sval64[0] { + return true + } + + // ppc64:-"MOVBZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVBZ\tR\\d+,\\sR\\d+" + if uint64(u8) == val64[0] { + return true + } + + // ppc64:-"MOVH\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVH\tR\\d+,\\sR\\d+" + if int64(x16) == sval64[0] { + return true + } + + // ppc64:-"MOVHZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVHZ\tR\\d+,\\sR\\d+" + if uint64(u16) == val64[0] { + return true + } + + // ppc64:-"MOVW\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVW\tR\\d+,\\sR\\d+" + if int64(x32) == sval64[0] { + return true + } + + // ppc64:-"MOVWZ\tR\\d+,\\sR\\d+" + // ppc64le:-"MOVWZ\tR\\d+,\\sR\\d+" + if uint64(u32) == val64[0] { + return true + } + return false +} + diff --git a/test/codegen/rotate.go b/test/codegen/rotate.go index 5812e1c0b1ce6..ce24b57877348 100644 --- a/test/codegen/rotate.go +++ b/test/codegen/rotate.go @@ -16,18 +16,21 @@ func rot64(x uint64) uint64 { // amd64:"ROLQ\t[$]7" // arm64:"ROR\t[$]57" // s390x:"RLLG\t[$]7" + // ppc64:"ROTL\t[$]7" // ppc64le:"ROTL\t[$]7" a += x<<7 | x>>57 // amd64:"ROLQ\t[$]8" // arm64:"ROR\t[$]56" // s390x:"RLLG\t[$]8" + // ppc64:"ROTL\t[$]8" // ppc64le:"ROTL\t[$]8" a += x<<8 + x>>56 // amd64:"ROLQ\t[$]9" // arm64:"ROR\t[$]55" // s390x:"RLLG\t[$]9" + // ppc64:"ROTL\t[$]9" // ppc64le:"ROTL\t[$]9" a += x<<9 ^ x>>55 @@ -41,6 +44,7 @@ func rot32(x uint32) uint32 { // arm:"MOVW\tR\\d+@>25" // arm64:"RORW\t[$]25" // s390x:"RLL\t[$]7" + // ppc64:"ROTLW\t[$]7" // ppc64le:"ROTLW\t[$]7" a += x<<7 | x>>25 @@ -48,6 +52,7 @@ func rot32(x uint32) uint32 { // arm:"MOVW\tR\\d+@>24" // 
arm64:"RORW\t[$]24" // s390x:"RLL\t[$]8" + // ppc64:"ROTLW\t[$]8" // ppc64le:"ROTLW\t[$]8" a += x<<8 + x>>24 @@ -55,6 +60,7 @@ func rot32(x uint32) uint32 { // arm:"MOVW\tR\\d+@>23" // arm64:"RORW\t[$]23" // s390x:"RLL\t[$]9" + // ppc64:"ROTLW\t[$]9" // ppc64le:"ROTLW\t[$]9" a += x<<9 ^ x>>23 @@ -101,6 +107,7 @@ func rot64nc(x uint64, z uint) uint64 { z &= 63 // amd64:"ROLQ" + // ppc64:"ROTL" // ppc64le:"ROTL" a += x<>(64-z) @@ -116,6 +123,7 @@ func rot32nc(x uint32, z uint) uint32 { z &= 31 // amd64:"ROLL" + // ppc64:"ROTLW" // ppc64le:"ROTLW" a += x<>(32-z) diff --git a/test/codegen/stack.go b/test/codegen/stack.go index 7e12dbc0eb6bb..ed2c1ed95958b 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -16,8 +16,9 @@ import "runtime" // 386:"TEXT\t.*, [$]0-" // amd64:"TEXT\t.*, [$]0-" // arm:"TEXT\t.*, [$]-4-" -// arm64:"TEXT\t.*, [$]-8-" +// arm64:"TEXT\t.*, [$]0-" // mips:"TEXT\t.*, [$]-4-" +// ppc64:"TEXT\t.*, [$]0-" // ppc64le:"TEXT\t.*, [$]0-" // s390x:"TEXT\t.*, [$]0-" func StackStore() int { @@ -35,8 +36,9 @@ type T struct { // 386:"TEXT\t.*, [$]0-" // amd64:"TEXT\t.*, [$]0-" // arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]-8-" +// arm64:"TEXT\t.*, [$]0-" // mips:"TEXT\t.*, [$]-4-" +// ppc64:"TEXT\t.*, [$]0-" // ppc64le:"TEXT\t.*, [$]0-" // s390x:"TEXT\t.*, [$]0-" func ZeroLargeStruct(x *T) { @@ -50,7 +52,8 @@ func ZeroLargeStruct(x *T) { // - 386 fails due to spilling a register // amd64:"TEXT\t.*, [$]0-" // arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]-8-" +// arm64:"TEXT\t.*, [$]0-" +// ppc64:"TEXT\t.*, [$]0-" // ppc64le:"TEXT\t.*, [$]0-" // s390x:"TEXT\t.*, [$]0-" // Note: that 386 currently has to spill a register. 
@@ -64,7 +67,8 @@ func KeepWanted(t *T) { // - 386 fails due to spilling a register // - arm & mips fail due to softfloat calls // amd64:"TEXT\t.*, [$]0-" -// arm64:"TEXT\t.*, [$]-8-" +// arm64:"TEXT\t.*, [$]0-" +// ppc64:"TEXT\t.*, [$]0-" // ppc64le:"TEXT\t.*, [$]0-" // s390x:"TEXT\t.*, [$]0-" func ArrayAdd64(a, b [4]float64) [4]float64 { @@ -76,8 +80,9 @@ func ArrayAdd64(a, b [4]float64) [4]float64 { // 386:"TEXT\t.*, [$]0-" // amd64:"TEXT\t.*, [$]0-" // arm:"TEXT\t.*, [$]0-" (spills return address) -// arm64:"TEXT\t.*, [$]-8-" +// arm64:"TEXT\t.*, [$]0-" // mips:"TEXT\t.*, [$]-4-" +// ppc64:"TEXT\t.*, [$]0-" // ppc64le:"TEXT\t.*, [$]0-" // s390x:"TEXT\t.*, [$]0-" func ArrayInit(i, j int) [4]int { diff --git a/test/codegen/strings.go b/test/codegen/strings.go index ccb6bd4273a1f..d688b6cbf9259 100644 --- a/test/codegen/strings.go +++ b/test/codegen/strings.go @@ -13,3 +13,49 @@ func CountRunes(s string) int { // Issue #24923 // amd64:`.*countrunes` return len([]rune(s)) } + +func ToByteSlice() []byte { // Issue #24698 + // amd64:`LEAQ\ttype\.\[3\]uint8` + // amd64:`CALL\truntime\.newobject` + // amd64:-`.*runtime.stringtoslicebyte` + return []byte("foo") +} + +// Loading from read-only symbols should get transformed into constants. 
+func ConstantLoad() { + // 12592 = 0x3130 + // 50 = 0x32 + // amd64:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(` + // 386:`MOVW\t\$12592, \(`,`MOVB\t\$50, 2\(` + // arm:`MOVW\t\$48`,`MOVW\t\$49`,`MOVW\t\$50` + // arm64:`MOVD\t\$12592`,`MOVD\t\$50` + bsink = []byte("012") + + // 858927408 = 0x33323130 + // 13620 = 0x3534 + // amd64:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(` + // 386:`MOVL\t\$858927408`,`MOVW\t\$13620, 4\(` + // arm64:`MOVD\t\$858927408`,`MOVD\t\$13620` + bsink = []byte("012345") + + // 3978425819141910832 = 0x3736353433323130 + // 7306073769690871863 = 0x6564636261393837 + // amd64:`MOVQ\t\$3978425819141910832`,`MOVQ\t\$7306073769690871863` + // 386:`MOVL\t\$858927408, \(`,`DUFFCOPY` + // arm64:`MOVD\t\$3978425819141910832`,`MOVD\t\$1650538808`,`MOVD\t\$25699`,`MOVD\t\$101` + bsink = []byte("0123456789abcde") + + // 56 = 0x38 + // amd64:`MOVQ\t\$3978425819141910832`,`MOVB\t\$56` + bsink = []byte("012345678") + + // 14648 = 0x3938 + // amd64:`MOVQ\t\$3978425819141910832`,`MOVW\t\$14648` + bsink = []byte("0123456789") + + // 1650538808 = 0x62613938 + // amd64:`MOVQ\t\$3978425819141910832`,`MOVL\t\$1650538808` + bsink = []byte("0123456789ab") +} + +var bsink []byte diff --git a/test/codegen/zerosize.go b/test/codegen/zerosize.go new file mode 100644 index 0000000000000..cd0c83b6efed7 --- /dev/null +++ b/test/codegen/zerosize.go @@ -0,0 +1,25 @@ +// asmcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure a pointer variable and a zero-sized variable +// aren't allocated to the same stack slot. +// See issue 24993. 
+ +package codegen + +func zeroSize() { + c := make(chan struct{}) + // amd64:`MOVQ\t\$0, ""\.s\+32\(SP\)` + var s *int + g(&s) // force s to be a stack object + + // amd64:`LEAQ\t""\..*\+31\(SP\)` + c <- struct{}{} +} + +//go:noinline +func g(p **int) { +} diff --git a/test/errchk b/test/errchk deleted file mode 100755 index 1cb57bb961c41..0000000000000 --- a/test/errchk +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env perl -# Copyright 2009 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This script checks that the compilers emit the errors which we expect. -# Usage: errchk COMPILER [OPTS] SOURCEFILES. This will run the command -# COMPILER [OPTS] SOURCEFILES. The compilation is expected to fail; if -# it succeeds, this script will report an error. The stderr output of -# the compiler will be matched against comments in SOURCEFILES. For each -# line of the source files which should generate an error, there should -# be a comment of the form // ERROR "regexp". If the compiler generates -# an error for a line which has no such comment, this script will report -# an error. Likewise if the compiler does not generate an error for a -# line which has a comment, or if the error message does not match the -# . The syntax is Perl but its best to stick to egrep. - -use POSIX; - -my $exitcode = 1; - -if(@ARGV >= 1 && $ARGV[0] eq "-0") { - $exitcode = 0; - shift; -} - -if(@ARGV < 1) { - print STDERR "Usage: errchk COMPILER [OPTS] SOURCEFILES\n"; - exit 1; -} - -# Grab SOURCEFILES -foreach(reverse 0 .. @ARGV-1) { - unless($ARGV[$_] =~ /\.(go|s)$/) { - @file = @ARGV[$_+1 .. @ARGV-1]; - last; - } -} - -# If no files have been specified try to grab SOURCEFILES from the last -# argument that is an existing directory if any -unless(@file) { - foreach(reverse 0 .. @ARGV-1) { - if(-d $ARGV[$_]) { - @file = glob($ARGV[$_] . 
"/*.go"); - last; - } - } -} - -foreach $file (@file) { - open(SRC, $file) || die "BUG: errchk: open $file: $!"; - $src{$file} = []; - close(SRC); -} - -# Run command -$cmd = join(' ', @ARGV); -open(CMD, "exec $cmd &1 |") || die "BUG: errchk: run $cmd: $!"; - -# gc error messages continue onto additional lines with leading tabs. -# Split the output at the beginning of each line that doesn't begin with a tab. -$out = join('', ); -@out = split(/^(?!\t)/m, $out); - -close CMD; - -# Remove lines beginning with #, printed by go command to indicate package. -@out = grep {!/^#/} @out; - -if($exitcode != 0 && $? == 0) { - print STDERR "BUG: errchk: command succeeded unexpectedly\n"; - print STDERR @out; - exit 0; -} - -if($exitcode == 0 && $? != 0) { - print STDERR "BUG: errchk: command failed unexpectedly\n"; - print STDERR @out; - exit 0; -} - -if(!WIFEXITED($?)) { - print STDERR "BUG: errchk: compiler crashed\n"; - print STDERR @out, "\n"; - exit 0; -} - -sub bug() { - if(!$bug++) { - print STDERR "BUG: "; - } -} - -sub chk { - my $file = shift; - my $line = 0; - my $regexp; - my @errmsg; - my @match; - foreach my $src (@{$src{$file}}) { - $line++; - next if $src =~ m|////|; # double comment disables ERROR - next unless $src =~ m|// (GC_)?ERROR (.*)|; - my $all = $2; - if($all !~ /^"([^"]*)"/) { - print STDERR "$file:$line: malformed regexp\n"; - next; - } - @errmsg = grep { /$file:$line[:[]/ } @out; - @out = grep { !/$file:$line[:[]/ } @out; - if(@errmsg == 0) { - bug(); - print STDERR "errchk: $file:$line: missing expected error: '$all'\n"; - next; - } - foreach my $regexp ($all =~ /"([^"]*)"/g) { - # Turn relative line number in message into absolute line number. 
- if($regexp =~ /LINE(([+-])([0-9]+))?/) { - my $n = $line; - if(defined($1)) { - if($2 eq "+") { - $n += int($3); - } else { - $n -= int($3); - } - } - $regexp = "$`$file:$n$'"; - } - - @match = grep { /$regexp/ } @errmsg; - if(@match == 0) { - bug(); - print STDERR "errchk: $file:$line: error messages do not match '$regexp'\n"; - next; - } - @errmsg = grep { !/$regexp/ } @errmsg; - } - if(@errmsg != 0) { - bug(); - print STDERR "errchk: $file:$line: unmatched error messages:\n"; - foreach my $l (@errmsg) { - print STDERR "> $l"; - } - } - } -} - -foreach $file (@file) { - chk($file) -} - -if(@out != 0) { - bug(); - print STDERR "errchk: unmatched error messages:\n"; - print STDERR "==================================================\n"; - print STDERR @out; - print STDERR "==================================================\n"; -} - -exit 0; diff --git a/test/escape2.go b/test/escape2.go index ef3d6a88bf382..a39291e85558d 100644 --- a/test/escape2.go +++ b/test/escape2.go @@ -1593,27 +1593,33 @@ func ptrlitEscape() { // self-assignments type Buffer struct { - arr [64]byte - buf1 []byte - buf2 []byte - str1 string - str2 string + arr [64]byte + arrPtr *[64]byte + buf1 []byte + buf2 []byte + str1 string + str2 string } func (b *Buffer) foo() { // ERROR "\(\*Buffer\).foo b does not escape$" - b.buf1 = b.buf1[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf1[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf2[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf2[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" + b.buf1 = b.buf1[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf1\[1:2\]$" + b.buf1 = b.buf1[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf1\[1:2:3\]$" + b.buf1 = b.buf2[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf2\[1:2\]$" + b.buf1 = b.buf2[1:2:3] 
// ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf2\[1:2:3\]$" } func (b *Buffer) bar() { // ERROR "leaking param: b$" b.buf1 = b.arr[1:2] // ERROR "b.arr escapes to heap$" } +func (b *Buffer) arrayPtr() { // ERROR "\(\*Buffer\).arrayPtr b does not escape" + b.buf1 = b.arrPtr[1:2] // ERROR "\(\*Buffer\).arrayPtr ignoring self-assignment in b.buf1 = b.arrPtr\[1:2\]$" + b.buf1 = b.arrPtr[1:2:3] // ERROR "\(\*Buffer\).arrayPtr ignoring self-assignment in b.buf1 = b.arrPtr\[1:2:3\]$" +} + func (b *Buffer) baz() { // ERROR "\(\*Buffer\).baz b does not escape$" - b.str1 = b.str1[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment to b.str1$" - b.str1 = b.str2[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment to b.str1$" + b.str1 = b.str1[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment in b.str1 = b.str1\[1:2\]$" + b.str1 = b.str2[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment in b.str1 = b.str2\[1:2\]$" } func (b *Buffer) bat() { // ERROR "leaking param content: b$" @@ -1623,8 +1629,8 @@ func (b *Buffer) bat() { // ERROR "leaking param content: b$" } func quux(sp *string, bp *[]byte) { // ERROR "quux bp does not escape$" "quux sp does not escape$" - *sp = (*sp)[1:2] // ERROR "quux ignoring self-assignment to \*sp$" - *bp = (*bp)[1:2] // ERROR "quux ignoring self-assignment to \*bp$" + *sp = (*sp)[1:2] // ERROR "quux ignoring self-assignment in \*sp = \(\*sp\)\[1:2\]$" + *bp = (*bp)[1:2] // ERROR "quux ignoring self-assignment in \*bp = \(\*bp\)\[1:2\]$" } type StructWithString struct { diff --git a/test/escape2n.go b/test/escape2n.go index b1130d3c3c9d3..989cf18d35dbe 100644 --- a/test/escape2n.go +++ b/test/escape2n.go @@ -1593,27 +1593,33 @@ func ptrlitEscape() { // self-assignments type Buffer struct { - arr [64]byte - buf1 []byte - buf2 []byte - str1 string - str2 string + arr [64]byte + arrPtr *[64]byte + buf1 []byte + buf2 []byte + str1 string + str2 string } func (b *Buffer) foo() { // ERROR "\(\*Buffer\).foo b 
does not escape$" - b.buf1 = b.buf1[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf1[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf2[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" - b.buf1 = b.buf2[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment to b.buf1$" + b.buf1 = b.buf1[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf1\[1:2\]$" + b.buf1 = b.buf1[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf1\[1:2:3\]$" + b.buf1 = b.buf2[1:2] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf2\[1:2\]$" + b.buf1 = b.buf2[1:2:3] // ERROR "\(\*Buffer\).foo ignoring self-assignment in b.buf1 = b.buf2\[1:2:3\]$" } func (b *Buffer) bar() { // ERROR "leaking param: b$" b.buf1 = b.arr[1:2] // ERROR "b.arr escapes to heap$" } +func (b *Buffer) arrayPtr() { // ERROR "\(\*Buffer\).arrayPtr b does not escape" + b.buf1 = b.arrPtr[1:2] // ERROR "\(\*Buffer\).arrayPtr ignoring self-assignment in b.buf1 = b.arrPtr\[1:2\]$" + b.buf1 = b.arrPtr[1:2:3] // ERROR "\(\*Buffer\).arrayPtr ignoring self-assignment in b.buf1 = b.arrPtr\[1:2:3\]$" +} + func (b *Buffer) baz() { // ERROR "\(\*Buffer\).baz b does not escape$" - b.str1 = b.str1[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment to b.str1$" - b.str1 = b.str2[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment to b.str1$" + b.str1 = b.str1[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment in b.str1 = b.str1\[1:2\]$" + b.str1 = b.str2[1:2] // ERROR "\(\*Buffer\).baz ignoring self-assignment in b.str1 = b.str2\[1:2\]$" } func (b *Buffer) bat() { // ERROR "leaking param content: b$" @@ -1623,8 +1629,8 @@ func (b *Buffer) bat() { // ERROR "leaking param content: b$" } func quux(sp *string, bp *[]byte) { // ERROR "quux bp does not escape$" "quux sp does not escape$" - *sp = (*sp)[1:2] // ERROR "quux ignoring self-assignment to \*sp$" - *bp = 
(*bp)[1:2] // ERROR "quux ignoring self-assignment to \*bp$" + *sp = (*sp)[1:2] // ERROR "quux ignoring self-assignment in \*sp = \(\*sp\)\[1:2\]$" + *bp = (*bp)[1:2] // ERROR "quux ignoring self-assignment in \*bp = \(\*bp\)\[1:2\]$" } type StructWithString struct { diff --git a/test/escape5.go b/test/escape5.go index 03283a37f8470..e26ecd5275032 100644 --- a/test/escape5.go +++ b/test/escape5.go @@ -228,3 +228,20 @@ func f15730c(args ...interface{}) { // ERROR "leaking param content: args" } } } + +// Issue 29000: unnamed parameter is not handled correctly + +var sink4 interface{} +var alwaysFalse = false + +func f29000(_ int, x interface{}) { // ERROR "leaking param: x" + sink4 = x + if alwaysFalse { + g29000() + } +} + +func g29000() { + x := 1 + f29000(2, x) // ERROR "x escapes to heap" +} diff --git a/test/escape_because.go b/test/escape_because.go index c7548fc677a5e..3b67ff9e4bf8b 100644 --- a/test/escape_because.go +++ b/test/escape_because.go @@ -113,8 +113,7 @@ func f13() { escape(c) } -//go:noinline -func transmit(b []byte) []byte { // ERROR "from ~r1 \(return\) at escape_because.go:118$" "leaking param: b to result ~r1 level=0$" +func transmit(b []byte) []byte { // ERROR "from ~r1 \(return\) at escape_because.go:117$" "leaking param: b to result ~r1 level=0$" return b } @@ -125,6 +124,24 @@ func f14() { _, _ = s1, s2 } +func leakParams(p1, p2 *int) (*int, *int) { // ERROR "leaking param: p1 to result ~r2 level=0$" "from ~r2 \(return\) at escape_because.go:128$" "leaking param: p2 to result ~r3 level=0$" "from ~r3 \(return\) at escape_because.go:128$" + return p1, p2 +} + +func leakThroughOAS2() { + // See #26987. 
+ i := 0 // ERROR "moved to heap: i$" + j := 0 // ERROR "moved to heap: j$" + sink, sink = &i, &j // ERROR "&i escapes to heap$" "from sink \(assign-pair\) at escape_because.go:135$" "from &i \(interface-converted\) at escape_because.go:135$" "&j escapes to heap$" "from &j \(interface-converted\) at escape_because.go:135" +} + +func leakThroughOAS2FUNC() { + // See #26987. + i := 0 // ERROR "moved to heap: i$" + j := 0 + sink, _ = leakParams(&i, &j) // ERROR "&i escapes to heap$" "&j does not escape$" "from .out0 \(passed-to-and-returned-from-call\) at escape_because.go:142$" "from sink \(assign-pair-func-call\) at escape_because.go:142$" +} + // The list below is all of the why-escapes messages seen building the escape analysis tests. /* for i in escape*go ; do echo compile $i; go build -gcflags '-l -m -m' $i >& `basename $i .go`.log ; done diff --git a/test/escape_param.go b/test/escape_param.go index 2c43b96ba0316..dff13b6f7cc9b 100644 --- a/test/escape_param.go +++ b/test/escape_param.go @@ -11,6 +11,8 @@ package escape +func zero() int { return 0 } + var sink interface{} // in -> out @@ -58,20 +60,91 @@ func caller2b() { sink = p // ERROR "p escapes to heap$" } +func paramArraySelfAssign(p *PairOfPairs) { // ERROR "p does not escape" + p.pairs[0] = p.pairs[1] // ERROR "ignoring self-assignment in p.pairs\[0\] = p.pairs\[1\]" +} + +func paramArraySelfAssignUnsafeIndex(p *PairOfPairs) { // ERROR "leaking param content: p" + // Function call inside index disables self-assignment case to trigger. 
+ p.pairs[zero()] = p.pairs[1] + p.pairs[zero()+1] = p.pairs[1] +} + +type PairOfPairs struct { + pairs [2]*Pair +} + +type BoxedPair struct { + pair *Pair +} + +type WrappedPair struct { + pair Pair +} + +func leakParam(x interface{}) { // ERROR "leaking param: x" + sink = x +} + +func sinkAfterSelfAssignment1(box *BoxedPair) { // ERROR "leaking param content: box" + box.pair.p1 = box.pair.p2 // ERROR "ignoring self-assignment in box.pair.p1 = box.pair.p2" + sink = box.pair.p2 // ERROR "box.pair.p2 escapes to heap" +} + +func sinkAfterSelfAssignment2(box *BoxedPair) { // ERROR "leaking param content: box" + box.pair.p1 = box.pair.p2 // ERROR "ignoring self-assignment in box.pair.p1 = box.pair.p2" + sink = box.pair // ERROR "box.pair escapes to heap" +} + +func sinkAfterSelfAssignment3(box *BoxedPair) { // ERROR "leaking param content: box" + box.pair.p1 = box.pair.p2 // ERROR "ignoring self-assignment in box.pair.p1 = box.pair.p2" + leakParam(box.pair.p2) // ERROR "box.pair.p2 escapes to heap" +} + +func sinkAfterSelfAssignment4(box *BoxedPair) { // ERROR "leaking param content: box" + box.pair.p1 = box.pair.p2 // ERROR "ignoring self-assignment in box.pair.p1 = box.pair.p2" + leakParam(box.pair) // ERROR "box.pair escapes to heap" +} + +func selfAssignmentAndUnrelated(box1, box2 *BoxedPair) { // ERROR "leaking param content: box2" "box1 does not escape" + box1.pair.p1 = box1.pair.p2 // ERROR "ignoring self-assignment in box1.pair.p1 = box1.pair.p2" + leakParam(box2.pair.p2) // ERROR "box2.pair.p2 escapes to heap" +} + +func notSelfAssignment1(box1, box2 *BoxedPair) { // ERROR "leaking param content: box2" "box1 does not escape" + box1.pair.p1 = box2.pair.p1 +} + +func notSelfAssignment2(p1, p2 *PairOfPairs) { // ERROR "leaking param content: p2" "p1 does not escape" + p1.pairs[0] = p2.pairs[1] +} + +func notSelfAssignment3(p1, p2 *PairOfPairs) { // ERROR "leaking param content: p2" "p1 does not escape" + p1.pairs[0].p1 = p2.pairs[1].p1 +} + +func 
boxedPairSelfAssign(box *BoxedPair) { // ERROR "box does not escape" + box.pair.p1 = box.pair.p2 // ERROR "ignoring self-assignment in box.pair.p1 = box.pair.p2" +} + +func wrappedPairSelfAssign(w *WrappedPair) { // ERROR "w does not escape" + w.pair.p1 = w.pair.p2 // ERROR "ignoring self-assignment in w.pair.p1 = w.pair.p2" +} + // in -> in type Pair struct { p1 *int p2 *int } -func param3(p *Pair) { // ERROR "leaking param content: p$" - p.p1 = p.p2 +func param3(p *Pair) { // ERROR "param3 p does not escape" + p.p1 = p.p2 // ERROR "param3 ignoring self-assignment in p.p1 = p.p2" } func caller3a() { - i := 0 // ERROR "moved to heap: i$" - j := 0 // ERROR "moved to heap: j$" - p := Pair{&i, &j} // ERROR "&i escapes to heap$" "&j escapes to heap$" + i := 0 + j := 0 + p := Pair{&i, &j} // ERROR "caller3a &i does not escape" "caller3a &j does not escape" param3(&p) // ERROR "caller3a &p does not escape" _ = p } diff --git a/test/fixedbugs/bug273.go b/test/fixedbugs/bug273.go index 7305c6063ccdc..2af8800171ed4 100644 --- a/test/fixedbugs/bug273.go +++ b/test/fixedbugs/bug273.go @@ -14,7 +14,7 @@ var bug = false var minus1 = -1 var five = 5 -var big int64 = 10 | 1<<40 +var big int64 = 10 | 1<<46 type block [1 << 19]byte diff --git a/test/fixedbugs/bug506.dir/a.go b/test/fixedbugs/bug506.dir/a.go new file mode 100644 index 0000000000000..8e8a2005810fa --- /dev/null +++ b/test/fixedbugs/bug506.dir/a.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type internal struct { + f1 string + f2 float64 +} + +type S struct { + F struct { + I internal + } +} diff --git a/test/fixedbugs/bug506.dir/main.go b/test/fixedbugs/bug506.dir/main.go new file mode 100644 index 0000000000000..1b60e40d8d0a4 --- /dev/null +++ b/test/fixedbugs/bug506.dir/main.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + + "./a" +) + +var v = a.S{} + +func main() { + want := "{{ 0}}" + if got := fmt.Sprint(v.F); got != want { + panic(got) + } +} diff --git a/test/fixedbugs/bug506.go b/test/fixedbugs/bug506.go new file mode 100644 index 0000000000000..7c8ccc6ec7ed7 --- /dev/null +++ b/test/fixedbugs/bug506.go @@ -0,0 +1,10 @@ +// rundir + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Gccgo caused an undefined symbol reference building hash functions +// for an imported struct with unexported fields. + +package ignored diff --git a/test/fixedbugs/issue13265.go b/test/fixedbugs/issue13265.go index 3036ba7c24408..3e16cee6e75a5 100644 --- a/test/fixedbugs/issue13265.go +++ b/test/fixedbugs/issue13265.go @@ -1,4 +1,5 @@ // errorcheck -0 -race +// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64 // Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/fixedbugs/issue15091.go b/test/fixedbugs/issue15091.go index 00fb473d6a27e..678e7911c8023 100644 --- a/test/fixedbugs/issue15091.go +++ b/test/fixedbugs/issue15091.go @@ -1,4 +1,5 @@ // errorcheck -0 -race +// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64 // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/fixedbugs/issue16008.go b/test/fixedbugs/issue16008.go index 0e369efcbbf2a..45457cdb7f568 100644 --- a/test/fixedbugs/issue16008.go +++ b/test/fixedbugs/issue16008.go @@ -1,4 +1,5 @@ // errorcheck -0 -race +// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64 // Copyright 2016 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style diff --git a/test/fixedbugs/issue17038.go b/test/fixedbugs/issue17038.go index 1b65ffc1f0eec..e07a4b22ce25d 100644 --- a/test/fixedbugs/issue17038.go +++ b/test/fixedbugs/issue17038.go @@ -6,4 +6,4 @@ package main -const A = complex(0()) // ERROR "cannot call non-function" +const A = complex(0()) // ERROR "cannot call non-function" "const initializer .* is not a constant" diff --git a/test/fixedbugs/issue17449.go b/test/fixedbugs/issue17449.go index 23029178e8dcf..51cc8eaa06d85 100644 --- a/test/fixedbugs/issue17449.go +++ b/test/fixedbugs/issue17449.go @@ -1,4 +1,5 @@ // errorcheck -0 -race +// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64 // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/fixedbugs/issue18640.go b/test/fixedbugs/issue18640.go index 60abd31f760cf..091bbe596b22b 100644 --- a/test/fixedbugs/issue18640.go +++ b/test/fixedbugs/issue18640.go @@ -20,8 +20,7 @@ type ( d = c ) -// The compiler reports an incorrect (non-alias related) -// type cycle here (via dowith()). Disabled for now. +// The compiler cannot handle these cases. Disabled for now. // See issue #25838. /* type ( @@ -32,7 +31,6 @@ type ( i = j j = e ) -*/ type ( a1 struct{ *b1 } @@ -45,3 +43,4 @@ type ( b2 = c2 c2 struct{ *b2 } ) +*/ diff --git a/test/fixedbugs/issue19507.dir/div_arm.s b/test/fixedbugs/issue19507.dir/div_arm.s index f67c3bb66d7ee..0bc33e92ce297 100644 --- a/test/fixedbugs/issue19507.dir/div_arm.s +++ b/test/fixedbugs/issue19507.dir/div_arm.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-TEXT main·f(SB),0,$0-8 +TEXT ·f(SB),0,$0-8 MOVW x+0(FP), R1 MOVW x+4(FP), R2 DIVU R1, R2 diff --git a/test/fixedbugs/issue20250.go b/test/fixedbugs/issue20250.go index 6fc861a8dc420..c190515274af2 100644 --- a/test/fixedbugs/issue20250.go +++ b/test/fixedbugs/issue20250.go @@ -11,14 +11,14 @@ package p type T struct { - s string + s [2]string } func f(a T) { // ERROR "live at entry to f: a" - var e interface{} - func() { // ERROR "live at entry to f.func1: a &e" - e = a.s // ERROR "live at call to convT2Estring: a &e" - }() // ERROR "live at call to f.func1: e$" + var e interface{} // ERROR "stack object e interface \{\}$" + func() { // ERROR "live at entry to f.func1: a &e" + e = a.s // ERROR "live at call to convT2E: &e" "stack object a T$" + }() // Before the fix, both a and e were live at the previous line. _ = e } diff --git a/test/fixedbugs/issue20780.go b/test/fixedbugs/issue20780.go index a31e031b78a5b..58952e53eea67 100644 --- a/test/fixedbugs/issue20780.go +++ b/test/fixedbugs/issue20780.go @@ -6,15 +6,14 @@ // We have a limit of 1GB for stack frames. // Make sure we include the callee args section. -// (The dispatch wrapper which implements (*S).f -// copies the return value from f to a stack temp, then -// from that stack temp to the return value of (*S).f. -// It uses ~800MB for each section.) package main -type S struct { - i interface { - f() [800e6]byte - } +func f() { // ERROR "stack frame too large" + var x [800e6]byte + g(x) + return } + +//go:noinline +func g([800e6]byte) {} diff --git a/test/fixedbugs/issue22327.go b/test/fixedbugs/issue22327.go new file mode 100644 index 0000000000000..7b21d834029d8 --- /dev/null +++ b/test/fixedbugs/issue22327.go @@ -0,0 +1,18 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Using a multi-result function as an argument to +// append should compile successfully. 
Previously there +// was a missing *int -> interface{} conversion that caused +// the compiler to ICE. + +package p + +func f() ([]interface{}, *int) { + return nil, nil +} + +var _ = append(f()) diff --git a/test/fixedbugs/issue22662b.go b/test/fixedbugs/issue22662b.go index 3594c0f4ef751..2678383ab0775 100644 --- a/test/fixedbugs/issue22662b.go +++ b/test/fixedbugs/issue22662b.go @@ -18,7 +18,7 @@ import ( ) // Each of these tests is expected to fail (missing package clause) -// at the position determined by the preceeding line directive. +// at the position determined by the preceding line directive. var tests = []struct { src, pos string }{ diff --git a/test/fixedbugs/issue23311.dir/main.go b/test/fixedbugs/issue23311.dir/main.go new file mode 100644 index 0000000000000..fa4cf76b89a7a --- /dev/null +++ b/test/fixedbugs/issue23311.dir/main.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import _ "unsafe" // for linkname + +//go:linkname f runtime.GC +func f() + +func main() { + f() +} diff --git a/src/runtime/testdata/testprog/empty.s b/test/fixedbugs/issue23311.go similarity index 70% rename from src/runtime/testdata/testprog/empty.s rename to test/fixedbugs/issue23311.go index c5aa6f8a54661..128cf9d06ad6e 100644 --- a/src/runtime/testdata/testprog/empty.s +++ b/test/fixedbugs/issue23311.go @@ -1,5 +1,7 @@ +// compiledir + // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This exists solely so we can linkname in symbols from runtime. +package ignored diff --git a/test/fixedbugs/issue23734.go b/test/fixedbugs/issue23734.go new file mode 100644 index 0000000000000..dd5869b8f0039 --- /dev/null +++ b/test/fixedbugs/issue23734.go @@ -0,0 +1,32 @@ +// run + +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + m := map[interface{}]int{} + k := []int{} + + mustPanic(func() { + _ = m[k] + }) + mustPanic(func() { + _, _ = m[k] + }) + mustPanic(func() { + delete(m, k) + }) +} + +func mustPanic(f func()) { + defer func() { + r := recover() + if r == nil { + panic("didn't panic") + } + }() + f() +} diff --git a/test/fixedbugs/issue23780.go b/test/fixedbugs/issue23780.go new file mode 100644 index 0000000000000..71fc2d9ed6f3a --- /dev/null +++ b/test/fixedbugs/issue23780.go @@ -0,0 +1,17 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func f() { + _ = []byte{1 << 30: 1} +} + +func g() { + sink = []byte{1 << 30: 1} +} + +var sink []byte diff --git a/test/fixedbugs/issue23781.go b/test/fixedbugs/issue23781.go new file mode 100644 index 0000000000000..5c03cf7e4ef53 --- /dev/null +++ b/test/fixedbugs/issue23781.go @@ -0,0 +1,10 @@ +// +build amd64 +// compile + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +var _ = []int{1 << 31: 1} // ok on machines with 64bit int diff --git a/test/fixedbugs/issue23823.go b/test/fixedbugs/issue23823.go index 2f802d0988606..9297966cbd6f6 100644 --- a/test/fixedbugs/issue23823.go +++ b/test/fixedbugs/issue23823.go @@ -1,4 +1,4 @@ -// errorcheck +// compile // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -6,10 +6,14 @@ package p +// The compiler cannot handle this. Disabled for now. +// See issue #25838. 
+/* type I1 = interface { I2 } -type I2 interface { // ERROR "invalid recursive type" +type I2 interface { I1 } +*/ diff --git a/test/fixedbugs/issue23837.go b/test/fixedbugs/issue23837.go new file mode 100644 index 0000000000000..7ad50837f4644 --- /dev/null +++ b/test/fixedbugs/issue23837.go @@ -0,0 +1,70 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +//go:noinline +func f(p, q *struct{}) bool { + return *p == *q +} + +type T struct { + x struct{} + y int +} + +//go:noinline +func g(p, q *T) bool { + return p.x == q.x +} + +//go:noinline +func h(p, q func() struct{}) bool { + return p() == q() +} + +func fi(p, q *struct{}) bool { + return *p == *q +} + +func gi(p, q *T) bool { + return p.x == q.x +} + +func hi(p, q func() struct{}) bool { + return p() == q() +} + +func main() { + shouldPanic(func() { f(nil, nil) }) + shouldPanic(func() { g(nil, nil) }) + shouldPanic(func() { h(nil, nil) }) + shouldPanic(func() { fi(nil, nil) }) + shouldPanic(func() { gi(nil, nil) }) + shouldPanic(func() { hi(nil, nil) }) + n := 0 + inc := func() struct{} { + n++ + return struct{}{} + } + h(inc, inc) + if n != 2 { + panic("inc not called") + } + hi(inc, inc) + if n != 4 { + panic("inc not called") + } +} + +func shouldPanic(x func()) { + defer func() { + if recover() == nil { + panic("did not panic") + } + }() + x() +} diff --git a/test/fixedbugs/issue24488.go b/test/fixedbugs/issue24488.go new file mode 100644 index 0000000000000..b3deab48228d8 --- /dev/null +++ b/test/fixedbugs/issue24488.go @@ -0,0 +1,38 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "runtime" + "strings" +) + +type Func func() + +func (f Func) Foo() { + if f != nil { + f() + } +} + +func (f Func) Bar() { + if f != nil { + f() + } + buf := make([]byte, 4000) + n := runtime.Stack(buf, true) + s := string(buf[:n]) + if strings.Contains(s, "-fm") { + panic("wrapper present in stack trace:\n" + s) + } +} + +func main() { + foo := Func(func() {}) + foo = foo.Bar + foo.Foo() +} diff --git a/test/fixedbugs/issue24651a.go b/test/fixedbugs/issue24651a.go index 5f63635a2a814..b12b0cce29b1e 100644 --- a/test/fixedbugs/issue24651a.go +++ b/test/fixedbugs/issue24651a.go @@ -1,4 +1,5 @@ //errorcheck -0 -race -m -m +// +build linux,amd64 linux,ppc64le darwin,amd64 freebsd,amd64 netbsd,amd64 windows,amd64 // Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/fixedbugs/issue24760.go b/test/fixedbugs/issue24760.go new file mode 100644 index 0000000000000..cd6f124517a5c --- /dev/null +++ b/test/fixedbugs/issue24760.go @@ -0,0 +1,12 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "unsafe" + +var _ = string([]byte(nil))[0] +var _ = uintptr(unsafe.Pointer(uintptr(1))) << 100 diff --git a/test/fixedbugs/issue24939.go b/test/fixedbugs/issue24939.go index 26530e95b29ee..0ae6f2b9f2600 100644 --- a/test/fixedbugs/issue24939.go +++ b/test/fixedbugs/issue24939.go @@ -15,7 +15,9 @@ type M interface { } type P = interface { - I() M + // The compiler cannot handle this case. Disabled for now. + // See issue #25838. + // I() M } func main() {} diff --git a/test/fixedbugs/issue26411.go b/test/fixedbugs/issue26411.go new file mode 100644 index 0000000000000..5f40bf25229a1 --- /dev/null +++ b/test/fixedbugs/issue26411.go @@ -0,0 +1,92 @@ +// +build !nacl,!js +// run + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Ensure that label redefinition errors print out +// a column number that matches the start of the current label's +// definition instead of the label delimiting token ":" + +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" +) + +func main() { + tmpdir, err := ioutil.TempDir("", "issue26411") + if err != nil { + log.Fatalf("Failed to create temporary directory: %v", err) + } + defer os.RemoveAll(tmpdir) + + tests := []struct { + code string + errors []string + }{ + { + code: ` +package main + +func main() { +foo: +foo: +} +`, + errors: []string{ + "^.+:5:1: label foo defined and not used\n", + ".+:6:1: label foo already defined at .+:5:1\n$", + }, + }, + { + code: ` +package main + +func main() { + + bar: + bar: +bar: +bar : +} +`, + + errors: []string{ + "^.+:6:13: label bar defined and not used\n", + ".+:7:4: label bar already defined at .+:6:13\n", + ".+:8:1: label bar already defined at .+:6:13\n", + ".+:9:1: label bar already defined at .+:6:13\n$", + }, + }, + } + + for i, test := range tests { + filename := filepath.Join(tmpdir, fmt.Sprintf("%d.go", i)) + if err := ioutil.WriteFile(filename, []byte(test.code), 0644); err != nil { + log.Printf("#%d: failed to create file %s", i, filename) + continue + } + output, _ := exec.Command("go", "tool", "compile", filename).CombinedOutput() + + // remove each matching error from the output + for _, err := range test.errors { + rx := regexp.MustCompile(err) + match := rx.Find(output) + output = bytes.Replace(output, match, nil, 1) // remove match (which might be nil) from output + } + + // at this point all output should have been consumed + if len(output) != 0 { + log.Printf("Test case %d has unmatched errors:\n%s", i, output) + } + } +} diff --git a/test/fixedbugs/issue26616.go b/test/fixedbugs/issue26616.go new file mode 100644 index 
0000000000000..e5565b68ca55a --- /dev/null +++ b/test/fixedbugs/issue26616.go @@ -0,0 +1,20 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +var x int = three() // ERROR "assignment mismatch: 1 variable but three returns 3 values" + +func f() { + var _ int = three() // ERROR "assignment mismatch: 1 variable but three returns 3 values" + var a int = three() // ERROR "assignment mismatch: 1 variable but three returns 3 values" + a = three() // ERROR "assignment mismatch: 1 variable but three returns 3 values" + b := three() // ERROR "assignment mismatch: 1 variable but three returns 3 values" + + _, _ = a, b +} + +func three() (int, int, int) diff --git a/test/fixedbugs/issue26855.go b/test/fixedbugs/issue26855.go new file mode 100644 index 0000000000000..d5b95ddbf1bb7 --- /dev/null +++ b/test/fixedbugs/issue26855.go @@ -0,0 +1,28 @@ +// errorcheck + +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Verify that we get the correct (T vs &T) literal specification +// in the error message. + +package p + +type S struct { + f T +} + +type P struct { + f *T +} + +type T struct{} + +var _ = S{ + f: &T{}, // ERROR "cannot use &T literal" +} + +var _ = P{ + f: T{}, // ERROR "cannot use T literal" +} diff --git a/test/fixedbugs/issue27143.go b/test/fixedbugs/issue27143.go new file mode 100644 index 0000000000000..009ec9f6c255e --- /dev/null +++ b/test/fixedbugs/issue27143.go @@ -0,0 +1,17 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Issue 27143: cmd/compile: erroneous application of walkinrange +// optimization for const over 2**63 + +package p + +var c uint64 + +var b1 bool = 0x7fffffffffffffff < c && c < 0x8000000000000000 +var b2 bool = c < 0x8000000000000000 && 0x7fffffffffffffff < c +var b3 bool = 0x8000000000000000 < c && c < 0x8000000000000001 +var b4 bool = c < 0x8000000000000001 && 0x8000000000000000 < c diff --git a/test/fixedbugs/issue27201.go b/test/fixedbugs/issue27201.go new file mode 100644 index 0000000000000..0c9611fc73b93 --- /dev/null +++ b/test/fixedbugs/issue27201.go @@ -0,0 +1,37 @@ +// run + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "strings" +) + +func main() { + f(nil) +} + +func f(p *int32) { + defer checkstack() + v := *p // panic should happen here, line 20 + sink = int64(v) // not here, line 21 +} + +var sink int64 + +func checkstack() { + _ = recover() + var buf [1024]byte + n := runtime.Stack(buf[:], false) + s := string(buf[:n]) + if strings.Contains(s, "issue27201.go:21 ") { + panic("panic at wrong location") + } + if !strings.Contains(s, "issue27201.go:20 ") { + panic("no panic at correct location") + } +} diff --git a/test/fixedbugs/issue27232.go b/test/fixedbugs/issue27232.go new file mode 100644 index 0000000000000..3a1cc87e4cb96 --- /dev/null +++ b/test/fixedbugs/issue27232.go @@ -0,0 +1,19 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type F = func(T) + +type T interface { + m(F) +} + +type t struct{} + +func (t) m(F) {} + +var _ T = &t{} diff --git a/test/fixedbugs/issue27267.go b/test/fixedbugs/issue27267.go new file mode 100644 index 0000000000000..ebae44f48fefc --- /dev/null +++ b/test/fixedbugs/issue27267.go @@ -0,0 +1,21 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +// 1st test case from issue +type F = func(E) // compiles if not type alias or moved below E struct +type E struct { + f F +} + +var x = E{func(E) {}} + +// 2nd test case from issue +type P = *S +type S struct { + p P +} diff --git a/test/fixedbugs/issue27278.go b/test/fixedbugs/issue27278.go new file mode 100644 index 0000000000000..73f7c755e1e3e --- /dev/null +++ b/test/fixedbugs/issue27278.go @@ -0,0 +1,63 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 27278: dead auto elim deletes an auto and its +// initialization, but it is live because of a nil check. 
+ +package main + +type T struct { + _ [3]string + T2 +} + +func (t *T) M() []string { + return t.T2.M() +} + +type T2 struct { + T3 +} + +func (t *T2) M() []string { + return t.T3.M() +} + +type T3 struct { + a string +} + +func (t *T3) M() []string { + return []string{} +} + +func main() { + poison() + f() +} + +//go:noinline +func f() { + (&T{}).M() + grow(10000) +} + +// grow stack, triggers stack copy +func grow(n int) { + if n == 0 { + return + } + grow(n-1) +} + +// put some junk on stack, which cannot be valid address +//go:noinline +func poison() { + x := [10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + g = x +} + +var g [10]int diff --git a/test/fixedbugs/issue27289.go b/test/fixedbugs/issue27289.go new file mode 100644 index 0000000000000..293b9d005570f --- /dev/null +++ b/test/fixedbugs/issue27289.go @@ -0,0 +1,24 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure we don't prove that the bounds check failure branch is unreachable. + +package main + +//go:noinline +func f(a []int) { + _ = a[len(a)-1] +} + +func main() { + defer func() { + if err := recover(); err != nil { + return + } + panic("f should panic") + }() + f(nil) +} diff --git a/test/fixedbugs/issue27356.go b/test/fixedbugs/issue27356.go new file mode 100644 index 0000000000000..42784876a558a --- /dev/null +++ b/test/fixedbugs/issue27356.go @@ -0,0 +1,19 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Issue 27356: function parameter hiding built-in function results in compiler crash + +package p + +var a = []int{1,2,3} + +func _(len int) { + _ = len(a) // ERROR "cannot call non-function" +} + +var cap = false +var _ = cap(a) // ERROR "cannot call non-function" + diff --git a/test/fixedbugs/issue27518a.go b/test/fixedbugs/issue27518a.go new file mode 100644 index 0000000000000..d6224df017dc4 --- /dev/null +++ b/test/fixedbugs/issue27518a.go @@ -0,0 +1,45 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" +) + +var nilp *int +var forceHeap interface{} + +func main() { + // x is a pointer on the stack to heap-allocated memory. + x := new([32]*int) + forceHeap = x + forceHeap = nil + + // Push a defer to be run when we panic below. + defer func() { + // Ignore the panic. + recover() + // Force a stack walk. Go 1.11 will fail because x is now + // considered live again. + runtime.GC() + }() + // Make x live at the defer's PC. + runtime.KeepAlive(x) + + // x is no longer live. Garbage collect the [32]*int on the + // heap. + runtime.GC() + // At this point x's dead stack slot points to dead memory. + + // Trigger a sigpanic. Since this is an implicit panic, we + // don't have an explicit liveness map here. + // Traceback used to use the liveness map of the most recent defer, + // but in that liveness map, x will be live again even though + // it points to dead memory. The fix is to use the liveness + // map of a deferreturn call instead. + *nilp = 0 +} diff --git a/test/fixedbugs/issue27518b.go b/test/fixedbugs/issue27518b.go new file mode 100644 index 0000000000000..ea72a3088503b --- /dev/null +++ b/test/fixedbugs/issue27518b.go @@ -0,0 +1,72 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "runtime" + +var finalized bool +var err string + +type HeapObj [8]int64 + +const filler int64 = 0x123456789abcdef0 + +func (h *HeapObj) init() { + for i := 0; i < len(*h); i++ { + h[i] = filler + } +} +func (h *HeapObj) check() { + for i := 0; i < len(*h); i++ { + if h[i] != filler { + err = "filler overwritten" + } + } +} + +type StackObj struct { + h *HeapObj +} + +func gc(shouldFinalize bool) { + runtime.GC() + runtime.GC() + runtime.GC() + if shouldFinalize != finalized { + err = "heap object finalized at the wrong time" + } +} + +func main() { + var s StackObj + s.h = new(HeapObj) + s.h.init() + runtime.SetFinalizer(s.h, func(h *HeapObj) { + finalized = true + }) + gc(false) + h := g(&s) + gc(false) + h.check() + gc(true) // finalize here, after return value's last use. (Go1.11 never runs the finalizer.) + if err != "" { + panic(err) + } +} + +func g(p *StackObj) (v *HeapObj) { + gc(false) + v = p.h // last use of the stack object. the only reference to the heap object is in the return slot. + gc(false) + defer func() { + gc(false) + recover() + gc(false) + }() + *(*int)(nil) = 0 + return +} diff --git a/test/fixedbugs/issue27595.go b/test/fixedbugs/issue27595.go new file mode 100644 index 0000000000000..af5c7a10d9b01 --- /dev/null +++ b/test/fixedbugs/issue27595.go @@ -0,0 +1,19 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +var a = twoResults() // ERROR "assignment mismatch: 1 variable but twoResults returns 2 values" +var b, c, d = twoResults() // ERROR "assignment mismatch: 3 variables but twoResults returns 2 values" +var e, f = oneResult() // ERROR "assignment mismatch: 2 variables but oneResult returns 1 values" + +func twoResults() (int, int) { + return 1, 2 +} + +func oneResult() int { + return 1 +} diff --git a/test/fixedbugs/issue27695.go b/test/fixedbugs/issue27695.go new file mode 100644 index 0000000000000..8bd4939e7ecb4 --- /dev/null +++ b/test/fixedbugs/issue27695.go @@ -0,0 +1,62 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure return values are always scanned, when +// calling methods (+functions, TODO) with reflect. + +package main + +import ( + "reflect" + "runtime/debug" + "sync" +) + +func main() { + debug.SetGCPercent(1) // run GC frequently + var wg sync.WaitGroup + for i := 0; i < 20; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for i := 0; i < 2000; i++ { + _test() + } + }() + } + wg.Wait() +} + +type Stt struct { + Data interface{} +} + +type My struct { + b byte +} + +func (this *My) Run(rawData []byte) (Stt, error) { + var data string = "hello" + stt := Stt{ + Data: data, + } + return stt, nil +} + +func _test() (interface{}, error) { + f := reflect.ValueOf(&My{}).MethodByName("Run") + if method, ok := f.Interface().(func([]byte) (Stt, error)); ok { + s, e := method(nil) + // The bug in issue27695 happens here, during the return + // from the above call (at the end of reflect.callMethod + // when preparing to return). The result value that + // is assigned to s was not being scanned if GC happens + // to occur there. 
+ i := interface{}(s) + return i, e + } + return nil, nil +} diff --git a/test/fixedbugs/issue27695b.go b/test/fixedbugs/issue27695b.go new file mode 100644 index 0000000000000..d80acfb8b40ee --- /dev/null +++ b/test/fixedbugs/issue27695b.go @@ -0,0 +1,64 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure return values aren't scanned until they +// are initialized, when calling functions and methods +// via reflect. + +package main + +import ( + "reflect" + "runtime" + "unsafe" +) + +var badPtr uintptr + +var sink []byte + +func init() { + // Allocate large enough to use largeAlloc. + b := make([]byte, 1<<16-1) + sink = b // force heap allocation + // Any space between the object and the end of page is invalid to point to. + badPtr = uintptr(unsafe.Pointer(&b[len(b)-1])) + 1 +} + +func f(d func() *byte) *byte { + // Initialize callee args section with a bad pointer. + g(badPtr) + + // Then call a function which returns a pointer. + // That return slot starts out holding a bad pointer. + return d() +} + +//go:noinline +func g(x uintptr) { +} + +type T struct { +} + +func (t *T) Foo() *byte { + runtime.GC() + return nil +} + +func main() { + // Functions + d := reflect.MakeFunc(reflect.TypeOf(func() *byte { return nil }), + func(args []reflect.Value) []reflect.Value { + runtime.GC() + return []reflect.Value{reflect.ValueOf((*byte)(nil))} + }).Interface().(func() *byte) + f(d) + + // Methods + e := reflect.ValueOf(&T{}).Method(0).Interface().(func() *byte) + f(e) +} diff --git a/test/fixedbugs/issue27695c.go b/test/fixedbugs/issue27695c.go new file mode 100644 index 0000000000000..948191cc96686 --- /dev/null +++ b/test/fixedbugs/issue27695c.go @@ -0,0 +1,65 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Make sure return values aren't scanned until they +// are initialized, when calling functions and methods +// via reflect. + +package main + +import ( + "io" + "reflect" + "runtime" + "unsafe" +) + +var badPtr uintptr + +var sink []byte + +func init() { + // Allocate large enough to use largeAlloc. + b := make([]byte, 1<<16-1) + sink = b // force heap allocation + // Any space between the object and the end of page is invalid to point to. + badPtr = uintptr(unsafe.Pointer(&b[len(b)-1])) + 1 +} + +func f(d func(error) error) error { + // Initialize callee args section with a bad pointer. + g(badPtr, badPtr, badPtr, badPtr) + + // Then call a function which returns a pointer. + // That return slot starts out holding a bad pointer. + return d(io.EOF) +} + +//go:noinline +func g(x, y, z, w uintptr) { +} + +type T struct { +} + +func (t *T) Foo(e error) error { + runtime.GC() + return e +} + +func main() { + // Functions + d := reflect.MakeFunc(reflect.TypeOf(func(e error) error { return e }), + func(args []reflect.Value) []reflect.Value { + runtime.GC() + return args + }).Interface().(func(error) error) + f(d) + + // Methods + x := reflect.ValueOf(&T{}).Method(0).Interface().(func(error) error) + f(x) +} diff --git a/test/fixedbugs/issue27718.go b/test/fixedbugs/issue27718.go new file mode 100644 index 0000000000000..f7794182f588d --- /dev/null +++ b/test/fixedbugs/issue27718.go @@ -0,0 +1,72 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// (-0)+0 should be 0, not -0. 
+ +package main + +//go:noinline +func add64(x float64) float64 { + return x + 0 +} + +func testAdd64() { + var zero float64 + inf := 1.0 / zero + negZero := -1 / inf + if 1/add64(negZero) != inf { + panic("negZero+0 != posZero (64 bit)") + } +} + +//go:noinline +func sub64(x float64) float64 { + return x - 0 +} + +func testSub64() { + var zero float64 + inf := 1.0 / zero + negZero := -1 / inf + if 1/sub64(negZero) != -inf { + panic("negZero-0 != negZero (64 bit)") + } +} + +//go:noinline +func add32(x float32) float32 { + return x + 0 +} + +func testAdd32() { + var zero float32 + inf := 1.0 / zero + negZero := -1 / inf + if 1/add32(negZero) != inf { + panic("negZero+0 != posZero (32 bit)") + } +} + +//go:noinline +func sub32(x float32) float32 { + return x - 0 +} + +func testSub32() { + var zero float32 + inf := 1.0 / zero + negZero := -1 / inf + if 1/sub32(negZero) != -inf { + panic("negZero-0 != negZero (32 bit)") + } +} + +func main() { + testAdd64() + testSub64() + testAdd32() + testSub32() +} diff --git a/test/fixedbugs/issue27829.go b/test/fixedbugs/issue27829.go new file mode 100644 index 0000000000000..9204043202852 --- /dev/null +++ b/test/fixedbugs/issue27829.go @@ -0,0 +1,27 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Bad AND/BTR combination rule. 
+ +package main + +import "fmt" + +//go:noinline +func f(x uint64) uint64 { + return (x >> 48) &^ (uint64(0x4000)) +} + +func main() { + bad := false + if got, want := f(^uint64(0)), uint64(0xbfff); got != want { + fmt.Printf("got %x, want %x\n", got, want) + bad = true + } + if bad { + panic("bad") + } +} diff --git "a/test/fixedbugs/issue27836.dir/\303\204foo.go" "b/test/fixedbugs/issue27836.dir/\303\204foo.go" new file mode 100644 index 0000000000000..8b6a814c3c4de --- /dev/null +++ "b/test/fixedbugs/issue27836.dir/\303\204foo.go" @@ -0,0 +1,13 @@ +package Äfoo + +var ÄbarV int = 101 + +func Äbar(x int) int { + defer func() { ÄbarV += 3 }() + return Äblix(x) +} + +func Äblix(x int) int { + defer func() { ÄbarV += 9 }() + return ÄbarV + x +} diff --git "a/test/fixedbugs/issue27836.dir/\303\204main.go" "b/test/fixedbugs/issue27836.dir/\303\204main.go" new file mode 100644 index 0000000000000..25d2c71fc00a9 --- /dev/null +++ "b/test/fixedbugs/issue27836.dir/\303\204main.go" @@ -0,0 +1,13 @@ +package main + +import ( + "fmt" + + "./Äfoo" + Äblix "./Äfoo" +) + +func main() { + fmt.Printf("Äfoo.Äbar(33) returns %v\n", Äfoo.Äbar(33)) + fmt.Printf("Äblix.Äbar(33) returns %v\n", Äblix.Äbar(33)) +} diff --git a/test/fixedbugs/issue27836.go b/test/fixedbugs/issue27836.go new file mode 100644 index 0000000000000..128cf9d06ad6e --- /dev/null +++ b/test/fixedbugs/issue27836.go @@ -0,0 +1,7 @@ +// compiledir + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ignored diff --git a/test/fixedbugs/issue27938.go b/test/fixedbugs/issue27938.go new file mode 100644 index 0000000000000..b0007be928361 --- /dev/null +++ b/test/fixedbugs/issue27938.go @@ -0,0 +1,23 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Verify that we get a single non-confusing error +// message for embedded fields/interfaces that use +// a qualified identifier with non-existing package. + +package p + +type _ struct { + F sync.Mutex // ERROR "undefined: sync" +} + +type _ struct { + sync.Mutex // ERROR "undefined: sync" +} + +type _ interface { + sync.Mutex // ERROR "undefined: sync" +} diff --git a/test/fixedbugs/issue27961.go b/test/fixedbugs/issue27961.go new file mode 100644 index 0000000000000..f8b4f669c4828 --- /dev/null +++ b/test/fixedbugs/issue27961.go @@ -0,0 +1,35 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 27961: some optimizations generate OffPtr with wrong +// types, which causes invalid bytecode on Wasm. + +package main + +import "math" + +type Vec2 [2]float64 + +func main() { + var a Vec2 + a.A().B().C().D() +} + +func (v Vec2) A() Vec2 { + return Vec2{v[0], v[0]} +} + +func (v Vec2) B() Vec2 { + return Vec2{1.0 / v.D(), 0} +} + +func (v Vec2) C() Vec2 { + return Vec2{v[0], v[0]} +} + +func (v Vec2) D() float64 { + return math.Sqrt(v[0]) +} diff --git a/test/fixedbugs/issue28055.go b/test/fixedbugs/issue28055.go new file mode 100644 index 0000000000000..d4889d54d4530 --- /dev/null +++ b/test/fixedbugs/issue28055.go @@ -0,0 +1,16 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure VARDEF can be a top-level statement. + +package p + +func f() { + var s string + var as []string + switch false && (s+"a"+as[0]+s+as[0]+s == "") { + } +} diff --git a/test/fixedbugs/issue28058.go b/test/fixedbugs/issue28058.go new file mode 100644 index 0000000000000..d8206e73576c4 --- /dev/null +++ b/test/fixedbugs/issue28058.go @@ -0,0 +1,13 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 14988: declaring a map with an invalid key type should not cause a +// fatal panic. + +package main + +var x map[func()]int // ERROR "invalid map key type" +var X map[func()]int // ERROR "invalid map key type" diff --git a/test/fixedbugs/issue28078.go b/test/fixedbugs/issue28078.go new file mode 100644 index 0000000000000..2e4c4b55164e7 --- /dev/null +++ b/test/fixedbugs/issue28078.go @@ -0,0 +1,34 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Non-constant duplicate keys/cases should not be reported +// as errors by the compiler. + +package p + +import "unsafe" + +func f() { + _ = map[uintptr]int{ + 0: 0, + uintptr(unsafe.Pointer(nil)): 0, + } + + switch uintptr(0) { + case 0: + case uintptr(unsafe.Pointer(nil)): + } + + switch interface{}(nil) { + case nil: + case nil: + } + + _ = map[interface{}]int{ + nil: 0, + nil: 0, + } +} diff --git a/test/fixedbugs/issue28079a.go b/test/fixedbugs/issue28079a.go new file mode 100644 index 0000000000000..b0631bbd86b8e --- /dev/null +++ b/test/fixedbugs/issue28079a.go @@ -0,0 +1,20 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Non-Go-constant but constant indexes are ok at compile time. + +package p + +import "unsafe" + +func f() { + var x [0]int + x[uintptr(unsafe.Pointer(nil))] = 0 +} +func g() { + var x [10]int + _ = x[3:uintptr(unsafe.Pointer(nil))] +} diff --git a/test/fixedbugs/issue28079b.go b/test/fixedbugs/issue28079b.go new file mode 100644 index 0000000000000..47cc16dfb2f6c --- /dev/null +++ b/test/fixedbugs/issue28079b.go @@ -0,0 +1,17 @@ +// errorcheck + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Non-Go-constant but constant values aren't ok for array sizes. + +package p + +import "unsafe" + +type T [uintptr(unsafe.Pointer(nil))]int // ERROR "non-constant array bound" + +func f() { + _ = complex(1<> 1) + +func main() { + s := make([]T, maxInt) + shouldPanic("cap out of range", func() { s = append(s, T{}) }) + var oneElem = make([]T, 1) + shouldPanic("cap out of range", func() { s = append(s, oneElem...) }) +} + +func shouldPanic(str string, f func()) { + defer func() { + err := recover() + if err == nil { + panic("did not panic") + } + s := err.(error).Error() + if !strings.Contains(s, str) { + panic("got panic " + s + ", want " + str) + } + }() + + f() +} diff --git a/test/fixedbugs/issue29215.go b/test/fixedbugs/issue29215.go new file mode 100644 index 0000000000000..df703aa25d7e8 --- /dev/null +++ b/test/fixedbugs/issue29215.go @@ -0,0 +1,18 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func f() { + var s string + var p, q bool + s = "a" + for p { + p = false == (true != q) + s = "" + } + _ = s == "bbb" +} diff --git a/test/fixedbugs/issue29220.go b/test/fixedbugs/issue29220.go new file mode 100644 index 0000000000000..bbfe930786e09 --- /dev/null +++ b/test/fixedbugs/issue29220.go @@ -0,0 +1,26 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func ascii(r rune) rune { + switch { + case 97 <= r && r <= 122: + return r - 32 + case 65 <= r && r <= 90: + return r + 32 + default: + return r + } +} + +func main() { + nomeObjeto := "ABE1FK21" + println(string(nomeObjeto[1:4])) + println(ascii(rune(nomeObjeto[4])) >= 48 && ascii(rune(nomeObjeto[4])) <= 57) + println(string(nomeObjeto[5])) + println(string(nomeObjeto[6:10])) +} diff --git a/test/fixedbugs/issue29264.go b/test/fixedbugs/issue29264.go new file mode 100644 index 0000000000000..3781559ada315 --- /dev/null +++ b/test/fixedbugs/issue29264.go @@ -0,0 +1,22 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Verify that we emit a valid type descriptor for +// a fairly deeply nested type. + +package main + +import "fmt" +import "strings" + +func main() { + a := [][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][][]int{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{42}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + got := fmt.Sprint(a) + want := strings.Repeat("[", 100) + "42" + strings.Repeat("]", 100) + if got != want { + fmt.Printf("got %q\nwant %q\n", got, want) + } +} diff --git a/test/fixedbugs/issue29304.go b/test/fixedbugs/issue29304.go new file mode 100644 index 0000000000000..47bc99f9ca20e --- /dev/null +++ b/test/fixedbugs/issue29304.go @@ -0,0 +1,19 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Verify that relocation target go.builtin.error.Error +// is defined and the code links and runs correctly. 
+ +package main + +import "errors" + +func main() { + err := errors.New("foo") + if error.Error(err) != "foo" { + panic("FAILED") + } +} diff --git a/test/fixedbugs/issue29329.go b/test/fixedbugs/issue29329.go new file mode 100644 index 0000000000000..7818bca30a02e --- /dev/null +++ b/test/fixedbugs/issue29329.go @@ -0,0 +1,107 @@ +// +build cgo +// run -race + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,amd64 + +package main + +import ( + "fmt" +) + +type LineString []Point +type Point [2]float64 + +//go:noinline +func benchmarkData() LineString { + return LineString{{1.0, 2.0}} +} + +func (ls LineString) Clone() LineString { + ps := MultiPoint(ls) + return LineString(ps.Clone()) +} + +type MultiPoint []Point + +func (mp MultiPoint) Clone() MultiPoint { + if mp == nil { + return nil + } + + points := make([]Point, len(mp)) + copy(points, mp) + + return MultiPoint(points) +} + +func F1() { + cases := []struct { + threshold float64 + length int + }{ + {0.1, 1118}, + {0.5, 257}, + {1.0, 144}, + {1.5, 95}, + {2.0, 71}, + {3.0, 46}, + {4.0, 39}, + {5.0, 33}, + } + + ls := benchmarkData() + + for k := 0; k < 100; k++ { + for i, tc := range cases { + r := DouglasPeucker(tc.threshold).LineString(ls.Clone()) + if len(r) == tc.length { + fmt.Printf("%d: unexpected\n", i) + } + } + } +} + +// A DouglasPeuckerSimplifier wraps the DouglasPeucker function. +type DouglasPeuckerSimplifier struct { + Threshold float64 +} + +// DouglasPeucker creates a new DouglasPeuckerSimplifier. 
+func DouglasPeucker(threshold float64) *DouglasPeuckerSimplifier { + return &DouglasPeuckerSimplifier{ + Threshold: threshold, + } +} + +func (s *DouglasPeuckerSimplifier) LineString(ls LineString) LineString { + return lineString(s, ls) +} + +type simplifier interface { + simplify(LineString, bool) (LineString, []int) +} + +func lineString(s simplifier, ls LineString) LineString { + return runSimplify(s, ls) +} + +func runSimplify(s simplifier, ls LineString) LineString { + if len(ls) <= 2 { + return ls + } + ls, _ = s.simplify(ls, false) + return ls +} + +func (s *DouglasPeuckerSimplifier) simplify(ls LineString, wim bool) (LineString, []int) { + return nil, nil +} + +func main() { + F1() +} diff --git a/test/fixedbugs/issue29350.go b/test/fixedbugs/issue29350.go new file mode 100644 index 0000000000000..9d59f6f6c0f10 --- /dev/null +++ b/test/fixedbugs/issue29350.go @@ -0,0 +1,9 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +var X interface{} = 'x' diff --git a/test/fixedbugs/issue29362.go b/test/fixedbugs/issue29362.go new file mode 100644 index 0000000000000..a8bd607c4a29b --- /dev/null +++ b/test/fixedbugs/issue29362.go @@ -0,0 +1,42 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Verify that we don't consider a Go'd function's +// arguments as pointers when they aren't. + +package main + +import ( + "unsafe" +) + +var badPtr uintptr + +var sink []byte + +func init() { + // Allocate large enough to use largeAlloc. + b := make([]byte, 1<<16-1) + sink = b // force heap allocation + // Any space between the object and the end of page is invalid to point to. 
+ badPtr = uintptr(unsafe.Pointer(&b[len(b)-1])) + 1 +} + +var throttle = make(chan struct{}, 10) + +func noPointerArgs(a, b, c, d uintptr) { + sink = make([]byte, 4096) + <-throttle +} + +func main() { + const N = 1000 + for i := 0; i < N; i++ { + throttle <- struct{}{} + go noPointerArgs(badPtr, badPtr, badPtr, badPtr) + sink = make([]byte, 4096) + } +} diff --git a/test/fixedbugs/issue29362b.go b/test/fixedbugs/issue29362b.go new file mode 100644 index 0000000000000..d1e3b4733f8ab --- /dev/null +++ b/test/fixedbugs/issue29362b.go @@ -0,0 +1,53 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Verify that we don't consider a Go'd function's +// arguments as pointers when they aren't. + +package main + +import ( + "unsafe" +) + +var badPtr uintptr + +var sink []byte + +func init() { + // Allocate large enough to use largeAlloc. + b := make([]byte, 1<<16-1) + sink = b // force heap allocation + // Any space between the object and the end of page is invalid to point to. + badPtr = uintptr(unsafe.Pointer(&b[len(b)-1])) + 1 +} + +var throttle = make(chan struct{}, 10) + +// There are 2 arg bitmaps for this function, each with 2 bits. +// In the first, p and q are both live, so that bitmap is 11. +// In the second, only p is live, so that bitmap is 10. +// Bitmaps are byte aligned, so if the first bitmap is interpreted as +// extending across the entire argument area, we incorrectly concatenate +// the bitmaps and end up using 110000001. That bad bitmap causes a6 +// to be considered a pointer. 
+func noPointerArgs(p, q *byte, a0, a1, a2, a3, a4, a5, a6 uintptr) { + sink = make([]byte, 4096) + sinkptr = q + <-throttle + sinkptr = p +} + +var sinkptr *byte + +func main() { + const N = 1000 + for i := 0; i < N; i++ { + throttle <- struct{}{} + go noPointerArgs(nil, nil, badPtr, badPtr, badPtr, badPtr, badPtr, badPtr, badPtr) + sink = make([]byte, 4096) + } +} diff --git a/test/fixedbugs/issue29389.go b/test/fixedbugs/issue29389.go new file mode 100644 index 0000000000000..43859fd38f085 --- /dev/null +++ b/test/fixedbugs/issue29389.go @@ -0,0 +1,17 @@ +// compile + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure we can correctly compile method expressions +// where the method is implicitly declared. + +package main + +import "io" + +func main() { + err := io.EOF + _ = err.Error +} diff --git a/test/fixedbugs/issue29402.go b/test/fixedbugs/issue29402.go new file mode 100644 index 0000000000000..8a1f959d84280 --- /dev/null +++ b/test/fixedbugs/issue29402.go @@ -0,0 +1,23 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 29402: wrong optimization of comparison of +// constant and shift on MIPS. + +package main + +//go:noinline +func F(s []int) bool { + half := len(s) / 2 + return half >= 0 +} + +func main() { + b := F([]int{1, 2, 3, 4}) + if !b { + panic("FAIL") + } +} diff --git a/test/fixedbugs/issue29562.go b/test/fixedbugs/issue29562.go new file mode 100644 index 0000000000000..cbcd77d5df76b --- /dev/null +++ b/test/fixedbugs/issue29562.go @@ -0,0 +1,26 @@ +// compile + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Triggers a double walk of the (inlined) switch in il + +package p + +func il(s string) string { + switch len(s) { + case 0: + return "zero" + case 1: + return "one" + } + return s +} + +func f() { + var s string + var as []string + switch false && (s+"a"+as[0]+il(s)+as[0]+s == "") { + } +} diff --git a/test/fixedbugs/issue29610.dir/a.go b/test/fixedbugs/issue29610.dir/a.go new file mode 100644 index 0000000000000..ccbe451bca9cb --- /dev/null +++ b/test/fixedbugs/issue29610.dir/a.go @@ -0,0 +1,15 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +type I interface { + M(init bool) +} + +var V I + +func init() { + V = nil +} diff --git a/test/fixedbugs/issue29610.dir/b.go b/test/fixedbugs/issue29610.dir/b.go new file mode 100644 index 0000000000000..c2016de3d05ca --- /dev/null +++ b/test/fixedbugs/issue29610.dir/b.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package b + +import "./a" + +type S struct { + a.I +} + +var V a.I + +func init() { + V = S{} +} diff --git a/test/fixedbugs/issue29610.dir/main.go b/test/fixedbugs/issue29610.dir/main.go new file mode 100644 index 0000000000000..29437bfa61863 --- /dev/null +++ b/test/fixedbugs/issue29610.dir/main.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "./b" + +var v b.S + +func main() {} diff --git a/test/fixedbugs/issue29610.go b/test/fixedbugs/issue29610.go new file mode 100644 index 0000000000000..8d49ba6b8c19c --- /dev/null +++ b/test/fixedbugs/issue29610.go @@ -0,0 +1,13 @@ +// rundir + +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 29610: Symbol import and initialization order caused function +// symbols to be recorded as non-function symbols. + +// This uses rundir not because we actually want to run the final +// binary, but because we need to at least link it. + +package ignored diff --git a/test/fixedbugs/issue29735.go b/test/fixedbugs/issue29735.go new file mode 100644 index 0000000000000..7a0381d533eff --- /dev/null +++ b/test/fixedbugs/issue29735.go @@ -0,0 +1,33 @@ +// run + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure FuncForPC won't panic when given a pc which +// lies between two functions. + +package main + +import ( + "runtime" +) + +func main() { + var stack [1]uintptr + runtime.Callers(1, stack[:]) + f() // inlined function, to give main some inlining info + for i := uintptr(0); true; i++ { + f := runtime.FuncForPC(stack[0] + i) + if f.Name() != "main.main" && f.Name() != "main.f" { + // Reached next function successfully. + break + } + } +} + +func f() { + sink = 0 // one instruction which can't be removed +} + +var sink int diff --git a/test/fixedbugs/issue29870.go b/test/fixedbugs/issue29870.go new file mode 100644 index 0000000000000..b79860ca2bb9e --- /dev/null +++ b/test/fixedbugs/issue29870.go @@ -0,0 +1,15 @@ +// compile + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure we can compile "_" functions without crashing. 
+ +package main + +import "log" + +func _() { + log.Println("%2F") +} diff --git a/test/fixedbugs/issue29870b.go b/test/fixedbugs/issue29870b.go new file mode 100644 index 0000000000000..1bac566bbbfd5 --- /dev/null +++ b/test/fixedbugs/issue29870b.go @@ -0,0 +1,14 @@ +// errorcheck + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure we're compiling "_" functions at least enough +// to get to an error which is generated during walk. + +package main + +func _() { + x := 7 // ERROR "x declared and not used" +} diff --git a/test/fixedbugs/issue29919.dir/a.go b/test/fixedbugs/issue29919.dir/a.go new file mode 100644 index 0000000000000..cfccc4aabb671 --- /dev/null +++ b/test/fixedbugs/issue29919.dir/a.go @@ -0,0 +1,75 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure tracebacks from initialization code are reported correctly. + +package a + +import ( + "fmt" + "runtime" + "strings" +) + +var x = f() // line 15 + +func f() int { + var b [4096]byte + n := runtime.Stack(b[:], false) // line 19 + s := string(b[:n]) + var pcs [10]uintptr + n = runtime.Callers(1, pcs[:]) // line 22 + + // Check the Stack results. + if debug { + println(s) + } + if strings.Contains(s, "autogenerated") { + panic("autogenerated code in traceback") + } + if !strings.Contains(s, "a.go:15") { + panic("missing a.go:15") + } + if !strings.Contains(s, "a.go:19") { + panic("missing a.go:19") + } + if !strings.Contains(s, "a.init.ializers") { + panic("missing a.init.ializers") + } + + // Check the CallersFrames results. 
+ if debug { + iter := runtime.CallersFrames(pcs[:n]) + for { + f, more := iter.Next() + fmt.Printf("%s %s:%d\n", f.Function, f.File, f.Line) + if !more { + break + } + } + } + iter := runtime.CallersFrames(pcs[:n]) + f, more := iter.Next() + if f.Function != "a.f" || !strings.HasSuffix(f.File, "a.go") || f.Line != 22 { + panic(fmt.Sprintf("bad f %v\n", f)) + } + if !more { + panic("traceback truncated after f") + } + f, more = iter.Next() + if f.Function != "a.init.ializers" || !strings.HasSuffix(f.File, "a.go") || f.Line != 15 { + panic(fmt.Sprintf("bad init.ializers %v\n", f)) + } + if !more { + panic("traceback truncated after init.ializers") + } + f, _ = iter.Next() + if f.Function != "runtime.main" { + panic("runtime.main missing") + } + + return 0 +} + +const debug = false diff --git a/test/fixedbugs/issue29919.dir/main.go b/test/fixedbugs/issue29919.dir/main.go new file mode 100644 index 0000000000000..3e99ca891b55d --- /dev/null +++ b/test/fixedbugs/issue29919.dir/main.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import _ "./a" + +func main() { +} diff --git a/test/fixedbugs/issue29919.go b/test/fixedbugs/issue29919.go new file mode 100644 index 0000000000000..6d97592c11871 --- /dev/null +++ b/test/fixedbugs/issue29919.go @@ -0,0 +1,9 @@ +// rundir + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure tracebacks from initialization code are reported correctly. + +package ignored diff --git a/test/fixedbugs/issue29943.go b/test/fixedbugs/issue29943.go new file mode 100644 index 0000000000000..ff47de55d524e --- /dev/null +++ b/test/fixedbugs/issue29943.go @@ -0,0 +1,28 @@ +// run + +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code was miscompiled on ppc64le due to incorrect zero-extension +// that was CSE'd. + +package main + +//go:noinline +func g(i uint64) uint64 { + return uint64(uint32(i)) +} + +var sink uint64 + +func main() { + for i := uint64(0); i < 1; i++ { + i32 := int32(i - 1) + sink = uint64((uint32(i32) << 1) ^ uint32((i32 >> 31))) + x := g(uint64(i32)) + if x != uint64(uint32(i32)) { + panic(x) + } + } +} diff --git a/test/fixedbugs/issue30041.go b/test/fixedbugs/issue30041.go new file mode 100644 index 0000000000000..7d8a1698cbc28 --- /dev/null +++ b/test/fixedbugs/issue30041.go @@ -0,0 +1,63 @@ +// run + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 30041: copying results of a reflect-generated +// call on stack should not have write barrier. + +package main + +import ( + "reflect" + "runtime" + "unsafe" +) + +var badPtr uintptr + +var sink []byte + +func init() { + // Allocate large enough to use largeAlloc. + b := make([]byte, 1<<16-1) + sink = b // force heap allocation + // Any space between the object and the end of page is invalid to point to. + badPtr = uintptr(unsafe.Pointer(&b[len(b)-1])) + 1 +} + +type ft func() *int + +var fn ft + +func rf([]reflect.Value) []reflect.Value { + a := reflect.ValueOf((*int)(nil)) + return []reflect.Value{a} +} + +const N = 1000 + +func main() { + fn = reflect.MakeFunc(reflect.TypeOf(fn), rf).Interface().(ft) + + // Keep running GC so the write barrier is on. 
+ go func() { + for i := 0; i < N; i++ { + runtime.GC() + } + }() + + var x [10]uintptr + for i := range x { + x[i] = badPtr + } + for i := 0; i < N; i++ { + runtime.Gosched() + use(x) // prepare bad pointers on stack + fn() + } +} + +//go:noinline +func use([10]uintptr) {} diff --git a/test/fixedbugs/issue30061.go b/test/fixedbugs/issue30061.go new file mode 100644 index 0000000000000..5092b01799ef0 --- /dev/null +++ b/test/fixedbugs/issue30061.go @@ -0,0 +1,20 @@ +// compile + +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure we can linkname to memmove with an unsafe.Pointer argument. + +package p + +import "unsafe" + +//go:linkname memmove runtime.memmove +func memmove(to, from unsafe.Pointer, n uintptr) + +var V1, V2 int + +func F() { + memmove(unsafe.Pointer(&V1), unsafe.Pointer(&V2), unsafe.Sizeof(int(0))) +} diff --git a/test/fixedbugs/issue4085b.go b/test/fixedbugs/issue4085b.go index 6bf315fcc2f06..6304ce073aa9e 100644 --- a/test/fixedbugs/issue4085b.go +++ b/test/fixedbugs/issue4085b.go @@ -21,9 +21,11 @@ func main() { shouldPanic("cap out of range", func() { _ = make(T, 0, int64(n)) }) var t *byte if unsafe.Sizeof(t) == 8 { - var n2 int64 = 1 << 50 + // Test mem > maxAlloc + var n2 int64 = 1 << 59 shouldPanic("len out of range", func() { _ = make(T, int(n2)) }) shouldPanic("cap out of range", func() { _ = make(T, 0, int(n2)) }) + // Test elem.size*cap overflow n2 = 1<<63 - 1 shouldPanic("len out of range", func() { _ = make(T, int(n2)) }) shouldPanic("cap out of range", func() { _ = make(T, 0, int(n2)) }) diff --git a/test/fixedbugs/issue5089.go b/test/fixedbugs/issue5089.go index 9f7fa5a4d452c..dc393e9b06c53 100644 --- a/test/fixedbugs/issue5089.go +++ b/test/fixedbugs/issue5089.go @@ -8,7 +8,7 @@ package p -import "bufio" // GCCGO_ERROR "previous" +import "bufio" func (b *bufio.Reader) Buffered() int { // ERROR 
"non-local|redefinition" return -1 diff --git a/test/fixedbugs/issue5856.go b/test/fixedbugs/issue5856.go index 5e16c78b4d9b3..f13258854e5c4 100644 --- a/test/fixedbugs/issue5856.go +++ b/test/fixedbugs/issue5856.go @@ -29,7 +29,7 @@ func f() { } func g() { - _, file, line, _ := runtime.Caller(3) + _, file, line, _ := runtime.Caller(2) if !strings.HasSuffix(file, "issue5856.go") || line != 28 { fmt.Printf("BUG: defer called from %s:%d, want issue5856.go:28\n", file, line) os.Exit(1) diff --git a/test/fixedbugs/issue7921.go b/test/fixedbugs/issue7921.go new file mode 100644 index 0000000000000..ce8d09a2769e2 --- /dev/null +++ b/test/fixedbugs/issue7921.go @@ -0,0 +1,57 @@ +// +build !gcflags_noopt +// errorcheck -0 -m + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package foo + +import "bytes" + +// In order to get desired results, we need a combination of +// both escape analysis and inlining. + +func bufferNotEscape() string { + // b itself does not escape, only its buf field will be + // copied during String() call, but object "handle" itself + // can be stack-allocated. 
+ var b bytes.Buffer + b.WriteString("123") // ERROR "bufferNotEscape b does not escape$" + b.Write([]byte{'4'}) // ERROR "bufferNotEscape \[\]byte literal does not escape$" "bufferNotEscape b does not escape$" + return b.String() // ERROR "bufferNotEscape b does not escape$" "inlining call to bytes.\(\*Buffer\).String$" "string\(bytes.b.buf\[bytes.b.off:\]\) escapes to heap$" +} + +func bufferNoEscape2(xs []string) int { // ERROR "bufferNoEscape2 xs does not escape$" + b := bytes.NewBuffer(make([]byte, 0, 64)) // ERROR "bufferNoEscape2 &bytes.Buffer literal does not escape$" "bufferNoEscape2 make\(\[\]byte, 0, 64\) does not escape$" "inlining call to bytes.NewBuffer$" + for _, x := range xs { + b.WriteString(x) + } + return b.Len() // ERROR "inlining call to bytes.\(\*Buffer\).Len$" +} + +func bufferNoEscape3(xs []string) string { // ERROR "bufferNoEscape3 xs does not escape$" + b := bytes.NewBuffer(make([]byte, 0, 64)) // ERROR "bufferNoEscape3 &bytes.Buffer literal does not escape$" "bufferNoEscape3 make\(\[\]byte, 0, 64\) does not escape$" "inlining call to bytes.NewBuffer$" + for _, x := range xs { + b.WriteString(x) + b.WriteByte(',') + } + return b.String() // ERROR "inlining call to bytes.\(\*Buffer\).String$" "string\(bytes.b.buf\[bytes.b.off:\]\) escapes to heap$" +} + +func bufferNoEscape4() []byte { + var b bytes.Buffer + b.Grow(64) // ERROR "bufferNoEscape4 b does not escape$" "bufferNoEscape4 ignoring self-assignment in bytes.b.buf = bytes.b.buf\[:bytes.m·3\]$" "inlining call to bytes.\(\*Buffer\).Grow$" + useBuffer(&b) // ERROR "bufferNoEscape4 &b does not escape$" + return b.Bytes() // ERROR "bufferNoEscape4 b does not escape$" "inlining call to bytes.\(\*Buffer\).Bytes$" +} + +func bufferNoEscape5() { // ERROR "can inline bufferNoEscape5$" + b := bytes.NewBuffer(make([]byte, 0, 128)) // ERROR "bufferNoEscape5 &bytes.Buffer literal does not escape$" "bufferNoEscape5 make\(\[\]byte, 0, 128\) does not escape$" "inlining call to bytes.NewBuffer$" + 
useBuffer(b) +} + +//go:noinline +func useBuffer(b *bytes.Buffer) { // ERROR "useBuffer b does not escape$" + b.WriteString("1234") +} diff --git a/test/inline.go b/test/inline.go index 2553230462709..9428c1487b896 100644 --- a/test/inline.go +++ b/test/inline.go @@ -11,6 +11,7 @@ package foo import ( "errors" + "runtime" "unsafe" ) @@ -162,3 +163,20 @@ func k() (T, int, int) { return T{}, 0, 0 } // ERROR "can inline k" func _() { // ERROR "can inline _" T.meth(k()) // ERROR "inlining call to k" "inlining call to T.meth" } + +func small1() { // ERROR "can inline small1" + runtime.GC() +} +func small2() int { // ERROR "can inline small2" + return runtime.GOMAXPROCS(0) +} +func small3(t T) { // ERROR "can inline small3" + t.meth2(3, 5) +} +func small4(t T) { // not inlineable - has 2 calls. + t.meth2(runtime.GOMAXPROCS(0), 5) +} +func (T) meth2(int, int) { // not inlineable - has 2 calls. + runtime.GC() + runtime.GC() +} diff --git a/test/inline_caller.go b/test/inline_caller.go index 79039a6bb532e..daff145a9229f 100644 --- a/test/inline_caller.go +++ b/test/inline_caller.go @@ -54,9 +54,9 @@ type wantFrame struct { // -1 means don't care var expected = []wantFrame{ - 0: {"main.testCaller", 36}, - 1: {"main.testCaller", 31}, - 2: {"main.testCaller", 27}, + 0: {"main.h", 36}, + 1: {"main.g", 31}, + 2: {"main.f", 27}, 3: {"main.testCaller", 42}, 4: {"main.main", 68}, 5: {"runtime.main", -1}, diff --git a/test/inline_callers.go b/test/inline_callers.go index 6df6861951281..ee7d6470728cb 100644 --- a/test/inline_callers.go +++ b/test/inline_callers.go @@ -31,7 +31,7 @@ func testCallers(skp int) (frames []string) { skip = skp f() for i := 0; i < npcs; i++ { - fn := runtime.FuncForPC(pcs[i]) + fn := runtime.FuncForPC(pcs[i] - 1) frames = append(frames, fn.Name()) if fn.Name() == "main.main" { break @@ -56,11 +56,11 @@ func testCallersFrames(skp int) (frames []string) { } var expectedFrames [][]string = [][]string{ - 0: {"runtime.Callers", "main.testCallers", "main.main"}, 
- 1: {"main.testCallers", "main.main"}, - 2: {"main.testCallers", "runtime.skipPleaseUseCallersFrames", "main.main"}, - 3: {"main.testCallers", "runtime.skipPleaseUseCallersFrames", "main.main"}, - 4: {"main.testCallers", "runtime.skipPleaseUseCallersFrames", "main.main"}, + 0: {"runtime.Callers", "main.h", "main.g", "main.f", "main.testCallers", "main.main"}, + 1: {"main.h", "main.g", "main.f", "main.testCallers", "main.main"}, + 2: {"main.g", "main.f", "main.testCallers", "main.main"}, + 3: {"main.f", "main.testCallers", "main.main"}, + 4: {"main.testCallers", "main.main"}, 5: {"main.main"}, } diff --git a/test/inline_math_bits_rotate.go b/test/inline_math_bits_rotate.go new file mode 100644 index 0000000000000..a0341ea4971e9 --- /dev/null +++ b/test/inline_math_bits_rotate.go @@ -0,0 +1,28 @@ +// +build amd64 +// errorcheck -0 -m + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that inlining of math/bits.RotateLeft* treats those calls as intrinsics. 
+ +package p + +import "math/bits" + +var ( + x8 uint8 + x16 uint16 + x32 uint32 + x64 uint64 + x uint +) + +func f() { // ERROR "can inline f" + x8 = bits.RotateLeft8(x8, 1) + x16 = bits.RotateLeft16(x16, 1) + x32 = bits.RotateLeft32(x32, 1) + x64 = bits.RotateLeft64(x64, 1) + x = bits.RotateLeft(x, 1) +} diff --git a/test/live.go b/test/live.go index 18611f5113d63..a508947afc5a8 100644 --- a/test/live.go +++ b/test/live.go @@ -32,9 +32,9 @@ func printbytepointer(*byte) func printint(int) func f1() { - var x *int - printpointer(&x) // ERROR "live at call to printpointer: x$" + var x *int // ERROR "stack object x \*int$" printpointer(&x) // ERROR "live at call to printpointer: x$" + printpointer(&x) } func f2(b bool) { @@ -42,9 +42,9 @@ func f2(b bool) { printint(0) // nothing live here return } - var x *int - printpointer(&x) // ERROR "live at call to printpointer: x$" + var x *int // ERROR "stack object x \*int$" printpointer(&x) // ERROR "live at call to printpointer: x$" + printpointer(&x) } func f3(b1, b2 bool) { @@ -60,15 +60,15 @@ func f3(b1, b2 bool) { } if b2 { - var x *int - printpointer(&x) // ERROR "live at call to printpointer: x$" + var x *int // ERROR "stack object x \*int$" printpointer(&x) // ERROR "live at call to printpointer: x$" + printpointer(&x) } else { - var y *int - printpointer(&y) // ERROR "live at call to printpointer: y$" + var y *int // ERROR "stack object y \*int$" printpointer(&y) // ERROR "live at call to printpointer: y$" + printpointer(&y) } - printint(0) // ERROR "f3: x \(type \*int\) is ambiguously live$" "f3: y \(type \*int\) is ambiguously live$" "live at call to printint: x y$" + printint(0) // nothing is live here } // The old algorithm treated x as live on all code that @@ -83,7 +83,7 @@ func f4(b1, b2 bool) { // x not live here return } var z **int - x := new(int) + x := new(int) // ERROR "stack object x \*int$" *x = 42 z = &x printint(**z) // ERROR "live at call to printint: x$" @@ -99,15 +99,15 @@ func f4(b1, b2 bool) { 
// x not live here func f5(b1 bool) { var z **int if b1 { - x := new(int) + x := new(int) // ERROR "stack object x \*int$" *x = 42 z = &x } else { - y := new(int) + y := new(int) // ERROR "stack object y \*int$" *y = 54 z = &y } - printint(**z) // ERROR "f5: x \(type \*int\) is ambiguously live$" "f5: y \(type \*int\) is ambiguously live$" "live at call to printint: x y$" + printint(**z) // nothing live here } // confusion about the _ result used to cause spurious "live at entry to f6: _". @@ -119,7 +119,7 @@ func f6() (_, y string) { // confusion about addressed results used to cause "live at entry to f7: x". -func f7() (x string) { +func f7() (x string) { // ERROR "stack object x string" _ = &x x = "hello" return @@ -141,7 +141,7 @@ var i9 interface{} func f9() bool { g8() x := i9 - y := interface{}(str()) // ERROR "live at call to convT2Estring: .autotmp_[0-9]+ x.data$" "live at call to str: x.data$" + y := interface{}(g18()) // ERROR "live at call to convT2E: x.data$" "live at call to g18: x.data$" "stack object .autotmp_[0-9]+ \[2\]string$" i9 = y // make y escape so the line above has to call convT2E return x != y } @@ -163,7 +163,7 @@ var b bool // this used to have a spurious "live at entry to f11a: ~r0" func f11a() *int { - select { // ERROR "live at call to selectgo: .autotmp_[0-9]+$" + select { // ERROR "stack object .autotmp_[0-9]+ \[2\]struct" case <-c: return nil case <-c: @@ -178,7 +178,7 @@ func f11b() *int { // get to the bottom of the function. // This used to have a spurious "live at call to printint: p". printint(1) // nothing live here! - select { // ERROR "live at call to selectgo: .autotmp_[0-9]+$" + select { // ERROR "stack object .autotmp_[0-9]+ \[2\]struct" case <-c: return nil case <-c: @@ -198,7 +198,7 @@ func f11c() *int { // Unlike previous, the cases in this select fall through, // so we can get to the println, so p is not dead. 
printint(1) // ERROR "live at call to printint: p$" - select { // ERROR "live at call to selectgo: .autotmp_[0-9]+ p$" + select { // ERROR "live at call to selectgo: p$" "stack object .autotmp_[0-9]+ \[2\]struct" case <-c: case <-c: } @@ -233,8 +233,8 @@ func h13(string, string) string // more incorrectly placed VARDEF. func f14() { - x := g14() - printstringpointer(&x) // ERROR "live at call to printstringpointer: x$" + x := g14() // ERROR "stack object x string$" + printstringpointer(&x) } func g14() string @@ -254,10 +254,10 @@ func iface() interface{} func f16() { if b { - delete(mi, iface()) // ERROR "live at call to mapdelete: .autotmp_[0-9]+$" + delete(mi, iface()) // ERROR "stack object .autotmp_[0-9]+ interface \{\}$" } - delete(mi, iface()) // ERROR "live at call to mapdelete: .autotmp_[0-9]+$" - delete(mi, iface()) // ERROR "live at call to mapdelete: .autotmp_[0-9]+$" + delete(mi, iface()) + delete(mi, iface()) } var m2s map[string]*byte @@ -300,10 +300,10 @@ func f18() { // temporary introduced by orderexpr. 
var z *byte if b { - z = m2[g18()] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + z = m2[g18()] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } - z = m2[g18()] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" - z = m2[g18()] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + z = m2[g18()] + z = m2[g18()] printbytepointer(z) } @@ -317,9 +317,9 @@ func f19() { var z *byte if b { - z = <-ch // ERROR "live at call to chanrecv1: .autotmp_[0-9]+$" + z = <-ch // ERROR "stack object .autotmp_[0-9]+ \*byte$" } - z = <-ch // ERROR "live at call to chanrecv1: .autotmp_[0-9]+$" + z = <-ch z = <-ch // ERROR "live at call to chanrecv1: .autotmp_[0-9]+$" printbytepointer(z) } @@ -327,20 +327,20 @@ func f19() { func f20() { // src temporary for channel send if b { - ch <- byteptr() // ERROR "live at call to chansend1: .autotmp_[0-9]+$" + ch <- byteptr() // ERROR "stack object .autotmp_[0-9]+ \*byte$" } - ch <- byteptr() // ERROR "live at call to chansend1: .autotmp_[0-9]+$" - ch <- byteptr() // ERROR "live at call to chansend1: .autotmp_[0-9]+$" + ch <- byteptr() + ch <- byteptr() } func f21() { // key temporary for mapaccess using array literal key. 
var z *byte if b { - z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + z = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } - z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" - z = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + z = m2[[2]string{"x", "y"}] + z = m2[[2]string{"x", "y"}] printbytepointer(z) } @@ -349,10 +349,10 @@ func f23() { var z *byte var ok bool if b { - z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: .autotmp_[0-9]+$" + z, ok = m2[[2]string{"x", "y"}] // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } - z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: .autotmp_[0-9]+$" - z, ok = m2[[2]string{"x", "y"}] // ERROR "live at call to mapaccess2: .autotmp_[0-9]+$" + z, ok = m2[[2]string{"x", "y"}] + z, ok = m2[[2]string{"x", "y"}] printbytepointer(z) print(ok) } @@ -361,10 +361,10 @@ func f24() { // key temporary for map access using array literal key. // value temporary too. 
if b { - m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign: .autotmp_[0-9]+$" + m2[[2]string{"x", "y"}] = nil // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } - m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign: .autotmp_[0-9]+$" - m2[[2]string{"x", "y"}] = nil // ERROR "live at call to mapassign: .autotmp_[0-9]+$" + m2[[2]string{"x", "y"}] = nil + m2[[2]string{"x", "y"}] = nil } // defer should not cause spurious ambiguously live variables @@ -387,10 +387,10 @@ func g25() func f26(b bool) { if b { - print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: .autotmp_[0-9]+$" + print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "stack object .autotmp_[0-9]+ \[3\]interface \{\}$" } - print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: .autotmp_[0-9]+$" - print26((*int)(nil), (*int)(nil), (*int)(nil)) // ERROR "live at call to print26: .autotmp_[0-9]+$" + print26((*int)(nil), (*int)(nil), (*int)(nil)) + print26((*int)(nil), (*int)(nil), (*int)(nil)) printnl() } @@ -402,10 +402,10 @@ func print26(...interface{}) func f27(b bool) { x := 0 if b { - call27(func() { x++ }) // ERROR "live at call to call27: .autotmp_[0-9]+$" + call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{" } - call27(func() { x++ }) // ERROR "live at call to call27: .autotmp_[0-9]+$" - call27(func() { x++ }) // ERROR "live at call to call27: .autotmp_[0-9]+$" + call27(func() { x++ }) + call27(func() { x++ }) printnl() } @@ -414,11 +414,11 @@ func f27(b bool) { func f27defer(b bool) { x := 0 if b { - defer call27(func() { x++ }) // ERROR "live at call to deferproc: .autotmp_[0-9]+$" "live at call to deferreturn: .autotmp_[0-9]+$" + defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{" } - defer call27(func() { x++ }) // ERROR "f27defer: .autotmp_[0-9]+ \(type struct { F uintptr; x \*int }\) is ambiguously live$" "live at call to deferproc: .autotmp_[0-9]+ 
.autotmp_[0-9]+$" "live at call to deferreturn: .autotmp_[0-9]+ .autotmp_[0-9]+$" - printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ .autotmp_[0-9]+$" -} // ERROR "live at call to deferreturn: .autotmp_[0-9]+ .autotmp_[0-9]+$" + defer call27(func() { x++ }) // ERROR "stack object .autotmp_[0-9]+ struct \{" + printnl() +} // and newproc (go) escapes to the heap @@ -440,17 +440,17 @@ var s1, s2, s3, s4, s5, s6, s7, s8, s9, s10 string func f28(b bool) { if b { - printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: .autotmp_[0-9]+$" "live at call to printstring: .autotmp_[0-9]+$" + printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "stack object .autotmp_[0-9]+ \[10\]string$" } - printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: .autotmp_[0-9]+$" "live at call to printstring: .autotmp_[0-9]+$" - printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) // ERROR "live at call to concatstrings: .autotmp_[0-9]+$" "live at call to printstring: .autotmp_[0-9]+$" + printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) + printstring(s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9 + s10) } // map iterator should die on end of range loop func f29(b bool) { if b { - for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" + for k := range m { // ERROR "live at call to mapiterinit: .autotmp_[0-9]+$" "live at call to mapiternext: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ map.iter\[string\]int$" printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$" } } @@ -465,7 +465,7 @@ func f29(b bool) { // copy of array of pointers should die at end of range loop var pstructarr [10]pstruct -// Struct size choosen to make pointer to element in pstructarr +// Struct size chosen to make pointer to element in pstructarr // not computable by strength reduction. 
type pstruct struct { intp *int @@ -473,20 +473,19 @@ type pstruct struct { } func f30(b bool) { - // two live temps during printintpointer(p): - // in the copy of p.intp and + // live temp during printintpointer(p): // the internal iterator pointer if a pointer to pstruct in pstructarr // can not be easily computed by strength reduction. if b { - for _, p := range pstructarr { - printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+ .autotmp_[0-9]+$" + for _, p := range pstructarr { // ERROR "stack object .autotmp_[0-9]+ \[10\]pstruct$" + printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$" } } for _, p := range pstructarr { - printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+ .autotmp_[0-9]+$" + printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$" } for _, p := range pstructarr { - printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+ .autotmp_[0-9]+$" + printintpointer(p.intp) // ERROR "live at call to printintpointer: .autotmp_[0-9]+$" } } @@ -494,13 +493,13 @@ func f30(b bool) { func f31(b1, b2, b3 bool) { if b1 { - g31(str()) // ERROR "live at call to convT2Estring: .autotmp_[0-9]+$" "live at call to g31: .autotmp_[0-9]+$" + g31(g18()) // ERROR "stack object .autotmp_[0-9]+ \[2\]string$" } if b2 { - h31(str()) // ERROR "live at call to convT2Estring: .autotmp_[0-9]+ .autotmp_[0-9]+$" "live at call to h31: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" + h31(g18()) // ERROR "live at call to convT2E: .autotmp_[0-9]+$" "live at call to newobject: .autotmp_[0-9]+$" } if b3 { - panic(str()) // ERROR "live at call to convT2Estring: .autotmp_[0-9]+$" "live at call to gopanic: .autotmp_[0-9]+$" + panic(g18()) } print(b3) } @@ -520,10 +519,10 @@ var t32 T32 func f32(b bool) { if b { - call32(t32.Inc) // ERROR "live at call to call32: .autotmp_[0-9]+$" + call32(t32.Inc) // ERROR "stack object 
.autotmp_[0-9]+ struct \{" } - call32(t32.Inc) // ERROR "live at call to call32: .autotmp_[0-9]+$" - call32(t32.Inc) // ERROR "live at call to call32: .autotmp_[0-9]+$" + call32(t32.Inc) + call32(t32.Inc) } //go:noescape @@ -535,7 +534,7 @@ func call32(func()) var m33 map[interface{}]int func f33() { - if m33[byteptr()] == 0 { // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + if m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}$" printnl() return } else { @@ -545,7 +544,7 @@ func f33() { } func f34() { - if m33[byteptr()] == 0 { // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" + if m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}$" printnl() return } @@ -553,7 +552,8 @@ func f34() { } func f35() { - if m33[byteptr()] == 0 && m33[byteptr()] == 0 { // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" "f35: .autotmp_[0-9]+ \(type interface \{\}\) is ambiguously live$" + if m33[byteptr()] == 0 && // ERROR "stack object .autotmp_[0-9]+ interface \{\}" + m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}" printnl() return } @@ -561,7 +561,8 @@ func f35() { } func f36() { - if m33[byteptr()] == 0 || m33[byteptr()] == 0 { // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" "f36: .autotmp_[0-9]+ \(type interface \{\}\) is ambiguously live$" + if m33[byteptr()] == 0 || // ERROR "stack object .autotmp_[0-9]+ interface \{\}" + m33[byteptr()] == 0 { // ERROR "stack object .autotmp_[0-9]+ interface \{\}" printnl() return } @@ -569,7 +570,9 @@ func f36() { } func f37() { - if (m33[byteptr()] == 0 || m33[byteptr()] == 0) && m33[byteptr()] == 0 { // ERROR "live at call to mapaccess1: .autotmp_[0-9]+$" "f37: .autotmp_[0-9]+ \(type interface \{\}\) is ambiguously live$" + if (m33[byteptr()] == 0 || // ERROR "stack object .autotmp_[0-9]+ interface \{\}" + m33[byteptr()] == 0) && // ERROR "stack object .autotmp_[0-9]+ interface \{\}" + m33[byteptr()] == 0 { // ERROR "stack object 
.autotmp_[0-9]+ interface \{\}" printnl() return } @@ -589,14 +592,14 @@ func f38(b bool) { // we care that the println lines have no live variables // and therefore no output. if b { - select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" + select { // ERROR "live at call to selectgo:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ \[4\]struct \{" case <-fc38(): printnl() - case fc38() <- *fi38(1): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" + case fc38() <- *fi38(1): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$" printnl() - case *fi38(2) = <-fc38(): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" + case *fi38(2) = <-fc38(): // ERROR "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" "stack object .autotmp_[0-9]+ string$" printnl() - case *fi38(3), *fb38() = <-fc38(): // ERROR "live at call to fb38:( .autotmp_[0-9]+)+$" "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" + case *fi38(3), *fb38() = <-fc38(): // ERROR "stack object .autotmp_[0-9]+ string$" "live at call to fc38:( .autotmp_[0-9]+)+$" "live at call to fi38:( .autotmp_[0-9]+)+$" printnl() } printnl() @@ -655,15 +658,17 @@ func bad40() { } func good40() { - ret := T40{} - ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+ ret$" + ret := T40{} // ERROR "stack object ret T40$" + ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+ ret$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$" t := &ret - printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ ret$" - useT40(t) // ERROR "live at call to useT40: .autotmp_[0-9]+ ret$" + printnl() // ERROR "live at call to printnl: ret$" + // Note: ret is live at the printnl because the compiler moves &ret + // from before the printnl 
to after. + useT40(t) } func ddd1(x, y *int) { // ERROR "live at entry to ddd1: x y$" - ddd2(x, y) // ERROR "live at call to ddd2: .autotmp_[0-9]+$" + ddd2(x, y) // ERROR "stack object .autotmp_[0-9]+ \[2\]\*int$" printnl() // Note: no .?autotmp live at printnl. See issue 16996. } @@ -689,3 +694,12 @@ func f41(p, q *int) (r *int) { // ERROR "live at entry to f41: p q$" r = q return // ERROR "live at call to deferreturn: r$" } + +func f42() { + var p, q, r int + f43([]*int{&p,&q,&r}) // ERROR "stack object .autotmp_[0-9]+ \[3\]\*int$" + f43([]*int{&p,&r,&q}) + f43([]*int{&q,&p,&r}) +} +//go:noescape +func f43(a []*int) diff --git a/test/live2.go b/test/live2.go index cc1b0b7acf580..cea312f075187 100644 --- a/test/live2.go +++ b/test/live2.go @@ -10,7 +10,6 @@ package main // issue 8142: lost 'addrtaken' bit on inlined variables. -// no inlining in this test, so just checking that non-inlined works. func printnl() @@ -28,15 +27,15 @@ func newT40() *T40 { } func bad40() { - t := newT40() // ERROR "live at call to makemap: .autotmp_[0-9]+ ret$" - printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ ret$" - useT40(t) // ERROR "live at call to useT40: .autotmp_[0-9]+ ret$" + t := newT40() // ERROR "live at call to makemap: ret$" "stack object ret T40$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$" + printnl() // ERROR "live at call to printnl: ret$" + useT40(t) } func good40() { - ret := T40{} - ret.m = make(map[int]int, 42) // ERROR "live at call to makemap: .autotmp_[0-9]+ ret$" + ret := T40{} // ERROR "stack object ret T40$" + ret.m = make(map[int]int, 42) // ERROR "live at call to makemap: ret$" "stack object .autotmp_[0-9]+ map.hdr\[int\]int$" t := &ret - printnl() // ERROR "live at call to printnl: .autotmp_[0-9]+ ret$" - useT40(t) // ERROR "live at call to useT40: .autotmp_[0-9]+ ret$" + printnl() // ERROR "live at call to printnl: ret$" + useT40(t) } diff --git a/test/live_syscall.go b/test/live_syscall.go index 65a161c028eb6..7b4471735059d 100644 --- 
a/test/live_syscall.go +++ b/test/live_syscall.go @@ -17,24 +17,24 @@ import ( func f(uintptr) // ERROR "f assuming arg#1 is unsafe uintptr" -func g() { +func g() { // ERROR "can inline g" var t int - f(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to f: .?autotmp" "g &t does not escape" + f(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to f: .?autotmp" "g &t does not escape" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } -func h() { +func h() { // ERROR "can inline h" var v int - syscall.Syscall(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to Syscall: .?autotmp" "h &v does not escape" + syscall.Syscall(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to Syscall: .?autotmp" "h &v does not escape" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } -func i() { +func i() { // ERROR "can inline i" var t int p := unsafe.Pointer(&t) // ERROR "i &t does not escape" - f(uintptr(p)) // ERROR "live at call to f: .?autotmp" + f(uintptr(p)) // ERROR "live at call to f: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } -func j() { +func j() { // ERROR "can inline j" var v int p := unsafe.Pointer(&v) // ERROR "j &v does not escape" - syscall.Syscall(0, 1, uintptr(p), 2) // ERROR "live at call to Syscall: .?autotmp" + syscall.Syscall(0, 1, uintptr(p), 2) // ERROR "live at call to Syscall: .?autotmp" "stack object .autotmp_[0-9]+ unsafe.Pointer$" } diff --git a/test/loopbce.go b/test/loopbce.go index b4bf797497f0c..81f2524e95598 100644 --- a/test/loopbce.go +++ b/test/loopbce.go @@ -6,7 +6,7 @@ package main func f0a(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += a[i] // ERROR "Proved IsInBounds$" + x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -14,7 +14,7 @@ func f0a(a []int) int { func f0b(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[i:] // ERROR "Proved IsSliceInBounds$" + b := 
a[i:] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" x += b[0] } return x @@ -23,7 +23,7 @@ func f0b(a []int) int { func f0c(a []int) int { x := 0 for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[:i+1] // ERROR "Proved IsSliceInBounds$" + b := a[:i+1] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" x += b[0] } return x @@ -40,7 +40,7 @@ func f1(a []int) int { func f2(a []int) int { x := 0 for i := 1; i < len(a); i++ { // ERROR "Induction variable: limits \[1,\?\), increment 1$" - x += a[i] // ERROR "Proved IsInBounds$" + x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -48,7 +48,7 @@ func f2(a []int) int { func f4(a [10]int) int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,10\), increment 2$" - x += a[i] // ERROR "Proved IsInBounds$" + x += a[i] // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -63,7 +63,7 @@ func f5(a [10]int) int { func f6(a []int) { for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - b := a[0:i] // ERROR "Proved IsSliceInBounds$" + b := a[0:i] // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" "(\([0-9]+\) )?Proved Geq64$" f6(b) } } @@ -71,7 +71,7 @@ func f6(a []int) { func g0a(a string) int { x := 0 for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "Proved IsInBounds$" + x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -79,7 +79,7 @@ func g0a(a string) int { func g0b(a string) int { x := 0 for i := 0; len(a) > i; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - x += int(a[i]) // ERROR "Proved IsInBounds$" + x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -87,7 +87,7 @@ func g0b(a string) int { func g0c(a string) int { x := 0 for i := len(a); i > 0; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "Proved IsInBounds$" + x += 
int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -95,7 +95,7 @@ func g0c(a string) int { func g0d(a string) int { x := 0 for i := len(a); 0 < i; i-- { // ERROR "Induction variable: limits \(0,\?\], increment 1$" - x += int(a[i-1]) // ERROR "Proved IsInBounds$" + x += int(a[i-1]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -103,7 +103,7 @@ func g0d(a string) int { func g0e(a string) int { x := 0 for i := len(a) - 1; i >= 0; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "Proved IsInBounds$" + x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -111,7 +111,7 @@ func g0e(a string) int { func g0f(a string) int { x := 0 for i := len(a) - 1; 0 <= i; i-- { // ERROR "Induction variable: limits \[0,\?\], increment 1$" - x += int(a[i]) // ERROR "Proved IsInBounds$" + x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -120,7 +120,7 @@ func g1() int { a := "evenlength" x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,10\), increment 2$" - x += int(a[i]) // ERROR "Proved IsInBounds$" + x += int(a[i]) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return x } @@ -130,7 +130,7 @@ func g2() int { x := 0 for i := 0; i < len(a); i += 2 { // ERROR "Induction variable: limits \[0,10\), increment 2$" j := i - if a[i] == 'e' { // ERROR "Proved IsInBounds$" + if a[i] == 'e' { // ERROR "(\([0-9]+\) )?Proved IsInBounds$" j = j + 1 } x += int(a[j]) @@ -141,27 +141,27 @@ func g2() int { func g3a() { a := "this string has length 25" for i := 0; i < len(a); i += 5 { // ERROR "Induction variable: limits \[0,25\), increment 5$" - useString(a[i:]) // ERROR "Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" useString(a[:i+3]) } } func g3b(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i+1:]) // ERROR "Proved IsSliceInBounds$" + 
useString(a[i+1:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } } func g3c(a string) { for i := 0; i < len(a); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[:i+1]) // ERROR "Proved IsSliceInBounds$" + useString(a[:i+1]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } } func h1(a []byte) { c := a[:128] for i := range c { // ERROR "Induction variable: limits \[0,128\), increment 1$" - c[i] = byte(i) // ERROR "Proved IsInBounds$" + c[i] = byte(i) // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } } @@ -174,11 +174,11 @@ func h2(a []byte) { func k0(a [100]int) [100]int { for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$" a[i-11] = i - a[i-10] = i // ERROR "Proved IsInBounds$" - a[i-5] = i // ERROR "Proved IsInBounds$" - a[i] = i // ERROR "Proved IsInBounds$" - a[i+5] = i // ERROR "Proved IsInBounds$" - a[i+10] = i // ERROR "Proved IsInBounds$" + a[i-10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+5] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" a[i+11] = i } return a @@ -186,13 +186,13 @@ func k0(a [100]int) [100]int { func k1(a [100]int) [100]int { for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$" - useSlice(a[:i-11]) - useSlice(a[:i-10]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[:i-5]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[:i]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[:i+5]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[:i+10]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[:i+11]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i-11]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i-10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i-5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i]) // ERROR 
"(\([0-9]+\) )?Proved IsSliceInBounds$" "(\([0-9]+\) )?Proved Geq64$" + useSlice(a[:i+5]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i+10]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[:i+11]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" useSlice(a[:i+12]) } @@ -202,12 +202,12 @@ func k1(a [100]int) [100]int { func k2(a [100]int) [100]int { for i := 10; i < 90; i++ { // ERROR "Induction variable: limits \[10,90\), increment 1$" useSlice(a[i-11:]) - useSlice(a[i-10:]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[i-5:]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[i:]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[i+5:]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[i+10:]) // ERROR "Proved IsSliceInBounds$" - useSlice(a[i+11:]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[i-10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i-5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i+5:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i+10:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" + useSlice(a[i+11:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" useSlice(a[i+12:]) } return a @@ -216,7 +216,7 @@ func k2(a [100]int) [100]int { func k3(a [100]int) [100]int { for i := -10; i < 90; i++ { // ERROR "Induction variable: limits \[-10,90\), increment 1$" a[i+9] = i - a[i+10] = i // ERROR "Proved IsInBounds$" + a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" a[i+11] = i } return a @@ -225,7 +225,7 @@ func k3(a [100]int) [100]int { func k3neg(a [100]int) [100]int { for i := 89; i > -11; i-- { // ERROR "Induction variable: limits \(-11,89\], increment 1$" a[i+9] = i - a[i+10] = i // ERROR "Proved IsInBounds$" + a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" a[i+11] = i } return a @@ -234,7 +234,7 @@ func k3neg(a [100]int) [100]int { func k3neg2(a [100]int) [100]int { for i := 89; i >= 
-10; i-- { // ERROR "Induction variable: limits \[-10,89\], increment 1$" a[i+9] = i - a[i+10] = i // ERROR "Proved IsInBounds$" + a[i+10] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" a[i+11] = i } return a @@ -243,16 +243,16 @@ func k3neg2(a [100]int) [100]int { func k4(a [100]int) [100]int { min := (-1) << 63 for i := min; i < min+50; i++ { // ERROR "Induction variable: limits \[-9223372036854775808,-9223372036854775758\), increment 1$" - a[i-min] = i // ERROR "Proved IsInBounds$" + a[i-min] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return a } func k5(a [100]int) [100]int { max := (1 << 63) - 1 - for i := max - 50; i < max; i++ { // ERROR "Induction variable: limits \[9223372036854775757,9223372036854775807\), increment 1" - a[i-max+50] = i // ERROR "Proved IsInBounds$" - a[i-(max-70)] = i // ERROR "Proved IsInBounds$" + for i := max - 50; i < max; i++ { // ERROR "Induction variable: limits \[9223372036854775757,9223372036854775807\), increment 1$" + a[i-max+50] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" + a[i-(max-70)] = i // ERROR "(\([0-9]+\) )?Proved IsInBounds$" } return a } @@ -275,17 +275,17 @@ func nobce1() { func nobce2(a string) { for i := int64(0); i < int64(len(a)); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } for i := int64(0); i < int64(len(a))-31337; i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } for i := int64(0); i < int64(len(a))+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" - useString(a[i:]) // ERROR "Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } j := int64(len(a)) - 123 for i := int64(0); i < j+123+int64(-1<<63); i++ { // ERROR "Induction variable: limits 
\[0,\?\), increment 1$" - useString(a[i:]) // ERROR "Proved IsSliceInBounds$" + useString(a[i:]) // ERROR "(\([0-9]+\) )?Proved IsSliceInBounds$" } for i := int64(0); i < j+122+int64(-1<<63); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" // len(a)-123+122+MinInt overflows when len(a) == 0, so a bound check is needed here diff --git a/test/nilptr.go b/test/nilptr.go index 8d674a7098212..90f57c54b6c9c 100644 --- a/test/nilptr.go +++ b/test/nilptr.go @@ -7,6 +7,9 @@ // Test that the implementation catches nil ptr indirection // in a large address space. +// +build !aix +// Address space starts at 1<<32 on AIX, so dummy is too far. + package main import "unsafe" diff --git a/test/nilptr3.go b/test/nilptr3.go index a22e60ef11f61..e0f2ed9767659 100644 --- a/test/nilptr3.go +++ b/test/nilptr3.go @@ -1,6 +1,7 @@ // errorcheck -0 -d=nil // +build !wasm +// +build !aix // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -192,21 +193,6 @@ func f4(x *[10]int) { _ = &x[9] // ERROR "removed[a-z ]* nil check" } -func f5(p *float32, q *float64, r *float32, s *float64) float64 { - x := float64(*p) // ERROR "removed nil check" - y := *q // ERROR "removed nil check" - *r = 7 // ERROR "removed nil check" - *s = 9 // ERROR "removed nil check" - return x + y -} - -type T [29]byte - -func f6(p, q *T) { - x := *p // ERROR "removed nil check" - *q = x // ERROR "removed nil check" -} - func m1(m map[int][80]byte) byte { v := m[3] // ERROR "removed nil check" return v[5] @@ -246,8 +232,8 @@ type TT struct { func f(t *TT) *byte { // See issue 17242. 
- s := &t.SS // ERROR "removed nil check" - return &s.x // ERROR "generated nil check" + s := &t.SS // ERROR "generated nil check" + return &s.x // ERROR "removed nil check" } // make sure not to do nil check for newobject @@ -257,11 +243,6 @@ func f7() (*Struct, float64) { return t, *p // ERROR "removed nil check" } -// make sure to remove nil check for memory move (issue #18003) -func f8(t *[8]int) [8]int { - return *t // ERROR "removed nil check" -} - func f9() []int { x := new([1]int) x[0] = 1 // ERROR "removed nil check" diff --git a/test/nilptr3_wasm.go b/test/nilptr3_wasm.go deleted file mode 100644 index 9376d42097454..0000000000000 --- a/test/nilptr3_wasm.go +++ /dev/null @@ -1,270 +0,0 @@ -// errorcheck -0 -d=nil - -// +build wasm - -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Test that nil checks are removed. -// Optimization is enabled. - -package p - -type Struct struct { - X int - Y float64 -} - -type BigStruct struct { - X int - Y float64 - A [1 << 20]int - Z string -} - -type Empty struct { -} - -type Empty1 struct { - Empty -} - -var ( - intp *int - arrayp *[10]int - array0p *[0]int - bigarrayp *[1 << 26]int - structp *Struct - bigstructp *BigStruct - emptyp *Empty - empty1p *Empty1 -) - -func f1() { - _ = *intp // ERROR "generated nil check" - - // This one should be removed but the block copy needs - // to be turned into its own pseudo-op in order to see - // the indirect. - _ = *arrayp // ERROR "generated nil check" - - // 0-byte indirect doesn't suffice. - // we don't registerize globals, so there are no removed.* nil checks. 
- _ = *array0p // ERROR "generated nil check" - _ = *array0p // ERROR "removed nil check" - - _ = *intp // ERROR "removed nil check" - _ = *arrayp // ERROR "removed nil check" - _ = *structp // ERROR "generated nil check" - _ = *emptyp // ERROR "generated nil check" - _ = *arrayp // ERROR "removed nil check" -} - -func f2() { - var ( - intp *int - arrayp *[10]int - array0p *[0]int - bigarrayp *[1 << 20]int - structp *Struct - bigstructp *BigStruct - emptyp *Empty - empty1p *Empty1 - ) - - _ = *intp // ERROR "generated nil check" - _ = *arrayp // ERROR "generated nil check" - _ = *array0p // ERROR "generated nil check" - _ = *array0p // ERROR "removed.* nil check" - _ = *intp // ERROR "removed.* nil check" - _ = *arrayp // ERROR "removed.* nil check" - _ = *structp // ERROR "generated nil check" - _ = *emptyp // ERROR "generated nil check" - _ = *arrayp // ERROR "removed.* nil check" - _ = *bigarrayp // ERROR "generated nil check" ARM removed nil check before indirect!! - _ = *bigstructp // ERROR "generated nil check" - _ = *empty1p // ERROR "generated nil check" -} - -func fx10k() *[10000]int - -var b bool - -func f3(x *[10000]int) { - // Using a huge type and huge offsets so the compiler - // does not expect the memory hardware to fault. - _ = x[9999] // ERROR "generated nil check" - - for { - if x[9999] != 0 { // ERROR "removed nil check" - break - } - } - - x = fx10k() - _ = x[9999] // ERROR "generated nil check" - if b { - _ = x[9999] // ERROR "removed.* nil check" - } else { - _ = x[9999] // ERROR "removed.* nil check" - } - _ = x[9999] // ERROR "removed nil check" - - x = fx10k() - if b { - _ = x[9999] // ERROR "generated nil check" - } else { - _ = x[9999] // ERROR "generated nil check" - } - _ = x[9999] // ERROR "generated nil check" - - fx10k() - // This one is a bit redundant, if we figured out that - // x wasn't going to change across the function call. - // But it's a little complex to do and in practice doesn't - // matter enough. 
- _ = x[9999] // ERROR "removed nil check" -} - -func f3a() { - x := fx10k() - y := fx10k() - z := fx10k() - _ = &x[9] // ERROR "generated nil check" - y = z - _ = &x[9] // ERROR "removed.* nil check" - x = y - _ = &x[9] // ERROR "generated nil check" -} - -func f3b() { - x := fx10k() - y := fx10k() - _ = &x[9] // ERROR "generated nil check" - y = x - _ = &x[9] // ERROR "removed.* nil check" - x = y - _ = &x[9] // ERROR "removed.* nil check" -} - -func fx10() *[10]int - -func f4(x *[10]int) { - // Most of these have no checks because a real memory reference follows, - // and the offset is small enough that if x is nil, the address will still be - // in the first unmapped page of memory. - - _ = x[9] // ERROR "generated nil check" // bug: would like to remove this check (but nilcheck and load are in different blocks) - - for { - if x[9] != 0 { // ERROR "removed nil check" - break - } - } - - x = fx10() - _ = x[9] // ERROR "generated nil check" // bug would like to remove before indirect - if b { - _ = x[9] // ERROR "removed nil check" - } else { - _ = x[9] // ERROR "removed nil check" - } - _ = x[9] // ERROR "removed nil check" - - x = fx10() - if b { - _ = x[9] // ERROR "generated nil check" // bug would like to remove before indirect - } else { - _ = &x[9] // ERROR "generated nil check" - } - _ = x[9] // ERROR "generated nil check" // bug would like to remove before indirect - - fx10() - _ = x[9] // ERROR "removed nil check" - - x = fx10() - y := fx10() - _ = &x[9] // ERROR "generated nil check" - y = x - _ = &x[9] // ERROR "removed[a-z ]* nil check" - x = y - _ = &x[9] // ERROR "removed[a-z ]* nil check" -} - -func f5(p *float32, q *float64, r *float32, s *float64) float64 { - x := float64(*p) // ERROR "generated nil check" - y := *q // ERROR "generated nil check" - *r = 7 // ERROR "generated nil check" - *s = 9 // ERROR "generated nil check" - return x + y -} - -type T [29]byte - -func f6(p, q *T) { - x := *p // ERROR "generated nil check" - *q = x // ERROR 
"generated nil check" -} - -func m1(m map[int][80]byte) byte { - v := m[3] // ERROR "removed nil check" - return v[5] -} -func m2(m map[int][800]byte) byte { - v := m[3] // ERROR "removed nil check" - return v[5] -} -func m3(m map[int][80]byte) (byte, bool) { - v, ok := m[3] // ERROR "removed nil check" - return v[5], ok -} -func m4(m map[int][800]byte) (byte, bool) { - v, ok := m[3] // ERROR "removed nil check" - return v[5], ok -} -func p1() byte { - p := new([100]byte) - return p[5] // ERROR "removed nil check" -} - -// make sure not to do nil check for access of PAUTOHEAP -//go:noinline -func (p *Struct) m() {} -func c1() { - var x Struct - func() { x.m() }() // ERROR "removed nil check" -} - -type SS struct { - x byte -} - -type TT struct { - SS -} - -func f(t *TT) *byte { - // See issue 17242. - s := &t.SS // ERROR "removed nil check" - return &s.x // ERROR "generated nil check" -} - -// make sure not to do nil check for newobject -func f7() (*Struct, float64) { - t := new(Struct) - p := &t.Y // ERROR "removed nil check" - return t, *p // ERROR "removed nil check" -} - -// make sure to remove nil check for memory move (issue #18003) -func f8(t *[8]int) [8]int { - return *t // ERROR "generated nil check" -} - -func f9() []int { - x := new([1]int) - x[0] = 1 // ERROR "removed nil check" - y := x[:] // ERROR "removed nil check" - return y -} diff --git a/test/nilptr5.go b/test/nilptr5.go new file mode 100644 index 0000000000000..2c48c0b261019 --- /dev/null +++ b/test/nilptr5.go @@ -0,0 +1,33 @@ +// errorcheck -0 -d=nil + +// +build !wasm +// +build !aix + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that nil checks are removed. +// Optimization is enabled. 
+ +package p + +func f5(p *float32, q *float64, r *float32, s *float64) float64 { + x := float64(*p) // ERROR "removed nil check" + y := *q // ERROR "removed nil check" + *r = 7 // ERROR "removed nil check" + *s = 9 // ERROR "removed nil check" + return x + y +} + +type T [29]byte + +func f6(p, q *T) { + x := *p // ERROR "removed nil check" + *q = x // ERROR "removed nil check" +} + +// make sure to remove nil check for memory move (issue #18003) +func f8(t *[8]int) [8]int { + return *t // ERROR "removed nil check" +} diff --git a/test/nilptr5_aix.go b/test/nilptr5_aix.go new file mode 100644 index 0000000000000..142780718b528 --- /dev/null +++ b/test/nilptr5_aix.go @@ -0,0 +1,32 @@ +// errorcheck -0 -d=nil + +// +build aix + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that nil checks are removed. +// Optimization is enabled. + +package p + +func f5(p *float32, q *float64, r *float32, s *float64) float64 { + x := float64(*p) // ERROR "generated nil check" + y := *q // ERROR "generated nil check" + *r = 7 // ERROR "removed nil check" + *s = 9 // ERROR "removed nil check" + return x + y +} + +type T [29]byte + +func f6(p, q *T) { + x := *p // ERROR "generated nil check" + *q = x // ERROR "removed nil check" +} + +// make sure to remove nil check for memory move (issue #18003) +func f8(t *[8]int) [8]int { + return *t // ERROR "generated nil check" +} diff --git a/test/nilptr5_wasm.go b/test/nilptr5_wasm.go new file mode 100644 index 0000000000000..6ef8a02e90820 --- /dev/null +++ b/test/nilptr5_wasm.go @@ -0,0 +1,32 @@ +// errorcheck -0 -d=nil + +// +build wasm + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that nil checks are removed. +// Optimization is enabled. 
+ +package p + +func f5(p *float32, q *float64, r *float32, s *float64) float64 { + x := float64(*p) // ERROR "generated nil check" + y := *q // ERROR "generated nil check" + *r = 7 // ERROR "generated nil check" + *s = 9 // ERROR "generated nil check" + return x + y +} + +type T [29]byte + +func f6(p, q *T) { + x := *p // ERROR "generated nil check" + *q = x // ERROR "generated nil check" +} + +// make sure to remove nil check for memory move (issue #18003) +func f8(t *[8]int) [8]int { + return *t // ERROR "generated nil check" +} diff --git a/test/nilptr_aix.go b/test/nilptr_aix.go new file mode 100644 index 0000000000000..ea5fcc3f4e823 --- /dev/null +++ b/test/nilptr_aix.go @@ -0,0 +1,185 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that the implementation catches nil ptr indirection +// in a large address space. + +// +build aix + +package main + +import "unsafe" + +// Having a big address space means that indexing +// at a 1G + 256 MB offset from a nil pointer might not +// cause a memory access fault. This test checks +// that Go is doing the correct explicit checks to catch +// these nil pointer accesses, not just relying on the hardware. +// The 1G offset is used because AIX addresses start after 1G. +var dummy [256 << 20]byte // give us a big address space + +func main() { + // the test only tests what we intend to test + // if dummy starts in the first 256 MB of memory. + // otherwise there might not be anything mapped + // at the address that might be accidentally + // dereferenced below. 
+ if uintptr(unsafe.Pointer(&dummy)) < 1<<32 { + panic("dummy not far enough") + } + + shouldPanic(p1) + shouldPanic(p2) + shouldPanic(p3) + shouldPanic(p4) + shouldPanic(p5) + shouldPanic(p6) + shouldPanic(p7) + shouldPanic(p8) + shouldPanic(p9) + shouldPanic(p10) + shouldPanic(p11) + shouldPanic(p12) + shouldPanic(p13) + shouldPanic(p14) + shouldPanic(p15) + shouldPanic(p16) +} + +func shouldPanic(f func()) { + defer func() { + if recover() == nil { + panic("memory reference did not panic") + } + }() + f() +} + +func p1() { + // Array index. + var p *[1 << 33]byte = nil + println(p[1<<32+256<<20]) // very likely to be inside dummy, but should panic +} + +var xb byte + +func p2() { + var p *[1 << 33]byte = nil + xb = 123 + + // Array index. + println(p[uintptr(unsafe.Pointer(&xb))]) // should panic +} + +func p3() { + // Array to slice. + var p *[1 << 33]byte = nil + var x []byte = p[0:] // should panic + _ = x +} + +var q *[1 << 33]byte + +func p4() { + // Array to slice. + var x []byte + var y = &x + *y = q[0:] // should crash (uses arraytoslice runtime routine) +} + +func fb([]byte) { + panic("unreachable") +} + +func p5() { + // Array to slice. + var p *[1 << 33]byte = nil + fb(p[0:]) // should crash +} + +func p6() { + // Array to slice. + var p *[1 << 33]byte = nil + var _ []byte = p[10 : len(p)-10] // should crash +} + +type T struct { + x [1<<32 + 256<<20]byte + i int +} + +func f() *T { + return nil +} + +var y *T +var x = &y + +func p7() { + // Struct field access with large offset. + println(f().i) // should crash +} + +func p8() { + // Struct field access with large offset. + println((*x).i) // should crash +} + +func p9() { + // Struct field access with large offset. + var t *T + println(&t.i) // should crash +} + +func p10() { + // Struct field access with large offset. 
+ var t *T + println(t.i) // should crash +} + +type T1 struct { + T +} + +type T2 struct { + *T1 +} + +func p11() { + t := &T2{} + p := &t.i + println(*p) +} + +// ADDR(DOT(IND(p))) needs a check also +func p12() { + var p *T = nil + println(*(&((*p).i))) +} + +// Tests suggested in golang.org/issue/6080. + +func p13() { + var x *[10]int + y := x[:] + _ = y +} + +func p14() { + println((*[1]int)(nil)[:]) +} + +func p15() { + for i := range (*[1]int)(nil)[:] { + _ = i + } +} + +func p16() { + for i, v := range (*[1]int)(nil)[:] { + _ = i + v + } +} diff --git a/test/nosplit.go b/test/nosplit.go index e6cd04e563060..734f456cc990c 100644 --- a/test/nosplit.go +++ b/test/nosplit.go @@ -1,4 +1,4 @@ -// +build !nacl,!js +// +build !nacl,!js,!aix,!gcflags_noopt // run // Copyright 2014 The Go Authors. All rights reserved. @@ -118,11 +118,11 @@ main 136 # (CallSize is 32 on ppc64, 8 on amd64 for frame pointer.) main 96 nosplit main 100 nosplit; REJECT ppc64 ppc64le -main 104 nosplit; REJECT ppc64 ppc64le +main 104 nosplit; REJECT ppc64 ppc64le arm64 main 108 nosplit; REJECT ppc64 ppc64le -main 112 nosplit; REJECT ppc64 ppc64le +main 112 nosplit; REJECT ppc64 ppc64le arm64 main 116 nosplit; REJECT ppc64 ppc64le -main 120 nosplit; REJECT ppc64 ppc64le amd64 +main 120 nosplit; REJECT ppc64 ppc64le amd64 arm64 main 124 nosplit; REJECT ppc64 ppc64le amd64 main 128 nosplit; REJECT main 132 nosplit; REJECT @@ -136,11 +136,11 @@ main 136 nosplit; REJECT # Because AMD64 uses frame pointer, it has 8 fewer bytes. 
main 96 nosplit call f; f 0 nosplit main 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le -main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le +main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le arm64 main 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le -main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 +main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64 main 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 -main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 +main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 arm64 main 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386 main 128 nosplit call f; f 0 nosplit; REJECT main 132 nosplit call f; f 0 nosplit; REJECT @@ -152,11 +152,11 @@ main 136 nosplit call f; f 0 nosplit; REJECT # Architectures differ in the same way as before. main 96 nosplit call f; f 0 call f main 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le -main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 +main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 main 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 -main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 +main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 arm64 main 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 -main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386 +main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386 arm64 main 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386 main 128 nosplit call f; f 0 call f; REJECT main 132 nosplit call f; f 0 call f; REJECT @@ -165,11 +165,11 @@ main 136 nosplit call f; f 0 call f; REJECT # Indirect calls are assumed to be splitting functions. 
main 96 nosplit callind main 100 nosplit callind; REJECT ppc64 ppc64le -main 104 nosplit callind; REJECT ppc64 ppc64le amd64 +main 104 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 main 108 nosplit callind; REJECT ppc64 ppc64le amd64 -main 112 nosplit callind; REJECT ppc64 ppc64le amd64 +main 112 nosplit callind; REJECT ppc64 ppc64le amd64 arm64 main 116 nosplit callind; REJECT ppc64 ppc64le amd64 -main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386 +main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386 arm64 main 124 nosplit callind; REJECT ppc64 ppc64le amd64 386 main 128 nosplit callind; REJECT main 132 nosplit callind; REJECT @@ -319,7 +319,7 @@ TestCases: } } - if size%ptrSize == 4 || goarch == "arm64" && size != 0 && (size+8)%16 != 0 { + if size%ptrSize == 4 { continue TestCases } nosplit := m[3] diff --git a/test/notinheap.go b/test/notinheap.go index 44b79646ef70c..16c3f8faf0a67 100644 --- a/test/notinheap.go +++ b/test/notinheap.go @@ -46,10 +46,18 @@ type t1 struct{ x int } //go:notinheap type t2 t1 +//go:notinheap +type t3 byte + +//go:notinheap +type t4 rune + var sink interface{} func i() { sink = new(t1) // no error sink = (*t2)(new(t1)) // ERROR "cannot convert(.|\n)*t2 is go:notinheap" sink = (*t2)(new(struct{ x int })) // ERROR "cannot convert(.|\n)*t2 is go:notinheap" + sink = []t3("foo") // ERROR "cannot convert(.|\n)*t3 is go:notinheap" + sink = []t4("bar") // ERROR "cannot convert(.|\n)*t4 is go:notinheap" } diff --git a/test/notinheap3.go b/test/notinheap3.go index d48c2a0cc974f..5ace8d6793f21 100644 --- a/test/notinheap3.go +++ b/test/notinheap3.go @@ -58,3 +58,19 @@ func h() { _ = append(v1s, v1s...) // no barrier _ = append(v2s, v2s...) 
// ERROR "write barrier" } + +// Slice clearing + +var ( + sliceIH []*ih + sliceNIH []*nih +) + +func sliceClear() { + for i := range sliceIH { + sliceIH[i] = nil // ERROR "write barrier" + } + for i := range sliceNIH { + sliceNIH[i] = nil // no barrier + } +} diff --git a/test/prove.go b/test/prove.go index 45cee9e8b5803..eb0fb2a15e813 100644 --- a/test/prove.go +++ b/test/prove.go @@ -62,7 +62,7 @@ func f1c(a []int, i int64) int { } func f2(a []int) int { - for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1" + for i := range a { // ERROR "Induction variable: limits \[0,\?\), increment 1$" a[i+1] = i a[i+1] = i // ERROR "Proved IsInBounds$" } @@ -269,7 +269,7 @@ func f11b(a []int, i int) { func f11c(a []int, i int) { useSlice(a[:i]) - useSlice(a[:i]) // ERROR "Proved IsSliceInBounds$" + useSlice(a[:i]) // ERROR "Proved Geq64$" "Proved IsSliceInBounds$" } func f11d(a []int, i int) { @@ -464,12 +464,12 @@ func f16(s []int) []int { } func f17(b []int) { - for i := 0; i < len(b); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1" + for i := 0; i < len(b); i++ { // ERROR "Induction variable: limits \[0,\?\), increment 1$" // This tests for i <= cap, which we can only prove // using the derived relation between len and cap. // This depends on finding the contradiction, since we // don't query this condition directly. - useSlice(b[:i]) // ERROR "Proved IsSliceInBounds$" + useSlice(b[:i]) // ERROR "Proved Geq64$" "Proved IsSliceInBounds$" } } @@ -488,6 +488,20 @@ func f18(b []int, x int, y uint) { } } +func f19() (e int64, err error) { + // Issue 29502: slice[:0] is incorrectly disproved. + var stack []int64 + stack = append(stack, 123) + if len(stack) > 1 { + panic("too many elements") + } + last := len(stack) - 1 + e = stack[last] + // Buggy compiler prints "Disproved Geq64" for the next line. 
+ stack = stack[:last] // ERROR "Proved IsSliceInBounds" + return e, nil +} + func sm1(b []int, x int) { // Test constant argument to slicemask. useSlice(b[2:8]) // ERROR "Proved slicemask not needed$" @@ -530,7 +544,7 @@ func fence1(b []int, x, y int) { } if len(b) < cap(b) { // This eliminates the growslice path. - b = append(b, 1) // ERROR "Disproved Greater64$" + b = append(b, 1) // ERROR "Disproved Greater64U$" } } @@ -542,7 +556,7 @@ func fence2(x, y int) { } } -func fence3(b []int, x, y int64) { +func fence3(b, c []int, x, y int64) { if x-1 >= y { if x <= y { // Can't prove because x may have wrapped. return @@ -555,6 +569,8 @@ func fence3(b []int, x, y int64) { } } + c[len(c)-1] = 0 // Can't prove because len(c) might be 0 + if n := len(b); n > 0 { b[n-1] = 0 // ERROR "Proved IsInBounds$" } @@ -577,18 +593,18 @@ func fence4(x, y int64) { func trans1(x, y int64) { if x > 5 { if y > x { - if y > 2 { // ERROR "Proved Greater64" + if y > 2 { // ERROR "Proved Greater64$" return } } else if y == x { - if y > 5 { // ERROR "Proved Greater64" + if y > 5 { // ERROR "Proved Greater64$" return } } } if x >= 10 { if y > x { - if y > 10 { // ERROR "Proved Greater64" + if y > 10 { // ERROR "Proved Greater64$" return } } @@ -622,7 +638,7 @@ func natcmp(x, y []uint) (r int) { } i := m - 1 - for i > 0 && // ERROR "Induction variable: limits \(0,\?\], increment 1" + for i > 0 && // ERROR "Induction variable: limits \(0,\?\], increment 1$" x[i] == // ERROR "Proved IsInBounds$" y[i] { // ERROR "Proved IsInBounds$" i-- @@ -684,7 +700,7 @@ func range2(b [][32]int) { if i < len(b) { // ERROR "Proved Less64$" println("x") } - if i >= 0 { // ERROR "Proved Geq64" + if i >= 0 { // ERROR "Proved Geq64$" println("x") } } diff --git a/test/run.go b/test/run.go index 99ef79feb180c..ad38d420c9bbc 100644 --- a/test/run.go +++ b/test/run.go @@ -78,6 +78,7 @@ func main() { // Disable parallelism if printing or if using a simulator. 
if *verbose || len(findExecCmd()) > 0 { *numParallel = 1 + *runoutputLimit = 1 } ratec = make(chan bool, *numParallel) @@ -321,7 +322,7 @@ func goDirFiles(longdir string) (filter []os.FileInfo, err error) { return } -var packageRE = regexp.MustCompile(`(?m)^package (\w+)`) +var packageRE = regexp.MustCompile(`(?m)^package ([\p{Lu}\p{Ll}\w]+)`) // If singlefilepkgs is set, each file is considered a separate package // even if the package names are the same. @@ -354,8 +355,9 @@ func goDirPackages(longdir string, singlefilepkgs bool) ([][]string, error) { } type context struct { - GOOS string - GOARCH string + GOOS string + GOARCH string + noOptEnv bool } // shouldTest looks for build tags in a source file and returns @@ -375,10 +377,13 @@ func shouldTest(src string, goos, goarch string) (ok bool, whyNot string) { if len(line) == 0 || line[0] != '+' { continue } + gcFlags := os.Getenv("GO_GCFLAGS") ctxt := &context{ - GOOS: goos, - GOARCH: goarch, + GOOS: goos, + GOARCH: goarch, + noOptEnv: strings.Contains(gcFlags, "-N") || strings.Contains(gcFlags, "-l"), } + words := strings.Fields(line) if words[0] == "+build" { ok := false @@ -425,6 +430,10 @@ func (ctxt *context) match(name string) bool { return true } + if ctxt.noOptEnv && name == "gcflags_noopt" { + return true + } + if name == "test_run" { return true } @@ -435,7 +444,7 @@ func (ctxt *context) match(name string) bool { func init() { checkShouldTest() } // goGcflags returns the -gcflags argument to use with go build / go run. -// This must match the flags used for building the standard libary, +// This must match the flags used for building the standard library, // or else the commands will rebuild any needed packages (like runtime) // over and over. 
func goGcflags() string { @@ -550,6 +559,19 @@ func (t *test) run() { } args = args[1:] } + if action == "errorcheck" { + found := false + for i, f := range flags { + if strings.HasPrefix(f, "-d=") { + flags[i] = f + ",ssa/check/on" + found = true + break + } + } + if !found { + flags = append(flags, "-d=ssa/check/on") + } + } t.makeTempDir() if !*keep { @@ -620,7 +642,8 @@ func (t *test) run() { // against a set of regexps in comments. ops := t.wantedAsmOpcodes(long) for _, env := range ops.Envs() { - cmdline := []string{"build", "-gcflags", "-S"} + // -S=2 forces outermost line numbers when disassembling inlined code. + cmdline := []string{"build", "-gcflags", "-S=2"} cmdline = append(cmdline, flags...) cmdline = append(cmdline, long) cmd := exec.Command(goTool(), cmdline...) @@ -787,25 +810,37 @@ func (t *test) run() { t.err = dirErr break } - var gos []os.FileInfo - var asms []os.FileInfo + var gos []string + var asms []string for _, file := range files { switch filepath.Ext(file.Name()) { case ".go": - gos = append(gos, file) + gos = append(gos, filepath.Join(longdir, file.Name())) case ".s": - asms = append(asms, file) + asms = append(asms, filepath.Join(longdir, file.Name())) } } + if len(asms) > 0 { + emptyHdrFile := filepath.Join(t.tempDir, "go_asm.h") + if err := ioutil.WriteFile(emptyHdrFile, nil, 0666); err != nil { + t.err = fmt.Errorf("write empty go_asm.h: %s", err) + return + } + cmd := []string{goTool(), "tool", "asm", "-gensymabis", "-o", "symabis"} + cmd = append(cmd, asms...) + _, err = runcmd(cmd...) + if err != nil { + t.err = err + break + } + } var objs []string cmd := []string{goTool(), "tool", "compile", "-e", "-D", ".", "-I", ".", "-o", "go.o"} if len(asms) > 0 { - cmd = append(cmd, "-asmhdr", "go_asm.h") - } - for _, file := range gos { - cmd = append(cmd, filepath.Join(longdir, file.Name())) + cmd = append(cmd, "-asmhdr", "go_asm.h", "-symabis", "symabis") } + cmd = append(cmd, gos...) _, err := runcmd(cmd...) 
if err != nil { t.err = err @@ -814,9 +849,7 @@ func (t *test) run() { objs = append(objs, "go.o") if len(asms) > 0 { cmd = []string{goTool(), "tool", "asm", "-e", "-I", ".", "-o", "asm.o"} - for _, file := range asms { - cmd = append(cmd, filepath.Join(longdir, file.Name())) - } + cmd = append(cmd, asms...) _, err = runcmd(cmd...) if err != nil { t.err = err @@ -1063,10 +1096,10 @@ func splitOutput(out string, wantAuto bool) []string { // this function will report an error. // Likewise if outStr does not have an error for a line which has a comment, // or if the error message does not match the . -// The syntax is Perl but its best to stick to egrep. +// The syntax is Perl but it's best to stick to egrep. // // Sources files are supplied as fullshort slice. -// It consists of pairs: full path to source file and it's base name. +// It consists of pairs: full path to source file and its base name. func (t *test) errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) { defer func() { if *verbose && err != nil { @@ -1180,7 +1213,7 @@ func (t *test) updateErrors(out, file string) { msg := errStr[colon2+2:] msg = strings.Replace(msg, file, base, -1) // normalize file mentions in error itself msg = strings.TrimLeft(msg, " \t") - for _, r := range []string{`\`, `*`, `+`, `[`, `]`, `(`, `)`} { + for _, r := range []string{`\`, `*`, `+`, `?`, `[`, `]`, `(`, `)`} { msg = strings.Replace(msg, r, `\`+r, -1) } msg = strings.Replace(msg, `"`, `.`, -1) @@ -1329,11 +1362,12 @@ const ( var ( // Regexp to split a line in code and comment, trimming spaces - rxAsmComment = regexp.MustCompile(`^\s*(.*?)\s*(?:\/\/\s*(.+)\s*)?$`) + rxAsmComment = regexp.MustCompile(`^\s*(.*?)\s*(?://\s*(.+)\s*)?$`) - // Regexp to extract an architecture check: architecture name, followed by semi-colon, - // followed by a comma-separated list of opcode checks. 
- rxAsmPlatform = regexp.MustCompile(`(\w+)(/\w+)?(/\w*)?:(` + reMatchCheck + `(?:,` + reMatchCheck + `)*)`) + // Regexp to extract an architecture check: architecture name (or triplet), + // followed by semi-colon, followed by a comma-separated list of opcode checks. + // Extraneous spaces are ignored. + rxAsmPlatform = regexp.MustCompile(`(\w+)(/\w+)?(/\w*)?\s*:\s*(` + reMatchCheck + `(?:\s*,\s*` + reMatchCheck + `)*)`) // Regexp to extract a single opcoded check rxAsmCheck = regexp.MustCompile(reMatchCheck) diff --git a/test/safe/main.go b/test/safe/main.go deleted file mode 100644 index d173ed9266395..0000000000000 --- a/test/safe/main.go +++ /dev/null @@ -1,14 +0,0 @@ -// true - -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -// can't use local path with -u, use -I. instead -import "pkg" // ERROR "import unsafe package" - -func main() { - print(pkg.Float32bits(1.0)) -} diff --git a/test/safe/nousesafe.go b/test/safe/nousesafe.go deleted file mode 100644 index fcd25af315481..0000000000000 --- a/test/safe/nousesafe.go +++ /dev/null @@ -1,8 +0,0 @@ -// $G $D/pkg.go && pack grc pkg.a pkg.$A 2> /dev/null && rm pkg.$A && errchk $G -I . -u $D/main.go -// rm -f pkg.a - -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ignored diff --git a/test/safe/pkg.go b/test/safe/pkg.go deleted file mode 100644 index bebc43a214cb3..0000000000000 --- a/test/safe/pkg.go +++ /dev/null @@ -1,16 +0,0 @@ -// true - -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// a package that uses unsafe on the inside but not in it's api - -package pkg - -import "unsafe" - -// this should be inlinable -func Float32bits(f float32) uint32 { - return *(*uint32)(unsafe.Pointer(&f)) -} \ No newline at end of file diff --git a/test/safe/usesafe.go b/test/safe/usesafe.go deleted file mode 100644 index 5d0829e290b7c..0000000000000 --- a/test/safe/usesafe.go +++ /dev/null @@ -1,8 +0,0 @@ -// $G $D/pkg.go && pack grcS pkg.a pkg.$A 2> /dev/null && rm pkg.$A && $G -I . -u $D/main.go -// rm -f pkg.a - -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ignored diff --git a/test/sinit.go b/test/sinit.go index c4d0edf87136a..df4d50d36769c 100644 --- a/test/sinit.go +++ b/test/sinit.go @@ -43,15 +43,12 @@ var c = []int{1201, 1202, 1203} var aa = [3][3]int{[3]int{2001, 2002, 2003}, [3]int{2004, 2005, 2006}, [3]int{2007, 2008, 2009}} var as = [3]S{S{2101, 2102, 2103}, S{2104, 2105, 2106}, S{2107, 2108, 2109}} -var ac = [3][]int{[]int{2201, 2202, 2203}, []int{2204, 2205, 2206}, []int{2207, 2208, 2209}} var sa = SA{[3]int{3001, 3002, 3003}, [3]int{3004, 3005, 3006}, [3]int{3007, 3008, 3009}} var ss = SS{S{3101, 3102, 3103}, S{3104, 3105, 3106}, S{3107, 3108, 3109}} -var sc = SC{[]int{3201, 3202, 3203}, []int{3204, 3205, 3206}, []int{3207, 3208, 3209}} var ca = [][3]int{[3]int{4001, 4002, 4003}, [3]int{4004, 4005, 4006}, [3]int{4007, 4008, 4009}} var cs = []S{S{4101, 4102, 4103}, S{4104, 4105, 4106}, S{4107, 4108, 4109}} -var cc = [][]int{[]int{4201, 4202, 4203}, []int{4204, 4205, 4206}, []int{4207, 4208, 4209}} var answers = [...]int{ // s @@ -135,15 +132,12 @@ var copy_c = c var copy_aa = aa var copy_as = as -var copy_ac = ac var copy_sa = sa var copy_ss = ss -var copy_sc = sc var copy_ca = ca var copy_cs = cs -var copy_cc = cc var copy_answers = answers diff --git a/test/stackobj.go b/test/stackobj.go new file mode 100644 index 
0000000000000..b6af4bd8f31e1 --- /dev/null +++ b/test/stackobj.go @@ -0,0 +1,57 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" +) + +type HeapObj [8]int64 + +type StkObj struct { + h *HeapObj +} + +var n int +var c int = -1 + +func gc() { + // encourage heap object to be collected, and have its finalizer run. + runtime.GC() + runtime.GC() + runtime.GC() + n++ +} + +func main() { + f() + gc() // prior to stack objects, heap object is not collected until here + if c < 0 { + panic("heap object never collected") + } + if c != 1 { + panic(fmt.Sprintf("expected collection at phase 1, got phase %d", c)) + } +} + +func f() { + var s StkObj + s.h = new(HeapObj) + runtime.SetFinalizer(s.h, func(h *HeapObj) { + // Remember at what phase the heap object was collected. + c = n + }) + g(&s) + gc() +} + +func g(s *StkObj) { + gc() // heap object is still live here + runtime.KeepAlive(s) + gc() // heap object should be collected here +} diff --git a/test/stackobj2.go b/test/stackobj2.go new file mode 100644 index 0000000000000..a1abd9b1d1220 --- /dev/null +++ b/test/stackobj2.go @@ -0,0 +1,83 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" +) + +// linked list up the stack, to test lots of stack objects. + +type T struct { + // points to a heap object. Test will make sure it isn't freed. + data *int64 + // next pointer for a linked list of stack objects + next *T + // duplicate of next, to stress test the pointer buffers + // used during stack tracing. 
+ next2 *T +} + +func main() { + makelist(nil, 10000) +} + +func makelist(x *T, n int64) { + if n%2 != 0 { + panic("must be multiple of 2") + } + if n == 0 { + runtime.GC() + i := int64(1) + for ; x != nil; x, i = x.next, i+1 { + // Make sure x.data hasn't been collected. + if got := *x.data; got != i { + panic(fmt.Sprintf("bad data want %d, got %d", i, got)) + } + } + return + } + // Put 2 objects in each frame, to test intra-frame pointers. + // Use both orderings to ensure the linked list isn't always in address order. + var a, b T + if n%3 == 0 { + a.data = newInt(n) + a.next = x + a.next2 = x + b.data = newInt(n - 1) + b.next = &a + b.next2 = &a + x = &b + } else { + b.data = newInt(n) + b.next = x + b.next2 = x + a.data = newInt(n - 1) + a.next = &b + a.next2 = &b + x = &a + } + + makelist(x, n-2) +} + +// big enough and pointer-y enough to not be tinyalloc'd +type NotTiny struct { + n int64 + p *byte +} + +// newInt allocates n on the heap and returns a pointer to it. +func newInt(n int64) *int64 { + h := &NotTiny{n: n} + p := &h.n + escape = p + return p +} + +var escape *int64 diff --git a/test/stackobj3.go b/test/stackobj3.go new file mode 100644 index 0000000000000..8bb8fb3270581 --- /dev/null +++ b/test/stackobj3.go @@ -0,0 +1,93 @@ +// run + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure that ambiguously live arguments work correctly. + +package main + +import ( + "runtime" +) + +type HeapObj [8]int64 + +type StkObj struct { + h *HeapObj +} + +var n int +var c int = -1 + +func gc() { + // encourage heap object to be collected, and have its finalizer run. 
+ runtime.GC() + runtime.GC() + runtime.GC() + n++ +} + +var null StkObj + +var sink *HeapObj + +//go:noinline +func use(p *StkObj) { +} + +//go:noinline +func f(s StkObj, b bool) { + var p *StkObj + if b { + p = &s + } else { + p = &null + } + // use is required here to prevent the conditional + // code above from being executed after the first gc() call. + use(p) + // If b==false, h should be collected here. + gc() // 0 + sink = p.h + gc() // 1 + sink = nil + // If b==true, h should be collected here. + gc() // 2 +} + +func fTrue() { + var s StkObj + s.h = new(HeapObj) + c = -1 + n = 0 + runtime.SetFinalizer(s.h, func(h *HeapObj) { + // Remember at what phase the heap object was collected. + c = n + }) + f(s, true) + if c != 2 { + panic("bad liveness") + } +} + +func fFalse() { + var s StkObj + s.h = new(HeapObj) + c = -1 + n = 0 + runtime.SetFinalizer(s.h, func(h *HeapObj) { + // Remember at what phase the heap object was collected. + c = n + }) + f(s, false) + if c != 0 { + panic("bad liveness") + } +} + +func main() { + fTrue() + fFalse() +} diff --git a/test/typeswitch2.go b/test/typeswitch2.go index 1160b62e140fb..5958b7db8ebd7 100644 --- a/test/typeswitch2.go +++ b/test/typeswitch2.go @@ -4,7 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Verify that various erroneous type switches are caught be the compiler. +// Verify that various erroneous type switches are caught by the compiler. // Does not compile. package main diff --git a/test/typeswitch3.go b/test/typeswitch3.go index 58d4cba2d0c8d..138818756672d 100644 --- a/test/typeswitch3.go +++ b/test/typeswitch3.go @@ -4,7 +4,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Verify that erroneous type switches are caught be the compiler. +// Verify that erroneous type switches are caught by the compiler. // Issue 2700, among other things. // Does not compile. 
diff --git a/test/uintptrescapes2.go b/test/uintptrescapes2.go index c94bc148c81f5..2c8dfd7102789 100644 --- a/test/uintptrescapes2.go +++ b/test/uintptrescapes2.go @@ -30,14 +30,14 @@ func F4(...uintptr) {} // ERROR "escaping ...uintptr" func G() { var t int // ERROR "moved to heap" - F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "&t escapes to heap" + F1(uintptr(unsafe.Pointer(&t))) // ERROR "live at call to F1: .?autotmp" "&t escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" var t2 int // ERROR "moved to heap" F3(uintptr(unsafe.Pointer(&t2))) // ERROR "live at call to F3: .?autotmp" "&t2 escapes to heap" } func H() { var v int // ERROR "moved to heap" - F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" + F2(0, 1, uintptr(unsafe.Pointer(&v)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F2: .?autotmp" "escapes to heap" "stack object .autotmp_[0-9]+ unsafe.Pointer$" var v2 int // ERROR "moved to heap" F4(0, 1, uintptr(unsafe.Pointer(&v2)), 2) // ERROR "live at call to newobject: .?autotmp" "live at call to F4: .?autotmp" "escapes to heap" } diff --git a/test/writebarrier.go b/test/writebarrier.go index 55ba81e764e08..8d262dd203783 100644 --- a/test/writebarrier.go +++ b/test/writebarrier.go @@ -250,3 +250,14 @@ func f23c() { // also test partial assignments t23 = T23{p: &i23} // ERROR "write barrier" } + +var g int + +func f24() **int { + p := new(*int) + *p = &g // no write barrier here + return p +} +func f25() []string { + return []string{"abc", "def", "ghi"} // no write barrier here +}
    (.*?)