<assign>
<filesystem path="/usr/bin/xwalkctl" exec_label="User" />
<filesystem path="/usr/bin/xwalk-launcher" exec_label="User" />
+ <filesystem path="/usr/lib/xwalk/xwalk_backend" exec_label="User" />
<filesystem path="/usr/lib64/xwalk/xwalk_backend" exec_label="User" />
</assign>
</manifest>
%define _binary_payload w3.gzdio
Name: crosswalk
-Version: 10.39.226.0
+Version: 10.39.233.0
Release: 0
Summary: Chromium-based app runtime
License: (BSD-3-Clause and LGPL-2.1+)
List<Pair<String, String>> headers = new ArrayList<Pair<String, String>>();
headers.add(Pair.create("x-auto-login", xAutoLoginHeader));
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pageUrl = webServer.setResponse(path, html, headers);
final int callCount = loginRequestHelper.getCallCount();
loadUrlAsync(awContents, pageUrl);
assertEquals(expectedAccount, loginRequestHelper.getAccount());
assertEquals(expectedArgs, loginRequestHelper.getArgs());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
protected void setUp() throws Exception {
super.setUp();
AwContents.setShouldDownloadFavicons();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
private void init(TestAwContentsClientBase contentsClient) throws Exception {
@Override
public void setUp() throws Exception {
super.setUp();
- mServer = new TestWebServer(false);
+ mServer = TestWebServer.start();
mContentsClient = new TestAwContentsClient();
final AwTestContainerView testContainerView =
createAwTestContainerViewOnMainSync(mContentsClient);
mAwContents = mTestContainerView.getAwContents();
mShouldInterceptRequestHelper = mContentsClient.getShouldInterceptRequestHelper();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
@Override
protected void setUp() throws Exception {
super.setUp();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
final String path = "/testUpdateVisitedHistoryCallback.html";
final String html = "testUpdateVisitedHistoryCallback";
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pageUrl = webServer.setResponse(path, html, null);
final DoUpdateVisitedHistoryHelper doUpdateVisitedHistoryHelper =
mContentsClient.getDoUpdateVisitedHistoryHelper();
assertEquals(pageUrl, doUpdateVisitedHistoryHelper.getUrl());
assertEquals(true, doUpdateVisitedHistoryHelper.getIsReload());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
final String visitedLinks[] = {"http://foo.com", "http://bar.com", null};
final String html = "<a src=\"http://foo.com\">foo</a><a src=\"http://bar.com\">bar</a>";
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pageUrl = webServer.setResponse(path, html, null);
loadUrlSync(awContents, mContentsClient.getOnPageFinishedHelper(), pageUrl);
visitedHistoryHelper.waitForCallback(callCount);
loadUrlSync(awContents, mContentsClient.getOnPageFinishedHelper(), pageUrl);
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(mContentsClient);
final AwContents awContents = testContainer.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pagePath = "/clear_cache_test.html";
List<Pair<String, String>> headers = new ArrayList<Pair<String, String>>();
// Set Cache-Control headers to cache this request. One century should be long enough.
pageUrl);
assertEquals(2, webServer.getRequestCount(pagePath));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
final AwTestContainerView testView = createAwTestContainerViewOnMainSync(mContentsClient);
final AwContents awContents = testView.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String faviconUrl = webServer.setResponseBase64(
"/" + CommonResources.FAVICON_FILENAME, CommonResources.FAVICON_DATA_BASE64,
CommonResources.getImagePngHeaders(false));
assertTrue(awContents.getFavicon().sameAs(originalFavicon));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
downloadHeaders.add(Pair.create("Content-Type", mimeType));
downloadHeaders.add(Pair.create("Content-Length", Integer.toString(data.length())));
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pageUrl = webServer.setResponse(
"/download.txt", data, downloadHeaders);
final OnDownloadStartHelper downloadStartHelper =
assertEquals(mimeType, downloadStartHelper.getMimeType());
assertEquals(data.length(), downloadStartHelper.getContentLength());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(mContentsClient);
final AwContents awContents = testContainer.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String pagePath = "/test_can_inject_headers.html";
final String pageUrl = webServer.setResponse(
pagePath, "<html><body>foo</body></html>", null);
assertEquals(value.getValue(), matchingHeaders[0].getValue());
}
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
mContentsClient = new TestAwContentsClient();
mTestView = createAwTestContainerViewOnMainSync(mContentsClient);
mAwContents = mTestView.getAwContents();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
mOrigin = mWebServer.getBaseUrl();
AwSettings settings = getAwSettingsOnUiThread(mAwContents);
final String customUserAgentString =
"testUserAgentWithTestServerUserAgent";
- TestWebServer webServer = null;
String fileName = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String httpPath = "/testUserAgentWithTestServer.html";
final String url = webServer.setResponse(httpPath, "foo", null);
Header header = matchingHeaders[0];
assertEquals(customUserAgentString, header.getValue());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
settings.setJavaScriptEnabled(true);
ImagePageGenerator generator = new ImagePageGenerator(0, false);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String httpImageUrl = generator.getPageUrl(webServer);
settings.setImagesEnabled(false);
}
});
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@Feature({"AndroidWebView", "Preferences"})
public void testBlockNetworkImagesWithTwoViews() throws Throwable {
ViewPair views = createViews();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
runPerViewSettingsTest(
new AwSettingsImagesEnabledHelper(
views.getContainer0(),
webServer,
new ImagePageGenerator(1, true)));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
awSettings.setJavaScriptEnabled(true);
ImagePageGenerator generator = new ImagePageGenerator(0, false);
- TestWebServer webServer = null;
String fileName = null;
+ TestWebServer webServer = TestWebServer.start();
try {
// Set up http image.
- webServer = new TestWebServer(false);
final String httpPath = "/image.png";
final String imageUrl = webServer.setResponseBase64(
httpPath, generator.getImageSourceNoAdvance(),
assertEquals(1, webServer.getRequestCount(httpPath));
assertEquals("img_onload_fired", getTitleOnUiThread(awContents));
} finally {
+ webServer.shutdown();
if (fileName != null) TestFileUtil.deleteFile(fileName);
- if (webServer != null) webServer.shutdown();
}
}
final CallbackHelper callback = new CallbackHelper();
awSettings.setJavaScriptEnabled(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String httpPath = "/audio.mp3";
// Don't care about the response is correct or not, just want
// to know whether Url is accessed.
assertTrue(0 != webServer.getRequestCount(httpPath));
*/
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
clearCacheOnUiThread(awContents, true);
assertEquals(WebSettings.LOAD_DEFAULT, awSettings.getCacheMode());
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String htmlPath = "/testCacheMode.html";
final String url = webServer.setResponse(htmlPath, "response", null);
awSettings.setCacheMode(WebSettings.LOAD_CACHE_ELSE_NETWORK);
urlNotInCache);
assertEquals(0, webServer.getRequestCount(htmlNotInCachePath));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
assertEquals(WebSettings.LOAD_DEFAULT, awSettings.getCacheMode());
awSettings.setBlockNetworkLoads(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String htmlPath = "/testCacheModeWithBlockedNetworkLoads.html";
final String url = webServer.setResponse(htmlPath, "response", null);
loadUrlSyncAndExpectError(awContents,
url);
assertEquals(0, webServer.getRequestCount(htmlPath));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@Feature({"AndroidWebView", "Preferences"})
public void testCacheModeWithTwoViews() throws Throwable {
ViewPair views = createViews();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
runPerViewSettingsTest(
new AwSettingsCacheModeTestHelper(
views.getContainer0(), views.getClient0(), 0, webServer),
new AwSettingsCacheModeTestHelper(
views.getContainer1(), views.getClient1(), 1, webServer));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
// Note that the cache isn't actually enabled until the call to setAppCachePath.
settings.setAppCacheEnabled(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
ManifestTestHelper helper = new ManifestTestHelper(
webServer, "testAppCache.html", "appcache.manifest");
loadUrlSync(
helper.getHtmlUrl());
helper.waitUntilManifestIsRequested(0);
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
// AppCachePath setting is global, no need to set it for the second view.
settings1.setAppCacheEnabled(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
ManifestTestHelper helper0 = new ManifestTestHelper(
webServer, "testAppCache_0.html", "appcache.manifest_0");
loadUrlSync(
assertEquals(
prevManifestRequestCount, webServer.getRequestCount(helper1.getManifestPath()));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
TestWebServer httpsServer = null;
TestWebServer httpServer = null;
try {
- httpsServer = new TestWebServer(true);
- httpServer = new TestWebServer(false);
+ httpsServer = TestWebServer.startSsl();
+ httpServer = TestWebServer.start();
final String jsUrl = "/insecure.js";
final String imageUrl = "/insecure.png";
* 4. url2 onPageFinishedCalled
*/
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String redirectTargetPath = "/redirect_target.html";
final String redirectTargetUrl = webServer.setResponse(redirectTargetPath,
"<html><body>hello world</body></html>", null);
// onPageFinished needs to be called for redirectTargetUrl, but not for redirectUrl
assertEquals(redirectTargetUrl, onPageFinishedHelper.getUrl());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
TestCallbackHelperContainer.OnPageFinishedHelper onPageFinishedHelper =
mContentsClient.getOnPageFinishedHelper();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String testHtml = "<html><head>Header</head><body>Body</body></html>";
final String testPath = "/test.html";
final String syncPath = "/sync.html";
onPageFinishedHelper.waitForCallback(synchronizationPageCallCount);
assertEquals(syncUrl, onPageFinishedHelper.getUrl());
assertEquals(2, onPageFinishedHelper.getCallCount());
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
mContentsClient.getOnPageFinishedHelper();
enableJavaScriptOnUiThread(mAwContents);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String testHtml = "<html><head>Header</head><body>Body</body></html>";
final String testPath = "/test.html";
final String historyPath = "/history.html";
onPageFinishedHelper.waitForCallback(synchronizationPageCallCount);
assertEquals(syncUrl, onPageFinishedHelper.getUrl());
assertEquals(2, onPageFinishedHelper.getCallCount());
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
mContentsClient.getOnPageStartedHelper();
enableJavaScriptOnUiThread(mAwContents);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String testHtml = CommonResources.makeHtmlPageFrom("",
"<a href=\"#anchor\" id=\"link\">anchor</a>");
final String testPath = "/test.html";
onPageFinishedHelper.waitForCallback(onPageFinishedCallCount);
assertEquals(onPageStartedCallCount, onPageStartedHelper.getCallCount());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
@MediumTest
@Feature({"AndroidWebView"})
public void testStartup() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
String path = "/cookie_test.html";
String url = webServer.setResponse(path, CommonResources.ABOUT_HTML, null);
assertEquals("count=42", cookieManager.getCookie(url));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@MediumTest
@Feature({"AndroidWebView", "Privacy"})
public void testAcceptCookie() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
String path = "/cookie_test.html";
String responseStr =
"<html><head><title>TEST!</title></head><body>HELLO!</body></html>";
assertNotNull(cookie);
validateCookies(cookie, "test2", "header-test2");
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@MediumTest
@Feature({"AndroidWebView", "Privacy"})
public void testThirdPartyCookie() throws Throwable {
- TestWebServer webServer = null;
+ // In theory we need two servers to test this, one server ('the first party') which returns
+ // a response with a link to a second server ('the third party') at different origin. This
+ // second server attempts to set a cookie which should fail if AcceptThirdPartyCookie() is
+ // false.
+ // Strictly according to the letter of RFC6454 it should be possible to set this situation
+ // up with two TestServers on different ports (these count as having different origins) but
+ // Chrome is not strict about this and does not check the port. Instead we cheat by making
+ // some of the urls come from localhost and some from 127.0.0.1 which count (both in theory
+ // and practice) as having different origins.
+ TestWebServer webServer = TestWebServer.start();
try {
- // In theory we need two servers to test this, one server ('the first party')
- // which returns a response with a link to a second server ('the third party')
- // at different origin. This second server attempts to set a cookie which should
- // fail if AcceptThirdPartyCookie() is false.
- // Strictly according to the letter of RFC6454 it should be possible to set this
- // situation up with two TestServers on different ports (these count as having
- // different origins) but Chrome is not strict about this and does not check the
- // port. Instead we cheat making some of the urls come from localhost and some
- // from 127.0.0.1 which count (both in theory and pratice) as having different
- // origins.
- webServer = new TestWebServer(false);
-
// Turn global allow on.
mCookieManager.setAcceptCookie(true);
assertTrue(mCookieManager.acceptCookie());
assertNotNull(cookie);
validateCookies(cookie, "test2");
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@MediumTest
@Feature({"AndroidWebView", "Privacy"})
public void testThirdPartyJavascriptCookie() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
// This test again uses 127.0.0.1/localhost trick to simulate a third party.
- webServer = new TestWebServer(false);
ThirdPartyCookiesTestHelper thirdParty
= new ThirdPartyCookiesTestHelper(webServer);
// ...we can set third party cookies.
thirdParty.assertThirdPartyIFrameCookieResult("2", true);
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@MediumTest
@Feature({"AndroidWebView", "Privacy"})
public void testThirdPartyCookiesArePerWebview() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
mCookieManager.setAcceptCookie(true);
mCookieManager.removeAllCookie();
assertTrue(mCookieManager.acceptCookie());
helperOne.assertThirdPartyIFrameCookieResult("7", true);
helperTwo.assertThirdPartyIFrameCookieResult("8", false);
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
private PageInfo loadFromUrlAndGetTitle(String html, String filename) throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String url = webServer.setResponse(filename, html, null);
loadUrlSync(mAwContents, mContentsClient.getOnPageFinishedHelper(), url);
return new PageInfo(getTitleOnUiThread(mAwContents),
url.replaceAll("http:\\/\\/", ""));
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@SmallTest
@Feature({"AndroidWebView"})
public void testImageLoad() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
webServer.setResponseBase64("/" + CommonResources.FAVICON_FILENAME,
CommonResources.FAVICON_DATA_BASE64, CommonResources.getImagePngHeaders(true));
assertEquals("5", getTitleOnUiThread(mAwContents));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@SmallTest
@Feature({"AndroidWebView"})
public void testScriptLoad() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String scriptUrl = webServer.setResponse(SCRIPT_FILE, SCRIPT_JS,
CommonResources.getTextJavascriptHeaders(true));
final String pageHtml = getScriptFileTestPageHtml(scriptUrl);
getAwSettingsOnUiThread(mAwContents).setJavaScriptEnabled(true);
loadDataWithBaseUrlSync(pageHtml, "text/html", false, webServer.getBaseUrl(), null);
assertEquals(SCRIPT_LOADED, getTitleOnUiThread(mAwContents));
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@SmallTest
@Feature({"AndroidWebView"})
public void testSameOrigin() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String frameUrl = webServer.setResponse("/" + CommonResources.ABOUT_FILENAME,
CommonResources.ABOUT_HTML, CommonResources.getTextHtmlHeaders(true));
final String html = getCrossOriginAccessTestPageHtml(frameUrl);
getAwSettingsOnUiThread(mAwContents).setJavaScriptEnabled(true);
loadDataWithBaseUrlSync(html, "text/html", false, webServer.getBaseUrl(), null);
assertEquals(frameUrl, getTitleOnUiThread(mAwContents));
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@SmallTest
@Feature({"AndroidWebView"})
public void testCrossOrigin() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String frameUrl = webServer.setResponse("/" + CommonResources.ABOUT_FILENAME,
CommonResources.ABOUT_HTML, CommonResources.getTextHtmlHeaders(true));
final String html = getCrossOriginAccessTestPageHtml(frameUrl);
loadDataWithBaseUrlSync(html, "text/html", false, baseUrl, null);
assertEquals("Exception", getTitleOnUiThread(mAwContents));
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
*/
@DisabledTest
public void testHistoryUrlNavigation() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String historyUrl = webServer.setResponse("/" + CommonResources.ABOUT_FILENAME,
CommonResources.ABOUT_HTML, CommonResources.getTextHtmlHeaders(true));
HistoryUtils.goBackSync(getInstrumentation(), mWebContents, onPageFinishedHelper);
// The title of the 'about.html' specified via historyUrl.
assertEquals(CommonResources.ABOUT_TITLE, getTitleOnUiThread(mAwContents));
-
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(contentsClient);
final AwContents awContents = testContainerView.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String imagePath = "/" + CommonResources.FAVICON_FILENAME;
webServer.setResponseBase64(imagePath,
CommonResources.FAVICON_DATA_BASE64, CommonResources.getImagePngHeaders(true));
// Verify that extra headers are only passed for the main resource.
validateNoRequestHeaders(extraHeaders, webServer.getLastRequest(imagePath));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(contentsClient);
final AwContents awContents = testContainerView.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String path = "/no_overriding_of_existing_headers_test.html";
final String url = webServer.setResponse(
path,
assertTrue(header.getValue().length() > 0);
assertFalse(extraHeaders[1].equals(header.getValue()));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(contentsClient);
final AwContents awContents = testContainerView.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String path = "/reload_with_extra_headers_test.html";
final String url = webServer.setResponse(path,
"<html><body>foo</body></html>",
assertEquals(2, webServer.getRequestCount(path));
validateRequestHeaders(extraHeaders, webServer.getLastRequest(path));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
createAwTestContainerViewOnMainSync(contentsClient);
final AwContents awContents = testContainerView.getAwContents();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String path = "/redirect_and_reload_with_extra_headers_test.html";
final String url = webServer.setResponse(path,
"<html><body>foo</body></html>",
// No extra headers. This is consistent with legacy behavior.
validateNoRequestHeaders(extraHeaders, webServer.getLastRequest(path));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
final AwSettings settings = getAwSettingsOnUiThread(awContents);
settings.setJavaScriptEnabled(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String nextPath = "/next.html";
final String nextUrl = webServer.setResponse(nextPath,
"<html><body>Next!</body></html>",
assertEquals(2, webServer.getRequestCount(path));
validateRequestHeaders(extraHeaders, webServer.getLastRequest(path));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
@Override
protected void setUp() throws Exception {
super.setUp();
- mTestWebServer = new TestWebServer(false);
+ mTestWebServer = TestWebServer.start();
mWebRTCPage = mTestWebServer.setResponse("/WebRTC", DATA,
CommonResources.getTextHtmlHeaders(true));
}
final AwTestContainerView testContainerView =
createAwTestContainerViewOnMainSync(mContentsClient);
mAwContents = testContainerView.getAwContents();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
super.setUp();
mVars = createNewView();
mUrls = new String[NUM_NAVIGATIONS];
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
mContentsClient = new TestAwContentsClient();
mTestView = createAwTestContainerViewOnMainSync(mContentsClient);
mAwContents = mTestView.getAwContents();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
private TestWebServer mTestWebServer;
public VideoTestWebServer(Context context) throws Exception {
- mTestWebServer = new TestWebServer(false);
+ mTestWebServer = TestWebServer.start();
List<Pair<String, String>> headers = getWebmHeaders(true);
mOnePixelOneFrameWebmURL = mTestWebServer.setResponseBase64("/" +
ONE_PIXEL_ONE_FRAME_WEBM_FILENAME,
-LASTCHANGE=4e78cd026ad6b9158f6a29bc15606c6a25d5cb2e
+LASTCHANGE=88fc58a654d73e2df3dffc946077486c450f3bdb
*/
@SmallTest
public void testAddStylesheetToTransitionCalled() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
- final String url2 = webServer.setResponse(URL_2, URL_2_DATA, null);
- ContentShellActivity activity = launchContentShellWithUrl(url2);
- waitForActiveShellToBeDoneLoading();
- ContentViewCore contentViewCore = activity.getActiveContentViewCore();
- TestCallbackHelperContainer testCallbackHelperContainer =
- new TestCallbackHelperContainer(contentViewCore);
- contentViewCore.getWebContents().setHasPendingNavigationTransitionForTesting();
- TestNavigationTransitionDelegate delegate =
- new TestNavigationTransitionDelegate(contentViewCore, true);
- contentViewCore.getWebContents().setNavigationTransitionDelegate(delegate);
-
- int currentCallCount = testCallbackHelperContainer
- .getOnPageFinishedHelper().getCallCount();
- String[] headers = {
- "link",
- "<transition0.css>;rel=transition-entering-stylesheet;scope=*",
- "link",
- "<transition1.css>;rel=transition-entering-stylesheet;scope=*",
- "link",
- "<transition2.css>;rel=transition-entering-stylesheet;scope=*"
- };
- final String url3 = webServer.setResponse(URL_3,
- URL_3_DATA,
- createHeadersList(headers));
- LoadUrlParams url3_params = new LoadUrlParams(url3);
- loadUrl(contentViewCore, testCallbackHelperContainer, url3_params);
- testCallbackHelperContainer.getOnPageFinishedHelper().waitForCallback(
- currentCallCount,
- 1,
- 10000,
- TimeUnit.MILLISECONDS);
-
- assertTrue("addStylesheetToTransition called.",
- delegate.getDidCallAddStylesheet());
- assertTrue("Three stylesheets are added",
- delegate.getTransitionStylesheets().size() == 3);
+ final String url2 = webServer.setResponse(URL_2, URL_2_DATA, null);
+ ContentShellActivity activity = launchContentShellWithUrl(url2);
+ waitForActiveShellToBeDoneLoading();
+ ContentViewCore contentViewCore = activity.getActiveContentViewCore();
+ TestCallbackHelperContainer testCallbackHelperContainer =
+ new TestCallbackHelperContainer(contentViewCore);
+ contentViewCore.getWebContents().setHasPendingNavigationTransitionForTesting();
+ TestNavigationTransitionDelegate delegate =
+ new TestNavigationTransitionDelegate(contentViewCore, true);
+ contentViewCore.getWebContents().setNavigationTransitionDelegate(delegate);
+
+ int currentCallCount = testCallbackHelperContainer
+ .getOnPageFinishedHelper().getCallCount();
+ String[] headers = {
+ "link",
+ "<transition0.css>;rel=transition-entering-stylesheet;scope=*",
+ "link",
+ "<transition1.css>;rel=transition-entering-stylesheet;scope=*",
+ "link",
+ "<transition2.css>;rel=transition-entering-stylesheet;scope=*"
+ };
+ final String url3 = webServer.setResponse(URL_3,
+ URL_3_DATA,
+ createHeadersList(headers));
+ LoadUrlParams url3_params = new LoadUrlParams(url3);
+ loadUrl(contentViewCore, testCallbackHelperContainer, url3_params);
+ testCallbackHelperContainer.getOnPageFinishedHelper().waitForCallback(
+ currentCallCount,
+ 1,
+ 10000,
+ TimeUnit.MILLISECONDS);
+
+ assertTrue("addStylesheetToTransition called.",
+ delegate.getDidCallAddStylesheet());
+ assertTrue("Three stylesheets are added",
+ delegate.getTransitionStylesheets().size() == 3);
} finally {
- if (webServer != null)
webServer.shutdown();
}
}
*/
@SmallTest
public void testAddStylesheetToTransitionNotCalled() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
- final String url2 = webServer.setResponse(URL_2, URL_2_DATA, null);
- ContentShellActivity activity = launchContentShellWithUrl(url2);
- waitForActiveShellToBeDoneLoading();
- ContentViewCore contentViewCore = activity.getActiveContentViewCore();
- TestCallbackHelperContainer testCallbackHelperContainer =
- new TestCallbackHelperContainer(contentViewCore);
- contentViewCore.getWebContents().setHasPendingNavigationTransitionForTesting();
- TestNavigationTransitionDelegate delegate =
- new TestNavigationTransitionDelegate(contentViewCore, true);
- contentViewCore.getWebContents().setNavigationTransitionDelegate(delegate);
-
- int currentCallCount = testCallbackHelperContainer
- .getOnPageFinishedHelper().getCallCount();
- final String url3 = webServer.setResponse(URL_3, URL_3_DATA, null);
- LoadUrlParams url3_params = new LoadUrlParams(url3);
- loadUrl(contentViewCore, testCallbackHelperContainer, url3_params);
- testCallbackHelperContainer.getOnPageFinishedHelper().waitForCallback(
- currentCallCount,
- 1,
- 10000,
- TimeUnit.MILLISECONDS);
-
- assertFalse("addStylesheetToTransition is not called.",
- delegate.getDidCallAddStylesheet());
- assertTrue("No stylesheets are added",
- delegate.getTransitionStylesheets().size() == 0);
+ final String url2 = webServer.setResponse(URL_2, URL_2_DATA, null);
+ ContentShellActivity activity = launchContentShellWithUrl(url2);
+ waitForActiveShellToBeDoneLoading();
+ ContentViewCore contentViewCore = activity.getActiveContentViewCore();
+ TestCallbackHelperContainer testCallbackHelperContainer =
+ new TestCallbackHelperContainer(contentViewCore);
+ contentViewCore.getWebContents().setHasPendingNavigationTransitionForTesting();
+ TestNavigationTransitionDelegate delegate =
+ new TestNavigationTransitionDelegate(contentViewCore, true);
+ contentViewCore.getWebContents().setNavigationTransitionDelegate(delegate);
+
+ int currentCallCount = testCallbackHelperContainer
+ .getOnPageFinishedHelper().getCallCount();
+ final String url3 = webServer.setResponse(URL_3, URL_3_DATA, null);
+ LoadUrlParams url3_params = new LoadUrlParams(url3);
+ loadUrl(contentViewCore, testCallbackHelperContainer, url3_params);
+ testCallbackHelperContainer.getOnPageFinishedHelper().waitForCallback(
+ currentCallCount,
+ 1,
+ 10000,
+ TimeUnit.MILLISECONDS);
+
+ assertFalse("addStylesheetToTransition is not called.",
+ delegate.getDidCallAddStylesheet());
+ assertTrue("No stylesheets are added",
+ delegate.getTransitionStylesheets().size() == 0);
} finally {
- if (webServer != null)
webServer.shutdown();
}
}
*/
@SmallTest
public void testParseTransitionEnteringColor() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
-
final String url2 = webServer.setResponse(URL_2, URL_2_DATA, null);
ContentShellActivity activity = launchContentShellWithUrl(url2);
waitForActiveShellToBeDoneLoading();
delegate.getTransitionEnteringColor(),
transitionEnteringColor));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
*
* Based heavily on the CTSWebServer in Android.
*/
-public final class TestWebServer {
+public class TestWebServer {
private static final String TAG = "TestWebServer";
public static final String SHUTDOWN_PREFIX = "/shutdown";
private final ServerThread mServerThread;
private String mServerUri;
private final boolean mSsl;
+ private final int mPort;
private static class Response {
final byte[] mResponseData;
/**
* Create and start a local HTTP server instance.
+ * @param port Port number the server must use, or 0 to automatically choose a free port.
* @param ssl True if the server should be using secure sockets.
* @throws Exception
*/
- public TestWebServer(boolean ssl) throws Exception {
+ private TestWebServer(int port, boolean ssl) throws Exception {
+ mPort = port;
+
mSsl = ssl;
if (mSsl) {
mServerUri = "https:";
}
}
- setInstance(this, mSsl);
- mServerThread = new ServerThread(this, mSsl);
- mServerThread.start();
+ mServerThread = new ServerThread(this, mPort, mSsl);
mServerUri += "//localhost:" + mServerThread.mSocket.getLocalPort();
}
+ public static TestWebServer start(int port) throws Exception {
+ if (sInstance != null) {
+ throw new IllegalStateException("Tried to start multiple TestWebServers");
+ }
+
+ TestWebServer server = new TestWebServer(port, false);
+ server.mServerThread.start();
+ setInstance(server);
+ return server;
+ }
+
+ public static TestWebServer start() throws Exception {
+ return start(0);
+ }
+
+ public static TestWebServer startSsl(int port) throws Exception {
+ if (sSecureInstance != null) {
+ throw new IllegalStateException("Tried to start multiple SSL TestWebServers");
+ }
+
+ TestWebServer server = new TestWebServer(port, true);
+ server.mServerThread.start();
+ setSecureInstance(server);
+ return server;
+ }
+
+ public static TestWebServer startSsl() throws Exception {
+ return startSsl(0);
+ }
+
/**
* Terminate the http server.
*/
public void shutdown() {
+ if (mSsl) {
+ setSecureInstance(null);
+ } else {
+ setInstance(null);
+ }
+
try {
// Avoid a deadlock between two threads where one is trying to call
// close() and the other one is calling accept() by sending a GET
} catch (KeyManagementException e) {
throw new IllegalStateException(e);
}
+ }
- setInstance(null, mSsl);
+ // Setting static variables from instance methods causes findbugs warnings. Calling static
+ // methods which set static variables from instance methods isn't any better, but it silences
+ // the warnings.
+ private static void setInstance(TestWebServer instance) {
+ sInstance = instance;
}
- private static void setInstance(TestWebServer instance, boolean isSsl) {
- if (isSsl) {
- sSecureInstance = instance;
- } else {
- sInstance = instance;
- }
+ private static void setSecureInstance(TestWebServer instance) {
+ sSecureInstance = instance;
}
private static final int RESPONSE_STATUS_NORMAL = 0;
}
- public ServerThread(TestWebServer server, boolean ssl) throws Exception {
+ public ServerThread(TestWebServer server, int port, boolean ssl) throws Exception {
super("ServerThread");
mServer = server;
mIsSsl = ssl;
if (mIsSsl) {
mSslContext = SSLContext.getInstance("TLS");
mSslContext.init(getKeyManagers(), null, null);
- mSocket = mSslContext.getServerSocketFactory().createServerSocket(0);
+ mSocket = mSslContext.getServerSocketFactory().createServerSocket(port);
} else {
- mSocket = new ServerSocket(0);
+ mSocket = new ServerSocket(port);
}
return;
} catch (IOException e) {
#include "ozone/platform/ozone_platform_wayland.h"
+#include "base/at_exit.h"
+#include "base/bind.h"
#include "ozone/ui/cursor/cursor_factory_ozone_wayland.h"
#if defined(TOOLKIT_VIEWS) && !defined(OS_CHROMEOS)
#include "ozone/ui/desktop_aura/desktop_factory_wayland.h"
class OzonePlatformWayland : public OzonePlatform {
public:
OzonePlatformWayland() {
+ base::AtExitManager::RegisterTask(
+ base::Bind(&base::DeletePointer<OzonePlatformWayland>, this));
}
virtual ~OzonePlatformWayland() {
}
WaylandDisplayPollThread::~WaylandDisplayPollThread() {
- DCHECK(!polling_.IsSignaled());
- Stop();
+ StopProcessingEvents();
}
void WaylandDisplayPollThread::StartProcessingEvents() {
void WaylandDisplayPollThread::StopProcessingEvents() {
if (polling_.IsSignaled())
stop_polling_.Signal();
+
+ Stop();
+}
+
+void WaylandDisplayPollThread::CleanUp() {
+ SetThreadWasQuitProperly(true);
}
void WaylandDisplayPollThread::DisplayRun(WaylandDisplayPollThread* data) {
class WaylandDisplayPollThread : public base::Thread {
public:
explicit WaylandDisplayPollThread(wl_display* display);
- virtual ~WaylandDisplayPollThread();
+ ~WaylandDisplayPollThread() override;
// Starts polling on wl_display fd and read/flush requests coming from Wayland
// compositor.
void StartProcessingEvents();
// Stops polling and handling of any events from Wayland compositor.
void StopProcessingEvents();
+
+ protected:
+ void CleanUp() override;
+
private:
static void DisplayRun(WaylandDisplayPollThread* data);
base::WaitableEvent polling_; // Is set as long as the thread is polling.
WaylandPointer::WaylandPointer()
: cursor_(NULL),
dispatcher_(NULL),
- pointer_position_(0, 0) {
+ pointer_position_(0, 0),
+ input_pointer_(NULL) {
}
WaylandPointer::~WaylandPointer() {
delete cursor_;
+ if (input_pointer_)
+ wl_pointer_destroy(input_pointer_);
}
void WaylandPointer::OnSeatCapabilities(wl_seat *seat, uint32_t caps) {
dispatcher_ = ui::EventFactoryOzoneWayland::GetInstance()->EventConverter();
if ((caps & WL_SEAT_CAPABILITY_POINTER) && !cursor_->GetInputPointer()) {
- wl_pointer* input_pointer = wl_seat_get_pointer(seat);
- cursor_->SetInputPointer(input_pointer);
- wl_pointer_set_user_data(input_pointer, this);
- wl_pointer_add_listener(input_pointer, &kInputPointerListener, this);
+ input_pointer_ = wl_seat_get_pointer(seat);
+ cursor_->SetInputPointer(input_pointer_);
+ wl_pointer_set_user_data(input_pointer_, this);
+ wl_pointer_add_listener(input_pointer_, &kInputPointerListener, this);
} else if (!(caps & WL_SEAT_CAPABILITY_POINTER)
&& cursor_->GetInputPointer()) {
cursor_->SetInputPointer(NULL);
// dispatch this with events such as wheel or button which don't have a
// position associated on Wayland.
gfx::Point pointer_position_;
+ struct wl_pointer *input_pointer_;
DISALLOW_COPY_AND_ASSIGN(WaylandPointer);
};
WaylandTouchscreen::WaylandTouchscreen()
: dispatcher_(NULL),
- pointer_position_(0, 0) {
+ pointer_position_(0, 0),
+ wl_touch_(NULL) {
}
WaylandTouchscreen::~WaylandTouchscreen() {
+ if (wl_touch_)
+ wl_touch_destroy(wl_touch_);
}
void WaylandTouchscreen::OnSeatCapabilities(wl_seat *seat, uint32_t caps) {
dispatcher_ = ui::EventFactoryOzoneWayland::GetInstance()->EventConverter();
if ((caps & WL_SEAT_CAPABILITY_TOUCH)) {
- wl_touch* input_touch = wl_seat_get_touch(seat);
- wl_touch_set_user_data(input_touch, this);
- wl_touch_add_listener(input_touch, &kInputTouchListener, this);
+ wl_touch_ = wl_seat_get_touch(seat);
+ wl_touch_set_user_data(wl_touch_, this);
+ wl_touch_add_listener(wl_touch_, &kInputTouchListener, this);
}
}
ui::EventConverterOzoneWayland* dispatcher_;
gfx::Point pointer_position_;
+ struct wl_touch* wl_touch_;
DISALLOW_COPY_AND_ASSIGN(WaylandTouchscreen);
};
device->input_touch_ = new WaylandTouchscreen();
device->input_touch_->OnSeatCapabilities(seat, caps);
} else if (!(caps & WL_SEAT_CAPABILITY_TOUCH) && device->input_touch_) {
- device->input_touch_->OnSeatCapabilities(seat, caps);
delete device->input_touch_;
device->input_touch_ = NULL;
}
bool IsFloat32Array() const;
/**
+ * Returns true if this value is a Float32x4Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat32x4Array() const;
+
+ /**
+ * Returns true if this value is a Float64x2Array.
+ * This is an experimental feature.
+ */
+ bool IsFloat64x2Array() const;
+
+ /**
+ * Returns true if this value is a Int32x4Array.
+ * This is an experimental feature.
+ */
+ bool IsInt32x4Array() const;
+
+ /**
* Returns true if this value is a Float64Array.
* This is an experimental feature.
*/
kExternalInt16Array,
kExternalUint16Array,
kExternalInt32Array,
+ kExternalInt32x4Array,
kExternalUint32Array,
kExternalFloat32Array,
+ kExternalFloat32x4Array,
+ kExternalFloat64x2Array,
kExternalFloat64Array,
kExternalUint8ClampedArray,
};
+class V8_EXPORT Float32x4Array : public TypedArray {
+ public:
+ static Local<Float32x4Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float32x4Array* Cast(Value* obj);
+
+ private:
+ Float32x4Array();
+ static void CheckCast(Value* obj);
+};
+
+
+class V8_EXPORT Float64x2Array : public TypedArray {
+ public:
+ static Local<Float64x2Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float64x2Array* Cast(Value* obj);
+
+ private:
+ Float64x2Array();
+ static void CheckCast(Value* obj);
+};
+
+
+class V8_EXPORT Int32x4Array : public TypedArray {
+ public:
+ static Local<Int32x4Array> New(Handle<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int32x4Array* Cast(Value* obj);
+
+ private:
+ Int32x4Array();
+ static void CheckCast(Value* obj);
+};
+
+
/**
* An instance of Float64Array constructor (ES6 draft 15.13.6).
* This API is experimental and may change significantly.
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 95;
+ static const int kContextEmbedderDataIndex = 108;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 164;
+ static const int kEmptyStringRootIndex = 176;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xbc;
+ static const int kJSObjectType = 0xc2;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x88;
}
+Float32x4Array* Float32x4Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float32x4Array*>(value);
+}
+
+
+Float64x2Array* Float64x2Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float64x2Array*>(value);
+}
+
+
+Int32x4Array* Int32x4Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int32x4Array*>(value);
+}
+
+
Float64Array* Float64Array::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
V(Uint32Array, JSTypedArray) \
V(Int32Array, JSTypedArray) \
V(Float32Array, JSTypedArray) \
+ V(Float32x4Array, JSTypedArray) \
+ V(Float64x2Array, JSTypedArray) \
+ V(Int32x4Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(Name, Name) \
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float32Array> ToLocalFloat32Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float32x4Array> ToLocalFloat32x4Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float64x2Array> ToLocalFloat64x2Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int32x4Array> ToLocalInt32x4Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return false; }
int Register::NumAllocatableRegisters() {
return r;
}
+ static int ToAllocationIndex(QwNeonRegister reg) {
+ DCHECK(reg.code() < kMaxNumRegisters);
+ return reg.code();
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ DCHECK(index >= 0 && index < kMaxNumRegisters);
+ const char* const names[] = {
+ "q0",
+ "q1",
+ "q2",
+ "q3",
+ "q4",
+ "q5",
+ "q6",
+ "q7",
+ "q8",
+ "q9",
+ "q10",
+ "q11",
+ "q12",
+ "q13",
+ "q14",
+ "q15",
+ };
+ return names[index];
+ }
+
bool is_valid() const {
return (0 <= code_) && (code_ < kMaxNumRegisters);
}
typedef QwNeonRegister QuadRegister;
+typedef QwNeonRegister SIMD128Register;
// Support for the VFP registers s0 to s31 (d0 to d15).
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
+ int double_regs_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
+ int src_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
}
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+ DCHECK(n < 2 * arraysize(simd128_registers_));
+ return simd128_registers_[n / 2].d[n % 2];
+}
+
+
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+ DCHECK(n < 2 * arraysize(simd128_registers_));
+ simd128_registers_[n / 2].d[n % 2] = value;
+}
+
+
#undef __
} } // namespace v8::internal
}
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+ HNullarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(
+ HUnarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+ HBinarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+ HTernarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+ HQuarternarySIMDOperation* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ bool load_128bits_without_neon = IsSIMD128ElementsKind(elements_kind);
+ LOperand* key = load_128bits_without_neon
+ ? UseRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
LInstruction* result = NULL;
if (!instr->is_typed_elements()) {
DCHECK(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
- result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key, NULL, NULL));
} else {
DCHECK(
(instr->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->representation().IsTagged() &&
+ (IsSIMD128ElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+ result = load_128bits_without_neon
+ ? DefineAsRegister(new(zone()) LLoadKeyed(
+ backing_store, key, TempRegister(), TempRegister()))
+ : DefineAsRegister(new(zone()) LLoadKeyed(
+ backing_store, key, NULL, NULL));
+ if (load_128bits_without_neon) {
+ info()->MarkAsDeferredCalling();
+ AssignPointerMap(result);
+ }
}
if ((instr->is_external() || instr->is_fixed_typed_array()) ?
}
}
- return new(zone()) LStoreKeyed(object, key, val);
+ return new(zone()) LStoreKeyed(object, key, val, NULL, NULL);
}
DCHECK(
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(instr->elements_kind())));
+ IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+ (instr->value()->representation().IsTagged() &&
+ IsSIMD128ElementsKind(instr->elements_kind())));
DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
instr->elements()->representation().IsExternal()));
LOperand* val = UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* backing_store = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(backing_store, key, val);
+ bool store_128bits_without_neon =
+ IsSIMD128ElementsKind(instr->elements_kind());
+ LOperand* key = store_128bits_without_neon
+ ? UseRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LStoreKeyed* result =
+ new(zone()) LStoreKeyed(backing_store, key, val,
+ store_128bits_without_neon ? TempRegister() : NULL,
+ store_128bits_without_neon ? TempRegister() : NULL);
+ return store_128bits_without_neon ? AssignEnvironment(result) : result;
}
};
-class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key,
+ LOperand* temp, LOperand* temp2) {
inputs_[0] = elements;
inputs_[1] = key;
+ temps_[0] = temp;
+ temps_[1] = temp2;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
-class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 2> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+ LOperand* temp, LOperand* temp2) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
}
bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
}
+void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
+ Runtime::FunctionId id) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ mov(reg, Operand::Zero());
+
+ PushSafepointRegistersScope scope(this);
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, reg);
+}
+
+
+template<class T>
+void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
+ class DeferredSIMD128ToTagged FINAL : public LDeferredCode {
+ public:
+ DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
+ Runtime::FunctionId id)
+ : LDeferredCode(codegen), instr_(instr), id_(id) { }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
+ private:
+ LInstruction* instr_;
+ Runtime::FunctionId id_;
+ };
+
+ // Allocate a SIMD128 object on the heap.
+ Register reg = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+ Register scratch = scratch0();
+
+ DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
+ this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+ __ jmp(deferred->entry());
+ __ bind(deferred->exit());
+
+ // Copy the SIMD128 value from the external array to the heap object.
+ STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
+ Operand operand = key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size);
+
+ __ add(scratch, external_pointer, operand);
+
+ // Load the inner FixedTypedArray.
+ __ ldr(temp2, MemOperand(reg, T::kValueOffset));
+
+ for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
+ __ ldr(temp, MemOperand(scratch, base_offset + offset));
+ __ str(
+ temp,
+ MemOperand(
+ temp2,
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
+ }
+
+ // Now that we have finished with the object's real address tag it
+ __ add(reg, reg, Operand(kHeapObjectTag));
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), base_offset);
}
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Float64x2>(instr);
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
}
+template<class T>
+void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
+ DCHECK(instr->value()->IsRegister());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
+ Register input_reg = ToRegister(instr->value());
+ __ SmiTst(input_reg);
+ DeoptimizeIf(eq, instr);
+ __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
+ DeoptimizeIf(ne, instr);
+
+ STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
+ Register external_pointer = ToRegister(instr->elements());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort(kArrayIndexConstantValueTooBig);
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int base_offset = instr->base_offset();
+ Register address = scratch0();
+ if (key_is_constant) {
+ if (constant_key != 0) {
+ __ add(address, external_pointer,
+ Operand(constant_key << element_size_shift));
+ } else {
+ address = external_pointer;
+ }
+ } else {
+ __ add(address, external_pointer, Operand(key, LSL, shift_size));
+ }
+
+ // Load the inner FixedTypedArray.
+ __ ldr(temp2, MemOperand(input_reg, T::kValueOffset - kHeapObjectTag));
+
+ for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
+ __ ldr(temp, MemOperand(temp2,
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag + offset));
+ __ str(temp, MemOperand(address, base_offset + offset));
+ }
+}
+
+
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
} else { // Storing doubles, not floats.
__ vstr(value, address, base_offset);
}
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Float64x2>(instr);
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand = PrepareKeyedOperand(
case FLOAT64_ELEMENTS:
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
Register result,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LInstruction* instr, Runtime::FunctionId id);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ template<class T>
+ void DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ template<class T>
+ void DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
}
+// Allocates a simd128 object or jumps to the need_gc label if the young space
+// is full and a scavenge is needed.
+void MacroAssembler::AllocateSIMDHeapObject(int size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register map,
+ Label* gc_required,
+ TaggingMode tagging_mode) {
+ UNREACHABLE(); // NOTIMPLEMENTED
+}
+
+
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
Register scratch2,
Register heap_number_map,
Label* gc_required);
+ void AllocateSIMDHeapObject(int size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register map,
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst,
}
static inline bool SupportsCrankshaft();
+ static inline bool SupportsSIMD128InCrankshaft();
static inline unsigned cache_line_size() {
DCHECK(cache_line_size_ != 0);
Handle<Map>* external_map);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
+ void InstallExperimentalSIMDBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
native_context()->set_##type##_array_fun(*fun); \
native_context()->set_##type##_array_external_map(*external_map); \
}
- TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
+ BUILTIN_TYPED_ARRAY(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
Handle<JSFunction> data_view_fun =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
flag, attributes).Assert();
+
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+ if (FLAG_simd_object) {
+ // --- S I M D ---
+ Handle<String> name = factory()->InternalizeUtf8String("SIMD");
+ Handle<JSFunction> cons = factory()->NewFunction(name);
+ JSFunction::SetInstancePrototype(cons,
+ Handle<Object>(native_context()->initial_object_prototype(),
+ isolate()));
+ cons->SetInstanceClassName(*name);
+ Handle<JSObject> simd_object = factory()->NewJSObject(cons, TENURED);
+ DCHECK(simd_object->IsJSObject());
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ global, name, simd_object, DONT_ENUM).Check();
+ native_context()->set_simd_object(*simd_object);
+ // --- f l o a t 3 2 x 4 ---
+ Handle<JSFunction> float32x4_fun =
+ InstallFunction(simd_object, "float32x4", FLOAT32x4_TYPE,
+ Float32x4::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_float32x4_function(*float32x4_fun);
+
+ // --- f l o a t 6 4 x 2 ---
+ Handle<JSFunction> float64x2_fun =
+ InstallFunction(simd_object, "float64x2", FLOAT64x2_TYPE,
+ Float64x2::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_float64x2_function(*float64x2_fun);
+
+ // --- i n t 3 2 x 4 ---
+ Handle<JSFunction> int32x4_fun =
+ InstallFunction(simd_object, "int32x4", INT32x4_TYPE,
+ Int32x4::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal);
+ native_context()->set_int32x4_function(*int32x4_fun);
+
+ // --- F l o a t 3 2 x 4 A r r a y---
+ Handle<JSFunction> fun;
+ Handle<Map> external_map;
+ InstallTypedArray(
+ "Float32x4Array", FLOAT32x4_ELEMENTS, &fun, &external_map);
+ native_context()->set_float32x4_array_fun(*fun);
+ native_context()->set_float32x4_array_external_map(*external_map);
+
+ // --- F l o a t 6 4 x 2 A r r a y---
+ InstallTypedArray(
+ "Float64x2Array", FLOAT64x2_ELEMENTS, &fun, &external_map);
+ native_context()->set_float64x2_array_fun(*fun);
+ native_context()->set_float64x2_array_external_map(*external_map);
+
+ // --- I n t 3 2 x 4 A r r a y---
+ InstallTypedArray(
+ "Int32x4Array", INT32x4_ELEMENTS, &fun, &external_map);
+ native_context()->set_int32x4_array_fun(*fun);
+ native_context()->set_int32x4_array_external_map(*external_map);
+ }
}
INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
INSTALL_EXPERIMENTAL_NATIVE(i, classes, "harmony-classes.js")
+ if (FLAG_simd_object &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native simd128.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ // Store the map for the float32x4, float64x2 and int32x4 function
+ // prototype after the float32x4 and int32x4 function has been set up.
+ InstallExperimentalSIMDBuiltinFunctionIds();
+ JSObject* float32x4_function_prototype = JSObject::cast(
+ native_context()->float32x4_function()->instance_prototype());
+ native_context()->set_float32x4_function_prototype_map(
+ float32x4_function_prototype->map());
+ JSObject* float64x2_function_prototype = JSObject::cast(
+ native_context()->float64x2_function()->instance_prototype());
+ native_context()->set_float64x2_function_prototype_map(
+ float64x2_function_prototype->map());
+ JSObject* int32x4_function_prototype = JSObject::cast(
+ native_context()->int32x4_function()->instance_prototype());
+ native_context()->set_int32x4_function_prototype_map(
+ int32x4_function_prototype->map());
+ }
}
InstallExperimentalNativeFunctions();
}
+static Handle<JSObject> ResolveBuiltinSIMDIdHolder(
+ Handle<Context> native_context,
+ const char* holder_expr) {
+ Isolate* isolate = native_context->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<GlobalObject> global(native_context->global_object());
+ Handle<Object> holder = global;
+ char* name = const_cast<char*>(holder_expr);
+ char* period_pos = strchr(name, '.');
+ while (period_pos != NULL) {
+ Vector<const char> property(name,
+ static_cast<int>(period_pos - name));
+ Handle<String> property_string = factory->InternalizeUtf8String(property);
+ DCHECK(!property_string.is_null());
+ holder = Object::GetProperty(holder, property_string).ToHandleChecked();
+ if (strcmp(".prototype", period_pos) == 0) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(holder);
+ return Handle<JSObject>(JSObject::cast(function->prototype()));
+ } else {
+ name = period_pos + 1;
+ period_pos = strchr(name, '.');
+ }
+ }
+
+ return Handle<JSObject>::cast(Object::GetPropertyOrElement(
+ holder, factory->InternalizeUtf8String(name)).ToHandleChecked());
+}
+
+
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
}
+// Installs BuiltinFunctionId mappings for the experimental SIMD operations.
+// Each SIMD_*_OPERATIONS list macro supplies (holder path, function name, id)
+// plus extra arity-specific parameters; the wrapper macros below forward the
+// first three arguments to INSTALL_BUILTIN_ID, which resolves the holder via
+// ResolveBuiltinSIMDIdHolder and registers the id on the named function.
+void Genesis::InstallExperimentalSIMDBuiltinFunctionIds() {
+  HandleScope scope(isolate());
+#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
+  { \
+    Handle<JSObject> holder = ResolveBuiltinSIMDIdHolder( \
+        native_context(), #holder_expr); \
+    BuiltinFunctionId id = k##name; \
+    InstallBuiltinFunctionId(holder, #fun_name, id); \
+  }
+  SIMD_ARRAY_OPERATIONS(INSTALL_BUILTIN_ID)
+// Arity adapters: drop the trailing parameters each operation list carries.
+#define INSTALL_SIMD_NULLARY_FUNCTION_ID(p1, p2, p3, p4) \
+  INSTALL_BUILTIN_ID(p1, p2, p3)
+  SIMD_NULLARY_OPERATIONS(INSTALL_SIMD_NULLARY_FUNCTION_ID)
+#undef INSTALL_SIMD_NULLARY_FUNCTION_ID
+#define INSTALL_SIMD_UNARY_FUNCTION_ID(p1, p2, p3, p4, p5) \
+  INSTALL_BUILTIN_ID(p1, p2, p3)
+  SIMD_UNARY_OPERATIONS(INSTALL_SIMD_UNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_UNARY_FUNCTION_ID
+#define INSTALL_SIMD_BINARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6) \
+  INSTALL_BUILTIN_ID(p1, p2, p3)
+  SIMD_BINARY_OPERATIONS(INSTALL_SIMD_BINARY_FUNCTION_ID)
+#undef INSTALL_SIMD_BINARY_FUNCTION_ID
+#define INSTALL_SIMD_TERNARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6, p7) \
+  INSTALL_BUILTIN_ID(p1, p2, p3)
+  SIMD_TERNARY_OPERATIONS(INSTALL_SIMD_TERNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_TERNARY_FUNCTION_ID
+#define INSTALL_SIMD_QUARTERNARY_FUNCTION_ID(p1, p2, p3, p4, p5, p6, p7, p8) \
+  INSTALL_BUILTIN_ID(p1, p2, p3)
+  SIMD_QUARTERNARY_OPERATIONS(INSTALL_SIMD_QUARTERNARY_FUNCTION_ID)
+#undef INSTALL_SIMD_QUARTERNARY_FUNCTION_ID
+#undef INSTALL_BUILTIN_ID
+}
+
+
// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \
isolate->counters()->contexts_created_from_scratch()->Increment();
}
+ InitializeExperimentalGlobal();
// Install experimental natives.
if (!InstallExperimentalNatives()) return;
- InitializeExperimentalGlobal();
// We can't (de-)serialize typed arrays currently, but we are lucky: The state
// of the random number generator needs no initialization during snapshot
return {taggedness, header_size, Type::Number(), kRepFloat32};
case kExternalFloat64Array:
return {taggedness, header_size, Type::Number(), kRepFloat64};
+ case kExternalFloat32x4Array:
+ case kExternalInt32x4Array:
+ case kExternalFloat64x2Array:
+ // TODO(ningxin): fix this workaround.
+ return {kUntaggedBase, 0, Type::None(), kMachNone};
}
UNREACHABLE();
return {kUntaggedBase, 0, Type::None(), kMachNone};
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+ V(FLOAT32x4_FUNCTION_INDEX, JSFunction, float32x4_function) \
+ V(FLOAT32x4_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ float32x4_function_prototype_map) \
+ V(FLOAT64x2_FUNCTION_INDEX, JSFunction, float64x2_function) \
+ V(FLOAT64x2_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ float64x2_function_prototype_map) \
+ V(INT32x4_FUNCTION_INDEX, JSFunction, int32x4_function) \
+ V(INT32x4_FUNCTION_PROTOTYPE_MAP_INDEX, Map, \
+ int32x4_function_prototype_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
+ V(SIMD_OBJECT_INDEX, JSObject, simd_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(FLOAT32x4_ARRAY_FUN_INDEX, JSFunction, float32x4_array_fun) \
+ V(FLOAT64x2_ARRAY_FUN_INDEX, JSFunction, float64x2_array_fun) \
+ V(INT32x4_ARRAY_FUN_INDEX, JSFunction, int32x4_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \
V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \
V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \
V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \
V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \
+ V(FLOAT32x4_ARRAY_EXTERNAL_MAP_INDEX, Map, float32x4_array_external_map) \
+ V(FLOAT64x2_ARRAY_EXTERNAL_MAP_INDEX, Map, float64x2_array_external_map) \
+ V(INT32x4_ARRAY_EXTERNAL_MAP_INDEX, Map, int32x4_array_external_map) \
V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \
uint8_clamped_array_external_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
INITIAL_ARRAY_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
+ FLOAT32x4_FUNCTION_INDEX,
+ FLOAT32x4_FUNCTION_PROTOTYPE_MAP_INDEX,
+ FLOAT64x2_FUNCTION_INDEX,
+ FLOAT64x2_FUNCTION_PROTOTYPE_MAP_INDEX,
+ INT32x4_FUNCTION_INDEX,
+ INT32x4_FUNCTION_PROTOTYPE_MAP_INDEX,
STRING_FUNCTION_INDEX,
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
SYMBOL_FUNCTION_INDEX,
JS_ARRAY_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
+ SIMD_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
CREATE_DATE_FUN_INDEX,
TO_NUMBER_FUN_INDEX,
UINT32_ARRAY_FUN_INDEX,
INT32_ARRAY_FUN_INDEX,
FLOAT32_ARRAY_FUN_INDEX,
+ FLOAT32x4_ARRAY_FUN_INDEX,
+ FLOAT64x2_ARRAY_FUN_INDEX,
+ INT32x4_ARRAY_FUN_INDEX,
FLOAT64_ARRAY_FUN_INDEX,
UINT8_CLAMPED_ARRAY_FUN_INDEX,
INT8_ARRAY_EXTERNAL_MAP_INDEX,
INT32_ARRAY_EXTERNAL_MAP_INDEX,
UINT32_ARRAY_EXTERNAL_MAP_INDEX,
FLOAT32_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT32x4_ARRAY_EXTERNAL_MAP_INDEX,
+ FLOAT64x2_ARRAY_EXTERNAL_MAP_INDEX,
+ INT32x4_ARRAY_EXTERNAL_MAP_INDEX,
FLOAT64_ARRAY_EXTERNAL_MAP_INDEX,
UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX,
DATA_VIEW_FUN_INDEX,
static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float32x4Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Float64x2Array(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Int32x4Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint8ClampedArray(
const v8::FunctionCallbackInfo<v8::Value>& args);
}
// Copy the double registers from the input into the output frame.
- CopyDoubleRegisters(output_frame);
+ CopySIMD128Registers(output_frame);
// Fill registers containing handler and number of parameters.
SetPlatformCompiledStubRegisters(output_frame, &descriptor);
Memory::Object_at(d.destination()) = *num;
}
+ // Materialize all float32x4 before looking at arguments because when the
+ // output frames are used to materialize arguments objects later on they need
+ // to already contain valid float32x4 values.
+ for (int i = 0; i < deferred_float32x4s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_float32x4s_[i];
+ float32x4_value_t x4 = d.value().f4;
+ Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float32x4 %p "
+ "[float32x4(%e, %e, %e, %e)] in slot %p\n",
+ reinterpret_cast<void*>(*float32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *float32x4;
+ }
+
+ // Materialize all float64x2 before looking at arguments because when the
+ // output frames are used to materialize arguments objects later on they need
+ // to already contain valid float64x2 values.
+ for (int i = 0; i < deferred_float64x2s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_float64x2s_[i];
+ float64x2_value_t x2 = d.value().d2;
+ Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float64x2 %p "
+ "[float64x2(%e, %e)] in slot %p\n",
+ reinterpret_cast<void*>(*float64x2),
+ x2.storage[0], x2.storage[1],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *float64x2;
+ }
+
+ // Materialize all int32x4 before looking at arguments because when the
+ // output frames are used to materialize arguments objects later on they need
+ // to already contain valid int32x4 values.
+ for (int i = 0; i < deferred_int32x4s_.length(); i++) {
+ SIMD128MaterializationDescriptor<Address> d = deferred_int32x4s_[i];
+ int32x4_value_t x4 = d.value().i4;
+ Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new int32x4 %p "
+ "[int32x4(%u, %u, %u, %u)] in slot %p\n",
+ reinterpret_cast<void*>(*int32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ Memory::Object_at(d.destination()) = *int32x4;
+ }
+
+
// Materialize all heap numbers required for arguments/captured objects.
for (int i = 0; i < deferred_objects_double_values_.length(); i++) {
HeapNumberMaterializationDescriptor<int> d =
// Play it safe and clear all object double values before we continue.
deferred_objects_double_values_.Clear();
+ // Materialize all float32x4 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_float32x4_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_float32x4_values_[i];
+ float32x4_value_t x4 = d.value().f4;
+ Handle<Object> float32x4 = isolate_->factory()->NewFloat32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float32x4 %p "
+ "[float32x4(%e, %e, %e, %e)] for object at %d\n",
+ reinterpret_cast<void*>(*float32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ DCHECK(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), float32x4);
+ }
+
+ // Play it safe and clear all object float32x4 values before we continue.
+ deferred_objects_float32x4_values_.Clear();
+
+ // Materialize all float64x2 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_float64x2_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_float64x2_values_[i];
+ float64x2_value_t x2 = d.value().d2;
+ Handle<Object> float64x2 = isolate_->factory()->NewFloat64x2(x2);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new float64x2 %p "
+ "[float64x2(%e, %e)] for object at %d\n",
+ reinterpret_cast<void*>(*float64x2),
+ x2.storage[0], x2.storage[1],
+ d.destination());
+ }
+ DCHECK(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), float64x2);
+ }
+
+ // Play it safe and clear all object float64x2 values before we continue.
+ deferred_objects_float64x2_values_.Clear();
+
+ // Materialize all int32x4 values required for arguments/captured objects.
+ for (int i = 0; i < deferred_objects_int32x4_values_.length(); i++) {
+ SIMD128MaterializationDescriptor<int> d =
+ deferred_objects_int32x4_values_[i];
+ int32x4_value_t x4 = d.value().i4;
+ Handle<Object> int32x4 = isolate_->factory()->NewInt32x4(x4);
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(),
+ "Materialized a new int32x4 %p "
+ "[int32x4(%u, %u, %u, %u)] for object at %d\n",
+ reinterpret_cast<void*>(*int32x4),
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ d.destination());
+ }
+ DCHECK(values.at(d.destination())->IsTheHole());
+ values.Set(d.destination(), int32x4);
+ }
+
+ // Play it safe and clear all object int32x4 values before we continue.
+ deferred_objects_int32x4_values_.Clear();
+
// Materialize arguments/captured objects.
if (!deferred_objects_.is_empty()) {
List<Handle<Object> > materialized_objects(deferred_objects_.length());
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT:
case Translation::LITERAL: {
// The value is not part of any materialized object, so we can ignore it.
iterator->Skip(Translation::NumberOfOperandsFor(opcode));
return;
}
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER: {
+ int input_reg = iterator->Next();
+ simd128_value_t value = input_->GetSIMD128Register(input_reg);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_REGISTER) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float32x4(%e, %e, %e, %e) ; %s\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float64x2(%e, %e) ; %s\n",
+ x2.storage[0], x2.storage[1],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else {
+ DCHECK(opcode == Translation::INT32x4_REGISTER);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "int32x4(%u, %u, %u, %u) ; %s\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ }
+ }
+ AddObjectSIMD128Value(value, opcode);
+ return;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "float64x2(%e, %e) ; [sp + %d]\n",
+ x2.storage[0], x2.storage[1],
+ input_offset);
+ } else {
+ DCHECK(opcode == Translation::INT32x4_STACK_SLOT);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " object @0x%08" V8PRIxPTR ": [field #%d] <- ",
+ reinterpret_cast<intptr_t>(object_slot),
+ field_index);
+ PrintF(trace_scope_->file(),
+ "int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ }
+ }
+ AddObjectSIMD128Value(value, opcode);
+ return;
+ }
+
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
return;
}
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER: {
+ int input_reg = iterator->Next();
+ simd128_value_t value = input_->GetSIMD128Register(input_reg);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_REGISTER) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- float32x4(%e, %e, %e, %e) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else if (opcode == Translation::FLOAT64x2_REGISTER) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- float64x2(%e, %e) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x2.storage[0], x2.storage[1],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ } else {
+ DCHECK(opcode == Translation::INT32x4_REGISTER);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ":"
+ " [top + %d] <- int32x4(%u, %u, %u, %u) ; %s\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ SIMD128Register::AllocationIndexToString(input_reg));
+ }
+ }
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
+ opcode);
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ return;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
return;
}
+ case Translation::FLOAT32x4_STACK_SLOT:
+ case Translation::FLOAT64x2_STACK_SLOT:
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator->Next();
+ unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index);
+ simd128_value_t value = input_->GetSIMD128FrameSlot(input_offset);
+ if (trace_scope_ != NULL) {
+ if (opcode == Translation::FLOAT32x4_STACK_SLOT) {
+ float32x4_value_t x4 = value.f4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- float32x4(%e, %e, %e, %e) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ } else if (opcode == Translation::FLOAT64x2_STACK_SLOT) {
+ float64x2_value_t x2 = value.d2;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- float64x2(%e, %e) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x2.storage[0], x2.storage[1],
+ input_offset);
+ } else {
+ DCHECK(opcode == Translation::INT32x4_STACK_SLOT);
+ int32x4_value_t x4 = value.i4;
+ PrintF(trace_scope_->file(),
+ " 0x%08" V8PRIxPTR ": "
+ "[top + %d] <- int32x4(%u, %u, %u, %u) ; [sp + %d]\n",
+ output_[frame_index]->GetTop() + output_offset,
+ output_offset,
+ x4.storage[0], x4.storage[1], x4.storage[2], x4.storage[3],
+ input_offset);
+ }
+ }
+ // We save the untagged value on the side and store a GC-safe
+ // temporary placeholder in the frame.
+ AddSIMD128Value(output_[frame_index]->GetTop() + output_offset, value,
+ opcode);
+ output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+ return;
+ }
+
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
if (trace_scope_ != NULL) {
}
+// Records a SIMD128 value belonging to a materialized object.  A hole
+// placeholder is appended to the tagged-values list, and the untagged value
+// is queued in the per-lane-type deferred list (chosen from the translation
+// opcode) so it can be materialized into a heap object later, at the slot
+// index the hole occupies.
+void Deoptimizer::AddObjectSIMD128Value(simd128_value_t value,
+                                        int translation_opcode) {
+  deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value());
+  SIMD128MaterializationDescriptor<int> value_desc(
+      deferred_objects_tagged_values_.length() - 1, value);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(translation_opcode);
+  if (opcode == Translation::FLOAT32x4_REGISTER ||
+      opcode == Translation::FLOAT32x4_STACK_SLOT) {
+    deferred_objects_float32x4_values_.Add(value_desc);
+  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+             opcode == Translation::FLOAT64x2_STACK_SLOT) {
+    deferred_objects_float64x2_values_.Add(value_desc);
+  } else {
+    // Only the three SIMD128 opcode pairs may reach this function.
+    DCHECK(opcode == Translation::INT32x4_REGISTER ||
+           opcode == Translation::INT32x4_STACK_SLOT);
+    deferred_objects_int32x4_values_.Add(value_desc);
+  }
+}
+
+
void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
HeapNumberMaterializationDescriptor<Address> value_desc(
reinterpret_cast<Address>(slot_address), value);
}
+// Records an untagged SIMD128 value destined for a frame slot.  The value is
+// queued in the deferred list matching its lane type (derived from the
+// translation opcode); materialization into a heap object written to
+// slot_address happens later, after the output frames are complete.
+void Deoptimizer::AddSIMD128Value(intptr_t slot_address,
+                                  simd128_value_t value,
+                                  int translation_opcode) {
+  SIMD128MaterializationDescriptor<Address> value_desc(
+      reinterpret_cast<Address>(slot_address), value);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(translation_opcode);
+  if (opcode == Translation::FLOAT32x4_REGISTER ||
+      opcode == Translation::FLOAT32x4_STACK_SLOT) {
+    deferred_float32x4s_.Add(value_desc);
+  } else if (opcode == Translation::FLOAT64x2_REGISTER ||
+             opcode == Translation::FLOAT64x2_STACK_SLOT) {
+    deferred_float64x2s_.Add(value_desc);
+  } else {
+    // Only the three SIMD128 opcode pairs may reach this function.
+    DCHECK(opcode == Translation::INT32x4_REGISTER ||
+           opcode == Translation::INT32x4_STACK_SLOT);
+    deferred_int32x4s_.Add(value_desc);
+  }
+}
+
+
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
}
+// Emits a SIMD128 register translation record: the caller-supplied opcode
+// (one of the *_REGISTER SIMD opcodes) followed by the register's
+// allocation index.
+void Translation::StoreSIMD128Register(SIMD128Register reg, Opcode opcode) {
+  buffer_->Add(opcode, zone());
+  buffer_->Add(SIMD128Register::ToAllocationIndex(reg), zone());
+}
+
+
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT, zone());
buffer_->Add(index, zone());
}
+// Emits a SIMD128 stack-slot translation record: the caller-supplied opcode
+// (one of the *_STACK_SLOT SIMD opcodes) followed by the slot index.
+void Translation::StoreSIMD128StackSlot(int index, Opcode opcode) {
+  buffer_->Add(opcode, zone());
+  buffer_->Add(index, zone());
+}
+
+
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL, zone());
buffer_->Add(literal_id, zone());
case INT32_REGISTER:
case UINT32_REGISTER:
case DOUBLE_REGISTER:
+ case FLOAT32x4_REGISTER:
+ case FLOAT64x2_REGISTER:
+ case INT32x4_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
+ case FLOAT32x4_STACK_SLOT:
+ case FLOAT64x2_STACK_SLOT:
+ case INT32x4_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
return 1;
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::DOUBLE_REGISTER:
+ case Translation::FLOAT32x4_REGISTER:
+ case Translation::FLOAT64x2_REGISTER:
+ case Translation::INT32x4_REGISTER:
// We are at safepoint which corresponds to call. All registers are
// saved by caller so there would be no live registers at this
// point. Thus these translation commands should not be used.
return SlotRef(slot_addr, SlotRef::DOUBLE);
}
+ case Translation::FLOAT32x4_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::FLOAT32x4);
+ }
+
+ case Translation::FLOAT64x2_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::FLOAT64x2);
+ }
+
+ case Translation::INT32x4_STACK_SLOT: {
+ int slot_index = iterator->Next();
+ Address slot_addr = SlotAddress(frame, slot_index);
+ return SlotRef(slot_addr, SlotRef::INT32x4);
+ }
+
case Translation::LITERAL: {
int literal_index = iterator->Next();
return SlotRef(data->GetIsolate(),
return isolate->factory()->NewNumber(value);
}
+ case FLOAT32x4:
+ return isolate->factory()->NewFloat32x4(read_simd128_value(addr_).f4);
+
+ case FLOAT64x2:
+ return isolate->factory()->NewFloat64x2(read_simd128_value(addr_).d2);
+
+ case INT32x4:
+ return isolate->factory()->NewInt32x4(read_simd128_value(addr_).i4);
+
case LITERAL:
return literal_;
return d;
}
+// Reinterprets the bytes at p as a 16-byte simd128_value_t and loads it.
+// NOTE(review): assumes p is suitably aligned for a simd128_value_t load —
+// confirm for strict-alignment targets.
+static inline simd128_value_t read_simd128_value(Address p) {
+  return *reinterpret_cast<simd128_value_t*>(p);
+}
class FrameDescription;
class TranslationIterator;
};
+// Pairs an untagged simd128 value with the destination it must be
+// materialized into.  T is the destination's addressing type: an Address for
+// frame slots, or an int index for materialized-object value lists.
+template<typename T>
+class SIMD128MaterializationDescriptor BASE_EMBEDDED {
+ public:
+  SIMD128MaterializationDescriptor(T destination, simd128_value_t value)
+      : destination_(destination), value_(value) { }
+
+  T destination() const { return destination_; }
+  simd128_value_t value() const { return value_; }
+
+ private:
+  T destination_;
+  simd128_value_t value_;
+};
+
+
class ObjectMaterializationDescriptor BASE_EMBEDDED {
public:
ObjectMaterializationDescriptor(
void AddObjectDuplication(intptr_t slot, int object_index);
void AddObjectTaggedValue(intptr_t value);
void AddObjectDoubleValue(double value);
+ void AddObjectSIMD128Value(simd128_value_t value, int translation_opcode);
void AddDoubleValue(intptr_t slot_address, double value);
+ void AddSIMD128Value(intptr_t slot_address, simd128_value_t value,
+ int translation_opcode);
bool ArgumentsObjectIsAdapted(int object_index) {
ObjectMaterializationDescriptor desc = deferred_objects_.at(object_index);
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
CodeStubDescriptor* desc);
- // Fill the given output frame's double registers with the original values
- // from the input frame's double registers.
- void CopyDoubleRegisters(FrameDescription* output_frame);
+ // Fill the given output frame's simd128 registers with the original values
+ // from the input frame's simd128 registers.
+ void CopySIMD128Registers(FrameDescription* output_frame);
// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
List<Object*> deferred_objects_tagged_values_;
List<HeapNumberMaterializationDescriptor<int> >
deferred_objects_double_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float32x4_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_float64x2_values_;
+ List<SIMD128MaterializationDescriptor<int> >
+ deferred_objects_int32x4_values_;
List<ObjectMaterializationDescriptor> deferred_objects_;
List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float32x4s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_float64x2s_;
+ List<SIMD128MaterializationDescriptor<Address> > deferred_int32x4s_;
// Key for lookup of previously materialized objects
Address stack_fp_;
return read_double_value(reinterpret_cast<Address>(ptr));
}
+  // Reads the 16-byte SIMD128 value stored at the given frame offset.
+  simd128_value_t GetSIMD128FrameSlot(unsigned offset) {
+    intptr_t* ptr = GetFrameSlotPointer(offset);
+    return read_simd128_value(reinterpret_cast<Address>(ptr));
+  }
+
void SetFrameSlot(unsigned offset, intptr_t value) {
*GetFrameSlotPointer(offset) = value;
}
return registers_[n];
}
- double GetDoubleRegister(unsigned n) const {
- DCHECK(n < arraysize(double_registers_));
- return double_registers_[n];
+ double GetDoubleRegister(unsigned n) const;
+
+ simd128_value_t GetSIMD128Register(unsigned n) const {
+ DCHECK(n < arraysize(simd128_registers_));
+ return simd128_registers_[n];
}
void SetRegister(unsigned n, intptr_t value) {
registers_[n] = value;
}
- void SetDoubleRegister(unsigned n, double value) {
- DCHECK(n < arraysize(double_registers_));
- double_registers_[n] = value;
+ void SetDoubleRegister(unsigned n, double value);
+
+ void SetSIMD128Register(unsigned n, simd128_value_t value) {
+ DCHECK(n < arraysize(simd128_registers_));
+ simd128_registers_[n] = value;
}
intptr_t GetTop() const { return top_; }
return OFFSET_OF(FrameDescription, registers_);
}
- static int double_registers_offset() {
- return OFFSET_OF(FrameDescription, double_registers_);
+ static int simd128_registers_offset() {
+ return OFFSET_OF(FrameDescription, simd128_registers_);
}
static int frame_size_offset() {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kMaxNumRegisters];
+ simd128_value_t simd128_registers_[SIMD128Register::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(DOUBLE_REGISTER) \
+ V(FLOAT32x4_REGISTER) \
+ V(FLOAT64x2_REGISTER) \
+ V(INT32x4_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
+ V(FLOAT32x4_STACK_SLOT) \
+ V(FLOAT64x2_STACK_SLOT) \
+ V(INT32x4_STACK_SLOT) \
V(LITERAL)
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
+ void StoreSIMD128Register(SIMD128Register reg, Opcode opcode);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
+ void StoreSIMD128StackSlot(int index, Opcode opcode);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
INT32,
UINT32,
DOUBLE,
+ FLOAT32x4,
+ FLOAT64x2,
+ INT32x4,
LITERAL,
DEFERRED_OBJECT, // Object captured by the escape analysis.
// The number of nested objects can be obtained
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
return 3;
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
+ return 4;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
EXTERNAL_INT16_ELEMENTS,
EXTERNAL_UINT16_ELEMENTS,
EXTERNAL_INT32_ELEMENTS,
+ EXTERNAL_INT32x4_ELEMENTS,
EXTERNAL_UINT32_ELEMENTS,
EXTERNAL_FLOAT32_ELEMENTS,
+ EXTERNAL_FLOAT32x4_ELEMENTS,
EXTERNAL_FLOAT64_ELEMENTS,
+ EXTERNAL_FLOAT64x2_ELEMENTS,
EXTERNAL_UINT8_CLAMPED_ELEMENTS,
// Fixed typed arrays
INT16_ELEMENTS,
UINT32_ELEMENTS,
INT32_ELEMENTS,
+ INT32x4_ELEMENTS,
FLOAT32_ELEMENTS,
+ FLOAT32x4_ELEMENTS,
FLOAT64_ELEMENTS,
+ FLOAT64x2_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
// Derived constants from ElementsKind
}
+// Predicates for the external (off-heap) SIMD128 typed-array element kinds.
+inline bool IsExternalFloat32x4ElementsKind(ElementsKind kind) {
+  return kind == EXTERNAL_FLOAT32x4_ELEMENTS;
+}
+
+
+inline bool IsExternalFloat64x2ElementsKind(ElementsKind kind) {
+  return kind == EXTERNAL_FLOAT64x2_ELEMENTS;
+}
+
+
+inline bool IsExternalInt32x4ElementsKind(ElementsKind kind) {
+  return kind == EXTERNAL_INT32x4_ELEMENTS;
+}
+
+
inline bool IsFixedFloatElementsKind(ElementsKind kind) {
return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
+// Predicates for the fixed (on-heap) SIMD128 typed-array element kinds.
+inline bool IsFixedFloat32x4ElementsKind(ElementsKind kind) {
+  return kind == FLOAT32x4_ELEMENTS;
+}
+
+
+inline bool IsFixedFloat64x2ElementsKind(ElementsKind kind) {
+  return kind == FLOAT64x2_ELEMENTS;
+}
+
+
+inline bool IsFixedInt32x4ElementsKind(ElementsKind kind) {
+  return kind == INT32x4_ELEMENTS;
+}
+
+
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
IsExternalFloatOrDoubleElementsKind(kind) ||
}
+// Combined predicates: true for either the external or the fixed variant of
+// each SIMD128 element kind, plus a catch-all covering all three lane types.
+inline bool IsFloat32x4ElementsKind(ElementsKind kind) {
+  return IsExternalFloat32x4ElementsKind(kind) ||
+      IsFixedFloat32x4ElementsKind(kind);
+}
+
+
+inline bool IsFloat64x2ElementsKind(ElementsKind kind) {
+  return IsExternalFloat64x2ElementsKind(kind) ||
+      IsFixedFloat64x2ElementsKind(kind);
+}
+
+
+inline bool IsInt32x4ElementsKind(ElementsKind kind) {
+  return IsExternalInt32x4ElementsKind(kind) ||
+      IsFixedInt32x4ElementsKind(kind);
+}
+
+
+inline bool IsSIMD128ElementsKind(ElementsKind kind) {
+  return IsFloat32x4ElementsKind(kind) || IsFloat64x2ElementsKind(kind) ||
+      IsInt32x4ElementsKind(kind);
+}
+
+
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
// - ExternalInt16ElementsAccessor
// - ExternalUint16ElementsAccessor
// - ExternalInt32ElementsAccessor
+// - ExternalInt32x4ElementsAccessor
// - ExternalUint32ElementsAccessor
// - ExternalFloat32ElementsAccessor
+// - ExternalFloat32x4ElementsAccessor
// - ExternalFloat64ElementsAccessor
+// - ExternalFloat64x2ElementsAccessor
// - ExternalUint8ClampedElementsAccessor
// - FixedUint8ElementsAccessor
// - FixedInt8ElementsAccessor
// - FixedInt16ElementsAccessor
// - FixedUint32ElementsAccessor
// - FixedInt32ElementsAccessor
+// - FixedInt32x4ElementsAccessor
// - FixedFloat32ElementsAccessor
+// - FixedFloat32x4ElementsAccessor
// - FixedFloat64ElementsAccessor
+// - FixedFloat64x2ElementsAccessor
// - FixedUint8ClampedElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
EXTERNAL_UINT16_ELEMENTS, ExternalUint16Array) \
V(ExternalInt32ElementsAccessor, EXTERNAL_INT32_ELEMENTS, \
ExternalInt32Array) \
+ V(ExternalInt32x4ElementsAccessor, EXTERNAL_INT32x4_ELEMENTS, \
+ ExternalInt32x4Array) \
V(ExternalUint32ElementsAccessor, \
EXTERNAL_UINT32_ELEMENTS, ExternalUint32Array) \
V(ExternalFloat32ElementsAccessor, \
EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array) \
+ V(ExternalFloat32x4ElementsAccessor, \
+ EXTERNAL_FLOAT32x4_ELEMENTS, ExternalFloat32x4Array) \
V(ExternalFloat64ElementsAccessor, \
EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array) \
+ V(ExternalFloat64x2ElementsAccessor, \
+ EXTERNAL_FLOAT64x2_ELEMENTS, ExternalFloat64x2Array) \
V(ExternalUint8ClampedElementsAccessor, \
EXTERNAL_UINT8_CLAMPED_ELEMENTS, \
ExternalUint8ClampedArray) \
V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
+ V(FixedInt32x4ElementsAccessor, INT32x4_ELEMENTS, FixedInt32x4Array) \
V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
+ V(FixedFloat32x4ElementsAccessor, FLOAT32x4_ELEMENTS, \
+ FixedFloat32x4Array) \
V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
+ V(FixedFloat64x2ElementsAccessor, FLOAT64x2_ELEMENTS, \
+ FixedFloat64x2Array) \
V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
FixedUint8ClampedArray)
}
+Handle<Float32x4> Factory::NewFloat32x4(float32x4_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFloat32x4(value, pretenure), Float32x4);
+}
+
+
+Handle<Float64x2> Factory::NewFloat64x2(float64x2_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateFloat64x2(value, pretenure), Float64x2);
+}
+
+
+Handle<Int32x4> Factory::NewInt32x4(int32x4_value_t value,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateInt32x4(value, pretenure), Int32x4);
+}
+
+
MaybeHandle<Object> Factory::NewTypeError(const char* message,
Vector<Handle<Object> > args) {
return NewError("MakeTypeError", message, args);
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<Float32x4> NewFloat32x4(float32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Float64x2> NewFloat64x2(float64x2_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<Int32x4> NewInt32x4(int32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
inline Handle<JSObject> NewNeanderObject() {
#define FLAG FLAG_FULL
// Flags for language modes and experimental language features.
+DEFINE_BOOL(simd_object, false, "enable SIMD object and operations")
DEFINE_BOOL(use_strict, false, "enforce strict mode")
DEFINE_BOOL(es_staging, false, "enable upcoming ES6+ features")
// -----------------------------------------------------------------------------
// Constants
+struct float32x4_value_t { float storage[4]; };
+struct float64x2_value_t { double storage[2]; };
+struct int32x4_value_t { int32_t storage[4]; };
+union simd128_value_t {
+ double d[2];
+ float32x4_value_t f4;
+ float64x2_value_t d2;
+ int32x4_value_t i4;
+};
+
const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kInt32Size = sizeof(int32_t); // NOLINT
-const int kInt64Size = sizeof(int64_t); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char); // NOLINT
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int); // NOLINT
+const int kInt32Size = sizeof(int32_t); // NOLINT
+const int kInt64Size = sizeof(int64_t); // NOLINT
+const int kDoubleSize = sizeof(double); // NOLINT
+const int kFloatSize = sizeof(float); // NOLINT
+const int kFloat32x4Size = sizeof(float32x4_value_t); // NOLINT
+const int kFloat64x2Size = sizeof(float64x2_value_t); // NOLINT
+const int kInt32x4Size = sizeof(int32x4_value_t); // NOLINT
+const int kSIMD128Size = sizeof(simd128_value_t); // NOLINT
+const int kIntptrSize = sizeof(intptr_t); // NOLINT
+const int kPointerSize = sizeof(void*); // NOLINT
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kRegisterSize = kPointerSize + kPointerSize;
#else
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4) \
+ V(Float64x2, float64x2) \
+ V(Int32x4, int32x4)
+
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type) \
+AllocationResult Heap::Allocate##TYPE(type##_value_t value, \
+ PretenureFlag pretenure) { \
+ STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize); \
+ \
+ AllocationSpace space = \
+ SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure); \
+ \
+ HeapObject* result; \
+ { AllocationResult allocation = \
+ AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE); \
+ if (!allocation.To(&result)) return allocation; \
+ } \
+ \
+ result->set_map_no_write_barrier( \
+ isolate()->native_context()->type##_function()->initial_map()); \
+ JSObject::cast(result)->set_properties(empty_fixed_array()); \
+ JSObject::cast(result)->set_elements(empty_fixed_array()); \
+ \
+ HeapObject* storage; \
+ int storage_size = \
+ FixedTypedArrayBase::kDataOffset + k##TYPE##Size; \
+ space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure); \
+ { AllocationResult allocation = \
+ AllocateRaw(storage_size, space, OLD_DATA_SPACE); \
+ if (!allocation.To(&storage)) return allocation; \
+ } \
+ \
+ storage->set_map( \
+ *isolate()->factory()->fixed_##type##_array_map()); \
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
+ elements->set_length(static_cast<int>(1)); \
+ memset(elements->DataPtr(), 0, elements->DataSize()); \
+ Fixed##TYPE##Array::cast(storage)->set(0, value); \
+ TYPE::cast(result)->set_value(storage); \
+ return result; \
+}
+
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
V(Map, external_int16_array_map, ExternalInt16ArrayMap) \
V(Map, external_uint16_array_map, ExternalUint16ArrayMap) \
V(Map, external_int32_array_map, ExternalInt32ArrayMap) \
+ V(Map, external_int32x4_array_map, ExternalInt32x4ArrayMap) \
V(Map, external_uint32_array_map, ExternalUint32ArrayMap) \
V(Map, external_float32_array_map, ExternalFloat32ArrayMap) \
+ V(Map, external_float32x4_array_map, ExternalFloat32x4ArrayMap) \
+ V(Map, external_float64x2_array_map, ExternalFloat64x2ArrayMap) \
V(Map, external_float64_array_map, ExternalFloat64ArrayMap) \
V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap) \
V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array) \
V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array) \
V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array) \
V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array) \
+ V(ExternalArray, empty_external_int32x4_array, EmptyExternalInt32x4Array) \
V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array) \
V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array) \
+ V(ExternalArray, empty_external_float32x4_array, EmptyExternalFloat32x4Array)\
+ V(ExternalArray, empty_external_float64x2_array, EmptyExternalFloat64x2Array)\
V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array) \
V(ExternalArray, empty_external_uint8_clamped_array, \
EmptyExternalUint8ClampedArray) \
V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
+ V(Map, fixed_int32x4_array_map, FixedInt32x4ArrayMap) \
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
+ V(Map, fixed_float32x4_array_map, FixedFloat32x4ArrayMap) \
+ V(Map, fixed_float64x2_array_map, FixedFloat64x2ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
+ V(FixedTypedArrayBase, empty_fixed_float32x4_array, \
+ EmptyFixedFloat32x4Array) \
+ V(FixedTypedArrayBase, empty_fixed_float64x2_array, \
+ EmptyFixedFloat64x2Array) \
+ V(FixedTypedArrayBase, empty_fixed_int32x4_array, EmptyFixedInt32x4Array) \
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
V(null_string, "null") \
V(number_string, "number") \
V(Number_string, "Number") \
+ V(float32x4_string, "float32x4") \
+ V(float64x2_string, "float64x2") \
+ V(int32x4_string, "int32x4") \
V(nan_string, "NaN") \
V(source_string, "source") \
V(source_url_string, "source_url") \
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value") \
+ V(signMask, "signMask") \
+ V(x, "x") \
+ V(y, "y") \
+ V(z, "z") \
+ V(w, "w") \
+ V(flagX, "flagX") \
+ V(flagY, "flagY") \
+ V(flagZ, "flagZ") \
+ V(flagW, "flagW") \
+ V(simd, "SIMD") \
V(next_string, "next") \
V(byte_length_string, "byteLength") \
V(byte_offset_string, "byteOffset") \
AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a Float32x4 from value.
+ MUST_USE_RESULT AllocationResult AllocateFloat32x4(
+ float32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a Float64x2 from value.
+ MUST_USE_RESULT AllocationResult AllocateFloat64x2(
+ float64x2_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates an Int32x4 from value.
+ MUST_USE_RESULT AllocationResult AllocateInt32x4(
+ int32x4_value_t value,
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a byte array of the specified length
MUST_USE_RESULT AllocationResult
AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
case JS_ARRAY_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
case JS_REGEXP_TYPE:
VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
break;
case JS_MESSAGE_OBJECT_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
instance_size);
-
case JS_FUNCTION_TYPE:
return kVisitJSFunction;
case FIXED_UINT32_ARRAY_TYPE:
case FIXED_INT32_ARRAY_TYPE:
case FIXED_FLOAT32_ARRAY_TYPE:
+ case FIXED_INT32x4_ARRAY_TYPE:
+ case FIXED_FLOAT32x4_ARRAY_TYPE:
+ case FIXED_FLOAT64x2_ARRAY_TYPE:
case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
return kVisitFixedTypedArray;
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
case HValue::kStoreFrameContext:
- case HValue::kStoreKeyed:
case HValue::kStoreNamedField:
case HValue::kStoreNamedGeneric:
case HValue::kStringCharCodeAt:
case HValue::kTypeofIsAndBranch:
case HValue::kUnknownOSRValue:
case HValue::kUseConst:
+ case HValue::kNullarySIMDOperation:
return false;
+ case HValue::kStoreKeyed:
+ return !CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ IsSIMD128ElementsKind(HStoreKeyed::cast(this)->elements_kind());
+
case HValue::kAdd:
case HValue::kAllocateBlockContext:
case HValue::kApplyArguments:
case HValue::kTypeof:
case HValue::kUnaryMathOperation:
case HValue::kWrapReceiver:
+ case HValue::kUnarySIMDOperation:
+ case HValue::kBinarySIMDOperation:
+ case HValue::kTernarySIMDOperation:
+ case HValue::kQuarternarySIMDOperation:
return true;
}
UNREACHABLE();
type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
*block = number_type ? FirstSuccessor() : SecondSuccessor();
return true;
+ } else if (value()->representation().IsFloat32x4()) {
+ bool float32x4_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->float32x4_string());
+ *block = float32x4_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsFloat64x2()) {
+ bool float64x2_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->float64x2_string());
+ *block = float64x2_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
+ } else if (value()->representation().IsInt32x4()) {
+ bool int32x4_type =
+ type_literal_.IsKnownGlobal(isolate()->heap()->int32x4_string());
+ *block = int32x4_type ? FirstSuccessor() : SecondSuccessor();
+ return true;
}
+
*block = NULL;
return false;
}
return os << "@" << access.offset();
}
+
+HInstruction* HNullarySIMDOperation::New(
+ Zone* zone, HValue* context, BuiltinFunctionId op) {
+ return new(zone) HNullarySIMDOperation(context, op);
+}
+
+
+HInstruction* HUnarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op,
+ Representation to) {
+ return new(zone) HUnarySIMDOperation(context, value, op, to);
+}
+
+
+HInstruction* HBinarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right,
+ BuiltinFunctionId op) {
+ return new(zone) HBinarySIMDOperation(context, left, right, op);
+}
+
+
+HInstruction* HTernarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* mask, HValue* left, HValue* right,
+ BuiltinFunctionId op) {
+ return new(zone) HTernarySIMDOperation(context, mask, left, right, op);
+}
+
+
+HInstruction* HQuarternarySIMDOperation::New(
+ Zone* zone, HValue* context, HValue* x, HValue* y, HValue* z, HValue* w,
+ BuiltinFunctionId op) {
+ return new(zone) HQuarternarySIMDOperation(context, x, y, z, w, op);
+}
+
+
+const char* HNullarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name: \
+ return #module "." #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+OStream& HNullarySIMDOperation::PrintDataTo(OStream& os) const {
+ return os << OpName();
+}
+
+
+const char* HUnarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+ case k##name: \
+ return #module "." #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+OStream& HUnarySIMDOperation::PrintDataTo(OStream& os) const {
+ return os << OpName() << " " << NameOf(value());
+}
+
+
+const char* HBinarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+ case k##name: \
+ return #module "." #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+OStream& HBinarySIMDOperation::PrintDataTo(OStream& os) const {
+ return os << OpName() << " " << NameOf(left()) << " "
+ << NameOf(right());
+}
+
+
+const char* HTernarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+ p7) \
+ case k##name: \
+ return #module "." #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+OStream& HTernarySIMDOperation::PrintDataTo(OStream& os) const {
+ return os << OpName() << " " << NameOf(first()) << " "
+ << NameOf(second()) << " " << NameOf(third());
+}
+
+
+const char* HQuarternarySIMDOperation::OpName() const {
+ switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ return #module "." #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+OStream& HQuarternarySIMDOperation::PrintDataTo(OStream& os) const {
+ return os << OpName() << " " << NameOf(x()) << " " << NameOf(y()) << " "
+ << NameOf(z()) << " " << NameOf(w());
+}
+
+
} } // namespace v8::internal
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(UnknownOSRValue) \
V(UseConst) \
V(WrapReceiver)
HType t = type();
if (t.IsSmi()) return Representation::Smi();
if (t.IsHeapNumber()) return Representation::Double();
+ if (t.IsFloat32x4()) return Representation::Float32x4();
+ if (t.IsFloat64x2()) return Representation::Float64x2();
+ if (t.IsInt32x4()) return Representation::Int32x4();
if (t.IsHeapObject()) return r;
return Representation::None();
}
HType type() const { return type_; }
void set_type(HType new_type) {
- DCHECK(new_type.IsSubtypeOf(type_));
+ // TODO(ningxin): for SIMD ops, the initial type is None, which
+ // trips the following DCHECK.
+ // DCHECK(new_type.IsSubtypeOf(type_));
type_ = new_type;
}
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
} else {
- set_type(HType::TaggedNumber());
+ if (to.IsFloat32x4()) {
+ set_type(HType::Float32x4());
+ } else if (to.IsFloat64x2()) {
+ set_type(HType::Float64x2());
+ } else if (to.IsInt32x4()) {
+ set_type(HType::Int32x4());
+ } else {
+ set_type(HType::TaggedNumber());
+ }
if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
}
}
Representation::Integer32());
}
+ static HObjectAccess ForSIMD128Double0() {
+ return HObjectAccess(
+ kDouble, Float32x4::kValueOffset, Representation::Double());
+ }
+
+ static HObjectAccess ForSIMD128Double1() {
+ return HObjectAccess(kDouble,
+ Float32x4::kValueOffset + kDoubleSize,
+ Representation::Double());
+ }
+
static HObjectAccess ForElementsPointer() {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
Representation::UInteger16());
}
+ static HObjectAccess ForMapPrototype() {
+ return HObjectAccess(kInobject, Map::kPrototypeOffset);
+ }
+
static HObjectAccess ForPropertyCellValue() {
return HObjectAccess(kInobject, PropertyCell::kValueOffset);
}
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
set_representation(Representation::Double());
+ } else if (IsFloat32x4ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float32x4() : Representation::Tagged());
+ } else if (IsFloat64x2ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged());
+ } else if (IsInt32x4ElementsKind(elements_kind)) {
+ set_representation(CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged());
} else {
set_representation(Representation::Integer32());
}
return Representation::Integer32();
}
+ if (IsFloat32x4ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float32x4() : Representation::Tagged();
+ }
+ if (IsFloat64x2ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Float64x2() : Representation::Tagged();
+ }
+ if (IsInt32x4ElementsKind(kind)) {
+ return CpuFeatures::SupportsSIMD128InCrankshaft() ?
+ Representation::Int32x4() : Representation::Tagged();
+ }
+
if (IsFastSmiElementsKind(kind)) {
return Representation::Smi();
}
};
+class HNullarySIMDOperation FINAL : public HTemplateInstruction<1> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ return Representation::Tagged();
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(NullarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) OVERRIDE {
+ HNullarySIMDOperation* b = HNullarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HNullarySIMDOperation(HValue* context, BuiltinFunctionId op)
+ : HTemplateInstruction<1>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ switch (op) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, representation) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ break;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HUnarySIMDOperation FINAL : public HTemplateInstruction<2> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ BuiltinFunctionId op,
+ Representation to = Representation::Float32x4());
+
+ HValue* context() { return OperandAt(0); }
+ HValue* value() const { return OperandAt(1); }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else if (op_ == kSIMD128Change) {
+ return value()->representation();
+ } else {
+ switch (op_) {
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, representation) \
+ case k##name: \
+ return Representation::representation();
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(UnarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) OVERRIDE {
+ HUnarySIMDOperation* b = HUnarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HUnarySIMDOperation(HValue* context, HValue* value, BuiltinFunctionId op,
+ Representation to = Representation::Float32x4())
+ : HTemplateInstruction<2>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ switch (op) {
+ case kSIMD128Change:
+ set_representation(to);
+ set_type(HType::FromRepresentation(to));
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HBinarySIMDOperation FINAL : public HTemplateInstruction<3> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* left() const { return OperandAt(1); }
+ HValue* right() const { return OperandAt(2); }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, left_representation, \
+ right_representation) \
+ case k##name: \
+ return index == 1 ? Representation::left_representation() \
+ : Representation::right_representation(); \
+ break;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(BinarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) OVERRIDE {
+ HBinarySIMDOperation* b = HBinarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HBinarySIMDOperation(HValue* context, HValue* left, HValue* right,
+ BuiltinFunctionId op)
+ : HTemplateInstruction<3>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, left);
+ SetOperandAt(2, right);
+ switch (op) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, p6) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HTernarySIMDOperation FINAL : public HTemplateInstruction<4> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* first,
+ HValue* second,
+ HValue* third,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* first() const { return OperandAt(1); }
+ HValue* second() const { return OperandAt(2); }
+ HValue* third() const { return OperandAt(3); }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, \
+ first_representation, second_representation, third_representation) \
+ case k##name: \
+ switch (index) { \
+ case 1: return Representation::first_representation(); \
+ case 2: return Representation::second_representation(); \
+ case 3: return Representation::third_representation(); \
+ default: \
+ UNREACHABLE(); \
+ return Representation::None(); \
+ }
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(TernarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) OVERRIDE {
+ HTernarySIMDOperation* b = HTernarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HTernarySIMDOperation(HValue* context, HValue* first, HValue* second,
+ HValue* third, BuiltinFunctionId op)
+ : HTemplateInstruction<4>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, first);
+ SetOperandAt(2, second);
+ SetOperandAt(3, third);
+ switch (op) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, \
+ p6, p7) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
+
+class HQuarternarySIMDOperation FINAL : public HTemplateInstruction<5> {
+ public:
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* x,
+ HValue* y,
+ HValue* z,
+ HValue* w,
+ BuiltinFunctionId op);
+
+ HValue* context() { return OperandAt(0); }
+ HValue* x() const { return OperandAt(1); }
+ HValue* y() const { return OperandAt(2); }
+ HValue* z() const { return OperandAt(3); }
+ HValue* w() const { return OperandAt(4); }
+
+ virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;
+
+ virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ switch (op_) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, \
+ first_representation, second_representation, third_representation, \
+ fourth_representation) \
+ case k##name: \
+ switch (index) { \
+ case 1: return Representation::first_representation(); \
+ case 2: return Representation::second_representation(); \
+ case 3: return Representation::third_representation(); \
+ case 4: return Representation::fourth_representation(); \
+ default: \
+ UNREACHABLE(); \
+ return Representation::None(); \
+ }
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return Representation::None();
+ }
+ }
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+ const char* OpName() const;
+
+ DECLARE_CONCRETE_INSTRUCTION(QuarternarySIMDOperation)
+
+ protected:
+ virtual bool DataEquals(HValue* other) OVERRIDE {
+ HQuarternarySIMDOperation* b = HQuarternarySIMDOperation::cast(other);
+ return op_ == b->op();
+ }
+
+ private:
+ HQuarternarySIMDOperation(HValue* context, HValue* x, HValue* y, HValue* z,
+ HValue* w, BuiltinFunctionId op)
+ : HTemplateInstruction<5>(HType::None()), op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, x);
+ SetOperandAt(2, y);
+ SetOperandAt(3, z);
+ SetOperandAt(4, w);
+ switch (op) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, representation, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ set_representation(Representation::representation()); \
+ set_type(HType::FromRepresentation(representation_)); \
+ if (Representation::p5().IsInteger32() || \
+ Representation::p6().IsInteger32() || \
+ Representation::p7().IsInteger32() || \
+ Representation::p8().IsInteger32()) { \
+ SetFlag(kTruncatingToInt32); \
+ } \
+ break;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
+ virtual bool IsDeletable() const OVERRIDE { return true; }
+
+ BuiltinFunctionId op_;
+};
+
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
}
if (new_value == NULL) {
- new_value = new(graph()->zone()) HChange(
- value, to, is_truncating_to_smi, is_truncating_to_int);
+ if (((to.IsFloat32x4() || to.IsFloat64x2() || to.IsInt32x4()) &&
+ !value->representation().IsTagged()) ||
+ ((value->representation().IsFloat32x4() ||
+ value->representation().IsFloat64x2() ||
+ value->representation().IsInt32x4()) &&
+ !to.IsTagged())) {
+ new_value = HUnarySIMDOperation::New(graph()->zone(),
+ graph()->entry_block()->last_environment()->context(),
+ value, kSIMD128Change, to);
+ } else {
+ new_value = new(graph()->zone()) HChange(
+ value, to, is_truncating_to_smi, is_truncating_to_int);
+ }
if (!use_value->operand_position(use_index).IsUnknown()) {
new_value->set_position(use_value->operand_position(use_index));
} else {
#include "src/hydrogen-types.h"
#include "src/ostreams.h"
+#include "src/property-details.h"
#include "src/types-inl.h"
double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
}
+ if (value->IsFloat32x4()) return HType::Float32x4();
+ if (value->IsFloat64x2()) return HType::Float64x2();
+ if (value->IsInt32x4()) return HType::Int32x4();
if (value->IsString()) return HType::String();
if (value->IsBoolean()) return HType::Boolean();
if (value->IsUndefined()) return HType::Undefined();
}
+// static
+// Maps a Crankshaft value representation onto the closest HType lattice
+// element. Representations without a dedicated HType (tagged, external,
+// none, ...) keep the conservative default HType::Tagged().
+HType HType::FromRepresentation(Representation representation) {
+  HType result = HType::Tagged();  // Conservative default.
+  if (representation.IsSmi()) {
+    result = HType::Smi();
+  } else if (representation.IsDouble()) {
+    result = HType::HeapNumber();
+  } else if (representation.IsFloat32x4()) {
+    result = HType::Float32x4();
+  } else if (representation.IsFloat64x2()) {
+    result = HType::Float64x2();
+  } else if (representation.IsInt32x4()) {
+    result = HType::Int32x4();
+  }
+  return result;
+}
+
+
OStream& operator<<(OStream& os, const HType& t) {
// Note: The c1visualizer syntax for locals allows only a sequence of the
// following characters: A-Za-z0-9_-|:
template <typename T> class Handle;
class Object;
class OStream;
+class Representation;
#define HTYPE_LIST(V) \
V(Any, 0x0) /* 0000 0000 0000 0000 */ \
V(HeapPrimitive, 0x25) /* 0000 0000 0010 0101 */ \
V(Null, 0x27) /* 0000 0000 0010 0111 */ \
V(HeapNumber, 0x2d) /* 0000 0000 0010 1101 */ \
- V(String, 0x65) /* 0000 0000 0110 0101 */ \
- V(Boolean, 0xa5) /* 0000 0000 1010 0101 */ \
- V(Undefined, 0x125) /* 0000 0001 0010 0101 */ \
- V(JSObject, 0x221) /* 0000 0010 0010 0001 */ \
- V(JSArray, 0x621) /* 0000 0110 0010 0001 */ \
- V(None, 0x7ff) /* 0000 0111 1111 1111 */
+ V(Float32x4, 0x65) /* 0000 0000 0110 0101 */ \
+ V(Float64x2, 0xa5) /* 0000 0000 1010 0101 */ \
+ V(Int32x4, 0x125) /* 0000 0001 0010 0101 */ \
+ V(String, 0x225) /* 0000 0010 0010 0101 */ \
+ V(Boolean, 0x425) /* 0000 0100 0010 0101 */ \
+ V(Undefined, 0x825) /* 0000 1000 0010 0101 */ \
+ V(JSObject, 0x1021) /* 0001 0000 0010 0001 */ \
+ V(JSArray, 0x3021) /* 0011 0000 0010 0001 */ \
+ V(None, 0x3fff) /* 0011 1111 1111 1111 */
class HType FINAL {
public:
template <class T>
static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
+ static HType FromRepresentation(Representation representation);
friend OStream& operator<<(OStream& os, const HType& t);
CHECK_ALIVE(store = BuildNamedGeneric(
STORE, NULL, literal, name, value));
} else {
- PropertyAccessInfo info(this, STORE, ToType(map), name);
+ PropertyAccessInfo info(
+ this, STORE, ToType(map), name, map->instance_type());
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
DCHECK(!info.IsAccessor());
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
+ if (IsSIMD128PropertyCallback() &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ return true;
+ }
if (!CanInlinePropertyAccess(type_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
if (this->map()->function_with_prototype() &&
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
if (types->length() > kMaxLoadPolymorphism) return false;
+ if (IsSIMD128PropertyCallback() &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ for (int i = 1; i < types->length(); ++i) {
+ if (types->at(i)->instance_type() == types->first()->instance_type()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
HObjectAccess access = HObjectAccess::ForMap(); // bogus default
if (GetJSObjectFieldAccess(&access)) {
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ builder_, access_type_, ToType(types->at(i)), name_,
+ types->at(i)->instance_type());
HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
if (!access.Equals(test_access)) return false;
for (int i = 1; i < types->length(); ++i) {
PropertyAccessInfo test_info(
- builder_, access_type_, ToType(types->at(i)), name_);
+ builder_, access_type_, ToType(types->at(i)), name_,
+ types->at(i)->instance_type());
if (!test_info.IsCompatible(this)) return false;
}
}
+// Recognizes a 4-letter SIMD swizzle property name built from the upper-case
+// lane letters X/Y/Z/W (e.g. "WZYX"). On success *mask receives the lane
+// selector, 2 bits per character (X=0, Y=1, Z=2, W=3), with character i
+// stored at bit position 2*i. Any other character — including the NUL
+// terminator of a shorter name — takes the default case and returns false.
+// NOTE(review): only the first four characters are inspected; presumably
+// callers only pass 4-character names — confirm.
+static bool IsSIMDProperty(Handle<String> name, uint8_t* mask) {
+  SmartArrayPointer<char> cstring = name->ToCString();
+  int i = 0;
+  while (i <= 3) {
+    int shift = 0;
+    // Intentional fall-through: each case below adds one to |shift| so that
+    // W yields 3, Z yields 2, Y yields 1 and X yields 0.
+    switch (cstring[i]) {
+      case 'W':
+        shift++;
+        // Fall through.
+      case 'Z':
+        shift++;
+        // Fall through.
+      case 'Y':
+        shift++;
+        // Fall through.
+      case 'X':
+        break;
+      default:
+        return false;
+    }
+    *mask |= (shift << 2*i);
+    i++;
+  }
+
+  return true;
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildMonomorphicAccess(
PropertyAccessInfo* info,
HValue* object,
if (info->IsField()) {
if (info->IsLoad()) {
+ if (info->map()->constructor()->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(info->map()->constructor());
+ String* class_name =
+ String::cast(constructor->shared()->instance_class_name());
+ uint8_t mask = 0;
+ if (class_name->Equals(isolate()->heap()->simd()) &&
+ IsSIMDProperty(info->name(), &mask) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ return New<HConstant>(mask);
+ }
+ }
return BuildLoadNamedField(info, checked_holder);
} else {
return BuildStoreNamedField(info, checked_object, value);
STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
int i;
for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
handled_string = false;
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
- PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
+ PropertyAccessInfo info(
+ this, access_type, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
}
+// Returns true iff |types| is non-NULL, non-empty, and every map in it has
+// instance type INT32x4_TYPE (i.e. the access site is monomorphic on
+// int32x4 values).
+static bool AreInt32x4Types(SmallMapList* types) {
+  if (types == NULL || types->length() == 0) return false;
+  for (int i = 0; i < types->length(); i++) {
+    if (types->at(i)->instance_type() != INT32x4_TYPE) return false;
+  }
+  return true;
+}
+
+
+// Returns true iff |types| is non-NULL, non-empty, and every map in it has
+// instance type FLOAT32x4_TYPE.
+static bool AreFloat32x4Types(SmallMapList* types) {
+  if (types == NULL || types->length() == 0) return false;
+  for (int i = 0; i < types->length(); i++) {
+    if (types->at(i)->instance_type() != FLOAT32x4_TYPE) return false;
+  }
+  return true;
+}
+
+
+// Returns true iff |types| is non-NULL, non-empty, and every map in it has
+// instance type FLOAT64x2_TYPE.
+static bool AreFloat64x2Types(SmallMapList* types) {
+  if (types == NULL || types->length() == 0) return false;
+  for (int i = 0; i < types->length(); i++) {
+    if (types->at(i)->instance_type() != FLOAT64x2_TYPE) return false;
+  }
+  return true;
+}
+
+
+// Maps a SIMD getter property |name| (signMask, x/y/z/w lanes, or the
+// int32x4-only flagX..flagW) on a value of instance |type| to the
+// corresponding getter BuiltinFunctionId. Unsupported names hit
+// UNREACHABLE() and return the sentinel kSIMD128Unreachable.
+static BuiltinFunctionId NameToId(Isolate* isolate, Handle<String> name,
+                                  InstanceType type) {
+  BuiltinFunctionId id;
+  if (name->Equals(isolate->heap()->signMask())) {
+    if (type == FLOAT32x4_TYPE) {
+      id = kFloat32x4GetSignMask;
+    } else if (type == FLOAT64x2_TYPE) {
+      id = kFloat64x2GetSignMask;
+    } else {
+      DCHECK(type == INT32x4_TYPE);
+      id = kInt32x4GetSignMask;
+    }
+  } else if (name->Equals(isolate->heap()->x())) {
+    if (type == FLOAT32x4_TYPE) {
+      id = kFloat32x4GetX;
+    } else if (type == FLOAT64x2_TYPE) {
+      id = kFloat64x2GetX;
+    } else {
+      DCHECK(type == INT32x4_TYPE);
+      id = kInt32x4GetX;
+    }
+  } else if (name->Equals(isolate->heap()->y())) {
+    if (type == FLOAT32x4_TYPE) {
+      id = kFloat32x4GetY;
+    } else if (type == FLOAT64x2_TYPE) {
+      id = kFloat64x2GetY;
+    } else {
+      DCHECK(type == INT32x4_TYPE);
+      id = kInt32x4GetY;
+    }
+  } else if (name->Equals(isolate->heap()->z())) {
+    // NOTE(review): no FLOAT64x2_TYPE arm for z/w — float64x2 exposes only
+    // x and y, so callers are expected to have filtered it out; confirm.
+    id = type == FLOAT32x4_TYPE ? kFloat32x4GetZ : kInt32x4GetZ;
+  } else if (name->Equals(isolate->heap()->w())) {
+    id = type == FLOAT32x4_TYPE ? kFloat32x4GetW : kInt32x4GetW;
+  } else if (name->Equals(isolate->heap()->flagX())) {
+    DCHECK(type == INT32x4_TYPE);
+    id = kInt32x4GetFlagX;
+  } else if (name->Equals(isolate->heap()->flagY())) {
+    DCHECK(type == INT32x4_TYPE);
+    id = kInt32x4GetFlagY;
+  } else if (name->Equals(isolate->heap()->flagZ())) {
+    DCHECK(type == INT32x4_TYPE);
+    id = kInt32x4GetFlagZ;
+  } else if (name->Equals(isolate->heap()->flagW())) {
+    DCHECK(type == INT32x4_TYPE);
+    id = kInt32x4GetFlagW;
+  } else {
+    UNREACHABLE();
+    id = kSIMD128Unreachable;
+  }
+
+  return id;
+}
+
+
void HOptimizedGraphBuilder::BuildStore(Expression* expr,
Property* prop,
BailoutId ast_id,
DCHECK(types != NULL);
if (types->length() > 0) {
- PropertyAccessInfo info(this, access, ToType(types->first()), name);
+ PropertyAccessInfo info(
+ this, access, ToType(types->first()), name,
+ types->first()->instance_type());
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicNamedFieldAccess(
access, expr, ast_id, return_id, object, value, types, name);
// Type::Number() is only supported by polymorphic load/call handling.
DCHECK(!info.type()->Is(Type::Number()));
BuildCheckHeapObject(object);
+
if (AreStringTypes(types)) {
checked_object =
Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreFloat32x4Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->float32x4_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->float32x4_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, FLOAT32x4_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreFloat64x2Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->float64x2_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->float64x2_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, FLOAT64x2_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
+ } else if (info.IsSIMD128PropertyCallback() &&
+ AreInt32x4Types(types) &&
+ CpuFeatures::SupportsSIMD128InCrankshaft()) {
+ Handle<JSFunction> function(
+ isolate()->native_context()->int32x4_function());
+ HInstruction* constant_function = Add<HConstant>(function);
+ HObjectAccess map_access = HObjectAccess::ForPrototypeOrInitialMap();
+ HInstruction* map = Add<HLoadNamedField>(
+ constant_function, static_cast<HValue*>(NULL), map_access);
+ HObjectAccess prototype_access = HObjectAccess::ForMapPrototype();
+ HInstruction* prototype = Add<HLoadNamedField>(
+ map, static_cast<HValue*>(NULL), prototype_access);
+ Handle<Map> initial_function_prototype_map(
+ isolate()->native_context()->int32x4_function_prototype_map());
+ Add<HCheckMaps>(prototype, initial_function_prototype_map);
+ BuiltinFunctionId id = NameToId(isolate(), name, INT32x4_TYPE);
+ return NewUncasted<HUnarySIMDOperation>(object, id);
} else {
checked_object = Add<HCheckMaps>(object, types);
}
int i;
for (i = 0; i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.CanAccessMonomorphic() && info.IsConstant() &&
info.constant()->IsJSFunction()) {
if (info.type()->Is(Type::String())) {
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
- PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
+ PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name,
+ types->at(i)->instance_type());
if (info.type()->Is(Type::String())) {
if (handled_string) continue;
handled_string = true;
return true;
}
break;
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 0) {
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HNullarySIMDOperation>(id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5) \
+ case k##name:
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 1) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HUnarySIMDOperation>(argument, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6) \
+ case k##name:
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 2) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HBinarySIMDOperation>(left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7) \
+ case k##name:
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 3) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HValue* value = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HTernarySIMDOperation>(value, left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7, p8) \
+ case k##name:
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ expr->arguments()->length() == 4) {
+ HValue* w = Pop();
+ HValue* z = Pop();
+ HValue* y = Pop();
+ HValue* x = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HQuarternarySIMDOperation>(x, y, z, w, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
default:
// Not supported for inlining yet.
break;
ast_context()->ReturnValue(index);
return true;
}
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(p1, p2, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 1) {
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HNullarySIMDOperation>(id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_UNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5) \
+ case k##name:
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 2) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HUnarySIMDOperation>(argument, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_BINARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6) \
+ case k##name:
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 3) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op = NewUncasted<HBinarySIMDOperation>(left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7) \
+ case k##name:
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 4) {
+ HValue* right = Pop();
+ HValue* left = Pop();
+ HValue* value = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HTernarySIMDOperation>(value, left, right, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ }
+ break;
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(p1, p2, name, p4, p5, p6, p7, p8) \
+ case k##name:
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 5) {
+ HValue* w = Pop();
+ HValue* z = Pop();
+ HValue* y = Pop();
+ HValue* x = Pop();
+ Drop(2); // Receiver and function.
+ HValue* context = environment()->context();
+ HInstruction* op =
+ HQuarternarySIMDOperation::New(zone(), context, x, y, z, w, id);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ } else if (CpuFeatures::SupportsSIMD128InCrankshaft() &&
+ argument_count == 2) {
+ if (id == kFloat32x4Constructor) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HUnarySIMDOperation>(argument, kFloat32x4Coercion);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ } else if (id == kInt32x4Constructor) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HUnarySIMDOperation>(argument, kInt32x4Coercion);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ } else if (id == kFloat64x2Constructor) {
+ HValue* argument = Pop();
+ Drop(2); // Receiver and function.
+ HInstruction* op =
+ NewUncasted<HUnarySIMDOperation>(argument, kFloat64x2Coercion);
+ ast_context()->ReturnInstruction(op, expr->id());
+ return true;
+ } else {
+ return false;
+ }
+ }
+ break;
+ case kFloat32x4ArrayGetAt:
+ case kFloat64x2ArrayGetAt:
+ case kInt32x4ArrayGetAt:
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 2) {
+ HValue* key = Pop();
+ HValue* typed32x4_array = Pop();
+ DCHECK(typed32x4_array == receiver);
+ Drop(1); // Drop function.
+ HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
+ typed32x4_array, key, NULL,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ receiver_map->elements_kind(),
+ LOAD, // is_store.
+ NEVER_RETURN_HOLE, // load_mode.
+ STANDARD_STORE);
+ ast_context()->ReturnValue(instr);
+ return true;
+ }
+ break;
+ case kFloat32x4ArraySetAt:
+ case kFloat64x2ArraySetAt:
+ case kInt32x4ArraySetAt:
+ if (CpuFeatures::SupportsSIMD128InCrankshaft() && argument_count == 3) {
+ HValue* value = Pop();
+ HValue* key = Pop();
+ HValue* typed32x4_array = Pop();
+ DCHECK(typed32x4_array == receiver);
+ Drop(1); // Drop function.
+ // TODO(haitao): add STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS.
+ KeyedAccessStoreMode store_mode = STANDARD_STORE;
+ BuildUncheckedMonomorphicElementAccess(
+ typed32x4_array, key, value,
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ receiver_map->elements_kind(),
+ STORE, // is_store.
+ NEVER_RETURN_HOLE, // load_mode.
+ store_mode);
+ Push(value);
+ Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ ast_context()->ReturnValue(Pop());
+ return true;
+ }
+ break;
default:
// Not yet supported for inlining.
break;
if (prop->key()->IsPropertyName() && types->length() > 0) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- PropertyAccessInfo info(this, LOAD, ToType(types->first()), name);
+ PropertyAccessInfo info(this, LOAD, ToType(types->first()), name,
+ types->first()->instance_type());
if (!info.CanAccessAsMonomorphic(types)) {
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
length);
HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+ if (IsFixedFloat32x4ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kFloat32x4Zero);
+ } else if (IsFixedFloat64x2ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kFloat64x2Zero);
+ } else if (IsFixedInt32x4ElementsKind(fixed_elements_kind)) {
+ filler = AddUncasted<HNullarySIMDOperation>(kInt32x4Zero);
+ }
{
LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
DoubleRegister::AllocationIndexToString(assigned_reg));
+ } else if (op->IsFloat32x4Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
+ } else if (op->IsFloat64x2Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
+ } else if (op->IsInt32x4Register()) {
+ trace_.Add(" \"%s\"",
+ SIMD128Register::AllocationIndexToString(assigned_reg));
} else {
DCHECK(op->IsRegister());
trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
LOperand* op = range->TopLevel()->GetSpillOperand();
if (op->IsDoubleStackSlot()) {
trace_.Add(" \"double_stack:%d\"", op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ trace_.Add(" \"float32x4_stack:%d\"", op->index());
+ } else if (op->IsFloat64x2StackSlot()) {
+ trace_.Add(" \"float64x2_stack:%d\"", op->index());
+ } else if (op->IsInt32x4StackSlot()) {
+ trace_.Add(" \"int32x4_stack:%d\"", op->index());
} else {
DCHECK(op->IsStackSlot());
trace_.Add(" \"stack:%d\"", op->index());
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
PropertyAccessType access_type,
Type* type,
- Handle<String> name)
+ Handle<String> name,
+ InstanceType instance_type)
: lookup_(builder->isolate()),
builder_(builder),
access_type_(access_type),
type_(type),
name_(name),
field_type_(HType::Tagged()),
- access_(HObjectAccess::ForMap()) { }
+ access_(HObjectAccess::ForMap()),
+ instance_type_(instance_type) { }
// Checkes whether this PropertyAccessInfo can be handled as a monomorphic
// load named. It additionally fills in the fields necessary to generate the
bool IsConfigurable() const { return lookup_.IsConfigurable(); }
bool IsReadOnly() const { return lookup_.IsReadOnly(); }
+  // Returns true when this property access is a getter on a SIMD128 value
+  // that Crankshaft can inline as an intrinsic:
+  //   - signMask/x/y/z/w on float32x4 and int32x4,
+  //   - flagX..flagW on int32x4 only,
+  //   - signMask/x/y on float64x2 (it has only two lanes).
+  bool IsSIMD128PropertyCallback() {
+    return (((instance_type_ == Float32x4::kInstanceType ||
+              instance_type_ == Int32x4::kInstanceType) &&
+             (name_->Equals(isolate()->heap()->signMask()) ||
+              name_->Equals(isolate()->heap()->x()) ||
+              name_->Equals(isolate()->heap()->y()) ||
+              name_->Equals(isolate()->heap()->z()) ||
+              name_->Equals(isolate()->heap()->w()))) ||
+            (instance_type_ == Int32x4::kInstanceType &&
+             (name_->Equals(isolate()->heap()->flagX()) ||
+              name_->Equals(isolate()->heap()->flagY()) ||
+              name_->Equals(isolate()->heap()->flagZ()) ||
+              name_->Equals(isolate()->heap()->flagW()))) ||
+            (instance_type_ == Float64x2::kInstanceType &&
+             (name_->Equals(isolate()->heap()->signMask()) ||
+              name_->Equals(isolate()->heap()->x()) ||
+              name_->Equals(isolate()->heap()->y()))));
+  }
private:
Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
SmallMapList field_maps_;
HType field_type_;
HObjectAccess access_;
+ InstanceType instance_type_;
};
HInstruction* BuildMonomorphicAccess(PropertyAccessInfo* info,
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+// NOTE(review): unconditionally true on this architecture; presumably the
+// required SSE level is guaranteed by the port's baseline — confirm.
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return true; }
static const byte kCallOpcode = 0xE8;
}
+// Builds a memory operand addressing the same registers as |operand| but
+// with |offset| added to its displacement. The ModR/M (and SIB) bytes are
+// re-encoded using the shortest displacement form that can represent the
+// new value. Register-direct operands (mode 3) are not allowed.
+// NOTE(review): REX bits of |operand| are not visibly copied here — verify
+// they are handled by the surrounding class invariants.
+Operand::Operand(const Operand& operand, int32_t offset) {
+  DCHECK(operand.len_ >= 1);
+  // Operand encodes REX ModR/M [SIB] [Disp].
+  byte modrm = operand.buf_[0];
+  DCHECK(modrm < 0xC0);  // Disallow mode 3 (register target).
+  bool has_sib = ((modrm & 0x07) == 0x04);
+  byte mode = modrm & 0xC0;
+  int disp_offset = has_sib ? 2 : 1;
+  int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
+  // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+  // displacement.
+  bool is_baseless = (mode == 0) && (base_reg == 0x05);  // No base or RIP base.
+  // Decode the existing displacement (0 for mode 0 with a plain base).
+  int32_t disp_value = 0;
+  if (mode == 0x80 || is_baseless) {
+    // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+    disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
+  } else if (mode == 0x40) {
+    // Mode 1: Byte displacement.
+    disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+  }
+
+  // Write new operand with same registers, but with modified displacement.
+  DCHECK(offset >= 0 ? disp_value + offset >= disp_value
+                     : disp_value + offset < disp_value);  // No overflow.
+  disp_value += offset;
+  if (!is_int8(disp_value) || is_baseless) {
+    // Need 32 bits of displacement, mode 2 or mode 1 with register rbp/r13.
+    buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+    len_ = disp_offset + 4;
+    Memory::int32_at(&buf_[disp_offset]) = disp_value;
+  } else if (disp_value != 0 || (base_reg == 0x05)) {
+    // Need 8 bits of displacement.
+    buf_[0] = (modrm & 0x3f) | 0x40;  // Mode 1.
+    len_ = disp_offset + 1;
+    buf_[disp_offset] = static_cast<byte>(disp_value);
+  } else {
+    // Need no displacement.
+    buf_[0] = (modrm & 0x3f);  // Mode 0.
+    len_ = disp_offset;
+  }
+  // Scale/index/base are unchanged, so the original SIB byte carries over.
+  if (has_sib) {
+    buf_[1] = operand.buf_[1];
+  }
+}
+
+
bool Operand::is_reg(Register reg) const {
return ((buf_[0] & 0xF8) == 0xC0) // addressing mode is register only.
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
+// Emits xorpd dst, src (SSE2 bitwise XOR of packed doubles): 66 0F 57 /r.
+void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::andps(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
}
+// Emits addpd dst, src (SSE2 packed-double add): 66 0F 58 /r.
+void Assembler::addpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits subpd dst, src (SSE2 packed-double subtract): 66 0F 5C /r.
+void Assembler::subpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits mulpd dst, src (SSE2 packed-double multiply): 66 0F 59 /r.
+void Assembler::mulpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits divpd dst, src (SSE2 packed-double divide): 66 0F 5E /r.
+void Assembler::divpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
}
+// Emits andpd dst, src (SSE2 bitwise AND of packed doubles): 66 0F 54 /r.
+void Assembler::andpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+// Emits pcmpgtd dst, src (SSE2 packed signed dword greater-than compare):
+// 66 0F 66 /r. Note the second 0x66 is the opcode, not a repeated prefix.
+void Assembler::pcmpgtd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x66);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
}
+// Emits movups dst, src (unaligned 128-bit load): 0F 10 /r.
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x10);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits movups dst, src (unaligned 128-bit store): 0F 11 /r.
+// Operand order to emit_sse_operand is (reg, mem), hence (src, dst).
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x11);
+  emit_sse_operand(src, dst);
+}
+
+
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
}
+// Emits shufpd dst, src, imm8 (SSE2 packed-double shuffle): 66 0F C6 /r ib.
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xC6);
+  emit_sse_operand(dst, src);
+  EMIT(imm8);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+// Emits pslld reg, imm8 (shift packed dwords left by immediate):
+// 66 0F 72 /6 ib. The /6 opcode extension goes in the ModR/M reg field,
+// which is why esi (register code 6) is passed as the first operand.
+void Assembler::pslld(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x72);
+  emit_sse_operand(esi, reg);  // esi == 6
+  EMIT(shift);
+}
+
+
+// Emits pslld dst, src (shift packed dwords left by xmm count): 66 0F F2 /r.
+void Assembler::pslld(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xF2);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits psrld reg, imm8 (logical shift packed dwords right by immediate):
+// 66 0F 72 /2 ib. edx (register code 2) encodes the /2 opcode extension.
+void Assembler::psrld(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x72);
+  emit_sse_operand(edx, reg);  // edx == 2
+  EMIT(shift);
+}
+
+
+// Emits psrld dst, src (logical shift packed dwords right by xmm count):
+// 66 0F D2 /r.
+void Assembler::psrld(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xD2);
+  emit_sse_operand(dst, src);
+}
+
+
+// Emits psrad reg, imm8 (arithmetic shift packed dwords right by immediate):
+// 66 0F 72 /4 ib. esp (register code 4) encodes the /4 opcode extension.
+void Assembler::psrad(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x72);
+  emit_sse_operand(esp, reg);  // esp == 4
+  EMIT(shift);
+}
+
+
+// Emits psrad dst, src (arithmetic shift packed dwords right by xmm count):
+// 66 0F E2 /r.
+void Assembler::psrad(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xE2);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+// Emits psrldq dst, imm8 (byte-wise shift of the whole 128-bit register
+// right): 66 0F 73 /3 ib. ebx (register code 3) encodes the /3 extension.
+void Assembler::psrldq(XMMRegister dst, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x73);
+  emit_sse_operand(ebx, dst);  // ebx == 3
+  EMIT(shift);
+}
+
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
EMIT(0x66);
}
+// Packed arithmetic, xmm <- op(xmm, xmm/m128).  Register-register overloads
+// are defined inline in the header and forward here via Operand(src).
+// minps: 0F 5D /r.
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+
+// maxps: 0F 5F /r.
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+
+// minpd: 66 0F 5D /r.
+void Assembler::minpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x5D);
+  emit_sse_operand(dst, src);
+}
+
+
+// maxpd: 66 0F 5F /r.
+void Assembler::maxpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x5F);
+  emit_sse_operand(dst, src);
+}
+
+
+// rcpps (approximate reciprocal): 0F 53 /r.
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x53);
+  emit_sse_operand(dst, src);
+}
+
+
+// rsqrtps (approximate reciprocal sqrt): 0F 52 /r.
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x52);
+  emit_sse_operand(dst, src);
+}
+
+
+// sqrtps: 0F 51 /r.
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x51);
+  emit_sse_operand(dst, src);
+}
+
+
+// sqrtpd: 66 0F 51 /r.
+void Assembler::sqrtpd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x51);
+  emit_sse_operand(dst, src);
+}
+
+
+// cvtdq2ps (packed int32 -> float32): 0F 5B /r.
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+
+// paddd (packed dword add): 66 0F FE /r.
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xFE);
+  emit_sse_operand(dst, src);
+}
+
+
+// psubd (packed dword subtract): 66 0F FA /r.
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xFA);
+  emit_sse_operand(dst, src);
+}
+
+
+// pmulld (packed dword multiply, low 32 bits): SSE4.1, 66 0F 38 40 /r.
+// Requires an active SSE4_1 CpuFeatureScope.
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x38);
+  EMIT(0x40);
+  emit_sse_operand(dst, src);
+}
+
+
+// pmuludq (unsigned dword multiply, 64-bit results): 66 0F F4 /r.
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xF4);
+  emit_sse_operand(dst, src);
+}
+
+
+// Interleave low dwords of dst/src.  Encoding: 66 0F 62 /r.
+// NOTE(review): canonical mnemonic is "punpckldq"; the extra 'a' here is a
+// project-local spelling kept for interface compatibility.
+void Assembler::punpackldq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x62);
+  emit_sse_operand(dst, src);
+}
+
+
+// cvtps2dq (packed float32 -> int32): 66 0F 5B /r.
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x5B);
+  emit_sse_operand(dst, src);
+}
+
+
+// cmpps xmm, xmm, imm8: packed single compare; |cmp| is the predicate
+// (0=eq, 1=lt, 2=le, 4=neq, 5=nlt, 6=nle).  Encoding: 0F C2 /r ib.
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xC2);
+  emit_sse_operand(dst, src);
+  EMIT(cmp);
+}
+
+
+// Named wrappers over cmpps for each supported predicate (the unord/ord
+// predicates 3 and 7 are deliberately not wrapped).
+void Assembler::cmpeqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x0);
+}
+
+
+void Assembler::cmpltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x1);
+}
+
+
+void Assembler::cmpleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x2);
+}
+
+
+void Assembler::cmpneqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x4);
+}
+
+
+void Assembler::cmpnltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x5);
+}
+
+
+void Assembler::cmpnleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x6);
+}
+
+
+// insertps: SSE4.1 -- insert a selected float32 lane of |src| into |dst|
+// (and optionally zero lanes) per the countS/countD/zmask fields of |imm8|.
+// Encoding: 66 0F 3A 21 /r ib.
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+  // Use IsEnabled() (scope-checked) for consistency with pmulld() above,
+  // rather than the weaker CpuFeatures::IsSupported().
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x21);
+  emit_sse_operand(dst, src);
+  EMIT(imm8);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
void Assembler::RecordComment(const char* msg, bool force) {
if (FLAG_code_comments || force) {
- EnsureSpace ensure_space(this);
+ EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
typedef XMMRegister DoubleRegister;
-
+typedef XMMRegister SIMD128Register;
const XMMRegister xmm0 = { 0 };
const XMMRegister xmm1 = { 1 };
times_2 = 1,
times_4 = 2,
times_8 = 3,
+ maximal_scale_factor = times_8,
times_int_size = times_4,
times_half_pointer_size = times_2,
times_pointer_size = times_4,
int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE32);
+ // Offset from existing memory operand.
+ // Offset is added to existing displacement as 32-bit signed values and
+ // this must not overflow.
+ Operand(const Operand& base, int32_t offset);
+
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
// SSE instructions
void movaps(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void andps(XMMRegister dst, const Operand& src);
void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
void divps(XMMRegister dst, const Operand& src);
void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
+ void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
+ void minps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
+ void maxps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, XMMRegister src) { sqrtps(dst, Operand(src)); }
+ void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtpd(XMMRegister dst, XMMRegister src) { sqrtpd(dst, Operand(src)); }
+ void sqrtpd(XMMRegister dst, const Operand& src);
+
+ void addpd(XMMRegister dst, const Operand& src);
+ void addpd(XMMRegister dst, XMMRegister src) { addpd(dst, Operand(src)); }
+ void subpd(XMMRegister dst, const Operand& src);
+ void subpd(XMMRegister dst, XMMRegister src) { subpd(dst, Operand(src)); }
+ void mulpd(XMMRegister dst, const Operand& src);
+ void mulpd(XMMRegister dst, XMMRegister src) { mulpd(dst, Operand(src)); }
+ void divpd(XMMRegister dst, const Operand& src);
+ void divpd(XMMRegister dst, XMMRegister src) { divpd(dst, Operand(src)); }
+ void minpd(XMMRegister dst, XMMRegister src) { minpd(dst, Operand(src)); }
+ void minpd(XMMRegister dst, const Operand& src);
+ void maxpd(XMMRegister dst, XMMRegister src) { maxpd(dst, Operand(src)); }
+ void maxpd(XMMRegister dst, const Operand& src);
+
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmpeqps(XMMRegister dst, XMMRegister src);
+ void cmpltps(XMMRegister dst, XMMRegister src);
+ void cmpleps(XMMRegister dst, XMMRegister src);
+ void cmpneqps(XMMRegister dst, XMMRegister src);
+ void cmpnltps(XMMRegister dst, XMMRegister src);
+ void cmpnleps(XMMRegister dst, XMMRegister src);
+
+ // SSE 2, introduced by SIMD
+ void paddd(XMMRegister dst, XMMRegister src) { paddd(dst, Operand(src)); }
+ void paddd(XMMRegister dst, const Operand& src);
+ void psubd(XMMRegister dst, XMMRegister src) { psubd(dst, Operand(src)); }
+ void psubd(XMMRegister dst, const Operand& src);
+ void pmuludq(XMMRegister dst, XMMRegister src) { pmuludq(dst, Operand(src)); }
+ void pmuludq(XMMRegister dst, const Operand& src);
+ void punpackldq(XMMRegister dst, XMMRegister src) {
+ punpackldq(dst, Operand(src));
+ }
+ void punpackldq(XMMRegister dst, const Operand& src);
+ void cvtps2dq(XMMRegister dst, XMMRegister src) {
+ cvtps2dq(dst, Operand(src));
+ }
+ void cvtps2dq(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ cvtdq2ps(dst, Operand(src));
+ }
+ // SSE 4.1, introduced by SIMD
+ void insertps(XMMRegister dst, XMMRegister src, byte imm8);
+ void pmulld(XMMRegister dst, XMMRegister src) { pmulld(dst, Operand(src)); }
+ void pmulld(XMMRegister dst, const Operand& src);
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, const Operand& src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
void andpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, const Operand& src);
void orpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
+ void pcmpgtd(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t shift);
void psllq(XMMRegister dst, XMMRegister src);
+ void pslld(XMMRegister reg, int8_t shift);
+ void pslld(XMMRegister dst, XMMRegister src);
+ void psrld(XMMRegister reg, int8_t shift);
+ void psrld(XMMRegister dst, XMMRegister src);
+ void psrad(XMMRegister reg, int8_t shift);
+ void psrad(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
+ void psrldq(XMMRegister dst, int8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ simd128_value_t zero = {{0.0, 0.0}};
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
+ input_->SetSIMD128Register(i, zero);
}
// Fill the frame content from the actual data on the frame.
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
+ simd128_value_t xmm_value = input_->GetSIMD128Register(i);
+ output_frame->SetSIMD128Register(i, xmm_value);
}
}
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kMaxNumAllocatableRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
+ const int kXMMRegsSize = kSIMD128Size *
+ XMMRegister::kMaxNumAllocatableRegisters;
+ __ sub(esp, Immediate(kXMMRegsSize));
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(esp, offset), xmm_reg);
+ int offset = i * kSIMD128Size;
+ __ movups(Operand(esp, offset), xmm_reg);
}
__ pushad();
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
- kDoubleRegsSize;
+ kXMMRegsSize;
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
__ pop(Operand(ebx, offset));
}
- int double_regs_offset = FrameDescription::double_registers_offset();
+ int xmm_regs_offset = FrameDescription::simd128_registers_offset();
// Fill in the double input registers.
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movsd(xmm0, Operand(esp, src_offset));
- __ movsd(Operand(ebx, dst_offset), xmm0);
+ int dst_offset = i * kSIMD128Size + xmm_regs_offset;
+ int src_offset = i * kSIMD128Size;
+ __ movups(xmm0, Operand(esp, src_offset));
+ __ movups(Operand(ebx, dst_offset), xmm0);
}
// Clear FPU all exceptions.
__ fnclex();
// Remove the bailout id, return address and the double registers.
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(esp, Immediate(kXMMRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
// In case of a failed STUB, we have to restore the XMM registers.
for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(ebx, src_offset));
+ int src_offset = i * kSIMD128Size + xmm_regs_offset;
+ __ movups(xmm_reg, Operand(ebx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
}
+// FrameDescription now stores XMM state as full 128-bit values; the legacy
+// double accessors alias lane 0 (d[0]) of the corresponding simd128 slot.
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+  DCHECK(n < arraysize(simd128_registers_));
+  return simd128_registers_[n].d[0];
+}
+
+
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+  DCHECK(n < arraysize(simd128_registers_));
+  simd128_registers_[n].d[0] = value;
+}
+
+
#undef __
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ } else if (f0byte == 0x10) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("movups %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte == 0x11) {
+ AppendToBuffer("movups ");
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (f0byte >= 0x51 && f0byte <= 0x5F) {
const char* const pseudo_op[] = {
+ "sqrtps",
+ "rsqrtps",
"rcpps",
"andps",
"andnps",
"subps",
"minps",
"divps",
- "maxps",
+ "maxps"
};
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
AppendToBuffer("%s %s,",
- pseudo_op[f0byte - 0x53],
+ pseudo_op[f0byte - 0x51],
NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+    } else if (f0byte == 0xC2) {
+      // cmpps xmm, xmm, imm8.  Predicate names per Intel manual 2A,
+      // Table 3-11.
+      data += 2;
+      int mod, regop, rm;
+      get_modrm(*data, &mod, &regop, &rm);
+      const char* const pseudo_op[] = {
+        "cmpeqps",
+        "cmpltps",
+        "cmpleps",
+        "cmpunordps",
+        "cmpneqps",
+        "cmpnltps",
+        "cmpnleps",
+        "cmpordps"
+      };
+      // Mask the immediate: only 3 bits select a predicate, and a malformed
+      // imm8 must not index past the 8-entry table.
+      AppendToBuffer("%s %s,%s",
+                     pseudo_op[data[1] & 0x07],
+                     NameOfXMMRegister(regop),
+                     NameOfXMMRegister(rm));
+      data += 2;
} else if (f0byte== 0xC6) {
// shufps xmm, xmm/m128, imm8
data += 2;
NameOfXMMRegister(regop),
static_cast<int>(imm8));
data += 2;
+    } else if (f0byte == 0x5B) {
+      // cvtdq2ps xmm, xmm/m128 (0F 5B /r).  The destination is the ModRM
+      // reg field (regop), not rm.
+      // NOTE(review): this arm looks unreachable -- 0x5B already falls in the
+      // 0x51..0x5F pseudo_op range handled above; confirm and drop one copy.
+      data += 2;
+      int mod, regop, rm;
+      get_modrm(*data, &mod, &regop, &rm);
+      AppendToBuffer("cvtdq2ps %s,",
+                     NameOfXMMRegister(regop));
+      data += PrintRightXMMOperand(data);
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+    } else if (*data == 0x40) {
+      // pmulld xmm, xmm/m128 (66 0F 38 40 /r).
+      data++;
+      int mod, regop, rm;
+      get_modrm(*data, &mod, &regop, &rm);
+      // The format string had two %s conversions but only one argument
+      // (undefined behavior); the source operand is appended by
+      // PrintRightXMMOperand below.
+      AppendToBuffer("pmulld %s,",
+                     NameOfXMMRegister(regop));
+      data += PrintRightXMMOperand(data);
} else if (*data == 0x2A) {
// movntdqa
data++;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x21) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("insertps %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x17) {
data++;
int mod, regop, rm;
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+    } else if (*data == 0x51) {
+      // sqrtpd xmm, xmm/m128 (66 0F 51 /r).  Decode the right-hand side with
+      // PrintRightXMMOperand so memory operands are handled too -- the
+      // assembler emits sqrtpd with an Operand source.  For a register
+      // operand the printed text is unchanged.
+      data++;
+      int mod, regop, rm;
+      get_modrm(*data, &mod, &regop, &rm);
+      AppendToBuffer("sqrtpd %s,",
+                     NameOfXMMRegister(regop));
+      data += PrintRightXMMOperand(data);
} else if (*data == 0x54) {
data++;
int mod, regop, rm;
data++;
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ AppendToBuffer("xorpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x58) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("addpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x59) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("mulpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5B) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("cvtps2dq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5C) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("subpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5D) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("minpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("divpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x5F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("maxpd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0x62) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("punpackldq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xF4) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pmuludq %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xFA) {
data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psubd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (*data == 0xFE) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("paddd %s,",
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
} else if (*data == 0x6E) {
data++;
int mod, regop, rm;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x66) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pcmpgtd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x76) {
data++;
int mod, regop, rm;
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0xF2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("pslld %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+    } else if (*data == 0x72) {
+      // Immediate-count dword shifts: 66 0F 72 /6 ib = pslld,
+      // /2 ib = psrld, /4 ib = psrad.  The /digit is in the ModRM reg field.
+      data++;
+      int mod, regop, rm;
+      get_modrm(*data, &mod, &regop, &rm);
+      int8_t imm8 = static_cast<int8_t>(data[1]);
+      // Include esp (/4): the ternary below already prints "psrad", but the
+      // original DCHECK rejected it.
+      DCHECK(regop == esi || regop == edx || regop == esp);
+      AppendToBuffer("%s %s,%d",
+                     (regop == esi) ? "pslld"
+                                    : ((regop == edx) ? "psrld" : "psrad"),
+                     NameOfXMMRegister(rm),
+                     static_cast<int>(imm8));
+      data += 2;
+ } else if (*data == 0xC6) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufpd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0xD2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psrld %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0xD3) {
data++;
int mod, regop, rm;
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xE2) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("psrad %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
}
+// Maps an allocation index to its XMM register; SIMD128 values share the
+// double-register allocation order.
+XMMRegister LCodeGen::ToSIMD128Register(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
DCHECK(op->IsRegister());
return ToRegister(op->index());
}
+// Typed accessors for SIMD128 operands.  Each asserts the operand's LOperand
+// kind and then resolves the allocation index to an XMM register.
+XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
+  DCHECK(op->IsFloat32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
+  DCHECK(op->IsFloat64x2Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
+  DCHECK(op->IsInt32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+// Accepts any of the three SIMD128 register kinds.
+XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
+  DCHECK(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
+         op->IsInt32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
return ToRepresentation(op, Representation::Integer32());
}
Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ if (op->IsFloat32x4Register()) return Operand(ToFloat32x4Register(op));
+ if (op->IsFloat64x2Register()) return Operand(ToFloat64x2Register(op));
+ if (op->IsInt32x4Register()) return Operand(ToInt32x4Register(op));
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot() ||
+ op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
+ op->IsInt32x4StackSlot());
if (NeedsEagerFrame()) {
return Operand(ebp, StackSlotOffset(op->index()));
} else {
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT32x4_STACK_SLOT);
+ } else if (op->IsFloat64x2StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT64x2_STACK_SLOT);
+ } else if (op->IsInt32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::INT32x4_STACK_SLOT);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
+ } else if (op->IsFloat32x4Register()) {
+ XMMRegister reg = ToFloat32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
+ } else if (op->IsFloat64x2Register()) {
+ XMMRegister reg = ToFloat64x2Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
+ } else if (op->IsInt32x4Register()) {
+ XMMRegister reg = ToInt32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
+ } else if (r.IsSIMD128()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, no_condition);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
}
+// Slow path for boxing a SIMD128 value: calls runtime allocator |id| and
+// stores the tagged result back into the instruction's result slot.
+void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
+                                         Runtime::FunctionId id) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Move(reg, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  // Runtime calls expect the context in esi.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+// Adjusts |key| (held in a register) for an external-array access.  When the
+// element shift exceeds the largest x86 scale factor (times_8), the key is
+// pre-shifted by the excess so BuildFastArrayOperand can use
+// maximal_scale_factor; otherwise the smi key is simply untagged.
+void LCodeGen::HandleExternalArrayOpRequiresTemp(
+    LOperand* key,
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+    int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
+        static_cast<int>(maximal_scale_factor);
+    if (key_representation.IsSmi()) {
+      // The smi tag already contributes one bit of the shift.
+      pre_shift_size -= kSmiTagSize;
+    }
+    DCHECK(pre_shift_size > 0);
+    __ shl(ToRegister(key), pre_shift_size);
+  } else {
+    __ SmiUntag(ToRegister(key));
+  }
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
+ ExternalArrayOpRequiresTemp(
+ instr->hydrogen()->key()->representation(), elements_kind)) {
+ HandleExternalArrayOpRequiresTemp(
+ key, instr->hydrogen()->key()->representation(), elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(ToSIMD128Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
((constant_value) << shift_size)
+ base_offset);
} else {
- // Take the tag bit into account while computing the shift size.
- if (key_representation.IsSmi() && (shift_size >= 1)) {
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ // Make sure the key is pre-scaled against maximal_scale_factor.
+ shift_size = static_cast<int>(maximal_scale_factor);
+ } else if (key_representation.IsSmi() && (shift_size >= 1)) {
+ // Take the tag bit into account while computing the shift size.
shift_size -= kSmiTagSize;
}
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
if (!key->IsConstantOperand() &&
ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
elements_kind)) {
- __ SmiUntag(ToRegister(key));
+ HandleExternalArrayOpRequiresTemp(
+ key, instr->hydrogen()->key()->representation(), elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(operand, ToSIMD128Register(instr->value()));
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
}
+// Boxes an XMM-resident SIMD128 value into a heap object of type T
+// (Float32x4 / Float64x2 / Int32x4): fast-path inline allocation with a
+// deferred runtime fallback, then stores the raw 128 bits into the object's
+// inner FixedTypedArray backing store.
+template<class T>
+void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
+  // Deferred slow path: allocate via the runtime function chosen by T.
+  class DeferredSIMD128ToTagged FINAL : public LDeferredCode {
+   public:
+    DeferredSIMD128ToTagged(LCodeGen* codegen,
+                            LInstruction* instr,
+                            Runtime::FunctionId id)
+        : LDeferredCode(codegen), instr_(instr), id_(id) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LInstruction* instr_;
+    Runtime::FunctionId id_;
+  };
+
+  XMMRegister input_reg = ToSIMD128Register(instr->value());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+  Register tmp2 = ToRegister(instr->temp2());
+
+  DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
+      this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+
+  if (FLAG_inline_new) {
+    // Try inline allocation; jump to the deferred path on failure.
+    if (T::kInstanceType == FLOAT32x4_TYPE) {
+      __ AllocateFloat32x4(reg, tmp, tmp2, deferred->entry());
+    } else if (T::kInstanceType == INT32x4_TYPE) {
+      __ AllocateInt32x4(reg, tmp, tmp2, deferred->entry());
+    } else if (T::kInstanceType == FLOAT64x2_TYPE) {
+      __ AllocateFloat64x2(reg, tmp, tmp2, deferred->entry());
+    }
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+
+  // Load the inner FixedTypedArray object.
+  __ mov(tmp, FieldOperand(reg, T::kValueOffset));
+
+  // Unaligned 128-bit store of the lanes into the backing store.
+  __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
+}
+
+
+// Dispatches on the operand's SIMD128 kind to the templated helper above.
+void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
+  if (instr->value()->IsFloat32x4Register()) {
+    HandleSIMD128ToTagged<Float32x4>(instr);
+  } else if (instr->value()->IsFloat64x2Register()) {
+    HandleSIMD128ToTagged<Float64x2>(instr);
+  } else {
+    DCHECK(instr->value()->IsInt32x4Register());
+    HandleSIMD128ToTagged<Int32x4>(instr);
+  }
+}
+
+
+// Unboxes a tagged heap object of type T into an XMM register, deoptimizing
+// if the input is a smi or not an instance of T.
+template<class T>
+void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsSIMD128Register());
+
+  Register input_reg = ToRegister(input);
+  Register temp_reg = ToRegister(instr->temp());
+  XMMRegister result_reg = ToSIMD128Register(result);
+
+  // Deopt on smi or wrong instance type.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr, "value is smi");
+  __ CmpObjectType(input_reg, T::kInstanceType, temp_reg);
+  DeoptimizeIf(not_equal, instr, "value is not simd128");
+
+  // Load the inner FixedTypedArray object.
+  __ mov(temp_reg, FieldOperand(input_reg, T::kValueOffset));
+
+  // Unaligned 128-bit load of the lanes from the backing store.
+  __ movups(
+      result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
+}
+
+
+// Dispatches on the target representation to the templated helper above.
+void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
+  if (instr->representation().IsFloat32x4()) {
+    HandleTaggedToSIMD128<Float32x4>(instr);
+  } else if (instr->representation().IsFloat64x2()) {
+    HandleTaggedToSIMD128<Float64x2>(instr);
+  } else {
+    DCHECK(instr->representation().IsInt32x4());
+    HandleTaggedToSIMD128<Int32x4>(instr);
+  }
+}
+
+
+// Zero-argument SIMD constructors: materialize an all-zero vector using the
+// xor-with-self idiom (xorps/xorpd), which needs no constant pool load.
+void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Zero: {
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    case kFloat64x2Zero: {
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      __ xorpd(result_reg, result_reg);
+      return;
+    }
+    case kInt32x4Zero: {
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
+ uint8_t select = 0;
+ switch (instr->op()) {
+ case kFloat32x4Coercion: {
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ return;
+ }
+ case kFloat64x2Coercion: {
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ return;
+ }
+ case kInt32x4Coercion: {
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ return;
+ }
+ case kSIMD128Change: {
+      Comment(";;; deoptimize: cannot perform representation change "
+              "for float32x4 or int32x4");
+      DeoptimizeIf(no_condition, instr, "cannot perform representation change "
+                   "for float32x4 or int32x4");
+ return;
+ }
+ case kFloat32x4Abs:
+ case kFloat32x4Neg:
+ case kFloat32x4Reciprocal:
+ case kFloat32x4ReciprocalSqrt:
+ case kFloat32x4Sqrt: {
+ DCHECK(instr->value()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ switch (instr->op()) {
+ case kFloat32x4Abs:
+ __ absps(input_reg);
+ break;
+ case kFloat32x4Neg:
+ __ negateps(input_reg);
+ break;
+ case kFloat32x4Reciprocal:
+ __ rcpps(input_reg, input_reg);
+ break;
+ case kFloat32x4ReciprocalSqrt:
+ __ rsqrtps(input_reg, input_reg);
+ break;
+ case kFloat32x4Sqrt:
+ __ sqrtps(input_reg, input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt: {
+ DCHECK(instr->value()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ switch (instr->op()) {
+ case kFloat64x2Abs:
+ __ abspd(input_reg);
+ break;
+ case kFloat64x2Neg:
+ __ negatepd(input_reg);
+ break;
+ case kFloat64x2Sqrt:
+ __ sqrtpd(input_reg, input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kInt32x4Not:
+ case kInt32x4Neg: {
+ DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ switch (instr->op()) {
+ case kInt32x4Not:
+ __ notps(input_reg);
+ break;
+ case kInt32x4Neg:
+ __ pnegd(input_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat32x4BitsToInt32x4:
+ case kFloat32x4ToInt32x4: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ if (instr->op() == kFloat32x4BitsToInt32x4) {
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ } else {
+ DCHECK(instr->op() == kFloat32x4ToInt32x4);
+ __ cvtps2dq(result_reg, input_reg);
+ }
+ return;
+ }
+ case kInt32x4BitsToFloat32x4:
+ case kInt32x4ToFloat32x4: {
+ DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ if (instr->op() == kInt32x4BitsToFloat32x4) {
+ if (!result_reg.is(input_reg)) {
+ __ movaps(result_reg, input_reg);
+ }
+ } else {
+ DCHECK(instr->op() == kInt32x4ToFloat32x4);
+ __ cvtdq2ps(result_reg, input_reg);
+ }
+ return;
+ }
+ case kFloat32x4Splat: {
+ DCHECK(instr->hydrogen()->value()->representation().IsDouble());
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtsd2ss(xmm_scratch, input_reg);
+ __ shufps(xmm_scratch, xmm_scratch, 0x0);
+ __ movaps(result_reg, xmm_scratch);
+ return;
+ }
+ case kInt32x4Splat: {
+ DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+ Register input_reg = ToRegister(instr->value());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ __ movd(result_reg, input_reg);
+ __ shufps(result_reg, result_reg, 0x0);
+ return;
+ }
+ case kInt32x4GetSignMask: {
+ DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskps(result, input_reg);
+ return;
+ }
+ case kFloat32x4GetSignMask: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskps(result, input_reg);
+ return;
+ }
+ case kFloat32x4GetW:
+ select++;
+ case kFloat32x4GetZ:
+ select++;
+ case kFloat32x4GetY:
+ select++;
+ case kFloat32x4GetX: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+ XMMRegister input_reg = ToFloat32x4Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
+
+ if (select == 0x0) {
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtss2sd(xmm_scratch, input_reg);
+ if (!xmm_scratch.is(result)) {
+ __ movaps(result, xmm_scratch);
+ }
+ } else {
+ __ pshufd(xmm_scratch, input_reg, select);
+ if (!xmm_scratch.is(result)) {
+ __ xorps(result, result);
+ }
+ __ cvtss2sd(result, xmm_scratch);
+ }
+ return;
+ }
+ case kFloat64x2GetSignMask: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ Register result = ToRegister(instr->result());
+ __ movmskpd(result, input_reg);
+ return;
+ }
+ case kFloat64x2GetX: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ return;
+ }
+ case kFloat64x2GetY: {
+ DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+ XMMRegister input_reg = ToFloat64x2Register(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (!input_reg.is(result)) {
+ __ movaps(result, input_reg);
+ }
+ __ shufpd(result, input_reg, 0x1);
+ return;
+ }
+ case kInt32x4GetX:
+ case kInt32x4GetY:
+ case kInt32x4GetZ:
+ case kInt32x4GetW:
+ case kInt32x4GetFlagX:
+ case kInt32x4GetFlagY:
+ case kInt32x4GetFlagZ:
+ case kInt32x4GetFlagW: {
+ DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+ bool flag = false;
+ switch (instr->op()) {
+ case kInt32x4GetFlagX:
+ flag = true;
+ case kInt32x4GetX:
+ break;
+ case kInt32x4GetFlagY:
+ flag = true;
+ case kInt32x4GetY:
+ select = 0x1;
+ break;
+ case kInt32x4GetFlagZ:
+ flag = true;
+ case kInt32x4GetZ:
+ select = 0x2;
+ break;
+ case kInt32x4GetFlagW:
+ flag = true;
+ case kInt32x4GetW:
+ select = 0x3;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ XMMRegister input_reg = ToInt32x4Register(instr->value());
+ Register result = ToRegister(instr->result());
+ if (select == 0x0) {
+ __ movd(result, input_reg);
+ } else {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ extractps(result, input_reg, select);
+ } else {
+ XMMRegister xmm_scratch = xmm0;
+ __ pshufd(xmm_scratch, input_reg, select);
+ __ movd(result, xmm_scratch);
+ }
+ }
+
+ if (flag) {
+ Label false_value, done;
+ __ test(result, result);
+ __ j(zero, &false_value, Label::kNear);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done, Label::kNear);
+ __ bind(&false_value);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ }
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
+  uint8_t imm8 = 0;  // lane index for the With-lane cases (kFloat32x4WithX/Y/Z/W etc.)
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ case kFloat32x4Sub:
+ case kFloat32x4Mul:
+ case kFloat32x4Div:
+ case kFloat32x4Min:
+ case kFloat32x4Max: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToFloat32x4Register(instr->right());
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ __ addps(left_reg, right_reg);
+ break;
+ case kFloat32x4Sub:
+ __ subps(left_reg, right_reg);
+ break;
+ case kFloat32x4Mul:
+ __ mulps(left_reg, right_reg);
+ break;
+ case kFloat32x4Div:
+ __ divps(left_reg, right_reg);
+ break;
+ case kFloat32x4Min:
+ __ minps(left_reg, right_reg);
+ break;
+ case kFloat32x4Max:
+ __ maxps(left_reg, right_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat32x4Scale: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister scratch_reg = xmm0;
+ __ xorps(scratch_reg, scratch_reg);
+ __ cvtsd2ss(scratch_reg, right_reg);
+ __ shufps(scratch_reg, scratch_reg, 0x0);
+ __ mulps(left_reg, scratch_reg);
+ return;
+ }
+ case kFloat64x2Add:
+ case kFloat64x2Sub:
+ case kFloat64x2Mul:
+ case kFloat64x2Div:
+ case kFloat64x2Min:
+ case kFloat64x2Max: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->right()->representation().IsFloat64x2());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToFloat64x2Register(instr->right());
+ switch (instr->op()) {
+ case kFloat64x2Add:
+ __ addpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Sub:
+ __ subpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Mul:
+ __ mulpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Div:
+ __ divpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Min:
+ __ minpd(left_reg, right_reg);
+ break;
+ case kFloat64x2Max:
+ __ maxpd(left_reg, right_reg);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat64x2Scale: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ shufpd(right_reg, right_reg, 0x0);
+ __ mulpd(left_reg, right_reg);
+ return;
+ }
+ case kFloat32x4Shuffle: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ __ shufps(left_reg, left_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+ return;
+ }
+ }
+ case kInt32x4Shuffle: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ __ pshufd(left_reg, left_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+ return;
+ }
+ }
+ case kInt32x4ShiftLeft:
+ case kInt32x4ShiftRight:
+ case kInt32x4ShiftRightArithmetic: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+ if (instr->hydrogen()->right()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+ uint8_t shift = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ switch (instr->op()) {
+ case kInt32x4ShiftLeft:
+ __ pslld(left_reg, shift);
+ break;
+ case kInt32x4ShiftRight:
+ __ psrld(left_reg, shift);
+ break;
+ case kInt32x4ShiftRightArithmetic:
+ __ psrad(left_reg, shift);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else {
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register shift = ToRegister(instr->right());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(xmm_scratch, shift);
+ switch (instr->op()) {
+ case kInt32x4ShiftLeft:
+ __ pslld(left_reg, xmm_scratch);
+ break;
+ case kInt32x4ShiftRight:
+ __ psrld(left_reg, xmm_scratch);
+ break;
+ case kInt32x4ShiftRightArithmetic:
+ __ psrad(left_reg, xmm_scratch);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+ }
+ case kFloat32x4LessThan:
+ case kFloat32x4LessThanOrEqual:
+ case kFloat32x4Equal:
+ case kFloat32x4NotEqual:
+ case kFloat32x4GreaterThanOrEqual:
+ case kFloat32x4GreaterThan: {
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToFloat32x4Register(instr->right());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ switch (instr->op()) {
+ case kFloat32x4LessThan:
+ if (result_reg.is(left_reg)) {
+ __ cmpltps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpnltps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpltps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4LessThanOrEqual:
+ if (result_reg.is(left_reg)) {
+ __ cmpleps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpnleps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpleps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4Equal:
+ if (result_reg.is(left_reg)) {
+ __ cmpeqps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpeqps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpeqps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4NotEqual:
+ if (result_reg.is(left_reg)) {
+ __ cmpneqps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpneqps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpneqps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4GreaterThanOrEqual:
+ if (result_reg.is(left_reg)) {
+ __ cmpnltps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpltps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpnltps(result_reg, right_reg);
+ }
+ break;
+ case kFloat32x4GreaterThan:
+ if (result_reg.is(left_reg)) {
+ __ cmpnleps(result_reg, right_reg);
+ } else if (result_reg.is(right_reg)) {
+ __ cmpleps(result_reg, left_reg);
+ } else {
+ __ movaps(result_reg, left_reg);
+ __ cmpnleps(result_reg, right_reg);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kInt32x4And:
+ case kInt32x4Or:
+ case kInt32x4Xor:
+ case kInt32x4Add:
+ case kInt32x4Sub:
+ case kInt32x4Mul:
+ case kInt32x4GreaterThan:
+ case kInt32x4Equal:
+ case kInt32x4LessThan: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsInt32x4());
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ XMMRegister right_reg = ToInt32x4Register(instr->right());
+ switch (instr->op()) {
+ case kInt32x4And:
+ __ andps(left_reg, right_reg);
+ break;
+ case kInt32x4Or:
+ __ orps(left_reg, right_reg);
+ break;
+ case kInt32x4Xor:
+ __ xorps(left_reg, right_reg);
+ break;
+ case kInt32x4Add:
+ __ paddd(left_reg, right_reg);
+ break;
+ case kInt32x4Sub:
+ __ psubd(left_reg, right_reg);
+ break;
+ case kInt32x4Mul:
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ pmulld(left_reg, right_reg);
+ } else {
+ // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
+ XMMRegister xmm_scratch = xmm0;
+ __ movaps(xmm_scratch, left_reg);
+ __ pmuludq(left_reg, right_reg);
+ __ psrldq(xmm_scratch, 4);
+ __ psrldq(right_reg, 4);
+ __ pmuludq(xmm_scratch, right_reg);
+ __ pshufd(left_reg, left_reg, 8);
+ __ pshufd(xmm_scratch, xmm_scratch, 8);
+ __ punpackldq(left_reg, xmm_scratch);
+ }
+ break;
+ case kInt32x4GreaterThan:
+ __ pcmpgtd(left_reg, right_reg);
+ break;
+ case kInt32x4Equal:
+ __ pcmpeqd(left_reg, right_reg);
+ break;
+ case kInt32x4LessThan: {
+ XMMRegister xmm_scratch = xmm0;
+ __ movaps(xmm_scratch, right_reg);
+ __ pcmpgtd(xmm_scratch, left_reg);
+ __ movaps(left_reg, xmm_scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return;
+ }
+ case kFloat32x4WithW:
+ imm8++;
+ case kFloat32x4WithZ:
+ imm8++;
+ case kFloat32x4WithY:
+ imm8++;
+ case kFloat32x4WithX: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat32x4Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ cvtsd2ss(xmm_scratch, right_reg);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ imm8 = imm8 << 4;
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ insertps(left_reg, xmm_scratch, imm8);
+ } else {
+ __ sub(esp, Immediate(kFloat32x4Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movss(Operand(esp, imm8 * kFloatSize), xmm_scratch);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat32x4Size));
+ }
+ return;
+ }
+ case kFloat64x2WithX: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movsd(Operand(esp, 0 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ case kFloat64x2WithY: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToFloat64x2Register(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right_reg);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ case kFloat64x2Constructor: {
+ DCHECK(instr->hydrogen()->left()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+ XMMRegister left_reg = ToDoubleRegister(instr->left());
+ XMMRegister right_reg = ToDoubleRegister(instr->right());
+ XMMRegister result_reg = ToFloat64x2Register(instr->result());
+ __ sub(esp, Immediate(kFloat64x2Size));
+ __ movsd(Operand(esp, 0 * kDoubleSize), left_reg);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right_reg);
+ __ movups(result_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kFloat64x2Size));
+ return;
+ }
+ case kInt32x4WithW:
+ imm8++;
+ case kInt32x4WithZ:
+ imm8++;
+ case kInt32x4WithY:
+ imm8++;
+ case kInt32x4WithX: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsInteger32());
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope(masm(), SSE4_1);
+ __ pinsrd(left_reg, right_reg, imm8);
+ } else {
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ movdqu(Operand(esp, 0), left_reg);
+ __ mov(Operand(esp, imm8 * kFloatSize), right_reg);
+ __ movdqu(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ }
+ return;
+ }
+ case kInt32x4WithFlagW:
+ imm8++;
+ case kInt32x4WithFlagZ:
+ imm8++;
+ case kInt32x4WithFlagY:
+ imm8++;
+ case kInt32x4WithFlagX: {
+ DCHECK(instr->left()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->right()->representation().IsTagged());
+ HType type = instr->hydrogen()->right()->type();
+ XMMRegister left_reg = ToInt32x4Register(instr->left());
+ Register right_reg = ToRegister(instr->right());
+ Label load_false_value, done;
+ if (type.IsBoolean()) {
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ movups(Operand(esp, 0), left_reg);
+ __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_value, Label::kNear);
+ } else {
+ Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
+ DeoptimizeIf(no_condition, instr,
+ "other types for int32x4.withFlagX/Y/Z/W");
+ return;
+ }
+ // load true value.
+ __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
+ __ jmp(&done, Label::kNear);
+ __ bind(&load_false_value);
+ __ mov(Operand(esp, imm8 * kFloatSize), Immediate(0x0));
+ __ bind(&done);
+ __ movups(left_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Select: {
+ DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+ XMMRegister mask_reg = ToInt32x4Register(instr->first());
+ XMMRegister left_reg = ToFloat32x4Register(instr->second());
+ XMMRegister right_reg = ToFloat32x4Register(instr->third());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ XMMRegister temp_reg = xmm0;
+
+ // Copy mask.
+ __ movaps(temp_reg, mask_reg);
+ // Invert it.
+ __ notps(temp_reg);
+ // temp_reg = temp_reg & falseValue.
+ __ andps(temp_reg, right_reg);
+
+ if (!result_reg.is(mask_reg)) {
+ if (result_reg.is(left_reg)) {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, mask_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ } else {
+ __ movaps(result_reg, mask_reg);
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ } else {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ return;
+ }
+ case kInt32x4Select: {
+ DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->second()->representation().IsInt32x4());
+ DCHECK(instr->hydrogen()->third()->representation().IsInt32x4());
+
+ XMMRegister mask_reg = ToInt32x4Register(instr->first());
+ XMMRegister left_reg = ToInt32x4Register(instr->second());
+ XMMRegister right_reg = ToInt32x4Register(instr->third());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ XMMRegister temp_reg = xmm0;
+
+ // Copy mask.
+ __ movaps(temp_reg, mask_reg);
+ // Invert it.
+ __ notps(temp_reg);
+ // temp_reg = temp_reg & falseValue.
+ __ andps(temp_reg, right_reg);
+
+ if (!result_reg.is(mask_reg)) {
+ if (result_reg.is(left_reg)) {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, mask_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ } else {
+ __ movaps(result_reg, mask_reg);
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ } else {
+ // result_reg = result_reg & trueValue.
+ __ andps(result_reg, left_reg);
+ // out = result_reg | temp_reg.
+ __ orps(result_reg, temp_reg);
+ }
+ return;
+ }
+ case kFloat32x4ShuffleMix: {
+ DCHECK(instr->first()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->third()->representation().IsInteger32());
+ if (instr->hydrogen()->third()->IsConstant() &&
+ HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
+ uint8_t select = static_cast<uint8_t>(value & 0xFF);
+ XMMRegister first_reg = ToFloat32x4Register(instr->first());
+ XMMRegister second_reg = ToFloat32x4Register(instr->second());
+ __ shufps(first_reg, second_reg, select);
+ return;
+ } else {
+ Comment(";;; deoptimize: non-constant selector for shuffle");
+ DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+ return;
+ }
+ }
+ case kFloat32x4Clamp: {
+ DCHECK(instr->first()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+ DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+ XMMRegister value_reg = ToFloat32x4Register(instr->first());
+ XMMRegister lower_reg = ToFloat32x4Register(instr->second());
+ XMMRegister upper_reg = ToFloat32x4Register(instr->third());
+ __ minps(value_reg, upper_reg);
+ __ maxps(value_reg, lower_reg);
+ return;
+ }
+ case kFloat64x2Clamp: {
+ DCHECK(instr->first()->Equals(instr->result()));
+ DCHECK(instr->hydrogen()->first()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->second()->representation().IsFloat64x2());
+ DCHECK(instr->hydrogen()->third()->representation().IsFloat64x2());
+
+ XMMRegister value_reg = ToFloat64x2Register(instr->first());
+ XMMRegister lower_reg = ToFloat64x2Register(instr->second());
+ XMMRegister upper_reg = ToFloat64x2Register(instr->third());
+ __ minpd(value_reg, upper_reg);
+ __ maxpd(value_reg, lower_reg);
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
+void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Constructor: {
+ DCHECK(instr->hydrogen()->x()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->y()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->z()->representation().IsDouble());
+ DCHECK(instr->hydrogen()->w()->representation().IsDouble());
+ XMMRegister x_reg = ToDoubleRegister(instr->x());
+ XMMRegister y_reg = ToDoubleRegister(instr->y());
+ XMMRegister z_reg = ToDoubleRegister(instr->z());
+ XMMRegister w_reg = ToDoubleRegister(instr->w());
+ XMMRegister result_reg = ToFloat32x4Register(instr->result());
+ __ sub(esp, Immediate(kFloat32x4Size));
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, x_reg);
+ __ movss(Operand(esp, 0 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, y_reg);
+ __ movss(Operand(esp, 1 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, z_reg);
+ __ movss(Operand(esp, 2 * kFloatSize), xmm0);
+ __ xorps(xmm0, xmm0);
+ __ cvtsd2ss(xmm0, w_reg);
+ __ movss(Operand(esp, 3 * kFloatSize), xmm0);
+ __ movups(result_reg, Operand(esp, 0 * kFloatSize));
+ __ add(esp, Immediate(kFloat32x4Size));
+ return;
+ }
+ case kInt32x4Constructor: {
+ DCHECK(instr->hydrogen()->x()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->y()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->z()->representation().IsInteger32());
+ DCHECK(instr->hydrogen()->w()->representation().IsInteger32());
+ Register x_reg = ToRegister(instr->x());
+ Register y_reg = ToRegister(instr->y());
+ Register z_reg = ToRegister(instr->z());
+ Register w_reg = ToRegister(instr->w());
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ __ sub(esp, Immediate(kInt32x4Size));
+ __ mov(Operand(esp, 0 * kInt32Size), x_reg);
+ __ mov(Operand(esp, 1 * kInt32Size), y_reg);
+ __ mov(Operand(esp, 2 * kInt32Size), z_reg);
+ __ mov(Operand(esp, 3 * kInt32Size), w_reg);
+ __ movups(result_reg, Operand(esp, 0 * kInt32Size));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ case kInt32x4Bool: {
+ DCHECK(instr->hydrogen()->x()->representation().IsTagged());
+ DCHECK(instr->hydrogen()->y()->representation().IsTagged());
+ DCHECK(instr->hydrogen()->z()->representation().IsTagged());
+ DCHECK(instr->hydrogen()->w()->representation().IsTagged());
+ HType x_type = instr->hydrogen()->x()->type();
+ HType y_type = instr->hydrogen()->y()->type();
+ HType z_type = instr->hydrogen()->z()->type();
+ HType w_type = instr->hydrogen()->w()->type();
+ if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
+ !z_type.IsBoolean() || !w_type.IsBoolean()) {
+ Comment(";;; deoptimize: other types for int32x4.bool.");
+ DeoptimizeIf(no_condition, instr, "other types for int32x4.bool");
+ return;
+ }
+ XMMRegister result_reg = ToInt32x4Register(instr->result());
+ Register x_reg = ToRegister(instr->x());
+ Register y_reg = ToRegister(instr->y());
+ Register z_reg = ToRegister(instr->z());
+ Register w_reg = ToRegister(instr->w());
+ Label load_false_x, done_x, load_false_y, done_y,
+ load_false_z, done_z, load_false_w, done_w;
+ __ sub(esp, Immediate(kInt32x4Size));
+
+ __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_x, Label::kNear);
+ __ mov(Operand(esp, 0 * kInt32Size), Immediate(-1));
+ __ jmp(&done_x, Label::kNear);
+ __ bind(&load_false_x);
+ __ mov(Operand(esp, 0 * kInt32Size), Immediate(0x0));
+ __ bind(&done_x);
+
+ __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_y, Label::kNear);
+ __ mov(Operand(esp, 1 * kInt32Size), Immediate(-1));
+ __ jmp(&done_y, Label::kNear);
+ __ bind(&load_false_y);
+ __ mov(Operand(esp, 1 * kInt32Size), Immediate(0x0));
+ __ bind(&done_y);
+
+ __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_z, Label::kNear);
+ __ mov(Operand(esp, 2 * kInt32Size), Immediate(-1));
+ __ jmp(&done_z, Label::kNear);
+ __ bind(&load_false_z);
+ __ mov(Operand(esp, 2 * kInt32Size), Immediate(0x0));
+ __ bind(&done_z);
+
+ __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
+ __ j(not_equal, &load_false_w, Label::kNear);
+ __ mov(Operand(esp, 3 * kInt32Size), Immediate(-1));
+ __ jmp(&done_w, Label::kNear);
+ __ bind(&load_false_w);
+ __ mov(Operand(esp, 3 * kInt32Size), Immediate(0x0));
+ __ bind(&done_w);
+
+ __ movups(result_reg, Operand(esp, 0));
+ __ add(esp, Immediate(kInt32x4Size));
+ return;
+ }
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+
#undef __
} } // namespace v8::internal
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ XMMRegister ToFloat32x4Register(LOperand* op) const;
+ XMMRegister ToFloat64x2Register(LOperand* op) const;
+ XMMRegister ToInt32x4Register(LOperand* op) const;
+ XMMRegister ToSIMD128Register(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+ void DoDeferredFloat32x4ToTagged(LInstruction* instr);
+ void DoDeferredInt32x4ToTagged(LInstruction* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LInstruction* instr, Runtime::FunctionId id);
+
+ template<class T>
+ void HandleTaggedToSIMD128(LTaggedToSIMD128* instr);
+ template<class T>
+ void HandleSIMD128ToTagged(LSIMD128ToTagged* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ XMMRegister ToFloat32x4Register(int index) const;
+ XMMRegister ToFloat64x2Register(int index) const;
+ XMMRegister ToInt32x4Register(int index) const;
+ XMMRegister ToSIMD128Register(int index) const;
int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
int32_t ToInteger32(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void HandleExternalArrayOpRequiresTemp(LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind);
+ template<class T>
+ void DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ template<class T>
+ void DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
__ movsd(xmm0, src);
__ movsd(dst, xmm0);
}
+ } else if (source->IsSIMD128Register()) {
+ XMMRegister src = cgen_->ToSIMD128Register(source);
+ if (destination->IsSIMD128Register()) {
+ __ movaps(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSIMD128StackSlot());
+ __ movups(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsSIMD128StackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsSIMD128Register()) {
+ __ movups(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSIMD128StackSlot());
+ __ movups(xmm0, src);
+ __ movups(cgen_->ToOperand(destination), xmm0);
+ }
} else {
UNREACHABLE();
}
__ mov(dst1, tmp);
__ movsd(src0, xmm0);
+ } else if ((source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128StackSlot())) {
+ // Swap two XMM stack slots.
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ Register tmp = EnsureTempRegister();
+ __ movups(xmm0, src);
+ for (int offset = 0; offset < kSIMD128Size; offset += kPointerSize) {
+ __ mov(tmp, Operand(dst, offset));
+ __ mov(Operand(src, offset), tmp);
+ }
+ __ movups(dst, xmm0);
+
+ } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
+ // Swap two XMM registers.
+ XMMRegister source_reg = cgen_->ToSIMD128Register(source);
+ XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
+
+ } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
+ // Swap an XMM register and an XMM stack slot.
+ DCHECK((source->IsSIMD128Register() &&
+ destination->IsSIMD128StackSlot()) ||
+ (source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128Register()));
+ XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
+ ? source
+ : destination);
+ LOperand* other = source->IsSIMD128Register() ? destination : source;
+ DCHECK(other->IsSIMD128StackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movups(xmm0, other_operand);
+ __ movups(other_operand, reg);
+ __ movaps(reg, xmm0);
+
} else {
// No other combinations are possible.
UNREACHABLE();
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
- // Skip a slot if for a double-width slot.
- if (kind == DOUBLE_REGISTERS) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
- num_double_slots_++;
+ switch (kind) {
+ case GENERAL_REGISTERS: return spill_slot_count_++;
+ case DOUBLE_REGISTERS: {
+ // Skip a slot for a double-width slot.
+ spill_slot_count_++;
+ spill_slot_count_ |= 1;
+ num_double_slots_++;
+ return spill_slot_count_++;
+ }
+ case FLOAT32x4_REGISTERS:
+ case FLOAT64x2_REGISTERS:
+ case INT32x4_REGISTERS: {
+ // Skip three slots for a quad-width slot.
+ spill_slot_count_ += 3;
+ num_double_slots_ += 2; // for dynamic frame alignment
+ return spill_slot_count_++;
+ }
+ default:
+ UNREACHABLE();
+ return -1;
}
- return spill_slot_count_++;
}
LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
+ switch (kind) {
+ case GENERAL_REGISTERS: return LStackSlot::Create(index, zone());
+ case DOUBLE_REGISTERS: return LDoubleStackSlot::Create(index, zone());
+ case FLOAT32x4_REGISTERS: return LFloat32x4StackSlot::Create(index, zone());
+ case FLOAT64x2_REGISTERS: return LFloat64x2StackSlot::Create(index, zone());
+ case INT32x4_REGISTERS: return LInt32x4StackSlot::Create(index, zone());
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+
LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
if (!easy_case &&
DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
if (!val->representation().IsSmi()) result = AssignEnvironment(result);
return result;
+ } else if (to.IsSIMD128()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LTaggedToSIMD128* res = new(zone()) LTaggedToSIMD128(value, temp, to);
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
}
}
+ } else if (from.IsSIMD128()) {
+ DCHECK(to.IsTagged());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LOperand* temp2 = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LSIMD128ToTagged* result = new(zone()) LSIMD128ToTagged(value, temp, temp2);
+ return AssignPointerMap(Define(result, result_temp));
}
UNREACHABLE();
return NULL;
(instr->representation().IsInteger32() &&
!(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ (IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsFloat32x4()
+ : instr->representation().IsTagged() &&
+ (IsFloat32x4ElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsFloat64x2()
+ : instr->representation().IsTagged() &&
+ (IsFloat64x2ElementsKind(instr->elements_kind()))) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->representation().IsInt32x4()
+ : instr->representation().IsTagged() &&
+ (IsInt32x4ElementsKind(instr->elements_kind()))));
LOperand* backing_store = UseRegister(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsFloat32x4()
+ : instr->value()->representation().IsTagged() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsFloat64x2()
+ : instr->value()->representation().IsTagged() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (CpuFeatures::SupportsSIMD128InCrankshaft()
+ ? instr->value()->representation().IsInt32x4()
+ : instr->value()->representation().IsTagged() &&
+ IsInt32x4ElementsKind(elements_kind)));
DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
}
+const char* LNullarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+ HNullarySIMDOperation* instr) {
+ LNullarySIMDOperation* result =
+ new(zone()) LNullarySIMDOperation(instr->op());
+ switch (instr->op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LUnarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+ case kSIMD128Change: return "SIMD128-change";
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(HUnarySIMDOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnarySIMDOperation* result =
+ new(zone()) LUnarySIMDOperation(input, instr->op());
+ switch (instr->op()) {
+ case kSIMD128Change:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kFloat32x4Abs:
+ case kFloat32x4Neg:
+ case kFloat32x4Reciprocal:
+ case kFloat32x4ReciprocalSqrt:
+ case kFloat32x4Sqrt:
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt:
+ case kInt32x4Neg:
+ case kInt32x4Not:
+ return DefineSameAsFirst(result);
+ case kFloat32x4Coercion:
+ case kFloat64x2Coercion:
+ case kInt32x4Coercion:
+ case kFloat32x4BitsToInt32x4:
+ case kFloat32x4ToInt32x4:
+ case kInt32x4BitsToFloat32x4:
+ case kInt32x4ToFloat32x4:
+ case kFloat32x4Splat:
+ case kInt32x4Splat:
+ case kFloat32x4GetSignMask:
+ case kFloat32x4GetX:
+ case kFloat32x4GetY:
+ case kFloat32x4GetZ:
+ case kFloat32x4GetW:
+ case kFloat64x2GetSignMask:
+ case kFloat64x2GetX:
+ case kFloat64x2GetY:
+ case kInt32x4GetSignMask:
+ case kInt32x4GetX:
+ case kInt32x4GetY:
+ case kInt32x4GetZ:
+ case kInt32x4GetW:
+ case kInt32x4GetFlagX:
+ case kInt32x4GetFlagY:
+ case kInt32x4GetFlagZ:
+ case kInt32x4GetFlagW:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LBinarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+ HBinarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ case kFloat32x4Div:
+ case kFloat32x4Max:
+ case kFloat32x4Min:
+ case kFloat32x4Mul:
+ case kFloat32x4Sub:
+ case kFloat32x4Scale:
+ case kFloat32x4WithX:
+ case kFloat32x4WithY:
+ case kFloat32x4WithZ:
+ case kFloat32x4WithW:
+ case kFloat64x2Add:
+ case kFloat64x2Div:
+ case kFloat64x2Max:
+ case kFloat64x2Min:
+ case kFloat64x2Mul:
+ case kFloat64x2Sub:
+ case kFloat64x2Scale:
+ case kFloat64x2WithX:
+ case kFloat64x2WithY:
+ case kInt32x4Add:
+ case kInt32x4And:
+ case kInt32x4Mul:
+ case kInt32x4Or:
+ case kInt32x4Sub:
+ case kInt32x4Xor:
+ case kInt32x4WithX:
+ case kInt32x4WithY:
+ case kInt32x4WithZ:
+ case kInt32x4WithW:
+ case kInt32x4WithFlagX:
+ case kInt32x4WithFlagY:
+ case kInt32x4WithFlagZ:
+ case kInt32x4WithFlagW:
+ case kInt32x4GreaterThan:
+ case kInt32x4Equal:
+ case kInt32x4LessThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ if (instr->op() == kInt32x4WithFlagX ||
+ instr->op() == kInt32x4WithFlagY ||
+ instr->op() == kInt32x4WithFlagZ ||
+ instr->op() == kInt32x4WithFlagW) {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ } else {
+ return DefineSameAsFirst(result);
+ }
+ }
+ case kFloat64x2Constructor: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ case kFloat32x4Shuffle:
+ case kInt32x4Shuffle:
+ case kInt32x4ShiftLeft:
+ case kInt32x4ShiftRight:
+ case kInt32x4ShiftRightArithmetic: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstant(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4LessThan:
+ case kFloat32x4LessThanOrEqual:
+ case kFloat32x4Equal:
+ case kFloat32x4NotEqual:
+ case kFloat32x4GreaterThanOrEqual:
+ case kFloat32x4GreaterThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LTernarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+ p7) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+ HTernarySIMDOperation* instr) {
+ LOperand* first = UseRegisterAtStart(instr->first());
+ LOperand* second = UseRegisterAtStart(instr->second());
+ LOperand* third = instr->op() == kFloat32x4ShuffleMix
+ ? UseOrConstant(instr->third())
+ : UseRegisterAtStart(instr->third());
+ LTernarySIMDOperation* result =
+ new(zone()) LTernarySIMDOperation(first, second, third, instr->op());
+ switch (instr->op()) {
+ case kInt32x4Select:
+ case kFloat32x4Select: {
+ return DefineAsRegister(result);
+ }
+ case kFloat32x4ShuffleMix: {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4Clamp:
+ case kFloat64x2Clamp: {
+ return DefineSameAsFirst(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LQuarternarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+ HQuarternarySIMDOperation* instr) {
+ LOperand* x = UseRegisterAtStart(instr->x());
+ LOperand* y = UseRegisterAtStart(instr->y());
+ LOperand* z = UseRegisterAtStart(instr->z());
+ LOperand* w = UseRegisterAtStart(instr->w());
+ LQuarternarySIMDOperation* result =
+ new(zone()) LQuarternarySIMDOperation(x, y, z, w, instr->op());
+ if (instr->op() == kInt32x4Bool) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
+ V(SIMD128ToTagged) \
+ V(TaggedToSIMD128) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
};
+class LNullarySIMDOperation FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LNullarySIMDOperation(BuiltinFunctionId op)
+ : op_(op) {
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kNullarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LNullarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsNullarySIMDOperation());
+ return reinterpret_cast<LNullarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LUnarySIMDOperation FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kUnarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LUnarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsUnarySIMDOperation());
+ return reinterpret_cast<LUnarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LBinarySIMDOperation FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kBinarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LBinarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsBinarySIMDOperation());
+ return reinterpret_cast<LBinarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LTernarySIMDOperation FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
+ BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = first;
+ inputs_[1] = second;
+ inputs_[2] = third;
+ }
+
+ LOperand* first() { return inputs_[0]; }
+ LOperand* second() { return inputs_[1]; }
+ LOperand* third() { return inputs_[2]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kTernarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LTernarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsTernarySIMDOperation());
+ return reinterpret_cast<LTernarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LQuarternarySIMDOperation FINAL
+ : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
+ LOperand* w, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = x;
+ inputs_[1] = y;
+ inputs_[2] = z;
+ inputs_[3] = w;
+ }
+
+ LOperand* x() { return inputs_[0]; }
+ LOperand* y() { return inputs_[1]; }
+ LOperand* z() { return inputs_[2]; }
+ LOperand* w() { return inputs_[3]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kQuarternarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LQuarternarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsQuarternarySIMDOperation());
+ return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
};
+inline static bool ExternalArrayOpRequiresPreScale(
+ Representation key_representation,
+ ElementsKind kind) {
+ int shift_size = ElementsKindToShiftSize(kind);
+ return key_representation.IsSmi()
+ ? shift_size > static_cast<int>(maximal_scale_factor) + kSmiTagSize
+ : shift_size > static_cast<int>(maximal_scale_factor);
+}
+
+
inline static bool ExternalArrayOpRequiresTemp(
Representation key_representation,
ElementsKind elements_kind) {
- // Operations that require the key to be divided by two to be converted into
- // an index cannot fold the scale operation into a load and need an extra
- // temp register to do the work.
- return key_representation.IsSmi() &&
- (elements_kind == EXTERNAL_INT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_ELEMENTS ||
- elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
- elements_kind == UINT8_ELEMENTS ||
- elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ // Operations that require the key to be scaled by a factor or divided by two
+ // to be converted into an index cannot fold the scale operation into a load
+ // and need an extra temp register to do the work.
+ return ExternalArrayOpRequiresPreScale(key_representation, elements_kind) ||
+ (key_representation.IsSmi() &&
+ (elements_kind == EXTERNAL_INT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_ELEMENTS ||
+ elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
+ elements_kind == UINT8_ELEMENTS ||
+ elements_kind == INT8_ELEMENTS ||
+ elements_kind == UINT8_CLAMPED_ELEMENTS));
}
};
+class LSIMD128ToTagged FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ explicit LSIMD128ToTagged(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SIMD128ToTagged, "simd128-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 1> {
public:
};
+class LTaggedToSIMD128 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LTaggedToSIMD128(LOperand* value, LOperand* temp,
+ Representation representation)
+ : representation_(representation) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Representation representation() const { return representation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToSIMD128, "simd128-untag")
+ DECLARE_HYDROGEN_ACCESSOR(Change);
+ private:
+ Representation representation_;
+};
+
+
class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumRegisters * kSIMD128Size +
argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movups(Operand(ebp, offset - ((i + 1) * kSIMD128Size)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movups(reg, Operand(ebp, offset - ((i + 1) * kSIMD128Size)));
}
}
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4, FLOAT32x4) \
+ V(Float64x2, float64x2, FLOAT64x2) \
+ V(Int32x4, int32x4, INT32x4)
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(Type, type, TYPE) \
+void MacroAssembler::Allocate##Type(Register result, \
+ Register scratch1, \
+ Register scratch2, \
+ Label* gc_required) { \
+ /* Allocate SIMD128 object */ \
+ Allocate(Type::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
+ /* Load the initial map and assign to new allocated object. */ \
+ mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); \
+ mov(scratch1, \
+ Operand(scratch1, \
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); \
+ mov(scratch1, \
+ FieldOperand(scratch1, GlobalObject::kNativeContextOffset)); \
+ mov(scratch1, \
+ Operand(scratch1, \
+ Context::SlotOffset(Context::TYPE##_FUNCTION_INDEX))); \
+ LoadGlobalFunctionInitialMap(scratch1, scratch1); \
+ mov(FieldOperand(result, JSObject::kMapOffset), scratch1); \
+ /* Initialize properties and elements. */ \
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), \
+ Immediate(isolate()->factory()->empty_fixed_array())); \
+ mov(FieldOperand(result, JSObject::kElementsOffset), \
+ Immediate(isolate()->factory()->empty_fixed_array())); \
+ /* Allocate FixedTypedArray object */ \
+ Allocate(FixedTypedArrayBase::kDataOffset + k##Type##Size, \
+ scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
+ \
+ mov(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
+ Immediate(isolate()->factory()->fixed_##type##_array_map())); \
+ mov(scratch2, Immediate(1)); \
+ SmiTag(scratch2); \
+ mov(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
+ scratch2); \
+ /* Assign FixedTypedArray object to SIMD128 object */ \
+ mov(FieldOperand(result, Type::kValueOffset), scratch1); \
+}
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
}
+void MacroAssembler::absps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_absolute_constant =
+ { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::abspd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_absolute_constant =
+ { 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF };
+ andps(dst,
+ Operand(reinterpret_cast<int32_t>(&double_absolute_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::notps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_not_constant =
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_not_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negateps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant =
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ xorps(dst,
+ Operand(reinterpret_cast<int32_t>(&float_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::negatepd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } double_negate_constant =
+ { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
+ xorpd(dst,
+ Operand(reinterpret_cast<int32_t>(&double_negate_constant),
+ RelocInfo::NONE32));
+}
+
+
+void MacroAssembler::pnegd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
+ notps(dst);
+ paddd(dst,
+ Operand(reinterpret_cast<int32_t>(&int32_one_constant),
+ RelocInfo::NONE32));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
Label* gc_required,
MutableMode mode = IMMUTABLE);
+ // Allocate a float32x4, float64x2 and int32x4 object in new space with
+ // undefined value.
+ // Returns tagged pointer in result register, or jumps to gc_required if new
+ // space is full.
+ void AllocateFloat32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ void AllocateFloat64x2(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ void AllocateInt32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
+ // SIMD macros.
+ void absps(XMMRegister dst);
+ void abspd(XMMRegister dst);
+ void negateps(XMMRegister dst);
+ void negatepd(XMMRegister dst);
+ void notps(XMMRegister dst);
+ void pnegd(XMMRegister dst);
+
+ // ---------------------------------------------------------------------------
// String utilities.
// Generate code to do a lookup in the number string cache. If the number in
void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
- if (range->Kind() == DOUBLE_REGISTERS) {
+ if (range->Kind() == DOUBLE_REGISTERS ||
+ IsSIMD128RegisterKind(range->Kind())) {
assigned_double_registers_->Add(reg);
} else {
DCHECK(range->Kind() == GENERAL_REGISTERS);
case DOUBLE_REGISTERS:
op = LDoubleRegister::Create(assigned_register(), zone);
break;
+ case FLOAT32x4_REGISTERS:
+ op = LFloat32x4Register::Create(assigned_register(), zone);
+ break;
+ case FLOAT64x2_REGISTERS:
+ op = LFloat64x2Register::Create(assigned_register(), zone);
+ break;
+ case INT32x4_REGISTERS:
+ op = LInt32x4Register::Create(assigned_register(), zone);
+ break;
default:
UNREACHABLE();
}
if (use_pos->HasOperand()) {
DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
+ op->IsSIMD128Register() || !use_pos->RequiresRegister());
use_pos->operand()->ConvertTo(op->kind(), op->index());
}
use_pos = use_pos->next();
active_live_ranges_(8, zone()),
inactive_live_ranges_(8, zone()),
reusable_slots_(8, zone()),
+ reusable_simd128_slots_(8, zone()),
next_virtual_register_(num_values),
first_artificial_register_(num_values),
mode_(UNALLOCATED_REGISTERS),
double_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_,
zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ FLOAT32x4_REGISTERS) {
+ float32x4_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ FLOAT64x2_REGISTERS) {
+ float64x2_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
+ } else if (RequiredRegisterKind(input_copy->virtual_register()) ==
+ INT32x4_REGISTERS) {
+ int32x4_artificial_registers_.Add(
+ cur_input->virtual_register() - first_artificial_register_,
+ zone());
}
AddConstraintsGapMove(gap_index, input_copy, cur_input);
if (branch->HasPointerMap()) {
if (HasTaggedValue(range->id())) {
branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
- } else if (!cur_op->IsDoubleStackSlot() &&
- !cur_op->IsDoubleRegister()) {
+ } else if (!cur_op->IsDoubleStackSlot() &&
+ !cur_op->IsDoubleRegister() &&
+ !cur_op->IsSIMD128StackSlot() &&
+ !cur_op->IsSIMD128Register()) {
branch->pointer_map()->RemovePointer(cur_op);
}
}
if (live_ranges_[i] != NULL) {
if (live_ranges_[i]->Kind() == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
+ } else if (mode_ == DOUBLE_REGISTERS &&
+ IsSIMD128RegisterKind(live_ranges_[i]->Kind())) {
+ AddToUnhandledUnsorted(live_ranges_[i]);
}
}
}
DCHECK(UnhandledIsSorted());
DCHECK(reusable_slots_.is_empty());
+ DCHECK(reusable_simd128_slots_.is_empty());
DCHECK(active_live_ranges_.is_empty());
DCHECK(inactive_live_ranges_.is_empty());
}
reusable_slots_.Rewind(0);
+ reusable_simd128_slots_.Rewind(0);
active_live_ranges_.Rewind(0);
inactive_live_ranges_.Rewind(0);
}
HValue* value = graph_->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
+ } else if (value != NULL && (value->representation().IsFloat32x4())) {
+ return FLOAT32x4_REGISTERS;
+ } else if (value != NULL && (value->representation().IsFloat64x2())) {
+ return FLOAT64x2_REGISTERS;
+ } else if (value != NULL && (value->representation().IsInt32x4())) {
+ return INT32x4_REGISTERS;
}
} else if (double_artificial_registers_.Contains(
virtual_register - first_artificial_register_)) {
return DOUBLE_REGISTERS;
+ } else if (float32x4_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return FLOAT32x4_REGISTERS;
+ } else if (float64x2_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return FLOAT64x2_REGISTERS;
+ } else if (int32x4_artificial_registers_.Contains(
+ virtual_register - first_artificial_register_)) {
+ return INT32x4_REGISTERS;
}
return GENERAL_REGISTERS;
int index = range->TopLevel()->GetSpillOperand()->index();
if (index >= 0) {
- reusable_slots_.Add(range, zone());
+ if (IsSIMD128RegisterKind(range->Kind())) {
+ reusable_simd128_slots_.Add(range, zone());
+ } else {
+ reusable_slots_.Add(range, zone());
+ }
}
}
LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
- if (reusable_slots_.is_empty()) return NULL;
- if (reusable_slots_.first()->End().Value() >
+ ZoneList<LiveRange*>* reusable_slots = IsSIMD128RegisterKind(range->Kind())
+ ? &reusable_simd128_slots_
+ : &reusable_slots_;
+ if (reusable_slots->is_empty()) return NULL;
+ if (reusable_slots->first()->End().Value() >
range->TopLevel()->Start().Value()) {
return NULL;
}
- LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
- reusable_slots_.Remove(0);
+ LOperand* result = reusable_slots->first()->TopLevel()->GetSpillOperand();
+ reusable_slots->Remove(0);
return result;
}
}
LOperand* hint = current->FirstHint();
- if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+ if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister() ||
+ hint->IsSIMD128Register())) {
int register_index = hint->index();
TraceAlloc(
"Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
- if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
+ if (op == NULL) {
+ op = chunk_->GetNextSpillSlot(range->Kind());
+ } else if (range->Kind() == FLOAT32x4_REGISTERS &&
+ op->kind() != LOperand::FLOAT32x4_STACK_SLOT) {
+ // Convert to Float32x4StackSlot.
+ op = LFloat32x4StackSlot::Create(op->index(), zone());
+ } else if (range->Kind() == FLOAT64x2_REGISTERS &&
+ op->kind() != LOperand::FLOAT64x2_STACK_SLOT) {
+ // Convert to Float64x2StackSlot.
+ op = LFloat64x2StackSlot::Create(op->index(), zone());
+ } else if (range->Kind() == INT32x4_REGISTERS &&
+ op->kind() != LOperand::INT32x4_STACK_SLOT) {
+ // Convert to Int32x4StackSlot.
+ op = LInt32x4StackSlot::Create(op->index(), zone());
+ }
first->SetSpillOperand(op);
}
range->MakeSpilled(chunk()->zone());
};
+inline bool IsSIMD128RegisterKind(RegisterKind kind) {
+ return kind == FLOAT32x4_REGISTERS || kind == FLOAT64x2_REGISTERS ||
+ kind == INT32x4_REGISTERS;
+}
+
+
// Representation of the non-empty interval [start,end[.
class UseInterval: public ZoneObject {
public:
ZoneList<LiveRange*> active_live_ranges_;
ZoneList<LiveRange*> inactive_live_ranges_;
ZoneList<LiveRange*> reusable_slots_;
+ // Slots reusable for float32x4, float64x2 and int32x4 register spilling.
+ ZoneList<LiveRange*> reusable_simd128_slots_;
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
int first_artificial_register_;
GrowableBitVector double_artificial_registers_;
+ GrowableBitVector float32x4_artificial_registers_;
+ GrowableBitVector float64x2_artificial_registers_;
+ GrowableBitVector int32x4_artificial_registers_;
RegisterKind mode_;
int num_registers_;
}
break;
}
+ case FLOAT32x4_STACK_SLOT:
+ stream->Add("[float32x4_stack:%d]", index());
+ break;
+ case FLOAT64x2_STACK_SLOT:
+ stream->Add("[float64x2_stack:%d]", index());
+ break;
+ case INT32x4_STACK_SLOT:
+ stream->Add("[int32x4_stack:%d]", index());
+ break;
case DOUBLE_REGISTER: {
int reg_index = index();
if (reg_index < 0 ||
}
break;
}
+ case FLOAT32x4_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
+ case FLOAT64x2_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
+ case INT32x4_REGISTER:
+ stream->Add("[%s|R]",
+ SIMD128Register::AllocationIndexToString(index()));
+ break;
}
}
void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
pointer_operands_.Add(op, zone);
}
void LPointerMap::RemovePointer(LOperand* op) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (pointer_operands_[i]->Equals(op)) {
pointer_operands_.Remove(i);
void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
- DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+ DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot() &&
+ !op->IsFloat32x4Register() && !op->IsFloat32x4StackSlot() &&
+ !op->IsFloat64x2Register() && !op->IsFloat64x2StackSlot() &&
+ !op->IsInt32x4Register() && !op->IsInt32x4StackSlot());
untagged_operands_.Add(op, zone);
}
namespace v8 {
namespace internal {
-#define LITHIUM_OPERAND_LIST(V) \
- V(ConstantOperand, CONSTANT_OPERAND, 128) \
- V(StackSlot, STACK_SLOT, 128) \
- V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
- V(Register, REGISTER, 16) \
- V(DoubleRegister, DOUBLE_REGISTER, 16)
+#define LITHIUM_OPERAND_LIST(V) \
+ V(ConstantOperand, CONSTANT_OPERAND, 128) \
+ V(StackSlot, STACK_SLOT, 128) \
+ V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128) \
+ V(Float32x4StackSlot, FLOAT32x4_STACK_SLOT, 128) \
+ V(Float64x2StackSlot, FLOAT64x2_STACK_SLOT, 128) \
+ V(Int32x4StackSlot, INT32x4_STACK_SLOT, 128) \
+ V(Register, REGISTER, 16) \
+ V(DoubleRegister, DOUBLE_REGISTER, 16) \
+ V(Float32x4Register, FLOAT32x4_REGISTER, 16) \
+ V(Float64x2Register, FLOAT64x2_REGISTER, 16) \
+ V(Int32x4Register, INT32x4_REGISTER, 16)
class LOperand : public ZoneObject {
public:
CONSTANT_OPERAND,
STACK_SLOT,
DOUBLE_STACK_SLOT,
+ FLOAT32x4_STACK_SLOT,
+ FLOAT64x2_STACK_SLOT,
+ INT32x4_STACK_SLOT,
REGISTER,
- DOUBLE_REGISTER
+ DOUBLE_REGISTER,
+ FLOAT32x4_REGISTER,
+ FLOAT64x2_REGISTER,
+ INT32x4_REGISTER
};
LOperand() : value_(KindField::encode(INVALID)) { }
LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
#undef LITHIUM_OPERAND_PREDICATE
- bool Equals(LOperand* other) const { return value_ == other->value_; }
+ bool IsSIMD128Register() const {
+ return kind() == FLOAT32x4_REGISTER || kind() == FLOAT64x2_REGISTER ||
+ kind() == INT32x4_REGISTER;
+ }
+ bool IsSIMD128StackSlot() const {
+ return kind() == FLOAT32x4_STACK_SLOT || kind() == FLOAT64x2_STACK_SLOT ||
+ kind() == INT32x4_STACK_SLOT;
+ }
+ bool Equals(LOperand* other) const {
+ return value_ == other->value_ || (index() == other->index() &&
+ ((IsSIMD128Register() && other->IsSIMD128Register()) ||
+ (IsSIMD128StackSlot() && other->IsSIMD128StackSlot())));
+ }
void PrintTo(StringStream* stream);
void ConvertTo(Kind kind, int index) {
static void TearDownCaches();
protected:
- static const int kKindFieldWidth = 3;
+ static const int kKindFieldWidth = 4;
class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
LOperand(Kind kind, int index) { ConvertTo(kind, index); }
// because it accommodates a larger pay-load.
//
// For FIXED_SLOT policy:
- // +------------------------------------------+
- // | slot_index | vreg | 0 | 001 |
- // +------------------------------------------+
+ // +-------------------------------------------+
+ // | slot_index | vreg | 0 | 0001 |
+ // +-------------------------------------------+
//
// For all other (extended) policies:
- // +------------------------------------------+
- // | reg_index | L | PPP | vreg | 1 | 001 | L ... Lifetime
- // +------------------------------------------+ P ... Policy
+ // +-------------------------------------------+
+ // | reg_index | L | PPP | vreg | 1 | 0001 | L ... Lifetime
+ // +-------------------------------------------+ P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
// instead of using the BitField utility class.
// The superclass has a KindField.
- STATIC_ASSERT(kKindFieldWidth == 3);
+ STATIC_ASSERT(kKindFieldWidth == 4);
// BitFields for all unallocated operands.
- class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
- class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+ class BasicPolicyField : public BitField<BasicPolicy, 4, 1> {};
+ class VirtualRegisterField : public BitField<unsigned, 5, 18> {};
// BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField<int, 22, 10> {};
+ class FixedSlotIndexField : public BitField<int, 23, 9> {};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
- class LifetimeField : public BitField<Lifetime, 25, 1> {};
- class FixedRegisterField : public BitField<int, 26, 6> {};
+ class ExtendedPolicyField : public BitField<ExtendedPolicy, 23, 3> {};
+ class LifetimeField : public BitField<Lifetime, 26, 1> {};
+ class FixedRegisterField : public BitField<int, 27, 5> {};
static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
-
enum RegisterKind {
UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
- DOUBLE_REGISTERS
+ DOUBLE_REGISTERS,
+ FLOAT32x4_REGISTERS,
+ FLOAT64x2_REGISTERS,
+ INT32x4_REGISTERS
};
// Iterator for non-null temp operands.
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
+macro IsFloat32x4(arg) = (%_ClassOf(arg) === 'float32x4');
+macro IsFloat64x2(arg) = (%_ClassOf(arg) === 'float64x2');
+macro IsInt32x4(arg) = (%_ClassOf(arg) === 'int32x4');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
+ case FLOAT32x4_TYPE:
+ Float32x4::cast(this)->Float32x4Verify();
+ break;
+ case FLOAT64x2_TYPE:
+ Float64x2::cast(this)->Float64x2Verify();
+ break;
+ case INT32x4_TYPE:
+ Int32x4::cast(this)->Int32x4Verify();
+ break;
case FIXED_ARRAY_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
}
+void Float32x4::Float32x4Verify() {
+ CHECK(IsFloat32x4());
+}
+
+
+void Float64x2::Float64x2Verify() {
+ CHECK(IsFloat64x2());
+}
+
+
+void Int32x4::Int32x4Verify() {
+ CHECK(IsInt32x4());
+}
+
+
void ByteArray::ByteArrayVerify() {
CHECK(IsByteArray());
}
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+TYPE_CHECKER(Float32x4, FLOAT32x4_TYPE)
+TYPE_CHECKER(Float64x2, FLOAT64x2_TYPE)
+TYPE_CHECKER(Int32x4, INT32x4_TYPE)
bool Object::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
write_double_field(p, offset, value)
#endif // V8_TARGET_ARCH_MIPS
+#define READ_FLOAT32x4_FIELD(p, offset) \
+ (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT32x4_FIELD(p, offset, value) \
+ (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_FLOAT64x2_FIELD(p, offset) \
+ (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT64x2_FIELD(p, offset, value) \
+ (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_INT32x4_FIELD(p, offset) \
+ (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_INT32x4_FIELD(p, offset, value) \
+ (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
+
+#define READ_FLOAT_FIELD(p, offset) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_FLOAT_FIELD(p, offset, value) \
+ (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
}
+ACCESSORS(Float32x4, value, Object, kValueOffset)
+ACCESSORS(Float64x2, value, Object, kValueOffset)
+ACCESSORS(Int32x4, value, Object, kValueOffset)
+
+
+const char* Float32x4::Name() {
+ return "float32x4";
+}
+
+
+int Float32x4::kRuntimeAllocatorId() {
+ return Runtime::kAllocateFloat32x4;
+}
+
+
+float Float32x4::getAt(int index) {
+ DCHECK(index >= 0 && index < kLanes);
+ return get().storage[index];
+}
+
+
+float32x4_value_t Float32x4::get() {
+ return FixedFloat32x4Array::cast(value())->get_scalar(0);
+}
+
+
+void Float32x4::set(float32x4_value_t f32x4) {
+ FixedFloat32x4Array::cast(value())->set(0, f32x4);
+}
+
+
+const char* Float64x2::Name() {
+ return "float64x2";
+}
+
+
+int Float64x2::kRuntimeAllocatorId() {
+ return Runtime::kAllocateFloat64x2;
+}
+
+
+double Float64x2::getAt(int index) {
+ DCHECK(index >= 0 && index < kLanes);
+ return get().storage[index];
+}
+
+float64x2_value_t Float64x2::get() {
+ return FixedFloat64x2Array::cast(value())->get_scalar(0);
+}
+
+
+void Float64x2::set(float64x2_value_t f64x2) {
+ FixedFloat64x2Array::cast(value())->set(0, f64x2);
+}
+
+
+const char* Int32x4::Name() {
+ return "int32x4";
+}
+
+
+int Int32x4::kRuntimeAllocatorId() {
+ return Runtime::kAllocateInt32x4;
+}
+
+
+int32_t Int32x4::getAt(int index) {
+ DCHECK(index >= 0 && index < kLanes);
+ return get().storage[index];;
+}
+
+
+int32x4_value_t Int32x4::get() {
+ return FixedInt32x4Array::cast(value())->get_scalar(0);
+}
+
+
+void Int32x4::set(int32x4_value_t i32x4) {
+ FixedInt32x4Array::cast(value())->set(0, i32x4);
+}
+
+
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
return JSTypedArray::kSize;
case JS_DATA_VIEW_TYPE:
return JSDataView::kSize;
+ case FLOAT32x4_TYPE:
+ return Float32x4::kSize;
+ case FLOAT64x2_TYPE:
+ return Float64x2::kSize;
+ case INT32x4_TYPE:
+ return Int32x4::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
CAST_ACCESSOR(ExternalArray)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat32x4Array)
CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalFloat64x2Array)
CAST_ACCESSOR(ExternalInt16Array)
CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalInt32x4Array)
CAST_ACCESSOR(ExternalInt8Array)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(Float32x4)
+CAST_ACCESSOR(Float64x2)
+CAST_ACCESSOR(Int32x4)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
}
+float32x4_value_t ExternalFloat32x4Array::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ float32x4_value_t value;
+ value.storage[0] = ptr[index * 4 + 0];
+ value.storage[1] = ptr[index * 4 + 1];
+ value.storage[2] = ptr[index * 4 + 2];
+ value.storage[3] = ptr[index * 4 + 3];
+ return value;
+}
+
+
+Handle<Object> ExternalFloat32x4Array::get(Handle<ExternalFloat32x4Array> array,
+ int index) {
+ float32x4_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewFloat32x4(value);
+}
+
+
+void ExternalFloat32x4Array::set(int index, const float32x4_value_t& value) {
+ DCHECK((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ ptr[index * 4 + 0] = value.storage[0];
+ ptr[index * 4 + 1] = value.storage[1];
+ ptr[index * 4 + 2] = value.storage[2];
+ ptr[index * 4 + 3] = value.storage[3];
+}
+
+
+float64x2_value_t ExternalFloat64x2Array::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ float64x2_value_t value;
+ value.storage[0] = ptr[index * 2 + 0];
+ value.storage[1] = ptr[index * 2 + 1];
+ return value;
+}
+
+
+Handle<Object> ExternalFloat64x2Array::get(Handle<ExternalFloat64x2Array> array,
+ int index) {
+ float64x2_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewFloat64x2(value);
+}
+
+
+void ExternalFloat64x2Array::set(int index, const float64x2_value_t& value) {
+ DCHECK((index >= 0) && (index < this->length()));
+ double* ptr = static_cast<double*>(external_pointer());
+ ptr[index * 2 + 0] = value.storage[0];
+ ptr[index * 2 + 1] = value.storage[1];
+}
+
+
+int32x4_value_t ExternalInt32x4Array::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ int32_t* ptr = static_cast<int32_t*>(external_pointer());
+ int32x4_value_t value;
+ value.storage[0] = ptr[index * 4 + 0];
+ value.storage[1] = ptr[index * 4 + 1];
+ value.storage[2] = ptr[index * 4 + 2];
+ value.storage[3] = ptr[index * 4 + 3];
+ return value;
+}
+
+
+Handle<Object> ExternalInt32x4Array::get(Handle<ExternalInt32x4Array> array,
+ int index) {
+ int32x4_value_t value = array->get_scalar(index);
+ return array->GetIsolate()->factory()->NewInt32x4(value);
+}
+
+
+void ExternalInt32x4Array::set(int index, const int32x4_value_t& value) {
+ DCHECK((index >= 0) && (index < this->length()));
+ int32_t* ptr = static_cast<int32_t*>(external_pointer());
+ ptr[index * 4 + 0] = value.storage[0];
+ ptr[index * 4 + 1] = value.storage[1];
+ ptr[index * 4 + 2] = value.storage[2];
+ ptr[index * 4 + 3] = value.storage[3];
+}
+
+
double ExternalFloat64Array::get_scalar(int index) {
DCHECK((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
return Traits::ToHandle(array->GetIsolate(), cast_value);
}
+template<> inline
+Handle<Object> FixedTypedArray<Float32x4ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Float32x4ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ float32x4_value_t cast_value;
+ cast_value.storage[0] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[1] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[2] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[3] = static_cast<float>(base::OS::nan_value());
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat32x4()) {
+ cast_value = Handle<Float32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Float32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
+
+template<> inline
+Handle<Object> FixedTypedArray<Float64x2ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Float64x2ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ float64x2_value_t cast_value;
+ cast_value.storage[0] = base::OS::nan_value();
+ cast_value.storage[1] = base::OS::nan_value();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat64x2()) {
+ cast_value = Handle<Float64x2>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Float64x2ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
+
+template<> inline
+Handle<Object> FixedTypedArray<Int32x4ArrayTraits>::SetValue(
+ Handle<FixedTypedArray<Int32x4ArrayTraits> > array,
+ uint32_t index, Handle<Object> value) {
+ int32x4_value_t cast_value;
+ cast_value.storage[0] = 0;
+ cast_value.storage[1] = 0;
+ cast_value.storage[2] = 0;
+ cast_value.storage[3] = 0;
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsInt32x4()) {
+ cast_value = Handle<Int32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return Int32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
+}
+
Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
return handle(Smi::FromInt(scalar), isolate);
}
+Handle<Object> Int32x4ArrayTraits::ToHandle(
+ Isolate* isolate, int32x4_value_t scalar) {
+ return isolate->factory()->NewInt32x4(scalar);
+}
+
+
+Handle<Object> Float32x4ArrayTraits::ToHandle(
+ Isolate* isolate, float32x4_value_t scalar) {
+ return isolate->factory()->NewFloat32x4(scalar);
+}
+
+
+Handle<Object> Float64x2ArrayTraits::ToHandle(
+ Isolate* isolate, float64x2_value_t scalar) {
+ return isolate->factory()->NewFloat64x2(scalar);
+}
+
+
Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
HeapNumber::cast(this)->HeapNumberPrint(os);
os << ">";
break;
+ case FLOAT32x4_TYPE:
+ Float32x4::cast(this)->Float32x4Print(os);
+ break;
+ case FLOAT64x2_TYPE:
+ Float64x2::cast(this)->Float64x2Print(os);
+ break;
+ case INT32x4_TYPE:
+ Int32x4::cast(this)->Int32x4Print(os);
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
break;
}
+template<class T>
+static void DoPrintFloat32x4Elements(OStream& os, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ float32x4_value_t value = p->get_scalar(i);
+ os << " " << i << ": (" << value.storage[0] << value.storage[1] <<
+ value.storage[2] << value.storage[3] << ")\n";
+ }
+}
+
+
+template<class T>
+static void DoPrintFloat64x2Elements(OStream& os, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ float64x2_value_t value = p->get_scalar(i);
+ os << " " << i << ": (" << value.storage[0] << value.storage[1] << ")\n";
+ }
+}
+
+
+template<class T>
+static void DoPrintInt32x4Elements(OStream& os, Object* object) {
+ T* p = T::cast(object);
+ for (int i = 0; i < p->length(); i++) {
+ int32x4_value_t value = p->get_scalar(i);
+ os << " " << i << ": (" << value.storage[0] << value.storage[1] <<
+ value.storage[2] << value.storage[3] << ")\n";
+ }
+}
+
+
void JSObject::PrintElements(OStream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
break; \
}
+#define PRINT_FLOAT32x4_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintFloat32x4Elements<Type>(os, elements()); \
+ break; \
+ }
+
+#define PRINT_FLOAT64x2_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintFloat64x2Elements<Type>(os, elements()); \
+ break; \
+ }
+
+#define PRINT_INT32x4_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintInt32x4Elements<Type>(os, elements()); \
+ break; \
+ }
+
PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
PRINT_ELEMENTS(EXTERNAL_UINT8_ELEMENTS,
ExternalUint32Array)
PRINT_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
PRINT_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
+ PRINT_FLOAT32x4_ELEMENTS(EXTERNAL_FLOAT32x4_ELEMENTS,
+ ExternalFloat32x4Array)
+ PRINT_FLOAT64x2_ELEMENTS(EXTERNAL_FLOAT64x2_ELEMENTS,
+ ExternalFloat64x2Array)
+ PRINT_INT32x4_ELEMENTS(EXTERNAL_INT32x4_ELEMENTS, ExternalInt32x4Array)
PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+ PRINT_FLOAT32x4_ELEMENTS(FLOAT32x4_ELEMENTS, FixedFloat32x4Array)
+ PRINT_FLOAT64x2_ELEMENTS(FLOAT64x2_ELEMENTS, FixedFloat64x2Array)
+ PRINT_INT32x4_ELEMENTS(INT32x4_ELEMENTS, FixedInt32x4Array)
#undef PRINT_ELEMENTS
os << '>';
break;
}
+ case FLOAT32x4_TYPE:
+ os << "<Float32x4: ";
+ Float32x4::cast(this)->Float32x4Print(os);
+ os << '>';
+ break;
+ case FLOAT64x2_TYPE:
+ os << "<Float64x2: ";
+ Float64x2::cast(this)->Float64x2Print(os);
+ os << '>';
+ break;
+ case INT32x4_TYPE:
+ os << "<Int32x4: ";
+ Int32x4::cast(this)->Int32x4Print(os);
+ os << '>';
+ break;
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
case JS_FUNCTION_TYPE:
}
+void Float32x4::Float32x4Print(OStream& os) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%.16g %.16g %.16g %.16g", x(), y(), z(), w());
+ os << buffer.start();
+}
+
+
+void Int32x4::Int32x4Print(OStream& os) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%u %u %u %u", x(), y(), z(), w());
+ os << buffer.start();
+}
+
+
+void Float64x2::Float64x2Print(OStream& os) {
+ // The Windows version of vsnprintf can allocate when printing a %g string
+ // into a buffer that may not be big enough. We don't want random memory
+ // allocation when producing post-crash stack traces, so we print into a
+ // buffer that is plenty big enough for any floating point number, then
+ // print that using vsnprintf (which may truncate but never allocate if
+ // there is no more space in the buffer).
+ EmbeddedVector<char, 100> buffer;
+ SNPrintF(buffer, "%.16g %.16g", x(), y());
+ os << buffer.start();
+}
+
+
String* JSReceiver::class_name() {
if (IsJSFunction() || IsJSFunctionProxy()) {
return GetHeap()->Function_string();
case kTagged: return "t";
case kSmi: return "s";
case kDouble: return "d";
+ case kFloat32x4: return "float32x4";
+ case kFloat64x2: return "float64x2";
+ case kInt32x4: return "int32x44";
case kInteger32: return "i";
case kHeapObject: return "h";
case kExternal: return "x";
break;
}
+ case Translation::FLOAT32x4_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << SIMD128Register::AllocationIndexToString(reg_code)
+ << "}";
+ break;
+ }
+
+ case Translation::FLOAT64x2_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << SIMD128Register::AllocationIndexToString(reg_code)
+ << "}";
+ break;
+ }
+
+ case Translation::INT32x4_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input=" << SIMD128Register::AllocationIndexToString(reg_code)
+ << "}";
+ break;
+ }
+
case Translation::STACK_SLOT: {
int input_slot_index = iterator.Next();
os << "{input=" << input_slot_index << "}";
break;
}
+ case Translation::FLOAT32x4_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case Translation::FLOAT64x2_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
+ case Translation::INT32x4_STACK_SLOT: {
+ int input_slot_index = iterator.Next();
+ os << "{input=" << input_slot_index << "}";
+ break;
+ }
+
case Translation::LITERAL: {
unsigned literal_index = iterator.Next();
os << "{literal_id=" << literal_index << "}";
if (object->HasExternalArrayElements() ||
object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() && !value->IsFloat64x2() &&
+ !value->IsInt32x4() && !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value,
Execution::ToNumber(isolate, value), Object);
}
+Handle<Object> ExternalFloat32x4Array::SetValue(
+ Handle<ExternalFloat32x4Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ float32x4_value_t cast_value;
+ cast_value.storage[0] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[1] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[2] = static_cast<float>(base::OS::nan_value());
+ cast_value.storage[3] = static_cast<float>(base::OS::nan_value());
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat32x4()) {
+ cast_value = Handle<Float32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewFloat32x4(cast_value);
+}
+
+
+Handle<Object> ExternalInt32x4Array::SetValue(
+ Handle<ExternalInt32x4Array> array, uint32_t index, Handle<Object> value) {
+ int32x4_value_t cast_value;
+ cast_value.storage[0] = 0;
+ cast_value.storage[1] = 0;
+ cast_value.storage[2] = 0;
+ cast_value.storage[3] = 0;
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsInt32x4()) {
+ cast_value = Handle<Int32x4>::cast(value)->get();
+ } else {
+ // Clamp undefined to zero (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewInt32x4(cast_value);
+}
+
+
+Handle<Object> ExternalFloat64x2Array::SetValue(
+ Handle<ExternalFloat64x2Array> array,
+ uint32_t index,
+ Handle<Object> value) {
+ float64x2_value_t cast_value;
+ cast_value.storage[0] = base::OS::nan_value();
+ cast_value.storage[1] = base::OS::nan_value();
+ if (index < static_cast<uint32_t>(array->length())) {
+ if (value->IsFloat64x2()) {
+ cast_value = Handle<Float64x2>::cast(value)->get();
+ } else {
+ // Clamp undefined to NaN (default). All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value->IsUndefined());
+ }
+ array->set(index, cast_value);
+ }
+ return array->GetIsolate()->factory()->NewFloat64x2(cast_value);
+}
+
+
Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
Handle<JSGlobalObject> global,
Handle<Name> name) {
// - JSTypedArray
// - JSDataView
// - JSCollection
+// - Float32x4
+// - Float64x2
+// - Int32x4
// - JSSet
// - JSMap
// - JSSetIterator
// - ExternalInt32Array
// - ExternalUint32Array
// - ExternalFloat32Array
+// - ExternalFloat32x4Array
+// - ExternalFloat64x2Array
+// - ExternalInt32x4Array
// - Name
// - String
// - SeqString
V(EXTERNAL_INT32_ARRAY_TYPE) \
V(EXTERNAL_UINT32_ARRAY_TYPE) \
V(EXTERNAL_FLOAT32_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT32x4_ARRAY_TYPE) \
+ V(EXTERNAL_FLOAT64x2_ARRAY_TYPE) \
+ V(EXTERNAL_INT32x4_ARRAY_TYPE) \
V(EXTERNAL_FLOAT64_ARRAY_TYPE) \
V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE) \
\
V(FIXED_INT32_ARRAY_TYPE) \
V(FIXED_UINT32_ARRAY_TYPE) \
V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32x4_ARRAY_TYPE) \
+ V(FIXED_INT32x4_ARRAY_TYPE) \
+ V(FIXED_FLOAT64x2_ARRAY_TYPE) \
V(FIXED_FLOAT64_ARRAY_TYPE) \
V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
\
V(JS_ARRAY_BUFFER_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
+ V(FLOAT32x4_TYPE) \
+ V(FLOAT64x2_TYPE) \
+ V(INT32x4_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
EXTERNAL_INT32_ARRAY_TYPE,
EXTERNAL_UINT32_ARRAY_TYPE,
EXTERNAL_FLOAT32_ARRAY_TYPE,
+ EXTERNAL_FLOAT32x4_ARRAY_TYPE,
+ EXTERNAL_FLOAT64x2_ARRAY_TYPE,
+ EXTERNAL_INT32x4_ARRAY_TYPE,
EXTERNAL_FLOAT64_ARRAY_TYPE,
EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE,
FIXED_INT32_ARRAY_TYPE,
+ FIXED_INT32x4_ARRAY_TYPE,
FIXED_UINT32_ARRAY_TYPE,
FIXED_FLOAT32_ARRAY_TYPE,
+ FIXED_FLOAT32x4_ARRAY_TYPE,
+ FIXED_FLOAT64x2_ARRAY_TYPE,
FIXED_FLOAT64_ARRAY_TYPE,
FIXED_UINT8_CLAMPED_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
JS_DATA_VIEW_TYPE,
+ FLOAT32x4_TYPE,
+ FLOAT64x2_TYPE,
+ INT32x4_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
JS_SET_ITERATOR_TYPE,
V(ExternalInt32Array) \
V(ExternalUint32Array) \
V(ExternalFloat32Array) \
+ V(ExternalFloat32x4Array) \
+ V(ExternalFloat64x2Array) \
+ V(ExternalInt32x4Array) \
V(ExternalFloat64Array) \
V(ExternalUint8ClampedArray) \
V(FixedTypedArrayBase) \
V(FixedUint32Array) \
V(FixedInt32Array) \
V(FixedFloat32Array) \
+ V(FixedFloat32x4Array) \
+ V(FixedFloat64x2Array) \
+ V(FixedInt32x4Array) \
V(FixedFloat64Array) \
V(FixedUint8ClampedArray) \
V(ByteArray) \
V(JSArrayBufferView) \
V(JSTypedArray) \
V(JSDataView) \
+ V(Float32x4) \
+ V(Float64x2) \
+ V(Int32x4) \
V(JSProxy) \
V(JSFunctionProxy) \
V(JSSet) \
inline bool HasExternalInt32Elements();
inline bool HasExternalUint32Elements();
inline bool HasExternalFloat32Elements();
+ inline bool HasExternalFloat32x4Elements();
+ inline bool HasExternalFloat64x2Elements();
+ inline bool HasExternalInt32x4Elements();
inline bool HasExternalFloat64Elements();
inline bool HasFixedTypedArrayElements();
inline bool HasFixedUint32Elements();
inline bool HasFixedFloat32Elements();
inline bool HasFixedFloat64Elements();
+ inline bool HasFixedFloat32x4Elements();
+ inline bool HasFixedFloat64x2Elements();
+ inline bool HasFixedInt32x4Elements();
bool HasFastArgumentsElements();
bool HasDictionaryArgumentsElements();
// V has parameters (Type, type, TYPE, C type, element_size)
-#define TYPED_ARRAYS(V) \
+#define BUILTIN_TYPED_ARRAY(V) \
V(Uint8, uint8, UINT8, uint8_t, 1) \
V(Int8, int8, INT8, int8_t, 1) \
V(Uint16, uint16, UINT16, uint16_t, 2) \
V(Uint8Clamped, uint8_clamped, UINT8_CLAMPED, uint8_t, 1)
+#define SIMD128_TYPED_ARRAY(V) \
+ V(Float32x4, float32x4, FLOAT32x4, v8::internal::float32x4_value_t, 16) \
+ V(Float64x2, float64x2, FLOAT64x2, v8::internal::float64x2_value_t, 16) \
+ V(Int32x4, int32x4, INT32x4, v8::internal::int32x4_value_t, 16)
+
+
+#define TYPED_ARRAYS(V) \
+ BUILTIN_TYPED_ARRAY(V) \
+ SIMD128_TYPED_ARRAY(V)
+
// An ExternalArray represents a fixed-size array of primitive values
// which live outside the JavaScript heap. Its subclasses are used to
};
+class ExternalFloat32x4Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline float32x4_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat32x4Array> array,
+ int index);
+ inline void set(int index, const float32x4_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalFloat32x4Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ DECLARE_CAST(ExternalFloat32x4Array)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloat32x4Array)
+ DECLARE_VERIFIER(ExternalFloat32x4Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat32x4Array);
+};
+
+
+class ExternalFloat64x2Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline float64x2_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalFloat64x2Array> array,
+ int index);
+ inline void set(int index, const float64x2_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalFloat64x2Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ DECLARE_CAST(ExternalFloat64x2Array)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloat64x2Array)
+ DECLARE_VERIFIER(ExternalFloat64x2Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloat64x2Array);
+};
+
+
+class ExternalInt32x4Array: public ExternalArray {
+ public:
+ // Setter and getter.
+ inline int32x4_value_t get_scalar(int index);
+ static inline Handle<Object> get(Handle<ExternalInt32x4Array> array,
+ int index);
+ inline void set(int index, const int32x4_value_t& value);
+
+ // This accessor applies the correct conversion from Smi, HeapNumber
+ // and undefined.
+ static Handle<Object> SetValue(Handle<ExternalInt32x4Array> array,
+ uint32_t index,
+ Handle<Object> value);
+
+ // Casting.
+ DECLARE_CAST(ExternalInt32x4Array)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalInt32x4Array)
+ DECLARE_VERIFIER(ExternalInt32x4Array)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalInt32x4Array);
+};
+
+
class ExternalFloat64Array: public ExternalArray {
public:
// Setter and getter.
V(Math, clz32, MathClz32) \
V(Math, fround, MathFround)
+#define SIMD_NULLARY_OPERATIONS(V) \
+ V(SIMD.float32x4, zero, Float32x4Zero, Float32x4) \
+ V(SIMD.float64x2, zero, Float64x2Zero, Float64x2) \
+ V(SIMD.int32x4, zero, Int32x4Zero, Int32x4)
+
+#define SIMD_UNARY_OPERATIONS(V) \
+ V(SIMD, float32x4, Float32x4Coercion, Float32x4, Float32x4) \
+ V(SIMD, float64x2, Float64x2Coercion, Float64x2, Float64x2) \
+ V(SIMD, int32x4, Int32x4Coercion, Int32x4, Int32x4) \
+ V(SIMD.float32x4, abs, Float32x4Abs, Float32x4, Float32x4) \
+ V(SIMD.float32x4, fromInt32x4, Int32x4ToFloat32x4, Float32x4, Int32x4) \
+ V(SIMD.float32x4, fromInt32x4Bits, Int32x4BitsToFloat32x4, Float32x4, \
+ Int32x4) \
+ V(SIMD.float32x4, neg, Float32x4Neg, Float32x4, Float32x4) \
+ V(SIMD.float32x4, reciprocal, Float32x4Reciprocal, Float32x4, Float32x4) \
+ V(SIMD.float32x4, reciprocalSqrt, Float32x4ReciprocalSqrt, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, splat, Float32x4Splat, Float32x4, Double) \
+ V(SIMD.float32x4, sqrt, Float32x4Sqrt, Float32x4, Float32x4) \
+ V(SIMD.float64x2, abs, Float64x2Abs, Float64x2, Float64x2) \
+ V(SIMD.float64x2, neg, Float64x2Neg, Float64x2, Float64x2) \
+ V(SIMD.float64x2, sqrt, Float64x2Sqrt, Float64x2, Float64x2) \
+ V(SIMD.int32x4, fromFloat32x4, Float32x4ToInt32x4, Int32x4, Float32x4) \
+ V(SIMD.int32x4, fromFloat32x4Bits, Float32x4BitsToInt32x4, Int32x4, \
+ Float32x4) \
+ V(SIMD.int32x4, neg, Int32x4Neg, Int32x4, Int32x4) \
+ V(SIMD.int32x4, not, Int32x4Not, Int32x4, Int32x4) \
+ V(SIMD.int32x4, splat, Int32x4Splat, Int32x4, Integer32)
+
+// Do not need to install them in InstallExperimentalSIMDBuiltinFunctionIds.
+#define SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(V) \
+ V(SIMD.float32x4.prototype, signMask, Float32x4GetSignMask, Integer32, \
+ Float32x4) \
+ V(SIMD.float32x4.prototype, x, Float32x4GetX, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, y, Float32x4GetY, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, z, Float32x4GetZ, Double, Float32x4) \
+ V(SIMD.float32x4.prototype, w, Float32x4GetW, Double, Float32x4) \
+ V(SIMD.float64x2.prototype, signMask, Float64x2GetSignMask, Integer32, \
+ Float64x2) \
+ V(SIMD.float64x2.prototype, x, Float64x2GetX, Double, Float64x2) \
+ V(SIMD.float64x2.prototype, y, Float64x2GetY, Double, Float64x2) \
+ V(SIMD.int32x4.prototype, signMask, Int32x4GetSignMask, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, x, Int32x4GetX, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, y, Int32x4GetY, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, z, Int32x4GetZ, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, w, Int32x4GetW, Integer32, Int32x4) \
+ V(SIMD.int32x4.prototype, flagX, Int32x4GetFlagX, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagY, Int32x4GetFlagY, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagZ, Int32x4GetFlagZ, Tagged, Int32x4) \
+ V(SIMD.int32x4.prototype, flagW, Int32x4GetFlagW, Tagged, Int32x4)
+
+#define SIMD_BINARY_OPERATIONS(V) \
+ V(SIMD.float32x4, add, Float32x4Add, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, div, Float32x4Div, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, max, Float32x4Max, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, min, Float32x4Min, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, mul, Float32x4Mul, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, sub, Float32x4Sub, Float32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, equal, Float32x4Equal, Int32x4, Float32x4, Float32x4) \
+ V(SIMD.float32x4, notEqual, Float32x4NotEqual, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, greaterThan, Float32x4GreaterThan, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, greaterThanOrEqual, Float32x4GreaterThanOrEqual, Int32x4, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, lessThan, Float32x4LessThan, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, lessThanOrEqual, Float32x4LessThanOrEqual, Int32x4, \
+ Float32x4, Float32x4) \
+ V(SIMD.float32x4, shuffle, Float32x4Shuffle, Float32x4, Float32x4, \
+ Integer32) \
+ V(SIMD.float32x4, scale, Float32x4Scale, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withX, Float32x4WithX, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withY, Float32x4WithY, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withZ, Float32x4WithZ, Float32x4, Float32x4, Double) \
+ V(SIMD.float32x4, withW, Float32x4WithW, Float32x4, Float32x4, Double) \
+ V(SIMD.float64x2, add, Float64x2Add, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, div, Float64x2Div, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, max, Float64x2Max, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, min, Float64x2Min, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, mul, Float64x2Mul, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, sub, Float64x2Sub, Float64x2, Float64x2, Float64x2) \
+ V(SIMD.float64x2, scale, Float64x2Scale, Float64x2, Float64x2, Double) \
+ V(SIMD.float64x2, withX, Float64x2WithX, Float64x2, Float64x2, Double) \
+ V(SIMD.float64x2, withY, Float64x2WithY, Float64x2, Float64x2, Double) \
+ V(SIMD, float64x2, Float64x2Constructor, Float64x2, Double, Double) \
+ V(SIMD.int32x4, add, Int32x4Add, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, and, Int32x4And, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, mul, Int32x4Mul, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, or, Int32x4Or, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, sub, Int32x4Sub, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, xor, Int32x4Xor, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, shuffle, Int32x4Shuffle, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withX, Int32x4WithX, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withY, Int32x4WithY, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withZ, Int32x4WithZ, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withW, Int32x4WithW, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, withFlagX, Int32x4WithFlagX, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagY, Int32x4WithFlagY, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagZ, Int32x4WithFlagZ, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, withFlagW, Int32x4WithFlagW, Int32x4, Int32x4, Tagged) \
+ V(SIMD.int32x4, greaterThan, Int32x4GreaterThan, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, equal, Int32x4Equal, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, lessThan, Int32x4LessThan, Int32x4, Int32x4, Int32x4) \
+ V(SIMD.int32x4, shiftLeft, Int32x4ShiftLeft, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, shiftRight, Int32x4ShiftRight, Int32x4, Int32x4, Integer32) \
+ V(SIMD.int32x4, shiftRightArithmetic, Int32x4ShiftRightArithmetic, Int32x4, \
+ Int32x4, Integer32)
+
+#define SIMD_TERNARY_OPERATIONS(V) \
+ V(SIMD.float32x4, clamp, Float32x4Clamp, Float32x4, Float32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float32x4, shuffleMix, Float32x4ShuffleMix, Float32x4, Float32x4, \
+ Float32x4, Integer32) \
+ V(SIMD.float32x4, select, Float32x4Select, Float32x4, Int32x4, Float32x4, \
+ Float32x4) \
+ V(SIMD.float64x2, clamp, Float64x2Clamp, Float64x2, Float64x2, Float64x2, \
+ Float64x2) \
+ V(SIMD.int32x4, select, Int32x4Select, Int32x4, Int32x4, Int32x4, Int32x4)
+
+#define SIMD_QUARTERNARY_OPERATIONS(V) \
+ V(SIMD, float32x4, Float32x4Constructor, Float32x4, Double, Double, Double, \
+ Double) \
+ V(SIMD, int32x4, Int32x4Constructor, Int32x4, Integer32, Integer32, \
+ Integer32, Integer32) \
+ V(SIMD.int32x4, bool, Int32x4Bool, Int32x4, Tagged, Tagged, Tagged, Tagged)
+
+#define SIMD_ARRAY_OPERATIONS(V) \
+ V(Float32x4Array.prototype, getAt, Float32x4ArrayGetAt) \
+ V(Float32x4Array.prototype, setAt, Float32x4ArraySetAt) \
+ V(Float64x2Array.prototype, getAt, Float64x2ArrayGetAt) \
+ V(Float64x2Array.prototype, setAt, Float64x2ArraySetAt) \
+ V(Int32x4Array.prototype, getAt, Int32x4ArrayGetAt) \
+ V(Int32x4Array.prototype, setAt, Int32x4ArraySetAt)
+
+// Do not need to install them in InstallExperimentalSIMDBuiltinFunctionIds.
+#define SIMD_FAKE_ID_LISTS(V) \
+ V(SIMD, unreachable, SIMD128Unreachable) \
+ V(SIMD, change, SIMD128Change)
+
enum BuiltinFunctionId {
kArrayCode,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
-#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf
+ kMathPowHalf,
+ SIMD_FAKE_ID_LISTS(DECLARE_FUNCTION_ID)
+ SIMD_ARRAY_OPERATIONS(DECLARE_FUNCTION_ID)
+#undef DECLARE_FUNCTION_ID
+#define DECLARE_SIMD_NULLARY_FUNCTION_ID(i1, i2, name, i3) \
+ k##name,
+ SIMD_NULLARY_OPERATIONS(DECLARE_SIMD_NULLARY_FUNCTION_ID)
+#undef DECLARE_SIMD_NULLARY_FUNCTION_ID
+#define DECLARE_SIMD_UNARY_FUNCTION_ID(i1, i2, name, i3, i4) \
+ k##name,
+ SIMD_UNARY_OPERATIONS(DECLARE_SIMD_UNARY_FUNCTION_ID)
+ SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(DECLARE_SIMD_UNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_UNARY_FUNCTION_ID
+#define DECLARE_SIMD_BINARY_FUNCTION_ID(i1, i2, name, i3, i4, i5) \
+ k##name,
+ SIMD_BINARY_OPERATIONS(DECLARE_SIMD_BINARY_FUNCTION_ID)
+#undef DECLARE_SIMD_BINARY_FUNCTION_ID
+#define DECLARE_SIMD_TERNARY_FUNCTION_ID(i1, i2, name, i3, i4, i5, i6) \
+ k##name,
+ SIMD_TERNARY_OPERATIONS(DECLARE_SIMD_TERNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_TERNARY_FUNCTION_ID
+#define DECLARE_SIMD_QUARTERNARY_FUNCTION_ID(i1, i2, name, i3, i4, i5, i6, i7) \
+ k##name,
+ SIMD_QUARTERNARY_OPERATIONS(DECLARE_SIMD_QUARTERNARY_FUNCTION_ID)
+#undef DECLARE_SIMD_QUARTERNARY_FUNCTION_ID
+ kNumberOfBuiltinFunction
};
};
+class Float32x4: public JSObject {
+ public:
+ typedef float32x4_value_t value_t;
+ static const int kValueSize = kFloat32x4Size;
+ static const InstanceType kInstanceType = FLOAT32x4_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedFloat32x4Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ DECLARE_CAST(Float32x4)
+
+ // Dispatched behavior.
+ void Float32x4Print(OStream& os);
+ DECLARE_VERIFIER(Float32x4)
+
+ // Helpers.
+ static const int kLanes = 4;
+ inline float getAt(int index);
+ inline float x() { return getAt(0); }
+ inline float y() { return getAt(1); }
+ inline float z() { return getAt(2); }
+ inline float w() { return getAt(3); }
+ inline float32x4_value_t get();
+ inline void set(float32x4_value_t f32x4);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Float32x4);
+};
+
+
+class Float64x2: public JSObject {
+ public:
+ typedef float64x2_value_t value_t;
+ static const int kValueSize = kFloat64x2Size;
+ static const InstanceType kInstanceType = FLOAT64x2_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedFloat64x2Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ DECLARE_CAST(Float64x2)
+
+ // Dispatched behavior.
+ void Float64x2Print(OStream& os);
+ DECLARE_VERIFIER(Float64x2)
+
+ // Helpers.
+ static const int kLanes = 2;
+ inline double getAt(int index);
+ inline double x() { return getAt(0); }
+ inline double y() { return getAt(1); }
+ inline float64x2_value_t get();
+ inline void set(float64x2_value_t f64x2);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Float64x2);
+};
+
+
+class Int32x4: public JSObject {
+ public:
+ typedef int32x4_value_t value_t;
+ static const int kValueSize = kInt32x4Size;
+ static const InstanceType kInstanceType = INT32x4_TYPE;
+ static inline const char* Name();
+ static inline int kRuntimeAllocatorId();
+
+ // [value]: the FixedInt32x4Array with length 1.
+ DECL_ACCESSORS(value, Object)
+
+ // Casting.
+ DECLARE_CAST(Int32x4)
+
+ // Dispatched behavior.
+ void Int32x4Print(OStream& os);
+ DECLARE_VERIFIER(Int32x4)
+
+ // Helpers.
+ static const int kLanes = 4;
+ inline int32_t getAt(int32_t index);
+ inline int32_t x() { return getAt(0); }
+ inline int32_t y() { return getAt(1); }
+ inline int32_t z() { return getAt(2); }
+ inline int32_t w() { return getAt(3); }
+ inline int32x4_value_t get();
+ inline void set(int32x4_value_t i32x4);
+
+ // Layout description.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kSize = kValueOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Int32x4);
+};
+
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
kSmi,
kInteger32,
kDouble,
+ kFloat32x4,
+ kFloat64x2,
+ kInt32x4,
kHeapObject,
kTagged,
kExternal,
static Representation Smi() { return Representation(kSmi); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
+ static Representation Float32x4() { return Representation(kFloat32x4); }
+ static Representation Float64x2() { return Representation(kFloat64x2); }
+ static Representation Int32x4() { return Representation(kInt32x4); }
static Representation HeapObject() { return Representation(kHeapObject); }
static Representation External() { return Representation(kExternal); }
if (IsHeapObject()) return other.IsNone();
if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
+ if (IsSIMD128() && other.IsSIMD128()) return false;
return kind_ > other.kind_;
}
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsSmiOrInteger32() const { return IsSmi() || IsInteger32(); }
bool IsDouble() const { return kind_ == kDouble; }
+ bool IsFloat32x4() const { return kind_ == kFloat32x4; }
+ bool IsFloat64x2() const { return kind_ == kFloat64x2; }
+ bool IsInt32x4() const { return kind_ == kInt32x4; }
+ bool IsSIMD128() const {
+ return IsFloat32x4() || IsFloat64x2() || IsInt32x4();
+ }
bool IsHeapObject() const { return kind_ == kHeapObject; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
JSObject::ValidateElements(js_object);
if (js_object->HasExternalArrayElements() ||
js_object->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() &&
+ !value->IsFloat64x2() && !value->IsInt32x4() &&
+ !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Execution::ToNumber(isolate, value), Object);
}
if (!object->IsJSObject()) return value;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
if (js_object->HasExternalArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsFloat32x4() &&
+ !value->IsFloat64x2() && !value->IsInt32x4() &&
+ !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, Execution::ToNumber(isolate, value), Object);
}
}
+RUNTIME_FUNCTION(Runtime_AllocateFloat32x4) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+
+ float32x4_value_t zero = {{0, 0, 0, 0}};
+ return *isolate->factory()->NewFloat32x4(zero);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AllocateFloat64x2) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+
+ float64x2_value_t zero = {{0, 0}};
+ return *isolate->factory()->NewFloat64x2(zero);
+}
+
+
+RUNTIME_FUNCTION(Runtime_AllocateInt32x4) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+
+ int32x4_value_t zero = {{0, 0, 0, 0}};
+ return *isolate->factory()->NewInt32x4(zero);
+}
+
+
RUNTIME_FUNCTION(Runtime_NumberAdd) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
}
+static void IterateExternalFloat32x4ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalFloat32x4Array> array(
+ ExternalFloat32x4Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ DCHECK(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewFloat32x4(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
+static void IterateExternalFloat64x2ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalFloat64x2Array> array(
+ ExternalFloat64x2Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ DCHECK(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewFloat64x2(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
+static void IterateExternalInt32x4ArrayElements(Isolate* isolate,
+ Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalInt32x4Array> array(
+ ExternalInt32x4Array::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ DCHECK(visitor != NULL);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewInt32x4(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+}
+
+
// Used for sorting indices in a List<uint32_t>.
static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
uint32_t a = *ap;
isolate, receiver, false, false, visitor);
break;
}
+ case EXTERNAL_FLOAT32x4_ELEMENTS: {
+ IterateExternalFloat32x4ArrayElements(isolate, receiver, visitor);
+ break;
+ }
+ case EXTERNAL_FLOAT64x2_ELEMENTS: {
+ IterateExternalFloat64x2ArrayElements(isolate, receiver, visitor);
+ break;
+ }
+ case EXTERNAL_INT32x4_ELEMENTS: {
+ IterateExternalInt32x4ArrayElements(isolate, receiver, visitor);
+ break;
+ }
case EXTERNAL_FLOAT64_ELEMENTS: {
IterateExternalArrayElements<ExternalFloat64Array, double>(
isolate, receiver, false, false, visitor);
}
+#define RETURN_Float32x4_RESULT(value) \
+ return *isolate->factory()->NewFloat32x4(value);
+
+
+#define RETURN_Float64x2_RESULT(value) \
+ return *isolate->factory()->NewFloat64x2(value);
+
+
+#define RETURN_Int32x4_RESULT(value) \
+ return *isolate->factory()->NewInt32x4(value);
+
+
+RUNTIME_FUNCTION(Runtime_CreateFloat32x4) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+ RUNTIME_ASSERT(args[2]->IsNumber());
+ RUNTIME_ASSERT(args[3]->IsNumber());
+
+ float32x4_value_t value;
+ value.storage[0] = static_cast<float>(args.number_at(0));
+ value.storage[1] = static_cast<float>(args.number_at(1));
+ value.storage[2] = static_cast<float>(args.number_at(2));
+ value.storage[3] = static_cast<float>(args.number_at(3));
+
+ RETURN_Float32x4_RESULT(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CreateFloat64x2) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+
+ float64x2_value_t value;
+ value.storage[0] = args.number_at(0);
+ value.storage[1] = args.number_at(1);
+
+ RETURN_Float64x2_RESULT(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CreateInt32x4) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ RUNTIME_ASSERT(args[0]->IsNumber());
+ RUNTIME_ASSERT(args[1]->IsNumber());
+ RUNTIME_ASSERT(args[2]->IsNumber());
+ RUNTIME_ASSERT(args[3]->IsNumber());
+
+ int32x4_value_t value;
+ value.storage[0] = NumberToInt32(args[0]);
+ value.storage[1] = NumberToInt32(args[1]);
+ value.storage[2] = NumberToInt32(args[2]);
+ value.storage[3] = NumberToInt32(args[3]);
+
+ RETURN_Int32x4_RESULT(value);
+}
+
+
+// Used to convert between uint32_t and float32 without breaking strict
+// aliasing rules.
+union float32_uint32 {
+ float f;
+ uint32_t u;
+ float32_uint32(float v) {
+ f = v;
+ }
+ float32_uint32(uint32_t v) {
+ u = v;
+ }
+};
+
+
+union float64_uint64 {
+ double f;
+ uint64_t u;
+ float64_uint64(double v) {
+ f = v;
+ }
+ float64_uint64(uint64_t v) {
+ u = v;
+ }
+};
+
+
+RUNTIME_FUNCTION(Runtime_Float32x4GetSignMask) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Float32x4, self, 0);
+ float32_uint32 x(self->x());
+ float32_uint32 y(self->y());
+ float32_uint32 z(self->z());
+ float32_uint32 w(self->w());
+ uint32_t mx = (x.u & 0x80000000) >> 31;
+ uint32_t my = (y.u & 0x80000000) >> 31;
+ uint32_t mz = (z.u & 0x80000000) >> 31;
+ uint32_t mw = (w.u & 0x80000000) >> 31;
+ uint32_t value = mx | (my << 1) | (mz << 2) | (mw << 3);
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Float64x2GetSignMask) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Float64x2, self, 0);
+ float64_uint64 x(self->x());
+ float64_uint64 y(self->y());
+ uint64_t mx = x.u >> 63;
+ uint64_t my = y.u >> 63;
+ uint32_t value = uint32_t(mx | (my << 1));
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+RUNTIME_FUNCTION(Runtime_Int32x4GetSignMask) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Int32x4, self, 0);
+ uint32_t mx = (self->x() & 0x80000000) >> 31;
+ uint32_t my = (self->y() & 0x80000000) >> 31;
+ uint32_t mz = (self->z() & 0x80000000) >> 31;
+ uint32_t mw = (self->w() & 0x80000000) >> 31;
+ uint32_t value = mx | (my << 1) | (mz << 2) | (mw << 3);
+ return *isolate->factory()->NewNumberFromUint(value);
+}
+
+
+#define LANE_VALUE(VALUE, LANE) \
+ VALUE->LANE()
+
+
+#define LANE_FLAG(VALUE, LANE) \
+ VALUE->LANE() != 0
+
+
+#define SIMD128_LANE_ACCESS_FUNCTIONS(V) \
+ V(Float32x4, GetX, NewNumber, x, LANE_VALUE) \
+ V(Float32x4, GetY, NewNumber, y, LANE_VALUE) \
+ V(Float32x4, GetZ, NewNumber, z, LANE_VALUE) \
+ V(Float32x4, GetW, NewNumber, w, LANE_VALUE) \
+ V(Float64x2, GetX, NewNumber, x, LANE_VALUE) \
+ V(Float64x2, GetY, NewNumber, y, LANE_VALUE) \
+ V(Int32x4, GetX, NewNumberFromInt, x, LANE_VALUE) \
+ V(Int32x4, GetY, NewNumberFromInt, y, LANE_VALUE) \
+ V(Int32x4, GetZ, NewNumberFromInt, z, LANE_VALUE) \
+ V(Int32x4, GetW, NewNumberFromInt, w, LANE_VALUE) \
+ V(Int32x4, GetFlagX, ToBoolean, x, LANE_FLAG) \
+ V(Int32x4, GetFlagY, ToBoolean, y, LANE_FLAG) \
+ V(Int32x4, GetFlagZ, ToBoolean, z, LANE_FLAG) \
+ V(Int32x4, GetFlagW, ToBoolean, w, LANE_FLAG)
+
+
+#define DECLARE_SIMD_LANE_ACCESS_FUNCTION( \
+ TYPE, NAME, HEAP_FUNCTION, LANE, ACCESS_FUNCTION) \
+RUNTIME_FUNCTION(Runtime_##TYPE##NAME) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ \
+ return *isolate->factory()->HEAP_FUNCTION( \
+ ACCESS_FUNCTION(a, LANE)); \
+}
+
+
+SIMD128_LANE_ACCESS_FUNCTIONS(DECLARE_SIMD_LANE_ACCESS_FUNCTION)
+
+
+template<typename T>
+static inline T Neg(T a) {
+ return -a;
+}
+
+
+template<typename T>
+static inline T Not(T a) {
+ return ~a;
+}
+
+
+template<typename T>
+static inline T Reciprocal(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float Reciprocal<float>(float a) {
+ return 1.0f / a;
+}
+
+
+template<typename T>
+static inline T ReciprocalSqrt(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float ReciprocalSqrt<float>(float a) {
+ return sqrtf(1.0f / a);
+}
+
+
+template<typename T>
+static inline T Sqrt(T a) {
+ UNIMPLEMENTED();
+}
+
+
+template<>
+inline float Sqrt<float>(float a) {
+ return sqrtf(a);
+}
+
+
+template<>
+inline double Sqrt<double>(double a) {
+ return sqrt(a);
+}
+
+
+#define SIMD128_UNARY_FUNCTIONS(V) \
+ V(Float32x4, Abs) \
+ V(Float32x4, Neg) \
+ V(Float32x4, Reciprocal) \
+ V(Float32x4, ReciprocalSqrt) \
+ V(Float32x4, Sqrt) \
+ V(Float64x2, Abs) \
+ V(Float64x2, Neg) \
+ V(Float64x2, Sqrt) \
+ V(Int32x4, Neg) \
+ V(Int32x4, Not)
+
+
+#define DECLARE_SIMD_UNARY_FUNCTION(TYPE, FUNCTION) \
+RUNTIME_FUNCTION(Runtime_##TYPE##FUNCTION) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(TYPE, a, 0); \
+ \
+ TYPE::value_t result; \
+ for (int i = 0; i < TYPE::kLanes; i++) { \
+ result.storage[i] = FUNCTION(a->getAt(i)); \
+ } \
+ \
+ RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_UNARY_FUNCTIONS(DECLARE_SIMD_UNARY_FUNCTION)
+
+
+template<typename T1, typename T2>
+inline void BitsTo(T1 s, T2* t) {
+ memcpy(t, &s, sizeof(T2));
+}
+
+
+template<typename T1, typename T2>
+inline void To(T1 s, T2* t) {
+}
+
+
+template<>
+inline void To<int32_t, float>(int32_t s, float* t) {
+ *t = static_cast<float>(s);
+}
+
+
+template<>
+inline void To<float, int32_t>(float s, int32_t* t) {
+ *t = DoubleToInt32(static_cast<double>(s));
+}
+
+
+#define SIMD128_CONVERSION_FUNCTIONS(V) \
+ V(Float32x4, BitsTo, Int32x4) \
+ V(Float32x4, To, Int32x4) \
+ V(Int32x4, BitsTo, Float32x4) \
+ V(Int32x4, To, Float32x4)
+
+
+#define DECLARE_SIMD_CONVERSION_FUNCTION( \
+ SOURCE_TYPE, FUNCTION, TARGET_TYPE) \
+RUNTIME_FUNCTION( \
+ Runtime_##SOURCE_TYPE##FUNCTION##TARGET_TYPE) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ \
+ CONVERT_ARG_CHECKED(SOURCE_TYPE, a, 0); \
+ \
+ TARGET_TYPE::value_t result; \
+ for (int i = 0; i < SOURCE_TYPE::kLanes; i++) { \
+ FUNCTION(a->getAt(i), &result.storage[i]); \
+ } \
+ \
+ RETURN_##TARGET_TYPE##_RESULT(result); \
+}
+
+
+SIMD128_CONVERSION_FUNCTIONS(DECLARE_SIMD_CONVERSION_FUNCTION)
+
+
+// Per-lane scalar helpers used by the binary SIMD runtime functions below.
+// Arithmetic/bitwise helpers return T; comparison helpers return an int32_t
+// lane mask: -1 (all bits set) when the predicate holds, 0 otherwise.
+
+template<typename T>
+static inline T Add(T a, T b) {
+  return a + b;
+}
+
+
+template<typename T>
+static inline T Div(T a, T b) {
+  return a / b;
+}
+
+
+template<typename T>
+static inline T Mul(T a, T b) {
+  return a * b;
+}
+
+
+template<typename T>
+static inline T Sub(T a, T b) {
+  return a - b;
+}
+
+
+template<typename T>
+static inline int32_t Equal(T a, T b) {
+  return a == b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t NotEqual(T a, T b) {
+  return a != b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t GreaterThanOrEqual(T a, T b) {
+  return a >= b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t GreaterThan(T a, T b) {
+  return a > b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t LessThan(T a, T b) {
+  return a < b ? -1 : 0;
+}
+
+
+template<typename T>
+static inline int32_t LessThanOrEqual(T a, T b) {
+  return a <= b ? -1 : 0;
+}
+
+
+// Bitwise helpers (instantiated for integer lane types only).
+template<typename T>
+static inline T And(T a, T b) {
+  return a & b;
+}
+
+
+template<typename T>
+static inline T Or(T a, T b) {
+  return a | b;
+}
+
+
+template<typename T>
+static inline T Xor(T a, T b) {
+  return a ^ b;
+}
+
+
+// (TYPE, FUNCTION, RETURN_TYPE) triples for the two-operand SIMD runtime
+// functions.  Note the comparisons on Float32x4 return an Int32x4 lane mask;
+// everything else returns its operand type.
+#define SIMD128_BINARY_FUNCTIONS(V) \
+  V(Float32x4, Add, Float32x4) \
+  V(Float32x4, Div, Float32x4) \
+  V(Float32x4, Max, Float32x4) \
+  V(Float32x4, Min, Float32x4) \
+  V(Float32x4, Mul, Float32x4) \
+  V(Float32x4, Sub, Float32x4) \
+  V(Float32x4, Equal, Int32x4) \
+  V(Float32x4, NotEqual, Int32x4) \
+  V(Float32x4, GreaterThanOrEqual, Int32x4) \
+  V(Float32x4, GreaterThan, Int32x4) \
+  V(Float32x4, LessThan, Int32x4) \
+  V(Float32x4, LessThanOrEqual, Int32x4) \
+  V(Float64x2, Add, Float64x2) \
+  V(Float64x2, Div, Float64x2) \
+  V(Float64x2, Max, Float64x2) \
+  V(Float64x2, Min, Float64x2) \
+  V(Float64x2, Mul, Float64x2) \
+  V(Float64x2, Sub, Float64x2) \
+  V(Int32x4, Add, Int32x4) \
+  V(Int32x4, And, Int32x4) \
+  V(Int32x4, Mul, Int32x4) \
+  V(Int32x4, Or, Int32x4) \
+  V(Int32x4, Sub, Int32x4) \
+  V(Int32x4, Xor, Int32x4) \
+  V(Int32x4, Equal, Int32x4) \
+  V(Int32x4, GreaterThan, Int32x4) \
+  V(Int32x4, LessThan, Int32x4)
+
+
+// Defines Runtime_<Type><Fn>: checks both SIMD arguments are of TYPE and
+// applies the scalar helper FUNCTION lane by lane into RETURN_TYPE.
+#define DECLARE_SIMD_BINARY_FUNCTION( \
+    TYPE, FUNCTION, RETURN_TYPE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##FUNCTION) { \
+  HandleScope scope(isolate); \
+  DCHECK(args.length() == 2); \
+  \
+  CONVERT_ARG_CHECKED(TYPE, a, 0); \
+  CONVERT_ARG_CHECKED(TYPE, b, 1); \
+  \
+  RETURN_TYPE::value_t result; \
+  for (int i = 0; i < TYPE::kLanes; i++) { \
+    result.storage[i] = FUNCTION(a->getAt(i), b->getAt(i)); \
+  } \
+  \
+  RETURN_##RETURN_TYPE##_RESULT(result); \
+}
+
+
+// Instantiate all binary runtime functions listed above.
+SIMD128_BINARY_FUNCTIONS(DECLARE_SIMD_BINARY_FUNCTION)
+
+
+// Types that get a Runtime_<Type>Shuffle entry point (4-lane types only;
+// the mask encoding below assumes exactly 4 lanes).
+#define SIMD128_SHUFFLE_FUNCTIONS(V) \
+  V(Float32x4) \
+  V(Int32x4)
+
+
+// Defines Runtime_<Type>Shuffle: arg 1 is a numeric mask packing one 2-bit
+// source-lane selector per result lane (lane i reads bits [2i, 2i+1]).
+// No range check is done here; the JS wrapper validates mask <= 0xFF.
+#define DECLARE_SIMD_SHUFFLE_FUNCTION(TYPE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##Shuffle) { \
+  HandleScope scope(isolate); \
+  DCHECK(args.length() == 2); \
+  \
+  CONVERT_ARG_CHECKED(TYPE, a, 0); \
+  RUNTIME_ASSERT(args[1]->IsNumber()); \
+  uint32_t m = NumberToUint32(args[1]); \
+  \
+  TYPE::value_t result; \
+  for (int i = 0; i < TYPE::kLanes; i++) { \
+    result.storage[i] = a->getAt((m >> (i * 2)) & 0x3); \
+  } \
+  \
+  RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_SHUFFLE_FUNCTIONS(DECLARE_SIMD_SHUFFLE_FUNCTION)
+
+
+// Scales every lane of a Float32x4 by a scalar (arg 1, converted to float).
+RUNTIME_FUNCTION(Runtime_Float32x4Scale) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(Float32x4, self, 0);
+  RUNTIME_ASSERT(args[1]->IsNumber());
+
+  float _s = static_cast<float>(args.number_at(1));
+  float32x4_value_t result;
+  result.storage[0] = self->x() * _s;
+  result.storage[1] = self->y() * _s;
+  result.storage[2] = self->z() * _s;
+  result.storage[3] = self->w() * _s;
+
+  RETURN_Float32x4_RESULT(result);
+}
+
+
+// Scales both lanes of a Float64x2 by a scalar (arg 1, used as double).
+RUNTIME_FUNCTION(Runtime_Float64x2Scale) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+
+  CONVERT_ARG_CHECKED(Float64x2, self, 0);
+  RUNTIME_ASSERT(args[1]->IsNumber());
+
+  double _s = args.number_at(1);
+  float64x2_value_t result;
+  result.storage[0] = self->x() * _s;
+  result.storage[1] = self->y() * _s;
+
+  RETURN_Float64x2_RESULT(result);
+}
+
+
+// ARG_TO_* helpers: each converts runtime argument 1 into a local lane value
+// named by the macro parameter, for use by the set-lane functions below.
+#define ARG_TO_FLOAT32(x) \
+  CONVERT_DOUBLE_ARG_CHECKED(t, 1); \
+  float x = static_cast<float>(t);
+
+
+#define ARG_TO_FLOAT64(x) \
+  CONVERT_DOUBLE_ARG_CHECKED(x, 1); \
+
+
+#define ARG_TO_INT32(x) \
+  RUNTIME_ASSERT(args[1]->IsNumber()); \
+  int32_t x = NumberToInt32(args[1]);
+
+
+// Boolean flags use the lane-mask convention: true -> -1, false -> 0.
+#define ARG_TO_BOOLEAN(x) \
+  CONVERT_BOOLEAN_ARG_CHECKED(flag, 1); \
+  int32_t x = flag ? -1 : 0;
+
+// (TYPE, NAME, ARG_CONVERTER, LANE_INDEX) tuples for the "with" family of
+// runtime functions: copy the vector, replacing a single lane.
+#define SIMD128_SET_LANE_FUNCTIONS(V) \
+  V(Float32x4, WithX, ARG_TO_FLOAT32, 0) \
+  V(Float32x4, WithY, ARG_TO_FLOAT32, 1) \
+  V(Float32x4, WithZ, ARG_TO_FLOAT32, 2) \
+  V(Float32x4, WithW, ARG_TO_FLOAT32, 3) \
+  V(Float64x2, WithX, ARG_TO_FLOAT64, 0) \
+  V(Float64x2, WithY, ARG_TO_FLOAT64, 1) \
+  V(Int32x4, WithX, ARG_TO_INT32, 0) \
+  V(Int32x4, WithY, ARG_TO_INT32, 1) \
+  V(Int32x4, WithZ, ARG_TO_INT32, 2) \
+  V(Int32x4, WithW, ARG_TO_INT32, 3) \
+  V(Int32x4, WithFlagX, ARG_TO_BOOLEAN, 0) \
+  V(Int32x4, WithFlagY, ARG_TO_BOOLEAN, 1) \
+  V(Int32x4, WithFlagZ, ARG_TO_BOOLEAN, 2) \
+  V(Int32x4, WithFlagW, ARG_TO_BOOLEAN, 3)
+
+
+// Defines Runtime_<Type><Name>: returns a copy of the input vector with
+// lane LANE replaced by the converted scalar argument.
+#define DECLARE_SIMD_SET_LANE_FUNCTION( \
+    TYPE, NAME, ARG_FUNCTION, LANE) \
+RUNTIME_FUNCTION(Runtime_##TYPE##NAME) { \
+  HandleScope scope(isolate); \
+  DCHECK(args.length() == 2); \
+  \
+  CONVERT_ARG_CHECKED(TYPE, a, 0); \
+  ARG_FUNCTION(value); \
+  \
+  TYPE::value_t result; \
+  for (int i = 0; i < TYPE::kLanes; i++) { \
+    if (i != LANE) \
+      result.storage[i] = a->getAt(i); \
+    else \
+      result.storage[i] = value; \
+  } \
+  \
+  RETURN_##TYPE##_RESULT(result); \
+}
+
+
+SIMD128_SET_LANE_FUNCTIONS(DECLARE_SIMD_SET_LANE_FUNCTION)
+
+
+// Clamps each lane of a Float32x4 into [lo, hi]: first raise to lo, then
+// cap at hi (equivalent to min(max(self, lo), hi) per lane).
+RUNTIME_FUNCTION(Runtime_Float32x4Clamp) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(Float32x4, self, 0);
+  CONVERT_ARG_CHECKED(Float32x4, lo, 1);
+  CONVERT_ARG_CHECKED(Float32x4, hi, 2);
+
+  float32x4_value_t result;
+  float _x = self->x() > lo->x() ? self->x() : lo->x();
+  float _y = self->y() > lo->y() ? self->y() : lo->y();
+  float _z = self->z() > lo->z() ? self->z() : lo->z();
+  float _w = self->w() > lo->w() ? self->w() : lo->w();
+  result.storage[0] = _x > hi->x() ? hi->x() : _x;
+  result.storage[1] = _y > hi->y() ? hi->y() : _y;
+  result.storage[2] = _z > hi->z() ? hi->z() : _z;
+  result.storage[3] = _w > hi->w() ? hi->w() : _w;
+
+  RETURN_Float32x4_RESULT(result);
+}
+
+
+// Float64x2 variant of the per-lane clamp above.
+RUNTIME_FUNCTION(Runtime_Float64x2Clamp) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(Float64x2, self, 0);
+  CONVERT_ARG_CHECKED(Float64x2, lo, 1);
+  CONVERT_ARG_CHECKED(Float64x2, hi, 2);
+
+  float64x2_value_t result;
+  double _x = self->x() > lo->x() ? self->x() : lo->x();
+  double _y = self->y() > lo->y() ? self->y() : lo->y();
+  result.storage[0] = _x > hi->x() ? hi->x() : _x;
+  result.storage[1] = _y > hi->y() ? hi->y() : _y;
+
+  RETURN_Float64x2_RESULT(result);
+}
+
+
+// Two-vector shuffle: result lanes 0-1 are selected from `first`, lanes 2-3
+// from `second`, each via a 2-bit selector packed into the mask (arg 2).
+// Mask range is validated by the JS wrapper, not here.
+RUNTIME_FUNCTION(Runtime_Float32x4ShuffleMix) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(Float32x4, first, 0);
+  CONVERT_ARG_CHECKED(Float32x4, second, 1);
+  RUNTIME_ASSERT(args[2]->IsNumber());
+
+  uint32_t m = NumberToUint32(args[2]);
+  float32x4_value_t result;
+  float data1[4] = { first->x(), first->y(), first->z(), first->w() };
+  float data2[4] = { second->x(), second->y(), second->z(), second->w() };
+  result.storage[0] = data1[m & 0x3];
+  result.storage[1] = data1[(m >> 2) & 0x3];
+  result.storage[2] = data2[(m >> 4) & 0x3];
+  result.storage[3] = data2[(m >> 6) & 0x3];
+
+  RETURN_Float32x4_RESULT(result);
+}
+
+
+// Bitwise select for Float32x4: for each lane, result bits are taken from
+// `tv` where the Int32x4 mask lane has a 1 bit and from `fv` elsewhere,
+// i.e. (mask & tv) | (~mask & fv) on the raw float bit patterns.
+RUNTIME_FUNCTION(Runtime_Float32x4Select) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(Int32x4, self, 0);
+  CONVERT_ARG_CHECKED(Float32x4, tv, 1);
+  CONVERT_ARG_CHECKED(Float32x4, fv, 2);
+
+  uint32_t _maskX = self->x();
+  uint32_t _maskY = self->y();
+  uint32_t _maskZ = self->z();
+  uint32_t _maskW = self->w();
+  // Extract floats and interpret them as masks.
+  float32_uint32 tvx(tv->x());
+  float32_uint32 tvy(tv->y());
+  float32_uint32 tvz(tv->z());
+  float32_uint32 tvw(tv->w());
+  float32_uint32 fvx(fv->x());
+  float32_uint32 fvy(fv->y());
+  float32_uint32 fvz(fv->z());
+  float32_uint32 fvw(fv->w());
+  // Perform select.
+  float32_uint32 tempX((_maskX & tvx.u) | (~_maskX & fvx.u));
+  float32_uint32 tempY((_maskY & tvy.u) | (~_maskY & fvy.u));
+  float32_uint32 tempZ((_maskZ & tvz.u) | (~_maskZ & fvz.u));
+  float32_uint32 tempW((_maskW & tvw.u) | (~_maskW & fvw.u));
+
+  // Reinterpret the merged bit patterns back as floats.
+  float32x4_value_t result;
+  result.storage[0] = tempX.f;
+  result.storage[1] = tempY.f;
+  result.storage[2] = tempZ.f;
+  result.storage[3] = tempW.f;
+
+  RETURN_Float32x4_RESULT(result);
+}
+
+
+// Bitwise select for Int32x4: same (mask & tv) | (~mask & fv) merge as the
+// Float32x4 variant, but directly on the integer lanes.
+RUNTIME_FUNCTION(Runtime_Int32x4Select) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+
+  CONVERT_ARG_CHECKED(Int32x4, self, 0);
+  CONVERT_ARG_CHECKED(Int32x4, tv, 1);
+  CONVERT_ARG_CHECKED(Int32x4, fv, 2);
+
+  uint32_t _maskX = self->x();
+  uint32_t _maskY = self->y();
+  uint32_t _maskZ = self->z();
+  uint32_t _maskW = self->w();
+
+  int32x4_value_t result;
+  result.storage[0] = (_maskX & tv->x()) | (~_maskX & fv->x());
+  result.storage[1] = (_maskY & tv->y()) | (~_maskY & fv->y());
+  result.storage[2] = (_maskZ & tv->z()) | (~_maskZ & fv->z());
+  result.storage[3] = (_maskW & tv->w()) | (~_maskW & fv->w());
+
+  RETURN_Int32x4_RESULT(result);
+}
+
+
// ----------------------------------------------------------------------------
// Reference implementation for inlined runtime functions. Only used when the
// compiler does not support a certain intrinsic. Don't optimize these, but
F(MathFround, 1, 1) \
F(RemPiO2, 1, 1) \
\
+ /* Float32x4 and Int32x4 */ \
+ F(AllocateFloat32x4, 0, 1) \
+ F(AllocateFloat64x2, 0, 1) \
+ F(AllocateInt32x4, 0, 1) \
+ \
+ /* SIMD */ \
+ F(Float32x4Abs, 1, 1) \
+ F(Float32x4BitsToInt32x4, 1, 1) \
+ F(Float32x4Neg, 1, 1) \
+ F(Float32x4Reciprocal, 1, 1) \
+ F(Float32x4ReciprocalSqrt, 1, 1) \
+ F(Float32x4Sqrt, 1, 1) \
+ F(Float32x4ToInt32x4, 1, 1) \
+ F(Float32x4Add, 2, 1) \
+ F(Float32x4Div, 2, 1) \
+ F(Float32x4Max, 2, 1) \
+ F(Float32x4Min, 2, 1) \
+ F(Float32x4Mul, 2, 1) \
+ F(Float32x4Sub, 2, 1) \
+ F(Float32x4Equal, 2, 1) \
+ F(Float32x4NotEqual, 2, 1) \
+ F(Float32x4GreaterThanOrEqual, 2, 1) \
+ F(Float32x4GreaterThan, 2, 1) \
+ F(Float32x4LessThan, 2, 1) \
+ F(Float32x4LessThanOrEqual, 2, 1) \
+ F(Float32x4Shuffle, 2, 1) \
+ F(Float32x4Scale, 2, 1) \
+ F(Float32x4WithX, 2, 1) \
+ F(Float32x4WithY, 2, 1) \
+ F(Float32x4WithZ, 2, 1) \
+ F(Float32x4WithW, 2, 1) \
+ F(Float32x4Clamp, 3, 1) \
+ F(Float32x4ShuffleMix, 3, 1) \
+ F(Float32x4Select, 3, 1) \
+ F(Float64x2Abs, 1, 1) \
+ F(Float64x2Neg, 1, 1) \
+ F(Float64x2Sqrt, 1, 1) \
+ F(Float64x2Add, 2, 1) \
+ F(Float64x2Div, 2, 1) \
+ F(Float64x2Max, 2, 1) \
+ F(Float64x2Min, 2, 1) \
+ F(Float64x2Mul, 2, 1) \
+ F(Float64x2Sub, 2, 1) \
+ F(Float64x2Scale, 2, 1) \
+ F(Float64x2WithX, 2, 1) \
+ F(Float64x2WithY, 2, 1) \
+ F(Float64x2Clamp, 3, 1) \
+ F(Int32x4BitsToFloat32x4, 1, 1) \
+ F(Int32x4Neg, 1, 1) \
+ F(Int32x4Not, 1, 1) \
+ F(Int32x4ToFloat32x4, 1, 1) \
+ F(Int32x4And, 2, 1) \
+ F(Int32x4Or, 2, 1) \
+ F(Int32x4Xor, 2, 1) \
+ F(Int32x4Add, 2, 1) \
+ F(Int32x4Sub, 2, 1) \
+ F(Int32x4Mul, 2, 1) \
+ F(Int32x4Shuffle, 2, 1) \
+ F(Int32x4WithX, 2, 1) \
+ F(Int32x4WithY, 2, 1) \
+ F(Int32x4WithZ, 2, 1) \
+ F(Int32x4WithW, 2, 1) \
+ F(Int32x4WithFlagX, 2, 1) \
+ F(Int32x4WithFlagY, 2, 1) \
+ F(Int32x4WithFlagZ, 2, 1) \
+ F(Int32x4WithFlagW, 2, 1) \
+ F(Int32x4GreaterThan, 2, 1) \
+ F(Int32x4Equal, 2, 1) \
+ F(Int32x4LessThan, 2, 1) \
+ F(Int32x4Select, 3, 1) \
+ \
/* Regular expressions */ \
F(RegExpCompile, 3, 1) \
F(RegExpExecMultiple, 4, 1) \
F(DateSetValue, 3, 1) \
F(DateCacheVersion, 0, 1) \
\
+ /* Float32x4, Float64x2 and Int32x4 */ \
+ F(CreateFloat32x4, 4, 1) \
+ F(Float32x4GetX, 1, 1) \
+ F(Float32x4GetY, 1, 1) \
+ F(Float32x4GetZ, 1, 1) \
+ F(Float32x4GetW, 1, 1) \
+ F(Float32x4GetSignMask, 1, 1) \
+ F(CreateFloat64x2, 2, 1) \
+ F(Float64x2GetX, 1, 1) \
+ F(Float64x2GetY, 1, 1) \
+ F(Float64x2GetSignMask, 1, 1) \
+ F(CreateInt32x4, 4, 1) \
+ F(Int32x4GetX, 1, 1) \
+ F(Int32x4GetY, 1, 1) \
+ F(Int32x4GetZ, 1, 1) \
+ F(Int32x4GetW, 1, 1) \
+ F(Int32x4GetFlagX, 1, 1) \
+ F(Int32x4GetFlagY, 1, 1) \
+ F(Int32x4GetFlagZ, 1, 1) \
+ F(Int32x4GetFlagW, 1, 1) \
+ F(Int32x4GetSignMask, 1, 1) \
+ \
/* Globals */ \
F(CompileString, 2, 1) \
\
F(HasExternalInt32Elements, 1, 1) \
F(HasExternalUint32Elements, 1, 1) \
F(HasExternalFloat32Elements, 1, 1) \
+ F(HasExternalFloat32x4Elements, 1, 1) \
+ F(HasExternalInt32x4Elements, 1, 1) \
F(HasExternalFloat64Elements, 1, 1) \
+ F(HasExternalFloat64x2Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1) \
F(HasFixedInt8Elements, 1, 1) \
F(HasFixedUint8Elements, 1, 1) \
ARRAY_ID_FLOAT32 = 7,
ARRAY_ID_FLOAT64 = 8,
ARRAY_ID_UINT8_CLAMPED = 9,
-
+ ARRAY_ID_FLOAT32x4 = 10,
+ ARRAY_ID_FLOAT64x2 = 11,
+ ARRAY_ID_INT32x4 = 12,
ARRAY_ID_FIRST = ARRAY_ID_UINT8,
- ARRAY_ID_LAST = ARRAY_ID_UINT8_CLAMPED
+ ARRAY_ID_LAST = ARRAY_ID_INT32x4
};
static void ArrayIdToTypeAndSize(int array_id,
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ if (IsFloat32x4(y) || IsFloat64x2(y) || IsInt32x4(y)) {
+ return %StringEquals(x, %ToString(y));
+ }
y = %ToPrimitive(y, NO_HINT);
}
} else if (IS_SYMBOL(x)) {
if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
return 1; // not equal
+ } else if (IsFloat32x4(x)) {
+ while (true) {
+ if (IsFloat32x4(y) || IsInt32x4(y)) {
+ return (x.x == y.x && x.y == y.y && x.z == y.z && x.w == y.w) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
+ } else if (IsFloat64x2(x)) {
+ while (true) {
+ if (IsFloat64x2(y)) {
+ return (x.x == y.x && x.y == y.y) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
+ } else if (IsInt32x4(x)) {
+ while (true) {
+ if (IsFloat32x4(y) || IsInt32x4(y)) {
+ return (x.x == y.x && x.y == y.y && x.z == y.z && x.w == y.w) ? 0 : 1;
+ }
+ if (IS_STRING(y)) return %StringEquals(%ToString(x), y);
+ if (IS_NUMBER(y)) return 1; // not equal
+ if (IS_SYMBOL(y)) return 1; // not equal
+ if (IS_BOOLEAN(y)) return y ? 0 : 1;
+ if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
+ y = %ToPrimitive(y, NO_HINT);
+ }
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
if (IS_NULL_OR_UNDEFINED(y)) return 1;
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_STRING(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (IS_SYMBOL(y)) return 1; // not equal
+ if (IsFloat32x4(y) || IsFloat64x2(y) || IsInt32x4(y)) return x ? 0 : 1;
// y is object.
x = %ToNumber(x);
y = %ToPrimitive(y, NO_HINT);
return %NumberEquals(this, x);
}
+ if (IsFloat32x4(this)) {
+ if (!IsFloat32x4(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y &&
+ this.z == x.z && this.w == x.w) ? 0 : 1;
+ }
+
+ if (IsFloat64x2(this)) {
+ if (!IsFloat64x2(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y) ? 0 : 1;
+ }
+
+ if (IsInt32x4(this)) {
+ if (!IsInt32x4(x)) return 1; // not equal
+ return (this.x == x.x && this.y == x.y &&
+ this.z == x.z && this.w == x.w) ? 0 : 1;
+ }
+
// If anything else gets here, we just do simple identity check.
// Objects (including functions), null, undefined and booleans were
// checked in the CompareStub, so there should be nothing left.
right = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(left) && IS_STRING(right)) {
return %_StringCompare(left, right);
+ } else if ((IsFloat32x4(left) || IsInt32x4(left)) &&
+ (IsFloat32x4(right) || IsInt32x4(right))) {
+ if ((left.x == right.x) && (left.y == right.y) &&
+ (left.z == right.z) && (left.w == right.w)) {
+ return 0; // equal
+ }
+ if ((left.x < right.x) && (left.y < right.y) &&
+ (left.z < right.z) && (left.w < right.w)) {
+ return -1; // less
+ }
+ if ((left.x > right.x) && (left.y > right.y) &&
+ (left.z > right.z) && (left.w > right.w)) {
+ return 1; // great
+ }
+ } else if (IsFloat64x2(left) && IsFloat64x2(right)) {
+ if ((left.x == right.x) && (left.y == right.y)) {
+ return 0; // equal
+ }
+ if ((left.x < right.x) && (left.y < right.y)) {
+ return -1; // less
+ }
+ if ((left.x > right.x) && (left.y > right.y)) {
+ return 1; // great
+ }
} else {
var left_number = %ToNumber(left);
var right_number = %ToNumber(right);
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []);
+ if (IsFloat32x4(x)) return NAN;
+ if (IsFloat64x2(x)) return NAN;
+ if (IsInt32x4(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
if (IS_BOOLEAN(x)) return x ? 1 : 0;
if (IS_UNDEFINED(x)) return NAN;
if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []);
+ if (IsFloat32x4(x)) return NAN;
+ if (IsFloat64x2(x)) return NAN;
+ if (IsInt32x4(x)) return NAN;
return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
}
if (IS_STRING(x)) return new $String(x);
if (IS_NUMBER(x)) return new $Number(x);
if (IS_BOOLEAN(x)) return new $Boolean(x);
+ if (IsFloat32x4(x)) return new $Float32x4(x.x, x.y, x.z, x.w);
+ if (IsFloat64x2(x)) return new $Float64x2(x.x, x.y);
+ if (IsInt32x4(x)) return new $Int32x4(x.x, x.y, x.z, x.w);
if (IS_SYMBOL(x)) return %NewSymbolWrapper(x);
if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
throw %MakeTypeError('undefined_or_null_to_object', []);
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// Cache the SIMD global and its three type constructors installed natively.
+var $SIMD = global.SIMD;
+var $Float32x4 = $SIMD.float32x4;
+var $Float64x2 = $SIMD.float64x2;
+var $Int32x4 = $SIMD.int32x4;
+
+// (CamelCaseName, lowercase type name) pairs for the macro expansions below.
+macro SIMD128_DATA_TYPES(FUNCTION)
+FUNCTION(Float32x4, float32x4)
+FUNCTION(Float64x2, float64x2)
+FUNCTION(Int32x4, int32x4)
+endmacro
+
+// For each type, generate ThrowNAMETypeError() and a CheckNAME(arg) guard
+// that throws a TypeError unless arg is an instance of the type.
+macro DECLARE_DATA_TYPE_COMMON_FUNCTION(NAME, TYPE)
+function ThrowNAMETypeError() {
+  throw MakeTypeError("this is not a TYPE object.");
+}
+
+function CheckNAME(arg) {
+  if (!(arg instanceof $NAME))
+    ThrowNAMETypeError();
+}
+endmacro
+
+SIMD128_DATA_TYPES(DECLARE_DATA_TYPE_COMMON_FUNCTION)
+
+// toString implementations installed on the prototypes in SetUp* below.
+// NOTE(review): "Stringfy" is a typo for "Stringify", but the names are used
+// consistently by the installers, so they are left unchanged here.
+function StringfyFloat32x4JS() {
+  CheckFloat32x4(this);
+  return "float32x4(" + this.x + "," + this.y + "," + this.z + "," + this.w + ")";
+}
+
+function StringfyFloat64x2JS() {
+  CheckFloat64x2(this);
+  return "float64x2(" + this.x + "," + this.y + ")";
+}
+
+function StringfyInt32x4JS() {
+  CheckInt32x4(this);
+  return "int32x4(" + this.x + "," + this.y + "," + this.z + "," + this.w + ")";
+}
+
+// (TYPE, GETTER) pairs: each expands to a prototype accessor wrapper that
+// type-checks `this` and forwards to the %TYPEGETTER runtime function.
+macro SIMD128_DATA_TYPE_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, GetX)
+FUNCTION(Float32x4, GetY)
+FUNCTION(Float32x4, GetZ)
+FUNCTION(Float32x4, GetW)
+FUNCTION(Float32x4, GetSignMask)
+FUNCTION(Float64x2, GetX)
+FUNCTION(Float64x2, GetY)
+FUNCTION(Float64x2, GetSignMask)
+FUNCTION(Int32x4, GetX)
+FUNCTION(Int32x4, GetY)
+FUNCTION(Int32x4, GetZ)
+FUNCTION(Int32x4, GetW)
+FUNCTION(Int32x4, GetFlagX)
+FUNCTION(Int32x4, GetFlagY)
+FUNCTION(Int32x4, GetFlagZ)
+FUNCTION(Int32x4, GetFlagW)
+FUNCTION(Int32x4, GetSignMask)
+endmacro
+
+// Generates TYPEGETTERJS(): the JS-visible getter body.
+macro DECLARE_DATA_TYPE_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTIONJS() {
+  CheckTYPE(this);
+  return %TYPEFUNCTION(this);
+}
+endmacro
+
+SIMD128_DATA_TYPE_FUNCTIONS(DECLARE_DATA_TYPE_FUNCTION)
+
+// float32x4 constructor: with a single argument it acts as a copy
+// constructor (the argument must itself be a float32x4); otherwise the four
+// lane values are coerced to Number.
+function Float32x4Constructor(x, y, z, w) {
+  if (arguments.length == 1) {
+    CheckFloat32x4(x);
+    return %CreateFloat32x4(x.x, x.y, x.z, x.w);
+  } else {
+    x = TO_NUMBER_INLINE(x);
+    y = TO_NUMBER_INLINE(y);
+    z = TO_NUMBER_INLINE(z);
+    w = TO_NUMBER_INLINE(w);
+    return %CreateFloat32x4(x, y, z, w);
+  }
+}
+
+// float64x2 constructor: copy constructor for one argument, otherwise two
+// Number-coerced lane values.
+function Float64x2Constructor(x, y) {
+  if (arguments.length == 1) {
+    CheckFloat64x2(x);
+    return %CreateFloat64x2(x.x, x.y);
+  } else {
+    x = TO_NUMBER_INLINE(x);
+    y = TO_NUMBER_INLINE(y);
+    return %CreateFloat64x2(x, y);
+  }
+}
+
+// int32x4 constructor: copy constructor for one argument, otherwise four
+// lane values coerced with ToInt32 semantics.
+function Int32x4Constructor(x, y, z, w) {
+  if (arguments.length == 1) {
+    CheckInt32x4(x);
+    return %CreateInt32x4(x.x, x.y, x.z, x.w);
+  } else {
+    x = TO_INT32(x);
+    y = TO_INT32(y);
+    z = TO_INT32(z);
+    w = TO_INT32(w);
+    return %CreateInt32x4(x, y, z, w);
+  }
+}
+
+// Bootstrap-time installer: wires the constructor, a fresh prototype, the
+// lane/signMask getters, and toString onto SIMD.float32x4.
+function SetUpFloat32x4() {
+  %CheckIsBootstrapping();
+
+  %SetCode($Float32x4, Float32x4Constructor);
+
+  %FunctionSetPrototype($Float32x4, new $Object());
+  %AddNamedProperty($Float32x4.prototype, "constructor", $Float32x4, DONT_ENUM);
+
+  InstallGetter($Float32x4.prototype, "x", Float32x4GetXJS);
+  InstallGetter($Float32x4.prototype, "y", Float32x4GetYJS);
+  InstallGetter($Float32x4.prototype, "z", Float32x4GetZJS);
+  InstallGetter($Float32x4.prototype, "w", Float32x4GetWJS);
+  InstallGetter($Float32x4.prototype, "signMask", Float32x4GetSignMaskJS);
+  InstallFunctions($Float32x4.prototype, DONT_ENUM, $Array(
+    "toString", StringfyFloat32x4JS
+  ));
+}
+
+// Same installation for SIMD.float64x2 (two lanes, no flag getters).
+function SetUpFloat64x2() {
+  %CheckIsBootstrapping();
+
+  %SetCode($Float64x2, Float64x2Constructor);
+
+  %FunctionSetPrototype($Float64x2, new $Object());
+  %AddNamedProperty($Float64x2.prototype, "constructor", $Float64x2, DONT_ENUM);
+
+  InstallGetter($Float64x2.prototype, "x", Float64x2GetXJS);
+  InstallGetter($Float64x2.prototype, "y", Float64x2GetYJS);
+  InstallGetter($Float64x2.prototype, "signMask", Float64x2GetSignMaskJS);
+  InstallFunctions($Float64x2.prototype, DONT_ENUM, $Array(
+    "toString", StringfyFloat64x2JS
+  ));
+}
+
+// Same installation for SIMD.int32x4, plus the per-lane boolean flag getters.
+function SetUpInt32x4() {
+  %CheckIsBootstrapping();
+
+  %SetCode($Int32x4, Int32x4Constructor);
+
+  %FunctionSetPrototype($Int32x4, new $Object());
+  %AddNamedProperty($Int32x4.prototype, "constructor", $Int32x4, DONT_ENUM);
+
+  InstallGetter($Int32x4.prototype, "x", Int32x4GetXJS);
+  InstallGetter($Int32x4.prototype, "y", Int32x4GetYJS);
+  InstallGetter($Int32x4.prototype, "z", Int32x4GetZJS);
+  InstallGetter($Int32x4.prototype, "w", Int32x4GetWJS);
+  InstallGetter($Int32x4.prototype, "flagX", Int32x4GetFlagXJS);
+  InstallGetter($Int32x4.prototype, "flagY", Int32x4GetFlagYJS);
+  InstallGetter($Int32x4.prototype, "flagZ", Int32x4GetFlagZJS);
+  InstallGetter($Int32x4.prototype, "flagW", Int32x4GetFlagWJS);
+  InstallGetter($Int32x4.prototype, "signMask", Int32x4GetSignMaskJS);
+  InstallFunctions($Int32x4.prototype, DONT_ENUM, $Array(
+    "toString", StringfyInt32x4JS
+  ));
+}
+
+SetUpFloat32x4();
+SetUpFloat64x2();
+SetUpInt32x4();
+
+//------------------------------------------------------------------------------
+// Operation tables: each (TYPE, OP) entry below is expanded by the DECLARE_*
+// macros further down into a thin JS wrapper TYPEOPJS(...) that type-checks
+// its SIMD operands, coerces any scalar operand, and calls the %TYPEOP
+// runtime function.
+macro SIMD128_UNARY_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, Abs)
+FUNCTION(Float32x4, BitsToInt32x4)
+FUNCTION(Float32x4, Neg)
+FUNCTION(Float32x4, Reciprocal)
+FUNCTION(Float32x4, ReciprocalSqrt)
+FUNCTION(Float32x4, Sqrt)
+FUNCTION(Float32x4, ToInt32x4)
+FUNCTION(Float64x2, Abs)
+FUNCTION(Float64x2, Neg)
+FUNCTION(Float64x2, Sqrt)
+FUNCTION(Int32x4, BitsToFloat32x4)
+FUNCTION(Int32x4, Neg)
+FUNCTION(Int32x4, Not)
+FUNCTION(Int32x4, ToFloat32x4)
+endmacro
+
+macro SIMD128_BINARY_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4, Add)
+FUNCTION(Float32x4, Div)
+FUNCTION(Float32x4, Max)
+FUNCTION(Float32x4, Min)
+FUNCTION(Float32x4, Mul)
+FUNCTION(Float32x4, Sub)
+FUNCTION(Float32x4, Equal)
+FUNCTION(Float32x4, NotEqual)
+FUNCTION(Float32x4, GreaterThanOrEqual)
+FUNCTION(Float32x4, GreaterThan)
+FUNCTION(Float32x4, LessThan)
+FUNCTION(Float32x4, LessThanOrEqual)
+FUNCTION(Float64x2, Add)
+FUNCTION(Float64x2, Div)
+FUNCTION(Float64x2, Max)
+FUNCTION(Float64x2, Min)
+FUNCTION(Float64x2, Mul)
+FUNCTION(Float64x2, Sub)
+FUNCTION(Int32x4, Add)
+FUNCTION(Int32x4, And)
+FUNCTION(Int32x4, Mul)
+FUNCTION(Int32x4, Or)
+FUNCTION(Int32x4, Sub)
+FUNCTION(Int32x4, Xor)
+FUNCTION(Int32x4, Equal)
+FUNCTION(Int32x4, GreaterThan)
+FUNCTION(Int32x4, LessThan)
+endmacro
+
+macro SIMD128_BINARY_SHUFFLE_FUNCTIONS(FUNCTION)
+FUNCTION(Float32x4)
+FUNCTION(Int32x4)
+endmacro
+
+macro FLOAT32x4_BINARY_FUNCTIONS_WITH_FLOAT32_PARAMETER(FUNCTION)
+FUNCTION(Scale)
+FUNCTION(WithX)
+FUNCTION(WithY)
+FUNCTION(WithZ)
+FUNCTION(WithW)
+endmacro
+
+macro FLOAT64x2_BINARY_FUNCTIONS_WITH_FLOAT64_PARAMETER(FUNCTION)
+FUNCTION(Scale)
+FUNCTION(WithX)
+FUNCTION(WithY)
+endmacro
+
+macro INT32x4_BINARY_FUNCTIONS_WITH_INT32_PARAMETER(FUNCTION)
+FUNCTION(WithX)
+FUNCTION(WithY)
+FUNCTION(WithZ)
+FUNCTION(WithW)
+endmacro
+
+macro INT32x4_BINARY_FUNCTIONS_WITH_BOOLEAN_PARAMETER(FUNCTION)
+FUNCTION(WithFlagX)
+FUNCTION(WithFlagY)
+FUNCTION(WithFlagZ)
+FUNCTION(WithFlagW)
+endmacro
+
+// Wrapper generators.  Each produces a function whose only job is argument
+// validation/coercion before delegating to the runtime.
+macro DECLARE_SIMD_UNARY_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTIONJS(x4) {
+  CheckTYPE(x4);
+  return %TYPEFUNCTION(x4);
+}
+endmacro
+
+macro DECLARE_SIMD_BINARY_FUNCTION(TYPE, FUNCTION)
+function TYPEFUNCTIONJS(a4, b4) {
+  CheckTYPE(a4);
+  CheckTYPE(b4);
+  return %TYPEFUNCTION(a4, b4);
+}
+endmacro
+
+// Shuffle wrapper: validates the mask is an integer in [0, 0xFF] (one 2-bit
+// selector per lane) before delegating; the runtime does no range check.
+macro DECLARE_SIMD_BINARY_SHUFFLE_FUNCTION(TYPE)
+function TYPEShuffleJS(x4, mask) {
+  CheckTYPE(x4);
+  var value = TO_INT32(mask);
+  if ((value < 0) || (value > 0xFF)) {
+    throw MakeRangeError("invalid_simd_shuffle_mask");
+  }
+  return %TYPEShuffle(x4, mask);
+}
+endmacro
+
+macro DECLARE_FLOAT32x4_BINARY_FUNCTION_WITH_FLOAT32_PARAMETER(FUNCTION)
+function Float32x4FUNCTIONJS(x4, f) {
+  CheckFloat32x4(x4);
+  f = TO_NUMBER_INLINE(f);
+  return %Float32x4FUNCTION(x4, f);
+}
+endmacro
+
+macro DECLARE_FLOAT64x2_BINARY_FUNCTION_WITH_FLOAT64_PARAMETER(FUNCTION)
+function Float64x2FUNCTIONJS(x2, f) {
+  CheckFloat64x2(x2);
+  f = TO_NUMBER_INLINE(f);
+  return %Float64x2FUNCTION(x2, f);
+}
+endmacro
+
+macro DECLARE_INT32x4_BINARY_FUNCTION_WITH_INT32_PARAMETER(FUNCTION)
+function Int32x4FUNCTIONJS(x4, i) {
+  CheckInt32x4(x4);
+  i = TO_INT32(i);
+  return %Int32x4FUNCTION(x4, i);
+}
+endmacro
+
+macro DECLARE_INT32x4_BINARY_FUNCTION_WITH_BOOLEAN_PARAMETER(FUNCTION)
+function Int32x4FUNCTIONJS(x4, b) {
+  CheckInt32x4(x4);
+  b = ToBoolean(b);
+  return %Int32x4FUNCTION(x4, b);
+}
+endmacro
+
+// Expand every table through its generator.
+SIMD128_UNARY_FUNCTIONS(DECLARE_SIMD_UNARY_FUNCTION)
+SIMD128_BINARY_FUNCTIONS(DECLARE_SIMD_BINARY_FUNCTION)
+SIMD128_BINARY_SHUFFLE_FUNCTIONS(DECLARE_SIMD_BINARY_SHUFFLE_FUNCTION)
+FLOAT32x4_BINARY_FUNCTIONS_WITH_FLOAT32_PARAMETER(DECLARE_FLOAT32x4_BINARY_FUNCTION_WITH_FLOAT32_PARAMETER)
+FLOAT64x2_BINARY_FUNCTIONS_WITH_FLOAT64_PARAMETER(DECLARE_FLOAT64x2_BINARY_FUNCTION_WITH_FLOAT64_PARAMETER)
+INT32x4_BINARY_FUNCTIONS_WITH_INT32_PARAMETER(DECLARE_INT32x4_BINARY_FUNCTION_WITH_INT32_PARAMETER)
+INT32x4_BINARY_FUNCTIONS_WITH_BOOLEAN_PARAMETER(DECLARE_INT32x4_BINARY_FUNCTION_WITH_BOOLEAN_PARAMETER)
+
+// Broadcast one scalar into all four float32x4 lanes.
+function Float32x4SplatJS(f) {
+  f = TO_NUMBER_INLINE(f);
+  return %CreateFloat32x4(f, f, f, f);
+}
+
+function Float32x4ZeroJS() {
+  return %CreateFloat32x4(0.0, 0.0, 0.0, 0.0);
+}
+
+// Bitwise ops on float32x4 are implemented by round-tripping through int32x4
+// bit reinterpretation, since the runtime only has integer bitwise ops.
+function Float32x4AndJS(a4, b4) {
+  a4 = Float32x4BitsToInt32x4JS(a4);
+  b4 = Float32x4BitsToInt32x4JS(b4);
+  return Int32x4BitsToFloat32x4JS(Int32x4AndJS(a4, b4));
+}
+
+function Float32x4OrJS(a4, b4) {
+  a4 = Float32x4BitsToInt32x4JS(a4);
+  b4 = Float32x4BitsToInt32x4JS(b4);
+  return Int32x4BitsToFloat32x4JS(Int32x4OrJS(a4, b4));
+}
+
+function Float32x4XorJS(a4, b4) {
+  a4 = Float32x4BitsToInt32x4JS(a4);
+  b4 = Float32x4BitsToInt32x4JS(b4);
+  return Int32x4BitsToFloat32x4JS(Int32x4XorJS(a4, b4));
+}
+
+function Float32x4NotJS(x4) {
+  x4 = Float32x4BitsToInt32x4JS(x4);
+  return Int32x4BitsToFloat32x4JS(Int32x4NotJS(x4));
+}
+
+// Per-lane clamp into [lowerLimit, upperLimit]; see Runtime_Float32x4Clamp.
+function Float32x4ClampJS(x4, lowerLimit, upperLimit) {
+  CheckFloat32x4(x4);
+  CheckFloat32x4(lowerLimit);
+  CheckFloat32x4(upperLimit);
+  return %Float32x4Clamp(x4, lowerLimit, upperLimit);
+}
+
+// Two-vector shuffle; mask packs four 2-bit lane selectors and must lie in
+// [0, 0xFF] (the runtime itself does not range-check it).
+function Float32x4ShuffleMixJS(a4, b4, mask) {
+  CheckFloat32x4(a4);
+  CheckFloat32x4(b4);
+  var value = TO_INT32(mask);
+  if ((value < 0) || (value > 0xFF)) {
+    throw MakeRangeError("invalid_simd_shuffleMix_mask");
+  }
+  return %Float32x4ShuffleMix(a4, b4, mask);
+}
+
+// Bitwise select: int32x4 mask chooses, bit by bit, between trueValue and
+// falseValue lanes.
+function Float32x4SelectJS(x4, trueValue, falseValue) {
+  CheckInt32x4(x4);
+  CheckFloat32x4(trueValue);
+  CheckFloat32x4(falseValue);
+  return %Float32x4Select(x4, trueValue, falseValue);
+}
+
+// Broadcast one scalar into both float64x2 lanes.
+function Float64x2SplatJS(f) {
+  f = TO_NUMBER_INLINE(f);
+  return %CreateFloat64x2(f, f);
+}
+
+function Float64x2ZeroJS() {
+  return %CreateFloat64x2(0.0, 0.0);
+}
+
+// Per-lane clamp into [lowerLimit, upperLimit]; see Runtime_Float64x2Clamp.
+function Float64x2ClampJS(x2, lowerLimit, upperLimit) {
+  CheckFloat64x2(x2);
+  CheckFloat64x2(lowerLimit);
+  CheckFloat64x2(upperLimit);
+  return %Float64x2Clamp(x2, lowerLimit, upperLimit);
+}
+
+function Int32x4ZeroJS() {
+  return %CreateInt32x4(0, 0, 0, 0);
+}
+
+// Build an int32x4 lane mask from four booleans: true -> -1 (all bits set),
+// false -> 0, matching the comparison-result convention.
+function Int32x4BoolJS(x, y, z, w) {
+  x = x ? -1 : 0;
+  y = y ? -1 : 0;
+  z = z ? -1 : 0;
+  w = w ? -1 : 0;
+  return %CreateInt32x4(x, y, z, w);
+}
+
+// Broadcast one ToInt32-coerced scalar into all four int32x4 lanes.
+function Int32x4SplatJS(s) {
+  s = TO_INT32(s);
+  return %CreateInt32x4(s, s, s, s);
+}
+
+// Bitwise select between two int32x4 values; see Runtime_Int32x4Select.
+function Int32x4SelectJS(x4, trueValue, falseValue) {
+  CheckInt32x4(x4);
+  CheckInt32x4(trueValue);
+  CheckInt32x4(falseValue);
+  return %Int32x4Select(x4, trueValue, falseValue);
+}
+
+// Per-lane shifts, implemented with JS shift operators (so the shift count
+// is taken mod 32, per ECMAScript shift semantics).
+
+// Left shift each lane by s.
+function Int32x4ShiftLeftJS(t, s) {
+  CheckInt32x4(t);
+  s = TO_NUMBER_INLINE(s);
+  var x = t.x << s;
+  var y = t.y << s;
+  var z = t.z << s;
+  var w = t.w << s;
+  return %CreateInt32x4(x, y, z, w);
+}
+
+// Logical (zero-filling, >>>) right shift of each lane by s.
+function Int32x4ShiftRightJS(t, s) {
+  CheckInt32x4(t);
+  s = TO_NUMBER_INLINE(s);
+  var x = t.x >>> s;
+  var y = t.y >>> s;
+  var z = t.z >>> s;
+  var w = t.w >>> s;
+  return %CreateInt32x4(x, y, z, w);
+}
+
+// Arithmetic (sign-extending, >>) right shift of each lane by s.
+function Int32x4ShiftRightArithmeticJS(t, s) {
+  CheckInt32x4(t);
+  s = TO_NUMBER_INLINE(s);
+  var x = t.x >> s;
+  var y = t.y >> s;
+  var z = t.z >> s;
+  var w = t.w >> s;
+  return %CreateInt32x4(x, y, z, w);
+}
+
+function SetUpSIMD() {
+ %CheckIsBootstrapping();
+
+ %OptimizeObjectForAddingMultipleProperties($SIMD, 258);
+ %AddNamedProperty($SIMD, "XXXX", 0x00, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXXY", 0x40, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXXZ", 0x80, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXXW", 0xC0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXYX", 0x10, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXYY", 0x50, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXYZ", 0x90, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXYW", 0xD0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXZX", 0x20, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXZY", 0x60, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXZZ", 0xA0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXZW", 0xE0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXWX", 0x30, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXWY", 0x70, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXWZ", 0xB0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XXWW", 0xF0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYXX", 0x04, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYXY", 0x44, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYXZ", 0x84, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYXW", 0xC4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYYX", 0x14, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYYY", 0x54, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYYZ", 0x94, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYYW", 0xD4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYZX", 0x24, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYZY", 0x64, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYZZ", 0xA4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYZW", 0xE4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYWX", 0x34, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYWY", 0x74, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYWZ", 0xB4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XYWW", 0xF4, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZXX", 0x08, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZXY", 0x48, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZXZ", 0x88, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZXW", 0xC8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZYX", 0x18, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZYY", 0x58, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZYZ", 0x98, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZYW", 0xD8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZZX", 0x28, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZZY", 0x68, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZZZ", 0xA8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZZW", 0xE8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZWX", 0x38, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZWY", 0x78, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZWZ", 0xB8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XZWW", 0xF8, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWXX", 0x0C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWXY", 0x4C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWXZ", 0x8C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWXW", 0xCC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWYX", 0x1C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWYY", 0x5C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWYZ", 0x9C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWYW", 0xDC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWZX", 0x2C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWZY", 0x6C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWZZ", 0xAC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWZW", 0xEC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWWX", 0x3C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWWY", 0x7C, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWWZ", 0xBC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "XWWW", 0xFC, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXXX", 0x01, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXXY", 0x41, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXXZ", 0x81, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXXW", 0xC1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXYX", 0x11, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXYY", 0x51, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXYZ", 0x91, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXYW", 0xD1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXZX", 0x21, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXZY", 0x61, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXZZ", 0xA1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXZW", 0xE1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXWX", 0x31, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXWY", 0x71, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXWZ", 0xB1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YXWW", 0xF1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYXX", 0x05, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYXY", 0x45, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYXZ", 0x85, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYXW", 0xC5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYYX", 0x15, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYYY", 0x55, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYYZ", 0x95, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYYW", 0xD5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYZX", 0x25, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYZY", 0x65, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYZZ", 0xA5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYZW", 0xE5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYWX", 0x35, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYWY", 0x75, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYWZ", 0xB5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YYWW", 0xF5, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZXX", 0x09, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZXY", 0x49, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZXZ", 0x89, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZXW", 0xC9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZYX", 0x19, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZYY", 0x59, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZYZ", 0x99, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZYW", 0xD9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZZX", 0x29, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZZY", 0x69, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZZZ", 0xA9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZZW", 0xE9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZWX", 0x39, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZWY", 0x79, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZWZ", 0xB9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YZWW", 0xF9, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWXX", 0x0D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWXY", 0x4D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWXZ", 0x8D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWXW", 0xCD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWYX", 0x1D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWYY", 0x5D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWYZ", 0x9D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWYW", 0xDD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWZX", 0x2D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWZY", 0x6D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWZZ", 0xAD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWZW", 0xED, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWWX", 0x3D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWWY", 0x7D, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWWZ", 0xBD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "YWWW", 0xFD, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXXX", 0x02, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXXY", 0x42, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXXZ", 0x82, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXXW", 0xC2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXYX", 0x12, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXYY", 0x52, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXYZ", 0x92, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXYW", 0xD2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXZX", 0x22, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXZY", 0x62, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXZZ", 0xA2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXZW", 0xE2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXWX", 0x32, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXWY", 0x72, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXWZ", 0xB2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZXWW", 0xF2, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYXX", 0x06, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYXY", 0x46, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYXZ", 0x86, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYXW", 0xC6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYYX", 0x16, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYYY", 0x56, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYYZ", 0x96, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYYW", 0xD6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYZX", 0x26, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYZY", 0x66, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYZZ", 0xA6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYZW", 0xE6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYWX", 0x36, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYWY", 0x76, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYWZ", 0xB6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZYWW", 0xF6, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZXX", 0x0A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZXY", 0x4A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZXZ", 0x8A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZXW", 0xCA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZYX", 0x1A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZYY", 0x5A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZYZ", 0x9A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZYW", 0xDA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZZX", 0x2A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZZY", 0x6A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZZZ", 0xAA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZZW", 0xEA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZWX", 0x3A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZWY", 0x7A, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZWZ", 0xBA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZZWW", 0xFA, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWXX", 0x0E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWXY", 0x4E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWXZ", 0x8E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWXW", 0xCE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWYX", 0x1E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWYY", 0x5E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWYZ", 0x9E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWYW", 0xDE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWZX", 0x2E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWZY", 0x6E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWZZ", 0xAE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWZW", 0xEE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWWX", 0x3E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWWY", 0x7E, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWWZ", 0xBE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "ZWWW", 0xFE, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXXX", 0x03, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXXY", 0x43, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXXZ", 0x83, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXXW", 0xC3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXYX", 0x13, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXYY", 0x53, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXYZ", 0x93, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXYW", 0xD3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXZX", 0x23, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXZY", 0x63, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXZZ", 0xA3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXZW", 0xE3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXWX", 0x33, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXWY", 0x73, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXWZ", 0xB3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WXWW", 0xF3, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYXX", 0x07, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYXY", 0x47, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYXZ", 0x87, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYXW", 0xC7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYYX", 0x17, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYYY", 0x57, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYYZ", 0x97, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYYW", 0xD7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYZX", 0x27, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYZY", 0x67, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYZZ", 0xA7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYZW", 0xE7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYWX", 0x37, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYWY", 0x77, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYWZ", 0xB7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WYWW", 0xF7, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZXX", 0x0B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZXY", 0x4B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZXZ", 0x8B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZXW", 0xCB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZYX", 0x1B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZYY", 0x5B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZYZ", 0x9B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZYW", 0xDB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZZX", 0x2B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZZY", 0x6B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZZZ", 0xAB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZZW", 0xEB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZWX", 0x3B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZWY", 0x7B, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZWZ", 0xBB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WZWW", 0xFB, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWXX", 0x0F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWXY", 0x4F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWXZ", 0x8F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWXW", 0xCF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWYX", 0x1F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWYY", 0x5F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWYZ", 0x9F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWYW", 0xDF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWZX", 0x2F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWZY", 0x6F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWZZ", 0xAF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWZW", 0xEF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWWX", 0x3F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWWY", 0x7F, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWWZ", 0xBF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %AddNamedProperty($SIMD, "WWWW", 0xFF, DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ %ToFastProperties($SIMD);
+
+ // Set up non-enumerable properties of the SIMD float32x4 object.
+ InstallFunctions($SIMD.float32x4, DONT_ENUM, $Array(
+ // Float32x4 operations
+ "splat", Float32x4SplatJS,
+ "zero", Float32x4ZeroJS,
+ // Unary
+ "abs", Float32x4AbsJS,
+ "fromInt32x4", Int32x4ToFloat32x4JS,
+ "fromInt32x4Bits", Int32x4BitsToFloat32x4JS,
+ "neg", Float32x4NegJS,
+ "reciprocal", Float32x4ReciprocalJS,
+ "reciprocalSqrt", Float32x4ReciprocalSqrtJS,
+ "sqrt", Float32x4SqrtJS,
+ // Binary
+ "add", Float32x4AddJS,
+ "div", Float32x4DivJS,
+ "max", Float32x4MaxJS,
+ "min", Float32x4MinJS,
+ "mul", Float32x4MulJS,
+ "sub", Float32x4SubJS,
+ "lessThan", Float32x4LessThanJS,
+ "lessThanOrEqual", Float32x4LessThanOrEqualJS,
+ "equal", Float32x4EqualJS,
+ "notEqual", Float32x4NotEqualJS,
+ "greaterThanOrEqual", Float32x4GreaterThanOrEqualJS,
+ "greaterThan", Float32x4GreaterThanJS,
+ "and", Float32x4AndJS,
+ "or", Float32x4OrJS,
+ "xor", Float32x4XorJS,
+ "not", Float32x4NotJS,
+ "scale", Float32x4ScaleJS,
+ "withX", Float32x4WithXJS,
+ "withY", Float32x4WithYJS,
+ "withZ", Float32x4WithZJS,
+ "withW", Float32x4WithWJS,
+ "shuffle", Float32x4ShuffleJS,
+ // Ternary
+ "clamp", Float32x4ClampJS,
+ "shuffleMix", Float32x4ShuffleMixJS,
+ "select", Float32x4SelectJS
+ ));
+
+ // Set up non-enumerable properties of the SIMD float64x2 object.
+ InstallFunctions($SIMD.float64x2, DONT_ENUM, $Array(
+ // Float64x2 operations
+ "splat", Float64x2SplatJS,
+ "zero", Float64x2ZeroJS,
+ // Unary
+ "abs", Float64x2AbsJS,
+ "neg", Float64x2NegJS,
+ "sqrt", Float64x2SqrtJS,
+ // Binary
+ "add", Float64x2AddJS,
+ "div", Float64x2DivJS,
+ "max", Float64x2MaxJS,
+ "min", Float64x2MinJS,
+ "mul", Float64x2MulJS,
+ "sub", Float64x2SubJS,
+ "scale", Float64x2ScaleJS,
+ "withX", Float64x2WithXJS,
+ "withY", Float64x2WithYJS,
+ // Ternary
+ "clamp", Float64x2ClampJS
+ ));
+
+ // Set up non-enumerable properties of the SIMD int32x4 object.
+ InstallFunctions($SIMD.int32x4, DONT_ENUM, $Array(
+ // Int32x4 operations
+ "zero", Int32x4ZeroJS,
+ "splat", Int32x4SplatJS,
+ "bool", Int32x4BoolJS,
+ // Unary
+ "fromFloat32x4", Float32x4ToInt32x4JS,
+ "fromFloat32x4Bits", Float32x4BitsToInt32x4JS,
+ "neg", Int32x4NegJS,
+ "not", Int32x4NotJS,
+ // Binary
+ "add", Int32x4AddJS,
+ "and", Int32x4AndJS,
+ "mul", Int32x4MulJS,
+ "or", Int32x4OrJS,
+ "sub", Int32x4SubJS,
+ "xor", Int32x4XorJS,
+ "shuffle", Int32x4ShuffleJS,
+ "withX", Int32x4WithXJS,
+ "withY", Int32x4WithYJS,
+ "withZ", Int32x4WithZJS,
+ "withW", Int32x4WithWJS,
+ "withFlagX", Int32x4WithFlagXJS,
+ "withFlagY", Int32x4WithFlagYJS,
+ "withFlagZ", Int32x4WithFlagZJS,
+ "withFlagW", Int32x4WithFlagWJS,
+ "greaterThan", Int32x4GreaterThanJS,
+ "equal", Int32x4EqualJS,
+ "lessThan", Int32x4LessThanJS,
+ "shiftLeft", Int32x4ShiftLeftJS,
+ "shiftRight", Int32x4ShiftRightJS,
+ "shiftRightArithmetic", Int32x4ShiftRightArithmeticJS,
+ // Ternary
+ "select", Int32x4SelectJS
+ ));
+}
+
+SetUpSIMD();
+
+//------------------------------------------------------------------------------
+macro SIMD128_TYPED_ARRAYS(FUNCTION)
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+FUNCTION(10, Float32x4Array, 16)
+FUNCTION(11, Float64x2Array, 16)
+FUNCTION(12, Int32x4Array, 16)
+endmacro
+
+macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
+ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset =
+ ToPositiveInteger(byteOffset, "invalid_typed_array_length");
+ }
+ if (!IS_UNDEFINED(length)) {
+ length = ToPositiveInteger(length, "invalid_typed_array_length");
+ }
+
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+ var offset;
+ if (IS_UNDEFINED(byteOffset)) {
+ offset = 0;
+ } else {
+ offset = byteOffset;
+
+ if (offset % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["start offset", "NAME", ELEMENT_SIZE]);
+ }
+ if (offset > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % ELEMENT_SIZE !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ ["byte length", "NAME", ELEMENT_SIZE]);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / ELEMENT_SIZE;
+ } else {
+ var newLength = length;
+ newByteLength = newLength * ELEMENT_SIZE;
+ }
+ if ((offset + newByteLength > bufferByteLength)
+ || (newLength > %_MaxSmi())) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength);
+ }
+
+ function NAMEConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ?
+ 0 : ToPositiveInteger(length, "invalid_typed_array_length");
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ var byteLength = l * ELEMENT_SIZE;
+ if (byteLength > %_TypedArrayMaxSizeInHeap()) {
+ var buffer = new $ArrayBuffer(byteLength);
+ %_TypedArrayInitialize(obj, ARRAY_ID, buffer, 0, byteLength);
+ } else {
+ %_TypedArrayInitialize(obj, ARRAY_ID, null, 0, byteLength);
+ }
+ }
+
+ function NAMEConstructByArrayLike(obj, arrayLike) {
+ var length = arrayLike.length;
+ var l = ToPositiveInteger(length, "invalid_typed_array_length");
+
+ if (l > %_MaxSmi()) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ if(!%TypedArrayInitializeFromArrayLike(obj, ARRAY_ID, arrayLike, l)) {
+ for (var i = 0; i < l; i++) {
+      // It is crucial that we let any exceptions from arrayLike[i]
+      // propagate outside the function.
+ obj[i] = arrayLike[i];
+ }
+ }
+ }
+
+ function NAMEConstructor(arg1, arg2, arg3) {
+ if (%_IsConstructCall()) {
+ if (IS_ARRAYBUFFER(arg1)) {
+ NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
+ IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
+ NAMEConstructByLength(this, arg1);
+ } else {
+ NAMEConstructByArrayLike(this, arg1);
+ }
+ } else {
+ throw MakeTypeError("constructor_not_function", ["NAME"])
+ }
+ }
+
+ function NAME_GetBuffer() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.buffer", this]);
+ }
+ return %TypedArrayGetBuffer(this);
+ }
+
+ function NAME_GetByteLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteLength", this]);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
+ }
+
+ function NAME_GetByteOffset() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.byteOffset", this]);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
+ }
+
+ function NAME_GetLength() {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.length", this]);
+ }
+ return %_TypedArrayGetLength(this);
+ }
+
+ var $NAME = global.NAME;
+
+ function NAMESubArray(begin, end) {
+ if (!(%_ClassOf(this) === 'NAME')) {
+ throw MakeTypeError('incompatible_method_receiver',
+ ["NAME.subarray", this]);
+ }
+ var beginInt = TO_INTEGER(begin);
+ if (!IS_UNDEFINED(end)) {
+ end = TO_INTEGER(end);
+ }
+
+ var srcLength = %_TypedArrayGetLength(this);
+ if (beginInt < 0) {
+ beginInt = $MathMax(0, srcLength + beginInt);
+ } else {
+ beginInt = $MathMin(srcLength, beginInt);
+ }
+
+ var endInt = IS_UNDEFINED(end) ? srcLength : end;
+ if (endInt < 0) {
+ endInt = $MathMax(0, srcLength + endInt);
+ } else {
+ endInt = $MathMin(endInt, srcLength);
+ }
+ if (endInt < beginInt) {
+ endInt = beginInt;
+ }
+ var newLength = endInt - beginInt;
+ var beginByteOffset =
+ %_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
+ return new $NAME(%TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
+ }
+endmacro
+
+SIMD128_TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+
+function SetupSIMD128TypedArrays() {
+macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
+ %CheckIsBootstrapping();
+ %SetCode(global.NAME, NAMEConstructor);
+ %FunctionSetPrototype(global.NAME, new $Object());
+
+ %AddNamedProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ %AddNamedProperty(global.NAME.prototype,
+ "constructor", global.NAME, DONT_ENUM);
+ %AddNamedProperty(global.NAME.prototype,
+ "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ InstallGetter(global.NAME.prototype, "buffer", NAME_GetBuffer);
+ InstallGetter(global.NAME.prototype, "byteOffset", NAME_GetByteOffset);
+ InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength);
+ InstallGetter(global.NAME.prototype, "length", NAME_GetLength);
+
+ InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array(
+ "subarray", NAMESubArray,
+ "set", TypedArraySet
+ ));
+endmacro
+
+SIMD128_TYPED_ARRAYS(SETUP_TYPED_ARRAY)
+}
+
+SetupSIMD128TypedArrays();
+
+macro DECLARE_TYPED_ARRAY_FUNCTION(NAME)
+function NAMEArrayGet(i) {
+ return this[i];
+}
+
+function NAMEArraySet(i, v) {
+ CheckNAME(v);
+ this[i] = v;
+}
+
+function SetUpNAMEArray() {
+ InstallFunctions(global.NAMEArray.prototype, DONT_ENUM, $Array(
+ "getAt", NAMEArrayGet,
+ "setAt", NAMEArraySet
+ ));
+}
+endmacro
+
+DECLARE_TYPED_ARRAY_FUNCTION(Float32x4)
+DECLARE_TYPED_ARRAY_FUNCTION(Float64x2)
+DECLARE_TYPED_ARRAY_FUNCTION(Int32x4)
+
+SetUpFloat32x4Array();
+SetUpFloat64x2Array();
+SetUpInt32x4Array();
case JS_MAP_ITERATOR_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case FLOAT32x4_TYPE:
+ case FLOAT64x2_TYPE:
+ case INT32x4_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_ARRAY_TYPE:
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSIMD128InCrankshaft() { return true; }
// -----------------------------------------------------------------------------
if (rm_reg.high_bit()) emit(0x41);
}
+void Assembler::emit_optional_rex_32(XMMRegister reg) {
+ byte rex_bits = (reg.code() & 0x8) >> 1;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
void Assembler::emit_optional_rex_32(const Operand& op) {
if (op.rex_ != 0) emit(0x40 | op.rex_);
}
+void Assembler::addpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5E);
+ emit_sse_operand(dst, src);
+}
+
+
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
}
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x21);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
+void Assembler::pinsrd(XMMRegister dst, Register src, byte imm8) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2); // double
}
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11);
+ emit_sse_operand(src, dst);
+}
+
+
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src, dst);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC6);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
+void Assembler::shufpd(XMMRegister dst, XMMRegister src, byte imm8) {
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xC6);
emit_sse_operand(dst, src);
}
+void Assembler::andpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
}
+void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
}
+// cmpps: packed single-precision compare (0F C2 /r ib).  The trailing
+// immediate selects the predicate (Intel SDM Vol. 2A, Table 3-11); the
+// named wrappers below cover the predicates this port uses (the unord
+// 0x3 and ord 0x7 predicates have no wrapper).
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xC2);
+  emit_sse_operand(dst, src);
+  emit(cmp);
+}
+
+
+void Assembler::cmpeqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x0);
+}
+
+
+void Assembler::cmpltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x1);
+}
+
+
+void Assembler::cmpleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x2);
+}
+
+
+void Assembler::cmpneqps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x4);
+}
+
+
+void Assembler::cmpnltps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x5);
+}
+
+
+void Assembler::cmpnleps(XMMRegister dst, XMMRegister src) {
+  cmpps(dst, src, 0x6);
+}
+
+
+// pslld by immediate: 66 0F 72 /6 ib.  The opcode extension (/6) lives in
+// the ModRM reg field, supplied by passing the GP register with that code
+// to emit_sse_operand.
+void Assembler::pslld(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x72);
+  emit_sse_operand(rsi, reg);  // rsi == 6
+  emit(shift);
+}
+
+
+// pslld by per-register count: 66 0F F2 /r.
+void Assembler::pslld(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xF2);
+  emit_sse_operand(dst, src);
+}
+
+
+// psrld by immediate: 66 0F 72 /2 ib (logical right shift).
+void Assembler::psrld(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x72);
+  emit_sse_operand(rdx, reg);  // rdx == 2
+  emit(shift);
+}
+
+
+// psrld by per-register count: 66 0F D2 /r.
+void Assembler::psrld(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xD2);
+  emit_sse_operand(dst, src);
+}
+
+
+// psrad by immediate: 66 0F 72 /4 ib (arithmetic right shift).
+void Assembler::psrad(XMMRegister reg, int8_t shift) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(reg);
+  emit(0x0F);
+  emit(0x72);
+  emit_sse_operand(rsp, reg);  // rsp == 4
+  emit(shift);
+}
+
+
+// psrad by per-register count: 66 0F E2 /r.
+void Assembler::psrad(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xE2);
+  emit_sse_operand(dst, src);
+}
+
+
+// pcmpeqd: packed dword equality compare, 66 0F 76 /r.
+void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x76);
+  emit_sse_operand(dst, src);
+}
+
+
+// pcmpgtd: packed signed dword greater-than compare, 66 0F 66 /r.
+void Assembler::pcmpgtd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x66);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src,
Assembler::RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
}
+void Assembler::minps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::sqrtpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+
+// pmulld: packed signed dword multiply, low 32 bits of each product
+// (SSE4.1, three-byte opcode 66 0F 38 40 /r).
+void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x38);
+  emit(0x40);
+  emit_sse_operand(dst, src);
+}
+
+
+// pmulld with a memory source operand (SSE4.1, 66 0F 38 40 /r), matching
+// the register-register overload above.
+// BUG FIX: this overload previously emitted 66 0F F4, which encodes
+// pmuludq (unsigned 32x32->64 multiply), not pmulld; it also lacked the
+// SSE4.1 feature check that the register form performs.
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x38);
+  emit(0x40);
+  emit_sse_operand(dst, src);
+}
+
+
+// pmuludq: unsigned multiply of the even dword lanes producing two
+// 64-bit products (SSE2, 66 0F F4 /r).
+void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xF4);
+  emit_sse_operand(dst, src);
+}
+
+
+// pmuludq with a memory source operand.
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xF4);
+  emit_sse_operand(dst, src);
+}
+
+
+// Interleave low dwords (66 0F 62 /r).  NOTE(review): the canonical
+// mnemonic is "punpckldq"; "punpackldq" is a patch-wide spelling kept
+// here because the header declaration and disassembler use it too.
+void Assembler::punpackldq(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x62);
+  emit_sse_operand(dst, src);
+}
+
+
+// punpackldq with a memory source operand.
+void Assembler::punpackldq(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x62);
+  emit_sse_operand(dst, src);
+}
+
+
+// psrldq: shift the whole 128-bit register right by `shift` BYTES
+// (SSE2, 66 0F 73 /3 ib).  The /3 opcode extension comes from the
+// single-argument emit_sse_operand overload, which hard-codes ModRM
+// reg field 3.
+void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst);
+  emit(0x0F);
+  emit(0x73);
+  emit_sse_operand(dst);
+  emit(shift);
+}
+
+
+void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
}
+// Emit a register-direct ModRM byte with the reg field hard-coded to 3
+// (0xD8 == mod 11, reg 011, rm from dst).  This is only correct for
+// instructions whose opcode extension is /3 — currently just psrldq.
+void Assembler::emit_sse_operand(XMMRegister dst) {
+  emit(0xD8 | dst.low_bits());
+}
+
+
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
emit(data);
typedef XMMRegister DoubleRegister;
+typedef XMMRegister SIMD128Register;
enum Condition {
times_2 = 1,
times_4 = 2,
times_8 = 3,
+ maximal_scale_factor = times_8,
times_int_size = times_4,
times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
};
// SSE instructions
void movaps(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+ void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src);
void divps(XMMRegister dst, XMMRegister src);
void divps(XMMRegister dst, const Operand& src);
+ void addpd(XMMRegister dst, XMMRegister src);
+ void addpd(XMMRegister dst, const Operand& src);
+ void subpd(XMMRegister dst, XMMRegister src);
+ void subpd(XMMRegister dst, const Operand& src);
+ void mulpd(XMMRegister dst, XMMRegister src);
+ void mulpd(XMMRegister dst, const Operand& src);
+ void divpd(XMMRegister dst, XMMRegister src);
+ void divpd(XMMRegister dst, const Operand& src);
+
void movmskps(Register dst, XMMRegister src);
// SSE2 instructions
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, const Operand& src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, const Operand& src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
// SSE 4.1 instruction
void extractps(Register dst, XMMRegister src, byte imm8);
+ void insertps(XMMRegister dst, XMMRegister src, byte imm8);
+ void pinsrd(XMMRegister dst, Register src, byte imm8);
+
+ void minps(XMMRegister dst, XMMRegister src);
+ void minps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, XMMRegister src);
+ void maxps(XMMRegister dst, const Operand& src);
+ void minpd(XMMRegister dst, XMMRegister src);
+ void minpd(XMMRegister dst, const Operand& src);
+ void maxpd(XMMRegister dst, XMMRegister src);
+ void maxpd(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src);
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src);
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, XMMRegister src);
+ void sqrtps(XMMRegister dst, const Operand& src);
+ void sqrtpd(XMMRegister dst, XMMRegister src);
+ void sqrtpd(XMMRegister dst, const Operand& src);
+ void paddd(XMMRegister dst, XMMRegister src);
+ void paddd(XMMRegister dst, const Operand& src);
+ void psubd(XMMRegister dst, XMMRegister src);
+ void psubd(XMMRegister dst, const Operand& src);
+ void pmulld(XMMRegister dst, XMMRegister src);
+ void pmulld(XMMRegister dst, const Operand& src);
+ void pmuludq(XMMRegister dst, XMMRegister src);
+ void pmuludq(XMMRegister dst, const Operand& src);
+ void punpackldq(XMMRegister dst, XMMRegister src);
+ void punpackldq(XMMRegister dst, const Operand& src);
+ void psrldq(XMMRegister dst, uint8_t shift);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void cvtps2dq(XMMRegister dst, XMMRegister src);
+ void cvtps2dq(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, XMMRegister src);
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmpeqps(XMMRegister dst, XMMRegister src);
+ void cmpltps(XMMRegister dst, XMMRegister src);
+ void cmpleps(XMMRegister dst, XMMRegister src);
+ void cmpneqps(XMMRegister dst, XMMRegister src);
+ void cmpnltps(XMMRegister dst, XMMRegister src);
+ void cmpnleps(XMMRegister dst, XMMRegister src);
+
+ void pslld(XMMRegister reg, int8_t shift);
+ void pslld(XMMRegister dst, XMMRegister src);
+ void psrld(XMMRegister reg, int8_t shift);
+ void psrld(XMMRegister dst, XMMRegister src);
+ void psrad(XMMRegister reg, int8_t shift);
+ void psrad(XMMRegister dst, XMMRegister src);
+
+ void pcmpgtd(XMMRegister dst, XMMRegister src);
+ void pcmpeqd(XMMRegister dst, XMMRegister src);
+ void pcmpltd(XMMRegister dst, XMMRegister src);
+
// Debugging
void Print();
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
+ // As for emit_optional_rex_32(Register), except that the register is
+ // an XMM register.
+ inline void emit_optional_rex_32(XMMRegister rm_reg);
+
// Optionally do as emit_rex_32(const Operand&) if the operand register
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
void emit_sse_operand(Register reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst);
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ simd128_value_t zero = {{0.0, 0.0}};
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
- input_->SetDoubleRegister(i, 0.0);
+ input_->SetSIMD128Register(i, zero);
}
// Fill the frame content from the actual data on the frame.
}
-void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+void Deoptimizer::CopySIMD128Registers(FrameDescription* output_frame) {
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
- double double_value = input_->GetDoubleRegister(i);
- output_frame->SetDoubleRegister(i, double_value);
+ simd128_value_t xmm_value = input_->GetSIMD128Register(i);
+ output_frame->SetSIMD128Register(i, xmm_value);
}
}
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
- const int kDoubleRegsSize = kDoubleSize *
+ const int kXMMRegsSize = kSIMD128Size *
XMMRegister::NumAllocatableRegisters();
- __ subp(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kXMMRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movsd(Operand(rsp, offset), xmm_reg);
+ int offset = i * kSIMD128Size;
+ __ movups(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
- kDoubleRegsSize;
+ kXMMRegsSize;
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
__ PopQuad(Operand(rbx, offset));
}
- // Fill in the double input registers.
- int double_regs_offset = FrameDescription::double_registers_offset();
+ // Fill in the xmm input registers.
+ STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
+ int xmm_regs_offset = FrameDescription::simd128_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
+ int dst_offset = i * kSIMD128Size + xmm_regs_offset;
__ popq(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset + kDoubleSize));
}
// Remove the bailout id and return address from the stack.
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
+ int src_offset = i * kSIMD128Size + xmm_regs_offset;
+ __ movups(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
}
+// With XMM state widened to 128 bits, the scalar double view of a
+// register aliases lane 0 (d[0]) of its simd128 slot.
+double FrameDescription::GetDoubleRegister(unsigned n) const {
+  DCHECK(n < arraysize(simd128_registers_));
+  return simd128_registers_[n].d[0];
+}
+
+
+// Writes only lane 0; the upper 64 bits of the slot are left untouched.
+void FrameDescription::SetDoubleRegister(unsigned n, double value) {
+  DCHECK(n < arraysize(simd128_registers_));
+  simd128_registers_[n].d[0] = value;
+}
+
+
#undef __
OPERAND_QUADWORD_SIZE = 3
};
+ enum {
+ rax = 0,
+ rcx = 1,
+ rdx = 2,
+ rbx = 3,
+ rsp = 4,
+ rbp = 5,
+ rsi = 6,
+ rdi = 7
+ };
+
const NameConverter& converter_;
v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
unsigned int tmp_buffer_pos_;
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x21) {
+ get_modrm(*current, &mod, ®op, &rm);
+ // insertps xmm, xmm, imm8
+ AppendToBuffer("insertps %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm),
+ (*(current + 1)) & 3);
+ current += 2;
+ } else if (third_byte == 0x22) {
+ get_modrm(*current, &mod, ®op, &rm);
+ // pinsrd xmm, reg32, imm8
+ AppendToBuffer("pinsrd %s,%s,%d",
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm),
+ (*(current + 1)) & 3);
+ current += 2;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, ®op, &rm);
// roundsd xmm, xmm/m64, imm8
} else {
UnimplementedInstruction();
}
+ } else if (opcode == 0x38) {
+ byte third_byte = *current;
+ current = data + 3;
+ if (third_byte == 0x40) {
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("pmulld %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else {
+ UnimplementedInstruction();
+ }
} else {
get_modrm(*current, &mod, ®op, &rm);
if (opcode == 0x1f) {
AppendToBuffer("movdqa %s,",
NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ AppendToBuffer("pshufd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0x5B) {
+ AppendToBuffer("cvtps2dq %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xFE) {
+ AppendToBuffer("paddd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xFA) {
+ AppendToBuffer("psubd %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
DCHECK(regop == 6);
AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
+ } else if (opcode == 0x62) {
+ AppendToBuffer("punpackldq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x72) {
+        AppendToBuffer(regop == rsi ? "pslld "
+                                    : regop == rdx ? "psrld " : "psrad ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0xC6) {
+ AppendToBuffer("shufpd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 0xff);
+ current += 1;
+ } else if (opcode == 0xF4) {
+ AppendToBuffer("pmuludq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
} else {
const char* mnemonic = "?";
- if (opcode == 0x54) {
+ if (opcode == 0x51) {
+ mnemonic = "sqrtpd";
+ } else if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
} else if (opcode == 0x57) {
mnemonic = "xorpd";
+ } else if (opcode == 0x58) {
+ mnemonic = "addpd";
+ } else if (opcode == 0x59) {
+ mnemonic = "mulpd";
+ } else if (opcode == 0x5C) {
+ mnemonic = "subpd";
+ } else if (opcode == 0x5D) {
+ mnemonic = "minpd";
+ } else if (opcode == 0x5E) {
+ mnemonic = "divpd";
+ } else if (opcode == 0x5F) {
+ mnemonic = "maxpd";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
+ } else if (opcode == 0x66) {
+ mnemonic = "pcmpgtd";
+ } else if (opcode == 0x76) {
+ mnemonic = "pcmpeqd";
+ } else if (opcode == 0xD2) {
+ mnemonic = "psrld";
+ } else if (opcode == 0xE2) {
+ mnemonic = "psrad";
+ } else if (opcode == 0xF2) {
+ mnemonic = "pslld";
} else {
UnimplementedInstruction();
}
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x10) {
+ // movups xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("movups %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x11) {
+ // movups xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("movups ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
} else if (opcode == 0xA2) {
// CPUID
AppendToBuffer("%s", mnemonic);
AppendToBuffer(", %d", (*current) & 3);
current += 1;
+ } else if (opcode == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 3);
+ current += 1;
+
+ } else if (opcode == 0x54) {
+ // andps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("andps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x56) {
+ // orps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("orps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x58) {
+ // addps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("addps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x59) {
+ // mulps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("mulps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5C) {
+ // subps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("subps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5E) {
+ // divps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("divps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5D) {
+ // minps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("minps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5F) {
+ // maxps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("maxps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x5B) {
+ // cvtdq2ps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("cvtdq2ps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+
+ } else if (opcode == 0x53) {
+ // rcpps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("rcpps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x52) {
+ // rsqrtps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("rsqrtps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
+ } else if (opcode == 0x51) {
+ // sqrtps xmm, xmm/m128
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("sqrtps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
AppendToBuffer("movmskps %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // Intel manual 2A, Table 3-11.
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ const char* const pseudo_op[] = {
+ "cmpeqps",
+ "cmpltps",
+ "cmpleps",
+ "cmpunordps",
+ "cmpneqps",
+ "cmpnltps",
+ "cmpnleps",
+ "cmpordps"
+ };
+ AppendToBuffer("%s %s,%s",
+ pseudo_op[current[1]],
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ current += 2;
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
}
+// SIMD128 values share the XMM allocation space with doubles, so the
+// allocation-index mapping is the same.
+XMMRegister LCodeGen::ToSIMD128Register(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
DCHECK(op->IsRegister());
return ToRegister(op->index());
}
+// The three SIMD128 representations (float32x4, float64x2, int32x4) all
+// live in XMM registers; these accessors only differ in which operand
+// kind they DCHECK before delegating to the index-based mapping.
+XMMRegister LCodeGen::ToFloat32x4Register(LOperand* op) const {
+  DCHECK(op->IsFloat32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToFloat64x2Register(LOperand* op) const {
+  DCHECK(op->IsFloat64x2Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+XMMRegister LCodeGen::ToInt32x4Register(LOperand* op) const {
+  DCHECK(op->IsInt32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
+// Accepts any of the three SIMD128 operand kinds.
+XMMRegister LCodeGen::ToSIMD128Register(LOperand* op) const {
+  DCHECK(op->IsFloat32x4Register() || op->IsFloat64x2Register() ||
+         op->IsInt32x4Register());
+  return ToSIMD128Register(op->index());
+}
+
+
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot() ||
+ op->IsFloat32x4StackSlot() || op->IsFloat64x2StackSlot() ||
+ op->IsInt32x4StackSlot());
if (NeedsEagerFrame()) {
return Operand(rbp, StackSlotOffset(op->index()));
} else {
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
+ } else if (op->IsFloat32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT32x4_STACK_SLOT);
+ } else if (op->IsFloat64x2StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::FLOAT64x2_STACK_SLOT);
+ } else if (op->IsInt32x4StackSlot()) {
+ translation->StoreSIMD128StackSlot(op->index(),
+ Translation::INT32x4_STACK_SLOT);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
+ } else if (op->IsFloat32x4Register()) {
+ XMMRegister reg = ToFloat32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT32x4_REGISTER);
+ } else if (op->IsFloat64x2Register()) {
+ XMMRegister reg = ToFloat64x2Register(op);
+ translation->StoreSIMD128Register(reg, Translation::FLOAT64x2_REGISTER);
+ } else if (op->IsInt32x4Register()) {
+ XMMRegister reg = ToInt32x4Register(op);
+ translation->StoreSIMD128Register(reg, Translation::INT32x4_REGISTER);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
+ } else if (r.IsSIMD128()) {
+ DCHECK(!info()->IsStub());
+ EmitBranch(instr, no_condition);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
}
+// If the element size exceeds the maximal SIB scale factor (x8), shift
+// the key register left by the excess so the address can still be formed
+// with a scaled-index operand.  Returns true when the key was pre-scaled
+// (the caller must then skip its own smi-untagging, since the shift
+// already consumed the tag bits).  Clobbers the key register in place.
+bool LCodeGen::HandleExternalArrayOpRequiresPreScale(
+    LOperand* key,
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  Register key_reg = ToRegister(key);
+  if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+    int pre_shift_size = ElementsKindToShiftSize(elements_kind) -
+        static_cast<int>(maximal_scale_factor);
+    DCHECK(pre_shift_size > 0);
+    __ shll(key_reg, Immediate(pre_shift_size));
+    return true;
+  }
+  return false;
+}
+
+
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
+ if (!HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind))
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
+ } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
+ HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(ToSIMD128Register(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
if (constant_value & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
+
return Operand(elements_pointer_reg,
(constant_value << shift_size) + offset);
} else {
DCHECK(SmiValuesAre31Bits());
shift_size -= kSmiTagSize;
}
+ if (ExternalArrayOpRequiresPreScale(key_representation, elements_kind)) {
+ // Make sure the key is pre-scaled against maximal_scale_factor.
+ shift_size = static_cast<int>(maximal_scale_factor);
+ }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
}
+// Emits code for zero-argument SIMD constructors (Float32x4.zero() etc.).
+// Every case xors the destination register with itself, which produces an
+// all-zero 128-bit value regardless of how the lanes are interpreted.
+void LCodeGen::DoNullarySIMDOperation(LNullarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Zero: {
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    case kFloat64x2Zero: {
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      __ xorpd(result_reg, result_reg);
+      return;
+    }
+    case kInt32x4Zero: {
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      __ xorps(result_reg, result_reg);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for one-operand SIMD operations: coercions (register moves),
+// in-place float/int lane arithmetic, bitwise/numeric conversions, scalar
+// splats, sign-mask extraction, and single-lane reads.
+// `select` accumulates a lane index (0..3) through deliberate case
+// fall-through in the GetX/Y/Z/W chain below.
+void LCodeGen::DoUnarySIMDOperation(LUnarySIMDOperation* instr) {
+  uint8_t select = 0;
+  switch (instr->op()) {
+    case kFloat32x4Coercion: {
+      // Coercions only change the tracked representation; at most a move.
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      if (!result_reg.is(input_reg)) {
+        __ movaps(result_reg, input_reg);
+      }
+      return;
+    }
+    case kFloat64x2Coercion: {
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      if (!result_reg.is(input_reg)) {
+        __ movaps(result_reg, input_reg);
+      }
+      return;
+    }
+    case kInt32x4Coercion: {
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      if (!result_reg.is(input_reg)) {
+        __ movaps(result_reg, input_reg);
+      }
+      return;
+    }
+    case kSIMD128Change: {
+      // Representation changes into/out of SIMD values are not supported in
+      // optimized code; unconditionally deoptimize.
+      Comment(";;; deoptimize: can not perform representation change"
+              "for float32x4 or int32x4");
+      DeoptimizeIf(no_condition, instr, "can not perform representation change"
+                   "for float32x4 or int32x4");
+      return;
+    }
+    case kFloat32x4Abs:
+    case kFloat32x4Neg:
+    case kFloat32x4Reciprocal:
+    case kFloat32x4ReciprocalSqrt:
+    case kFloat32x4Sqrt: {
+      // In-place unary float32x4 math; the register allocator pins the
+      // result to the input operand (checked below).
+      DCHECK(instr->value()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      switch (instr->op()) {
+        case kFloat32x4Abs:
+          __ absps(input_reg);
+          break;
+        case kFloat32x4Neg:
+          __ negateps(input_reg);
+          break;
+        case kFloat32x4Reciprocal:
+          __ rcpps(input_reg, input_reg);
+          break;
+        case kFloat32x4ReciprocalSqrt:
+          __ rsqrtps(input_reg, input_reg);
+          break;
+        case kFloat32x4Sqrt:
+          __ sqrtps(input_reg, input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat64x2Abs:
+    case kFloat64x2Neg:
+    case kFloat64x2Sqrt: {
+      // In-place unary float64x2 math (same pinning as above).
+      DCHECK(instr->value()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      switch (instr->op()) {
+        case kFloat64x2Abs:
+          __ abspd(input_reg);
+          break;
+        case kFloat64x2Neg:
+          __ negatepd(input_reg);
+          break;
+        case kFloat64x2Sqrt:
+          __ sqrtpd(input_reg, input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kInt32x4Not:
+    case kInt32x4Neg: {
+      DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      switch (instr->op()) {
+        case kInt32x4Not:
+          __ notps(input_reg);
+          break;
+        case kInt32x4Neg:
+          __ pnegd(input_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat32x4BitsToInt32x4:
+    case kFloat32x4ToInt32x4: {
+      // BitsTo* reinterprets the raw lane bits (register move at most);
+      // To* numerically converts each lane with cvtps2dq.
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      if (instr->op() == kFloat32x4BitsToInt32x4) {
+        if (!result_reg.is(input_reg)) {
+          __ movaps(result_reg, input_reg);
+        }
+      } else {
+        DCHECK(instr->op() == kFloat32x4ToInt32x4);
+        __ cvtps2dq(result_reg, input_reg);
+      }
+      return;
+    }
+    case kInt32x4BitsToFloat32x4:
+    case kInt32x4ToFloat32x4: {
+      // Mirror of the case above, int -> float direction (cvtdq2ps).
+      DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      if (instr->op() == kInt32x4BitsToFloat32x4) {
+        if (!result_reg.is(input_reg)) {
+          __ movaps(result_reg, input_reg);
+        }
+      } else {
+        DCHECK(instr->op() == kInt32x4ToFloat32x4);
+        __ cvtdq2ps(result_reg, input_reg);
+      }
+      return;
+    }
+    case kFloat32x4Splat: {
+      // Narrow the double to a float in the fixed scratch xmm0, then
+      // broadcast lane 0 to all four lanes with shufps(0x0).
+      DCHECK(instr->hydrogen()->value()->representation().IsDouble());
+      XMMRegister input_reg = ToDoubleRegister(instr->value());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      XMMRegister xmm_scratch = xmm0;
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ cvtsd2ss(xmm_scratch, input_reg);
+      __ shufps(xmm_scratch, xmm_scratch, 0x0);
+      __ movaps(result_reg, xmm_scratch);
+      return;
+    }
+    case kInt32x4Splat: {
+      // Move the GP value into lane 0, then broadcast it to all lanes.
+      DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+      Register input_reg = ToRegister(instr->value());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      __ movd(result_reg, input_reg);
+      __ shufps(result_reg, result_reg, 0x0);
+      return;
+    }
+    case kInt32x4GetSignMask: {
+      DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskps(result, input_reg);
+      return;
+    }
+    case kFloat32x4GetSignMask: {
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskps(result, input_reg);
+      return;
+    }
+    // Deliberate fall-through chain: W/Z/Y/X accumulate `select` to lane
+    // index 3/2/1/0 respectively before reaching the shared body.
+    case kFloat32x4GetW:
+      select++;
+      // Fall through.
+    case kFloat32x4GetZ:
+      select++;
+      // Fall through.
+    case kFloat32x4GetY:
+      select++;
+      // Fall through.
+    case kFloat32x4GetX: {
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat32x4());
+      XMMRegister input_reg = ToFloat32x4Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+      // Use xmm0 as scratch only when the result aliases the input.
+      XMMRegister xmm_scratch = result.is(input_reg) ? xmm0 : result;
+
+      if (select == 0x0) {
+        // Lane 0 can be widened to double directly.
+        __ xorps(xmm_scratch, xmm_scratch);
+        __ cvtss2sd(xmm_scratch, input_reg);
+        if (!xmm_scratch.is(result)) {
+          __ movaps(result, xmm_scratch);
+        }
+      } else {
+        // Shuffle the wanted lane into position 0 first, then widen.
+        __ pshufd(xmm_scratch, input_reg, select);
+        if (!xmm_scratch.is(result)) {
+          __ xorps(result, result);
+        }
+        __ cvtss2sd(result, xmm_scratch);
+      }
+      return;
+    }
+    case kFloat64x2GetSignMask: {
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      Register result = ToRegister(instr->result());
+      __ movmskpd(result, input_reg);
+      return;
+    }
+    case kFloat64x2GetX: {
+      // The low double already sits in lane 0; a move suffices.
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+
+      if (!input_reg.is(result)) {
+        __ movaps(result, input_reg);
+      }
+      return;
+    }
+    case kFloat64x2GetY: {
+      DCHECK(instr->hydrogen()->value()->representation().IsFloat64x2());
+      XMMRegister input_reg = ToFloat64x2Register(instr->value());
+      XMMRegister result = ToDoubleRegister(instr->result());
+
+      if (!input_reg.is(result)) {
+        __ movaps(result, input_reg);
+      }
+      // Bring the upper double down into the low lane.
+      __ shufpd(result, input_reg, 0x1);
+      return;
+    }
+    case kInt32x4GetX:
+    case kInt32x4GetY:
+    case kInt32x4GetZ:
+    case kInt32x4GetW:
+    case kInt32x4GetFlagX:
+    case kInt32x4GetFlagY:
+    case kInt32x4GetFlagZ:
+    case kInt32x4GetFlagW: {
+      DCHECK(instr->hydrogen()->value()->representation().IsInt32x4());
+      bool flag = false;
+      // GetFlag* variants set `flag` and fall through to share the lane
+      // selection with the plain Get* cases.
+      switch (instr->op()) {
+        case kInt32x4GetFlagX:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetX:
+          break;
+        case kInt32x4GetFlagY:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetY:
+          select = 0x1;
+          break;
+        case kInt32x4GetFlagZ:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetZ:
+          select = 0x2;
+          break;
+        case kInt32x4GetFlagW:
+          flag = true;
+          // Fall through.
+        case kInt32x4GetW:
+          select = 0x3;
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      XMMRegister input_reg = ToInt32x4Register(instr->value());
+      Register result = ToRegister(instr->result());
+      if (select == 0x0) {
+        // Lane 0 moves straight to the GP register.
+        __ movd(result, input_reg);
+      } else {
+        if (CpuFeatures::IsSupported(SSE4_1)) {
+          CpuFeatureScope scope(masm(), SSE4_1);
+          __ extractps(result, input_reg, select);
+        } else {
+          // Pre-SSE4.1 fallback: shuffle the lane into position 0 via xmm0.
+          XMMRegister xmm_scratch = xmm0;
+          __ pshufd(xmm_scratch, input_reg, select);
+          __ movd(result, xmm_scratch);
+        }
+      }
+
+      if (flag) {
+        // Materialize the JS true/false heap value from the lane bits.
+        Label false_value, done;
+        __ testl(result, result);
+        __ j(zero, &false_value, Label::kNear);
+        __ LoadRoot(result, Heap::kTrueValueRootIndex);
+        __ jmp(&done, Label::kNear);
+        __ bind(&false_value);
+        __ LoadRoot(result, Heap::kFalseValueRootIndex);
+        __ bind(&done);
+      }
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for two-operand SIMD operations: lane-wise arithmetic, scaling
+// by a scalar, shuffles, shifts, comparisons, and single-lane replacement
+// (With*).  Most in-place ops require the left operand to alias the result
+// (asserted per case).  `imm8` accumulates a lane index via deliberate case
+// fall-through in the With* chains.
+void LCodeGen::DoBinarySIMDOperation(LBinarySIMDOperation* instr) {
+  uint8_t imm8 = 0;  // Lane index for the With* operations (see fall-through).
+  switch (instr->op()) {
+    case kFloat32x4Add:
+    case kFloat32x4Sub:
+    case kFloat32x4Mul:
+    case kFloat32x4Div:
+    case kFloat32x4Min:
+    case kFloat32x4Max: {
+      // In-place lane-wise float32x4 arithmetic: left op= right.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToFloat32x4Register(instr->right());
+      switch (instr->op()) {
+        case kFloat32x4Add:
+          __ addps(left_reg, right_reg);
+          break;
+        case kFloat32x4Sub:
+          __ subps(left_reg, right_reg);
+          break;
+        case kFloat32x4Mul:
+          __ mulps(left_reg, right_reg);
+          break;
+        case kFloat32x4Div:
+          __ divps(left_reg, right_reg);
+          break;
+        case kFloat32x4Min:
+          __ minps(left_reg, right_reg);
+          break;
+        case kFloat32x4Max:
+          __ maxps(left_reg, right_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat32x4Scale: {
+      // Multiply all four lanes by a scalar: narrow the double to float in
+      // xmm0, broadcast it, then mulps in place.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister scratch_reg = xmm0;
+      __ xorps(scratch_reg, scratch_reg);
+      __ cvtsd2ss(scratch_reg, right_reg);
+      __ shufps(scratch_reg, scratch_reg, 0x0);
+      __ mulps(left_reg, scratch_reg);
+      return;
+    }
+    case kFloat64x2Add:
+    case kFloat64x2Sub:
+    case kFloat64x2Mul:
+    case kFloat64x2Div:
+    case kFloat64x2Min:
+    case kFloat64x2Max: {
+      // In-place lane-wise float64x2 arithmetic: left op= right.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->right()->representation().IsFloat64x2());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToFloat64x2Register(instr->right());
+      switch (instr->op()) {
+        case kFloat64x2Add:
+          __ addpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Sub:
+          __ subpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Mul:
+          __ mulpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Div:
+          __ divpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Min:
+          __ minpd(left_reg, right_reg);
+          break;
+        case kFloat64x2Max:
+          __ maxpd(left_reg, right_reg);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kFloat64x2Scale: {
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      // NOTE(review): this shufpd broadcasts the scalar *in place*, clobbering
+      // right_reg (unlike kFloat32x4Scale, which uses xmm0 as scratch).
+      // Assumes the allocator treats the scale input as destroyed — verify.
+      __ shufpd(right_reg, right_reg, 0x0);
+      __ mulpd(left_reg, right_reg);
+      return;
+    }
+    case kFloat32x4Shuffle: {
+      // shufps needs a compile-time immediate; deopt if the selector is not
+      // a known integer constant.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToFloat32x4Register(instr->left());
+        __ shufps(left_reg, left_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+        return;
+      }
+    }
+    case kInt32x4Shuffle: {
+      // Same constraint as kFloat32x4Shuffle, using pshufd for integers.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        __ pshufd(left_reg, left_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+        return;
+      }
+    }
+    case kInt32x4ShiftLeft:
+    case kInt32x4ShiftRight:
+    case kInt32x4ShiftRightArithmetic: {
+      // Constant shift counts use the immediate forms; variable counts go
+      // through an XMM scratch register (the count variants of pslld etc.).
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+      if (instr->hydrogen()->right()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->right())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->right()));
+        uint8_t shift = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        switch (instr->op()) {
+          case kInt32x4ShiftLeft:
+            __ pslld(left_reg, shift);
+            break;
+          case kInt32x4ShiftRight:
+            __ psrld(left_reg, shift);
+            break;
+          case kInt32x4ShiftRightArithmetic:
+            __ psrad(left_reg, shift);
+            break;
+          default:
+            UNREACHABLE();
+        }
+        return;
+      } else {
+        XMMRegister left_reg = ToInt32x4Register(instr->left());
+        Register shift = ToRegister(instr->right());
+        XMMRegister xmm_scratch = double_scratch0();
+        __ movd(xmm_scratch, shift);
+        switch (instr->op()) {
+          case kInt32x4ShiftLeft:
+            __ pslld(left_reg, xmm_scratch);
+            break;
+          case kInt32x4ShiftRight:
+            __ psrld(left_reg, xmm_scratch);
+            break;
+          case kInt32x4ShiftRightArithmetic:
+            __ psrad(left_reg, xmm_scratch);
+            break;
+          default:
+            UNREACHABLE();
+        }
+        return;
+      }
+    }
+    case kFloat32x4LessThan:
+    case kFloat32x4LessThanOrEqual:
+    case kFloat32x4Equal:
+    case kFloat32x4NotEqual:
+    case kFloat32x4GreaterThanOrEqual:
+    case kFloat32x4GreaterThan: {
+      // Lane-wise comparisons produce an int32x4 mask.  cmpps only has
+      // lt/le/eq/neq/nlt/nle forms with the destination on the left, so each
+      // case picks the variant (or the operand order) that avoids an extra
+      // move when the result register aliases one of the inputs.
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsFloat32x4());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToFloat32x4Register(instr->right());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      switch (instr->op()) {
+        case kFloat32x4LessThan:
+          if (result_reg.is(left_reg)) {
+            __ cmpltps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            // Reversed operands: (right !< left) == (left < right) lane-wise
+            // only for ordered inputs; this is the mirrored predicate form.
+            __ cmpnltps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpltps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4LessThanOrEqual:
+          if (result_reg.is(left_reg)) {
+            __ cmpleps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpnleps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpleps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4Equal:
+          // Equality is symmetric, so operand order does not matter.
+          if (result_reg.is(left_reg)) {
+            __ cmpeqps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpeqps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpeqps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4NotEqual:
+          if (result_reg.is(left_reg)) {
+            __ cmpneqps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpneqps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpneqps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4GreaterThanOrEqual:
+          // a >= b is encoded as !(a < b) / (b < a) depending on aliasing.
+          if (result_reg.is(left_reg)) {
+            __ cmpnltps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpltps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpnltps(result_reg, right_reg);
+          }
+          break;
+        case kFloat32x4GreaterThan:
+          if (result_reg.is(left_reg)) {
+            __ cmpnleps(result_reg, right_reg);
+          } else if (result_reg.is(right_reg)) {
+            __ cmpleps(result_reg, left_reg);
+          } else {
+            __ movaps(result_reg, left_reg);
+            __ cmpnleps(result_reg, right_reg);
+          }
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    case kInt32x4And:
+    case kInt32x4Or:
+    case kInt32x4Xor:
+    case kInt32x4Add:
+    case kInt32x4Sub:
+    case kInt32x4Mul:
+    case kInt32x4GreaterThan:
+    case kInt32x4Equal:
+    case kInt32x4LessThan: {
+      // In-place int32x4 bitwise/arithmetic/comparison ops.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsInt32x4());
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      XMMRegister right_reg = ToInt32x4Register(instr->right());
+      switch (instr->op()) {
+        case kInt32x4And:
+          __ andps(left_reg, right_reg);
+          break;
+        case kInt32x4Or:
+          __ orps(left_reg, right_reg);
+          break;
+        case kInt32x4Xor:
+          __ xorps(left_reg, right_reg);
+          break;
+        case kInt32x4Add:
+          __ paddd(left_reg, right_reg);
+          break;
+        case kInt32x4Sub:
+          __ psubd(left_reg, right_reg);
+          break;
+        case kInt32x4Mul:
+          if (CpuFeatures::IsSupported(SSE4_1)) {
+            CpuFeatureScope scope(masm(), SSE4_1);
+            __ pmulld(left_reg, right_reg);
+          } else {
+            // SSE2 fallback: multiply even lanes and (after shifting) odd
+            // lanes with pmuludq, then interleave the low 32-bit products.
+            // The algorithm is from http://stackoverflow.com/questions/10500766/sse-multiplication-of-4-32-bit-integers
+            // NOTE(review): this path also clobbers right_reg (psrldq).
+            XMMRegister xmm_scratch = xmm0;
+            __ movaps(xmm_scratch, left_reg);
+            __ pmuludq(left_reg, right_reg);
+            __ psrldq(xmm_scratch, 4);
+            __ psrldq(right_reg, 4);
+            __ pmuludq(xmm_scratch, right_reg);
+            __ pshufd(left_reg, left_reg, 8);
+            __ pshufd(xmm_scratch, xmm_scratch, 8);
+            __ punpackldq(left_reg, xmm_scratch);
+          }
+          break;
+        case kInt32x4GreaterThan:
+          __ pcmpgtd(left_reg, right_reg);
+          break;
+        case kInt32x4Equal:
+          __ pcmpeqd(left_reg, right_reg);
+          break;
+        case kInt32x4LessThan: {
+          // No pcmpltd exists; compute right > left into xmm0 and copy back.
+          XMMRegister xmm_scratch = xmm0;
+          __ movaps(xmm_scratch, right_reg);
+          __ pcmpgtd(xmm_scratch, left_reg);
+          __ movaps(left_reg, xmm_scratch);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+    // Deliberate fall-through chain: W/Z/Y/X accumulate `imm8` to lane
+    // index 3/2/1/0 respectively before reaching the shared body.
+    case kFloat32x4WithW:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithZ:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithY:
+      imm8++;
+      // Fall through.
+    case kFloat32x4WithX: {
+      // Replace one float lane with a (narrowed) scalar.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat32x4Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister xmm_scratch = xmm0;
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ cvtsd2ss(xmm_scratch, right_reg);
+      if (CpuFeatures::IsSupported(SSE4_1)) {
+        // insertps encodes the destination lane in bits 4-5 of the imm8.
+        imm8 = imm8 << 4;
+        CpuFeatureScope scope(masm(), SSE4_1);
+        __ insertps(left_reg, xmm_scratch, imm8);
+      } else {
+        // Pre-SSE4.1 fallback: spill to the stack, patch the lane, reload.
+        __ subq(rsp, Immediate(kFloat32x4Size));
+        __ movups(Operand(rsp, 0), left_reg);
+        __ movss(Operand(rsp, imm8 * kFloatSize), xmm_scratch);
+        __ movups(left_reg, Operand(rsp, 0));
+        __ addq(rsp, Immediate(kFloat32x4Size));
+      }
+      return;
+    }
+    case kFloat64x2WithX: {
+      // Replace the low double lane via a stack round-trip.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movups(Operand(rsp, 0), left_reg);
+      __ movsd(Operand(rsp, 0 * kDoubleSize), right_reg);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    case kFloat64x2WithY: {
+      // Replace the high double lane via a stack round-trip.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToFloat64x2Register(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movups(Operand(rsp, 0), left_reg);
+      __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    case kFloat64x2Constructor: {
+      // Build a float64x2 from two scalars by writing both to the stack and
+      // loading the pair as one 128-bit value.
+      DCHECK(instr->hydrogen()->left()->representation().IsDouble());
+      DCHECK(instr->hydrogen()->right()->representation().IsDouble());
+      XMMRegister left_reg = ToDoubleRegister(instr->left());
+      XMMRegister right_reg = ToDoubleRegister(instr->right());
+      XMMRegister result_reg = ToFloat64x2Register(instr->result());
+      __ subq(rsp, Immediate(kFloat64x2Size));
+      __ movsd(Operand(rsp, 0 * kDoubleSize), left_reg);
+      __ movsd(Operand(rsp, 1 * kDoubleSize), right_reg);
+      __ movups(result_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kFloat64x2Size));
+      return;
+    }
+    // Deliberate fall-through chain (same scheme as the float With* above).
+    case kInt32x4WithW:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithZ:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithY:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithX: {
+      // Replace one int lane: pinsrd on SSE4.1, stack round-trip otherwise.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsInteger32());
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      Register right_reg = ToRegister(instr->right());
+      if (CpuFeatures::IsSupported(SSE4_1)) {
+        CpuFeatureScope scope(masm(), SSE4_1);
+        __ pinsrd(left_reg, right_reg, imm8);
+      } else {
+        __ subq(rsp, Immediate(kInt32x4Size));
+        __ movdqu(Operand(rsp, 0), left_reg);
+        __ movl(Operand(rsp, imm8 * kFloatSize), right_reg);
+        __ movdqu(left_reg, Operand(rsp, 0));
+        __ addq(rsp, Immediate(kInt32x4Size));
+      }
+      return;
+    }
+    // Deliberate fall-through chain for the boolean-flag lane setters.
+    case kInt32x4WithFlagW:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagZ:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagY:
+      imm8++;
+      // Fall through.
+    case kInt32x4WithFlagX: {
+      // Set one lane to all-ones (true) or all-zeros (false) from a tagged
+      // boolean; any non-boolean input deoptimizes before touching rsp.
+      DCHECK(instr->left()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->left()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->right()->representation().IsTagged());
+      HType type = instr->hydrogen()->right()->type();
+      XMMRegister left_reg = ToInt32x4Register(instr->left());
+      Register right_reg = ToRegister(instr->right());
+      Label load_false_value, done;
+      if (type.IsBoolean()) {
+        __ subq(rsp, Immediate(kInt32x4Size));
+        __ movups(Operand(rsp, 0), left_reg);
+        __ CompareRoot(right_reg, Heap::kTrueValueRootIndex);
+        __ j(not_equal, &load_false_value, Label::kNear);
+      } else {
+        Comment(";;; deoptimize: other types for int32x4.withFlagX/Y/Z/W.");
+        DeoptimizeIf(no_condition, instr,
+                     "other types for int32x4.withFlagX/Y/Z/W");
+        return;
+      }
+      // load true value.
+      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0xFFFFFFFF));
+      __ jmp(&done, Label::kNear);
+      __ bind(&load_false_value);
+      __ movl(Operand(rsp, imm8 * kFloatSize), Immediate(0x0));
+      __ bind(&done);
+      __ movups(left_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for three-operand SIMD operations: mask-based select, two-vector
+// shuffle-mix, and clamp.  The select cases implement the classic bitwise
+// blend: result = (mask & trueValue) | (~mask & falseValue), using xmm0 as a
+// fixed scratch for the inverted-mask half.
+void LCodeGen::DoTernarySIMDOperation(LTernarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Select: {
+      DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+      XMMRegister mask_reg = ToInt32x4Register(instr->first());
+      XMMRegister left_reg = ToFloat32x4Register(instr->second());
+      XMMRegister right_reg = ToFloat32x4Register(instr->third());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      XMMRegister temp_reg = xmm0;
+
+      // Copy mask.
+      __ movaps(temp_reg, mask_reg);
+      // Invert it.
+      __ notps(temp_reg);
+      // temp_reg = temp_reg & falseValue.
+      __ andps(temp_reg, right_reg);
+
+      // The remaining half depends on which input (if any) the result
+      // register aliases, to avoid clobbering a still-needed operand.
+      if (!result_reg.is(mask_reg)) {
+        if (result_reg.is(left_reg)) {
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, mask_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        } else {
+          __ movaps(result_reg, mask_reg);
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, left_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        }
+      } else {
+        // result_reg = result_reg & trueValue.
+        __ andps(result_reg, left_reg);
+        // out = result_reg | temp_reg.
+        __ orps(result_reg, temp_reg);
+      }
+      return;
+    }
+    case kInt32x4Select: {
+      // Identical blend to kFloat32x4Select, with int32x4 operands.
+      DCHECK(instr->hydrogen()->first()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->second()->representation().IsInt32x4());
+      DCHECK(instr->hydrogen()->third()->representation().IsInt32x4());
+
+      XMMRegister mask_reg = ToInt32x4Register(instr->first());
+      XMMRegister left_reg = ToInt32x4Register(instr->second());
+      XMMRegister right_reg = ToInt32x4Register(instr->third());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      XMMRegister temp_reg = xmm0;
+
+      // Copy mask.
+      __ movaps(temp_reg, mask_reg);
+      // Invert it.
+      __ notps(temp_reg);
+      // temp_reg = temp_reg & falseValue.
+      __ andps(temp_reg, right_reg);
+
+      if (!result_reg.is(mask_reg)) {
+        if (result_reg.is(left_reg)) {
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, mask_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        } else {
+          __ movaps(result_reg, mask_reg);
+          // result_reg = result_reg & trueValue.
+          __ andps(result_reg, left_reg);
+          // out = result_reg | temp_reg.
+          __ orps(result_reg, temp_reg);
+        }
+      } else {
+        // result_reg = result_reg & trueValue.
+        __ andps(result_reg, left_reg);
+        // out = result_reg | temp_reg.
+        __ orps(result_reg, temp_reg);
+      }
+      return;
+    }
+    case kFloat32x4ShuffleMix: {
+      // shufps across two source vectors; the selector must be a constant
+      // immediate, otherwise deoptimize (same constraint as the shuffles in
+      // DoBinarySIMDOperation).
+      DCHECK(instr->first()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->third()->representation().IsInteger32());
+      if (instr->hydrogen()->third()->IsConstant() &&
+          HConstant::cast(instr->hydrogen()->third())->HasInteger32Value()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(instr->third()));
+        uint8_t select = static_cast<uint8_t>(value & 0xFF);
+        XMMRegister first_reg = ToFloat32x4Register(instr->first());
+        XMMRegister second_reg = ToFloat32x4Register(instr->second());
+        __ shufps(first_reg, second_reg, select);
+        return;
+      } else {
+        Comment(";;; deoptimize: non-constant selector for shuffle");
+        DeoptimizeIf(no_condition, instr, "non-constant selector for shuffle");
+        return;
+      }
+    }
+    case kFloat32x4Clamp: {
+      // Clamp value into [lower, upper]: min against upper, then max
+      // against lower, in place.
+      DCHECK(instr->first()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->first()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->second()->representation().IsFloat32x4());
+      DCHECK(instr->hydrogen()->third()->representation().IsFloat32x4());
+
+      XMMRegister value_reg = ToFloat32x4Register(instr->first());
+      XMMRegister lower_reg = ToFloat32x4Register(instr->second());
+      XMMRegister upper_reg = ToFloat32x4Register(instr->third());
+      __ minps(value_reg, upper_reg);
+      __ maxps(value_reg, lower_reg);
+      return;
+    }
+    case kFloat64x2Clamp: {
+      // Double-lane variant of kFloat32x4Clamp.
+      DCHECK(instr->first()->Equals(instr->result()));
+      DCHECK(instr->hydrogen()->first()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->second()->representation().IsFloat64x2());
+      DCHECK(instr->hydrogen()->third()->representation().IsFloat64x2());
+
+      XMMRegister value_reg = ToFloat64x2Register(instr->first());
+      XMMRegister lower_reg = ToFloat64x2Register(instr->second());
+      XMMRegister upper_reg = ToFloat64x2Register(instr->third());
+      __ minpd(value_reg, upper_reg);
+      __ maxpd(value_reg, lower_reg);
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
+// Emits code for four-operand SIMD constructors.  All cases assemble the
+// four lanes in a stack slot and load the completed 128-bit value with an
+// unaligned movups, since there is no single instruction that packs four
+// independent scalars.
+void LCodeGen::DoQuarternarySIMDOperation(LQuarternarySIMDOperation* instr) {
+  switch (instr->op()) {
+    case kFloat32x4Constructor: {
+      // Narrow each double argument to float via xmm0, store it to its lane
+      // slot, then load the assembled vector.
+      DCHECK(instr->hydrogen()->x()->representation().IsDouble());
+      DCHECK(instr->hydrogen()->y()->representation().IsDouble());
+      DCHECK(instr->hydrogen()->z()->representation().IsDouble());
+      DCHECK(instr->hydrogen()->w()->representation().IsDouble());
+      XMMRegister x_reg = ToDoubleRegister(instr->x());
+      XMMRegister y_reg = ToDoubleRegister(instr->y());
+      XMMRegister z_reg = ToDoubleRegister(instr->z());
+      XMMRegister w_reg = ToDoubleRegister(instr->w());
+      XMMRegister result_reg = ToFloat32x4Register(instr->result());
+      __ subq(rsp, Immediate(kFloat32x4Size));
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, x_reg);
+      __ movss(Operand(rsp, 0 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, y_reg);
+      __ movss(Operand(rsp, 1 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, z_reg);
+      __ movss(Operand(rsp, 2 * kFloatSize), xmm0);
+      __ xorps(xmm0, xmm0);
+      __ cvtsd2ss(xmm0, w_reg);
+      __ movss(Operand(rsp, 3 * kFloatSize), xmm0);
+      __ movups(result_reg, Operand(rsp, 0 * kFloatSize));
+      __ addq(rsp, Immediate(kFloat32x4Size));
+      return;
+    }
+    case kInt32x4Constructor: {
+      // Store the four 32-bit integers to consecutive lane slots and load
+      // the assembled vector.
+      DCHECK(instr->hydrogen()->x()->representation().IsInteger32());
+      DCHECK(instr->hydrogen()->y()->representation().IsInteger32());
+      DCHECK(instr->hydrogen()->z()->representation().IsInteger32());
+      DCHECK(instr->hydrogen()->w()->representation().IsInteger32());
+      Register x_reg = ToRegister(instr->x());
+      Register y_reg = ToRegister(instr->y());
+      Register z_reg = ToRegister(instr->z());
+      Register w_reg = ToRegister(instr->w());
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      __ subq(rsp, Immediate(kInt32x4Size));
+      __ movl(Operand(rsp, 0 * kInt32Size), x_reg);
+      __ movl(Operand(rsp, 1 * kInt32Size), y_reg);
+      __ movl(Operand(rsp, 2 * kInt32Size), z_reg);
+      __ movl(Operand(rsp, 3 * kInt32Size), w_reg);
+      __ movups(result_reg, Operand(rsp, 0 * kInt32Size));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    case kInt32x4Bool: {
+      // Build a boolean mask vector: each lane becomes all-ones (-1) for the
+      // JS true value and all-zeros otherwise.  Deoptimizes unless all four
+      // inputs are statically known to be booleans (checked before any rsp
+      // adjustment, so the deopt path leaves the stack untouched).
+      DCHECK(instr->hydrogen()->x()->representation().IsTagged());
+      DCHECK(instr->hydrogen()->y()->representation().IsTagged());
+      DCHECK(instr->hydrogen()->z()->representation().IsTagged());
+      DCHECK(instr->hydrogen()->w()->representation().IsTagged());
+      HType x_type = instr->hydrogen()->x()->type();
+      HType y_type = instr->hydrogen()->y()->type();
+      HType z_type = instr->hydrogen()->z()->type();
+      HType w_type = instr->hydrogen()->w()->type();
+      if (!x_type.IsBoolean() || !y_type.IsBoolean() ||
+          !z_type.IsBoolean() || !w_type.IsBoolean()) {
+        Comment(";;; deoptimize: other types for int32x4.bool.");
+        DeoptimizeIf(no_condition, instr, "other types for int32x4.bool");
+        return;
+      }
+      XMMRegister result_reg = ToInt32x4Register(instr->result());
+      Register x_reg = ToRegister(instr->x());
+      Register y_reg = ToRegister(instr->y());
+      Register z_reg = ToRegister(instr->z());
+      Register w_reg = ToRegister(instr->w());
+      Label load_false_x, done_x, load_false_y, done_y,
+            load_false_z, done_z, load_false_w, done_w;
+      __ subq(rsp, Immediate(kInt32x4Size));
+
+      // Lane x.
+      __ CompareRoot(x_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_x, Label::kNear);
+      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(-1));
+      __ jmp(&done_x, Label::kNear);
+      __ bind(&load_false_x);
+      __ movl(Operand(rsp, 0 * kInt32Size), Immediate(0x0));
+      __ bind(&done_x);
+
+      // Lane y.
+      __ CompareRoot(y_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_y, Label::kNear);
+      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(-1));
+      __ jmp(&done_y, Label::kNear);
+      __ bind(&load_false_y);
+      __ movl(Operand(rsp, 1 * kInt32Size), Immediate(0x0));
+      __ bind(&done_y);
+
+      // Lane z.
+      __ CompareRoot(z_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_z, Label::kNear);
+      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(-1));
+      __ jmp(&done_z, Label::kNear);
+      __ bind(&load_false_z);
+      __ movl(Operand(rsp, 2 * kInt32Size), Immediate(0x0));
+      __ bind(&done_z);
+
+      // Lane w.
+      __ CompareRoot(w_reg, Heap::kTrueValueRootIndex);
+      __ j(not_equal, &load_false_w, Label::kNear);
+      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(-1));
+      __ jmp(&done_w, Label::kNear);
+      __ bind(&load_false_w);
+      __ movl(Operand(rsp, 3 * kInt32Size), Immediate(0x0));
+      __ bind(&done_w);
+
+      __ movups(result_reg, Operand(rsp, 0));
+      __ addq(rsp, Immediate(kInt32x4Size));
+      return;
+    }
+    default:
+      UNREACHABLE();
+      return;
+  }
+}
+
+
void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
- __ SmiToInteger64(key_reg, key_reg);
+ if (!HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind))
+ __ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
+ } else if (kPointerSize == kInt64Size && !key->IsConstantOperand()) {
+ Representation key_representation =
+ instr->hydrogen()->key()->representation();
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind))
+ HandleExternalArrayOpRequiresPreScale(
+ key, key_representation, elements_kind);
}
+
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
__ cvtsd2ss(value, value);
__ movss(operand, value);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
- elements_kind == FLOAT64_ELEMENTS) {
+ elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
+ } else if (IsSIMD128ElementsKind(elements_kind)) {
+ __ movups(operand, ToSIMD128Register(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
__ movl(operand, value);
break;
case EXTERNAL_FLOAT32_ELEMENTS:
+ case EXTERNAL_FLOAT32x4_ELEMENTS:
+ case EXTERNAL_FLOAT64x2_ELEMENTS:
+ case EXTERNAL_INT32x4_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
+ case FLOAT32x4_ELEMENTS:
+ case FLOAT64x2_ELEMENTS:
+ case INT32x4_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
}
+void LCodeGen::DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
+ Runtime::FunctionId id) {
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Smi::FromInt(0));
+
+ {
+ PushSafepointRegistersScope scope(this);
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(id);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ movp(kScratchRegister, rax);
+ }
+ __ movp(reg, kScratchRegister);
+}
+
+
+template<class T>
+void LCodeGen::HandleSIMD128ToTagged(LSIMD128ToTagged* instr) {
+ class DeferredSIMD128ToTagged FINAL : public LDeferredCode {
+ public:
+ DeferredSIMD128ToTagged(LCodeGen* codegen,
+ LSIMD128ToTagged* instr,
+ Runtime::FunctionId id)
+ : LDeferredCode(codegen), instr_(instr), id_(id) { }
+ virtual void Generate() OVERRIDE {
+ codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
+ }
+ virtual LInstruction* instr() OVERRIDE { return instr_; }
+ private:
+ LSIMD128ToTagged* instr_;
+ Runtime::FunctionId id_;
+ };
+
+ XMMRegister input_reg = ToSIMD128Register(instr->value());
+ Register reg = ToRegister(instr->result());
+ Register tmp = ToRegister(instr->temp());
+ Register tmp2 = ToRegister(instr->temp2());
+ Register tmp3 = ToRegister(instr->temp3());
+
+ DeferredSIMD128ToTagged* deferred =
+ new(zone()) DeferredSIMD128ToTagged(this, instr,
+ static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
+ if (FLAG_inline_new) {
+ if (T::kInstanceType == FLOAT32x4_TYPE) {
+ __ AllocateFloat32x4(reg, tmp, tmp2, tmp3, deferred->entry());
+ } else if (T::kInstanceType == INT32x4_TYPE) {
+ __ AllocateInt32x4(reg, tmp, tmp2, tmp3, deferred->entry());
+ } else if (T::kInstanceType == FLOAT64x2_TYPE) {
+ __ AllocateFloat64x2(reg, tmp, tmp2, tmp3, deferred->entry());
+ }
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+
+ // Load the inner FixedTypedArray object.
+ __ movp(tmp, FieldOperand(reg, T::kValueOffset));
+
+ __ movups(FieldOperand(tmp, FixedTypedArrayBase::kDataOffset), input_reg);
+}
+
+
+void LCodeGen::DoSIMD128ToTagged(LSIMD128ToTagged* instr) {
+ if (instr->value()->IsFloat32x4Register()) {
+ HandleSIMD128ToTagged<Float32x4>(instr);
+ } else if (instr->value()->IsFloat64x2Register()) {
+ HandleSIMD128ToTagged<Float64x2>(instr);
+ } else {
+ DCHECK(instr->value()->IsInt32x4Register());
+ HandleSIMD128ToTagged<Int32x4>(instr);
+ }
+}
+
+
void LCodeGen::DoSmiTag(LSmiTag* instr) {
HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
}
+template<class T>
+void LCodeGen::HandleTaggedToSIMD128(LTaggedToSIMD128* instr) {
+ LOperand* input = instr->value();
+ DCHECK(input->IsRegister());
+ LOperand* result = instr->result();
+ DCHECK(result->IsSIMD128Register());
+ LOperand* temp = instr->temp();
+ DCHECK(temp->IsRegister());
+
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToSIMD128Register(result);
+ Register temp_reg = ToRegister(temp);
+
+ __ testp(input_reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(zero, instr, "value is smi");
+ __ CmpObjectType(input_reg, T::kInstanceType, kScratchRegister);
+ DeoptimizeIf(not_equal, instr, "value is not simd128");
+
+ // Load the inner FixedTypedArray object.
+ __ movp(temp_reg, FieldOperand(input_reg, T::kValueOffset));
+
+ __ movups(
+ result_reg, FieldOperand(temp_reg, FixedTypedArrayBase::kDataOffset));
+}
+
+
+void LCodeGen::DoTaggedToSIMD128(LTaggedToSIMD128* instr) {
+ if (instr->representation().IsFloat32x4()) {
+ HandleTaggedToSIMD128<Float32x4>(instr);
+ } else if (instr->representation().IsFloat64x2()) {
+ HandleTaggedToSIMD128<Float64x2>(instr);
+ } else {
+ DCHECK(instr->representation().IsInt32x4());
+ HandleTaggedToSIMD128<Int32x4>(instr);
+ }
+}
+
+
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
LOperand* input = instr->value();
DCHECK(input->IsDoubleRegister());
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ XMMRegister ToFloat32x4Register(LOperand* op) const;
+ XMMRegister ToFloat64x2Register(LOperand* op) const;
+ XMMRegister ToInt32x4Register(LOperand* op) const;
+ XMMRegister ToSIMD128Register(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index);
+ void DoDeferredSIMD128ToTagged(LSIMD128ToTagged* instr,
+ Runtime::FunctionId id);
+
+ template<class T>
+ void HandleTaggedToSIMD128(LTaggedToSIMD128* instr);
+ template<class T>
+ void HandleSIMD128ToTagged(LSIMD128ToTagged* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
+ XMMRegister ToSIMD128Register(int index) const;
Operand BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ bool HandleExternalArrayOpRequiresPreScale(LOperand* key,
+ Representation key_representation,
+ ElementsKind elements_kind);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
__ movsd(xmm0, src);
__ movsd(cgen_->ToOperand(destination), xmm0);
}
+ } else if (source->IsSIMD128Register()) {
+ XMMRegister src = cgen_->ToSIMD128Register(source);
+ if (destination->IsSIMD128Register()) {
+ __ movaps(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSIMD128StackSlot());
+ __ movups(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsSIMD128StackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsSIMD128Register()) {
+ __ movups(cgen_->ToSIMD128Register(destination), src);
+ } else {
+ DCHECK(destination->IsSIMD128StackSlot());
+ __ movups(xmm0, src);
+ __ movups(cgen_->ToOperand(destination), xmm0);
+ }
} else {
UNREACHABLE();
}
__ movsd(dst, xmm0);
__ movp(src, kScratchRegister);
+ } else if ((source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128StackSlot())) {
+ // Swap two XMM stack slots.
+ STATIC_ASSERT(kSIMD128Size == 2 * kDoubleSize);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movups(xmm0, src);
+ __ movq(kScratchRegister, dst);
+ __ movq(src, kScratchRegister);
+ __ movq(kScratchRegister, Operand(dst, kDoubleSize));
+ __ movq(Operand(src, kDoubleSize), kScratchRegister);
+ __ movups(dst, xmm0);
+
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
__ movaps(source_reg, destination_reg);
__ movaps(destination_reg, xmm0);
+ } else if (source->IsSIMD128Register() && destination->IsSIMD128Register()) {
+ // Swap two XMM registers.
+ XMMRegister source_reg = cgen_->ToSIMD128Register(source);
+ XMMRegister destination_reg = cgen_->ToSIMD128Register(destination);
+ __ movaps(xmm0, source_reg);
+ __ movaps(source_reg, destination_reg);
+ __ movaps(destination_reg, xmm0);
+
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
__ movsd(other_operand, reg);
__ movaps(reg, xmm0);
+ } else if (source->IsSIMD128Register() || destination->IsSIMD128Register()) {
+ // Swap a xmm register and a xmm stack slot.
+ DCHECK((source->IsSIMD128Register() &&
+ destination->IsSIMD128StackSlot()) ||
+ (source->IsSIMD128StackSlot() &&
+ destination->IsSIMD128Register()));
+ XMMRegister reg = cgen_->ToSIMD128Register(source->IsSIMD128Register()
+ ? source
+ : destination);
+ LOperand* other = source->IsSIMD128Register() ? destination : source;
+ DCHECK(other->IsSIMD128StackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movups(xmm0, other_operand);
+ __ movups(other_operand, reg);
+ __ movaps(reg, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
// TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port.
spill_slot_count_ |= 1;
}
+
+ switch (kind) {
+ case GENERAL_REGISTERS: return spill_slot_count_++;
+ case DOUBLE_REGISTERS: return spill_slot_count_++;
+ case FLOAT32x4_REGISTERS:
+ case FLOAT64x2_REGISTERS:
+ case INT32x4_REGISTERS: {
+ spill_slot_count_++;
+ return spill_slot_count_++;
+ }
+ default:
+ UNREACHABLE();
+ return -1;
+ }
+
return spill_slot_count_++;
}
// Alternatively, at some point, start using half-size
// stack slots for int32 values.
int index = GetNextSpillIndex(kind);
- if (kind == DOUBLE_REGISTERS) {
- return LDoubleStackSlot::Create(index, zone());
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- return LStackSlot::Create(index, zone());
+ switch (kind) {
+ case GENERAL_REGISTERS: return LStackSlot::Create(index, zone());
+ case DOUBLE_REGISTERS: return LDoubleStackSlot::Create(index, zone());
+ case FLOAT32x4_REGISTERS: return LFloat32x4StackSlot::Create(index, zone());
+ case FLOAT64x2_REGISTERS: return LFloat64x2StackSlot::Create(index, zone());
+ case INT32x4_REGISTERS: return LInt32x4StackSlot::Create(index, zone());
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
}
+const char* LNullarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoNullarySIMDOperation(
+ HNullarySIMDOperation* instr) {
+ LNullarySIMDOperation* result =
+ new(zone()) LNullarySIMDOperation(instr->op());
+ switch (instr->op()) {
+#define SIMD_NULLARY_OPERATION_CASE_ITEM(module, function, name, p4) \
+ case k##name:
+SIMD_NULLARY_OPERATIONS(SIMD_NULLARY_OPERATION_CASE_ITEM)
+#undef SIMD_NULLARY_OPERATION_CASE_ITEM
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LUnarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+ case kSIMD128Change: return "SIMD128-change";
+#define SIMD_UNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_UNARY_OPERATIONS(SIMD_UNARY_OPERATION_CASE_ITEM)
+SIMD_UNARY_OPERATIONS_FOR_PROPERTY_ACCESS(SIMD_UNARY_OPERATION_CASE_ITEM)
+#undef SIMD_UNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoUnarySIMDOperation(HUnarySIMDOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnarySIMDOperation* result =
+ new(zone()) LUnarySIMDOperation(input, instr->op());
+ switch (instr->op()) {
+ case kSIMD128Change:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kFloat32x4Abs:
+ case kFloat32x4Neg:
+ case kFloat32x4Reciprocal:
+ case kFloat32x4ReciprocalSqrt:
+ case kFloat32x4Sqrt:
+ case kFloat64x2Abs:
+ case kFloat64x2Neg:
+ case kFloat64x2Sqrt:
+ case kInt32x4Neg:
+ case kInt32x4Not:
+ return DefineSameAsFirst(result);
+ case kFloat32x4Coercion:
+ case kFloat64x2Coercion:
+ case kInt32x4Coercion:
+ case kFloat32x4BitsToInt32x4:
+ case kFloat32x4ToInt32x4:
+ case kInt32x4BitsToFloat32x4:
+ case kInt32x4ToFloat32x4:
+ case kFloat32x4Splat:
+ case kInt32x4Splat:
+ case kFloat32x4GetSignMask:
+ case kFloat32x4GetX:
+ case kFloat32x4GetY:
+ case kFloat32x4GetZ:
+ case kFloat32x4GetW:
+ case kFloat64x2GetSignMask:
+ case kFloat64x2GetX:
+ case kFloat64x2GetY:
+ case kInt32x4GetSignMask:
+ case kInt32x4GetX:
+ case kInt32x4GetY:
+ case kInt32x4GetZ:
+ case kInt32x4GetW:
+ case kInt32x4GetFlagX:
+ case kInt32x4GetFlagY:
+ case kInt32x4GetFlagZ:
+ case kInt32x4GetFlagW:
+ return DefineAsRegister(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LBinarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_BINARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_BINARY_OPERATIONS(SIMD_BINARY_OPERATION_CASE_ITEM)
+#undef SIMD_BINARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoBinarySIMDOperation(
+ HBinarySIMDOperation* instr) {
+ switch (instr->op()) {
+ case kFloat32x4Add:
+ case kFloat32x4Div:
+ case kFloat32x4Max:
+ case kFloat32x4Min:
+ case kFloat32x4Mul:
+ case kFloat32x4Sub:
+ case kFloat32x4Scale:
+ case kFloat32x4WithX:
+ case kFloat32x4WithY:
+ case kFloat32x4WithZ:
+ case kFloat32x4WithW:
+ case kFloat64x2Add:
+ case kFloat64x2Div:
+ case kFloat64x2Max:
+ case kFloat64x2Min:
+ case kFloat64x2Mul:
+ case kFloat64x2Sub:
+ case kFloat64x2Scale:
+ case kFloat64x2WithX:
+ case kFloat64x2WithY:
+ case kInt32x4Add:
+ case kInt32x4And:
+ case kInt32x4Mul:
+ case kInt32x4Or:
+ case kInt32x4Sub:
+ case kInt32x4Xor:
+ case kInt32x4WithX:
+ case kInt32x4WithY:
+ case kInt32x4WithZ:
+ case kInt32x4WithW:
+ case kInt32x4WithFlagX:
+ case kInt32x4WithFlagY:
+ case kInt32x4WithFlagZ:
+ case kInt32x4WithFlagW:
+ case kInt32x4GreaterThan:
+ case kInt32x4Equal:
+ case kInt32x4LessThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ if (instr->op() == kInt32x4WithFlagX ||
+ instr->op() == kInt32x4WithFlagY ||
+ instr->op() == kInt32x4WithFlagZ ||
+ instr->op() == kInt32x4WithFlagW) {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ } else {
+ return DefineSameAsFirst(result);
+ }
+ }
+ case kFloat64x2Constructor: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ case kFloat32x4Shuffle:
+ case kInt32x4Shuffle:
+ case kInt32x4ShiftLeft:
+ case kInt32x4ShiftRight:
+ case kInt32x4ShiftRightArithmetic: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstant(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4LessThan:
+ case kFloat32x4LessThanOrEqual:
+ case kFloat32x4Equal:
+ case kFloat32x4NotEqual:
+ case kFloat32x4GreaterThanOrEqual:
+ case kFloat32x4GreaterThan: {
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LBinarySIMDOperation* result =
+ new(zone()) LBinarySIMDOperation(left, right, instr->op());
+ return DefineAsRegister(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LTernarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_TERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, p6, \
+ p7) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_TERNARY_OPERATIONS(SIMD_TERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_TERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoTernarySIMDOperation(
+ HTernarySIMDOperation* instr) {
+ LOperand* first = UseRegisterAtStart(instr->first());
+ LOperand* second = UseRegisterAtStart(instr->second());
+ LOperand* third = instr->op() == kFloat32x4ShuffleMix
+ ? UseOrConstant(instr->third())
+ : UseRegisterAtStart(instr->third());
+ LTernarySIMDOperation* result =
+ new(zone()) LTernarySIMDOperation(first, second, third, instr->op());
+ switch (instr->op()) {
+ case kFloat32x4Clamp:
+ case kFloat64x2Clamp: {
+ return DefineSameAsFirst(result);
+ }
+ case kFloat32x4ShuffleMix: {
+ return AssignEnvironment(DefineSameAsFirst(result));
+ }
+ case kFloat32x4Select:
+ case kInt32x4Select: {
+ return DefineAsRegister(result);
+ }
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+const char* LQuarternarySIMDOperation::Mnemonic() const {
+ switch (op()) {
+#define SIMD_QUARTERNARY_OPERATION_CASE_ITEM(module, function, name, p4, p5, \
+ p6, p7, p8) \
+ case k##name: \
+ return #module "-" #function;
+SIMD_QUARTERNARY_OPERATIONS(SIMD_QUARTERNARY_OPERATION_CASE_ITEM)
+#undef SIMD_QUARTERNARY_OPERATION_CASE_ITEM
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoQuarternarySIMDOperation(
+ HQuarternarySIMDOperation* instr) {
+ LOperand* x = UseRegisterAtStart(instr->x());
+ LOperand* y = UseRegisterAtStart(instr->y());
+ LOperand* z = UseRegisterAtStart(instr->z());
+ LOperand* w = UseRegisterAtStart(instr->w());
+ LQuarternarySIMDOperation* result =
+ new(zone()) LQuarternarySIMDOperation(x, y, z, w, instr->op());
+ if (instr->op() == kInt32x4Bool) {
+ return AssignEnvironment(DefineAsRegister(result));
+ } else {
+ return DefineAsRegister(result);
+ }
+}
+
+
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
if (!val->representation().IsSmi()) result = AssignEnvironment(result);
return result;
+ } else if (to.IsSIMD128()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LTaggedToSIMD128* res = new(zone()) LTaggedToSIMD128(value, temp, to);
+ return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
}
}
+ } else if (from.IsSIMD128()) {
+ DCHECK(to.IsTagged());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LSIMD128ToTagged* result =
+ new(zone()) LSIMD128ToTagged(value, temp, temp2, temp3);
+ return AssignPointerMap(Define(result, result_temp));
}
UNREACHABLE();
return NULL;
LInstruction* result = NULL;
if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresPreScale(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
} else {
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
FindDehoistedKeyDefinitions(instr->key());
}
+
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
(instr->representation().IsInteger32() &&
!(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(elements_kind))));
+ (IsDoubleOrFloatElementsKind(elements_kind))) ||
+ (instr->representation().IsFloat32x4() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (instr->representation().IsFloat64x2() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (instr->representation().IsInt32x4() &&
+ IsInt32x4ElementsKind(elements_kind)));
LOperand* backing_store = UseRegister(instr->elements());
result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
(instr->value()->representation().IsInteger32() &&
!IsDoubleOrFloatElementsKind(elements_kind)) ||
(instr->value()->representation().IsDouble() &&
- IsDoubleOrFloatElementsKind(elements_kind)));
+ IsDoubleOrFloatElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsFloat32x4() &&
+ IsFloat32x4ElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsFloat64x2() &&
+ IsFloat64x2ElementsKind(elements_kind)) ||
+ (instr->value()->representation().IsInt32x4() &&
+ IsInt32x4ElementsKind(elements_kind)));
DCHECK((instr->is_fixed_typed_array() &&
instr->elements()->representation().IsTagged()) ||
(instr->is_external() &&
: UseRegister(instr->value());
LOperand* key = NULL;
if (kPointerSize == kInt64Size) {
- key = UseRegisterOrConstantAtStart(instr->key());
+ bool clobbers_key = ExternalArrayOpRequiresPreScale(
+ instr->key()->representation(), elements_kind);
+ key = clobbers_key
+ ? UseTempRegisterOrConstant(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
} else {
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
V(MathSqrt) \
V(ModByConstI) \
V(ModByPowerOf2I) \
+ V(NullarySIMDOperation) \
+ V(UnarySIMDOperation) \
+ V(BinarySIMDOperation) \
+ V(TernarySIMDOperation) \
+ V(QuarternarySIMDOperation) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
+ V(SIMD128ToTagged) \
V(NumberTagI) \
V(NumberTagU) \
V(NumberUntagD) \
+ V(TaggedToSIMD128) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const FINAL OVERRIDE { \
return LInstruction::k##type; \
};
+class LNullarySIMDOperation FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+ explicit LNullarySIMDOperation(BuiltinFunctionId op)
+ : op_(op) {
+ }
+
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kNullarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LNullarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsNullarySIMDOperation());
+ return reinterpret_cast<LNullarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(NullarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LUnarySIMDOperation FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LUnarySIMDOperation(LOperand* value, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kUnarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LUnarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsUnarySIMDOperation());
+ return reinterpret_cast<LUnarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(UnarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LBinarySIMDOperation FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LBinarySIMDOperation(LOperand* left, LOperand* right, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kBinarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LBinarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsBinarySIMDOperation());
+ return reinterpret_cast<LBinarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(BinarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LTernarySIMDOperation FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+ LTernarySIMDOperation(LOperand* first, LOperand* second, LOperand* third,
+ BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = first;
+ inputs_[1] = second;
+ inputs_[2] = third;
+ }
+
+ LOperand* first() { return inputs_[0]; }
+ LOperand* second() { return inputs_[1]; }
+ LOperand* third() { return inputs_[2]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kTernarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LTernarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsTernarySIMDOperation());
+ return reinterpret_cast<LTernarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(TernarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
+class LQuarternarySIMDOperation FINAL
+ : public LTemplateInstruction<1, 4, 0> {
+ public:
+ LQuarternarySIMDOperation(LOperand* x, LOperand* y, LOperand* z,
+ LOperand* w, BuiltinFunctionId op)
+ : op_(op) {
+ inputs_[0] = x;
+ inputs_[1] = y;
+ inputs_[2] = z;
+ inputs_[3] = w;
+ }
+
+ LOperand* x() { return inputs_[0]; }
+ LOperand* y() { return inputs_[1]; }
+ LOperand* z() { return inputs_[2]; }
+ LOperand* w() { return inputs_[3]; }
+ BuiltinFunctionId op() const { return op_; }
+
+ virtual Opcode opcode() const OVERRIDE {
+ return LInstruction::kQuarternarySIMDOperation;
+ }
+ virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+ virtual const char* Mnemonic() const OVERRIDE;
+ static LQuarternarySIMDOperation* cast(LInstruction* instr) {
+ DCHECK(instr->IsQuarternarySIMDOperation());
+ return reinterpret_cast<LQuarternarySIMDOperation*>(instr);
+ }
+
+ DECLARE_HYDROGEN_ACCESSOR(QuarternarySIMDOperation)
+
+ private:
+ BuiltinFunctionId op_;
+};
+
+
class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
};
+inline static bool ExternalArrayOpRequiresPreScale(
+    Representation key_representation,
+    ElementsKind kind) {
+  int shift_size = ElementsKindToShiftSize(kind);
+  return (SmiValuesAre31Bits() && key_representation.IsSmi())
+      ? shift_size > static_cast<int>(maximal_scale_factor) + kSmiTagSize
+      : shift_size > static_cast<int>(maximal_scale_factor);
+}
+
+
inline static bool ExternalArrayOpRequiresTemp(
Representation key_representation,
ElementsKind elements_kind) {
// Operations that require the key to be divided by two to be converted into
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
- return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+ return ExternalArrayOpRequiresPreScale(key_representation, elements_kind) ||
+ (SmiValuesAre31Bits() && key_representation.IsSmi() &&
(elements_kind == EXTERNAL_INT8_ELEMENTS ||
elements_kind == EXTERNAL_UINT8_ELEMENTS ||
elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
elements_kind == UINT8_ELEMENTS ||
elements_kind == INT8_ELEMENTS ||
- elements_kind == UINT8_CLAMPED_ELEMENTS);
+ elements_kind == UINT8_CLAMPED_ELEMENTS));
}
};
+class LSIMD128ToTagged FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ explicit LSIMD128ToTagged(LOperand* value, LOperand* temp,
+ LOperand* temp2, LOperand* temp3) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SIMD128ToTagged, "simd128-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
public:
};
+class LTaggedToSIMD128 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+ explicit LTaggedToSIMD128(LOperand* value, LOperand* temp,
+ Representation representation)
+ : representation_(representation) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ Representation representation() const { return representation_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToSIMD128, "simd128-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+ private:
+ Representation representation_;
+};
+
+
class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(Operand(rsp, i * kDoubleSize), reg);
+ movups(Operand(rsp, i * kSIMD128Size), reg);
}
}
}
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movsd(reg, Operand(rsp, i * kDoubleSize));
+ movups(reg, Operand(rsp, i * kSIMD128Size));
}
- addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
}
+void MacroAssembler::absps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_absolute_constant =
+ { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
+ Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
+ andps(dst, Operand(kScratchRegister, 0));
+}
+
+
+void MacroAssembler::abspd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint64_t a;
+ uint64_t b;
+ } double_absolute_constant =
+ { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
+ Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
+ andpd(dst, Operand(kScratchRegister, 0));
+}
+
+
+void MacroAssembler::negateps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant =
+ { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
+ Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
+ xorps(dst, Operand(kScratchRegister, 0));
+}
+
+
+void MacroAssembler::negatepd(XMMRegister dst) {
+  static const struct V8_ALIGNED(16) {
+    uint64_t a;
+    uint64_t b;
+  } double_negate_constant =
+      { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
+  Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_negate_constant));
+  xorpd(dst, Operand(kScratchRegister, 0));
+}
+
+
+void MacroAssembler::notps(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_not_constant =
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
+ xorps(dst, Operand(kScratchRegister, 0));
+}
+
+
+void MacroAssembler::pnegd(XMMRegister dst) {
+ static const struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
+ notps(dst);
+ Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
+ paddd(dst, Operand(kScratchRegister, 0));
+}
+
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
}
}
// Get the return address from the stack and restore the frame pointer.
}
+#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
+ V(Float32x4, float32x4, FLOAT32x4) \
+ V(Float64x2, float64x2, FLOAT64x2) \
+ V(Int32x4, int32x4, INT32x4)
+
+#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(Type, type, TYPE) \
+void MacroAssembler::Allocate##Type(Register result, \
+ Register scratch1, \
+ Register scratch2, \
+ Register scratch3, \
+ Label* gc_required) { \
+ /* Allocate SIMD128 object. */ \
+ Allocate(Type::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
+ /* Load the initial map and assign to new allocated object. */ \
+ movp(scratch1, Operand(rbp, StandardFrameConstants::kContextOffset)); \
+ movp(scratch1, \
+ Operand(scratch1, \
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); \
+ movp(scratch1, \
+ FieldOperand(scratch1, GlobalObject::kNativeContextOffset)); \
+ movp(scratch1, \
+ Operand(scratch1, \
+ Context::SlotOffset(Context::TYPE##_FUNCTION_INDEX))); \
+ LoadGlobalFunctionInitialMap(scratch1, scratch1); \
+ movp(FieldOperand(result, JSObject::kMapOffset), \
+ scratch1); \
+ /* Initialize the properties and elements. */ \
+ MoveHeapObject(kScratchRegister, \
+ isolate()->factory()->empty_fixed_array()); \
+ movp(FieldOperand(result, JSObject::kPropertiesOffset), \
+ kScratchRegister); \
+ movp(FieldOperand(result, JSObject::kElementsOffset), \
+ kScratchRegister); \
+ /* Allocate FixedTypedArray object. */ \
+ Allocate(FixedTypedArrayBase::kDataOffset + k##Type##Size, \
+ scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
+ MoveHeapObject(kScratchRegister, \
+ isolate()->factory()->fixed_##type##_array_map()); \
+ movp(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
+ kScratchRegister); \
+ movp(scratch3, Immediate(1)); \
+ Integer32ToSmi(scratch2, scratch3); \
+ movp(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
+ scratch2); \
+ /* Assign FixedTypedArray object to SIMD128 object. */ \
+ movp(FieldOperand(result, Type::kValueOffset), scratch1); \
+}
+
+SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+
+
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
// ---------------------------------------------------------------------------
+ // SIMD macros.
+ void absps(XMMRegister dst);
+ void abspd(XMMRegister dst);
+ void negateps(XMMRegister dst);
+ void negatepd(XMMRegister dst);
+ void notps(XMMRegister dst);
+ void pnegd(XMMRegister dst);
+
+
+ // ---------------------------------------------------------------------------
// String macros.
// Generate code to do a lookup in the number string cache. If the number in
Label* gc_required,
MutableMode mode = IMMUTABLE);
+
+ // Allocate a float32x4, float64x2 and int32x4 object in new space with
+ // undefined value.
+ // Returns tagged pointer in result register, or jumps to gc_required if new
+ // space is full.
+ void AllocateFloat32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ void AllocateFloat64x2(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ void AllocateInt32x4(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
__ psrlq(xmm0, xmm1);
__ por(xmm0, xmm1);
}
+ {
+ __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
+ __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
+
+ __ addsd(xmm1, xmm0);
+ __ mulsd(xmm1, xmm0);
+ __ subsd(xmm1, xmm0);
+ __ divsd(xmm1, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ __ cmpltsd(xmm0, xmm1);
+
+ __ andpd(xmm0, xmm1);
+ __ psllq(xmm0, 17);
+ __ psllq(xmm0, xmm1);
+ __ psrlq(xmm0, 17);
+ __ psrlq(xmm0, xmm1);
+ __ por(xmm0, xmm1);
+
+    // new instructions introduced by SIMD
+ __ cvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtdq2ps(xmm1, xmm0);
+ __ cvtps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtps2dq(xmm1, xmm0);
+ __ paddd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ paddd(xmm1, xmm0);
+ __ psubd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ psubd(xmm1, xmm0);
+ __ pmuludq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ pmuludq(xmm1, xmm0);
+ __ punpackldq(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ punpackldq(xmm1, xmm0);
+ {
+ __ shufps(xmm1, xmm1, 0x0);
+ __ movups(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movups(Operand(ebx, ecx, times_4, 10000), xmm1);
+
+ __ andps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ andps(xmm1, xmm0);
+ __ xorps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ xorps(xmm1, xmm0);
+ __ orps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ orps(xmm1, xmm0);
+
+ __ addps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ addps(xmm1, xmm0);
+ __ subps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ subps(xmm1, xmm0);
+ __ mulps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ mulps(xmm1, xmm0);
+ __ divps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ divps(xmm1, xmm0);
+ __ minps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ minps(xmm1, xmm0);
+ __ maxps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ maxps(xmm1, xmm0);
+ __ rcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rcpps(xmm1, xmm0);
+ __ rsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rsqrtps(xmm1, xmm0);
+ __ sqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ sqrtps(xmm1, xmm0);
+
+ __ cmpeqps(xmm1, xmm0);
+ __ cmpltps(xmm1, xmm0);
+ __ cmpleps(xmm1, xmm0);
+ __ cmpneqps(xmm1, xmm0);
+ __ cmpnltps(xmm1, xmm0);
+ __ cmpnleps(xmm1, xmm0);
+ }
+ }
// cmov.
{
__ pextrd(eax, xmm0, 1);
__ pinsrd(xmm1, eax, 0);
__ extractps(eax, xmm1, 0);
+ __ insertps(xmm1, xmm0, 0);
+ __ pmulld(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ pmulld(xmm1, xmm0);
}
}
v8::HandleScope sc(CcTest::isolate());
+ heap->new_space()->Grow();
+
// Allocate a big fixed array in the new space.
int array_length =
(Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
shell += ".exe"
output = commands.Execute(
context.command_prefix +
- [shell, "--allow-natives-syntax", "-e",
+ [shell, "--allow-natives-syntax", "--simd-object", "-e",
"try { var natives = %ListNatives();"
" for (var n in natives) { print(natives[n]); }"
"} catch(e) {}"] +
for line in output.stdout.strip().split():
try:
(name, argc) = line.split(",")
- flags = ["--allow-natives-syntax",
+ flags = ["--allow-natives-syntax", "--simd-object",
"-e", "var NAME = '%s', ARGC = %s;" % (name, argc)]
test = testcase.TestCase(self, name, flags)
tests.append(test)
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateFloat32x4();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateFloat64x2();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+%AllocateInt32x4();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _lo = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _hi = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Float32x4Clamp(_self, _lo, _hi);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Float32x4GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.int32x4(0, 0, 0, 0);
+var _tv = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+var _fv = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+%Float32x4Select(_self, _tv, _fv);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float64x2(0.0, 0.0);
+var _lo = SIMD.float64x2(0.0, 0.0);
+var _hi = SIMD.float64x2(0.0, 0.0);
+%Float64x2Clamp(_self, _lo, _hi);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.float64x2(0.0, 0.0);
+%Float64x2GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.int32x4(0, 0, 0, 0);
+%Int32x4GetSignMask(_self);
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --simd-object
+var _self = SIMD.int32x4(0, 0, 0, 0);
+var _tv = SIMD.int32x4(0, 0, 0, 0);
+var _fv = SIMD.int32x4(0, 0, 0, 0);
+%Int32x4Select(_self, _tv, _fv);
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax --use-escape-analysis
+
+function testArgumentsObjectwithFloat32x4Field() {
+ "use strict";
+ var forceDeopt = { deopt:false };
+ function inner(a,b,c,d,e,f,g,h,i,j,k) {
+ var args = arguments;
+ forceDeopt.deopt;
+ assertSame(11, args.length);
+ assertSame(a, args[0]);
+ assertSame(b, args[1]);
+ assertSame(c, args[2]);
+ assertSame(d, args[3]);
+ assertSame(e, args[4]);
+ assertSame(f, args[5]);
+ assertSame(g, args[6]);
+ assertSame(h, args[7]);
+ assertSame(i, args[8]);
+ assertSame(j, args[9]);
+ assertEquals(1, args[10].x);
+ assertEquals(2, args[10].y);
+ assertEquals(3, args[10].z);
+ assertEquals(4, args[10].w);
+ }
+
+ var a = 0.5;
+ var b = 1.7;
+ var c = 123;
+ function outer() {
+ inner(
+ a - 0.3, // double in double register
+ b + 2.3, // integer in double register
+ c + 321, // integer in general register
+ c - 456, // integer in stack slot
+ a + 0.1, a + 0.2, a + 0.3, a + 0.4, a + 0.5,
+ a + 0.6, // double in stack slot
+ SIMD.float32x4(1, 2, 3, 4)
+ );
+ }
+
+ outer();
+ outer();
+ %OptimizeFunctionOnNextCall(outer);
+ outer();
+ delete forceDeopt.deopt;
+ outer();
+}
+
+testArgumentsObjectwithFloat32x4Field();
+
+function testArgumentsObjectwithInt32x4Field() {
+ "use strict";
+ var forceDeopt = { deopt:false };
+ function inner(a,b,c,d,e,f,g,h,i,j,k) {
+ var args = arguments;
+ forceDeopt.deopt;
+ assertSame(11, args.length);
+ assertSame(a, args[0]);
+ assertSame(b, args[1]);
+ assertSame(c, args[2]);
+ assertSame(d, args[3]);
+ assertSame(e, args[4]);
+ assertSame(f, args[5]);
+ assertSame(g, args[6]);
+ assertSame(h, args[7]);
+ assertSame(i, args[8]);
+ assertSame(j, args[9]);
+ assertEquals(1, args[10].x);
+ assertEquals(2, args[10].y);
+ assertEquals(3, args[10].z);
+ assertEquals(4, args[10].w);
+ }
+
+ var a = 0.5;
+ var b = 1.7;
+ var c = 123;
+ function outer() {
+ inner(
+ a - 0.3, // double in double register
+ b + 2.3, // integer in double register
+ c + 321, // integer in general register
+ c - 456, // integer in stack slot
+ a + 0.1, a + 0.2, a + 0.3, a + 0.4, a + 0.5,
+ a + 0.6, // double in stack slot
+ SIMD.int32x4(1, 2, 3, 4)
+ );
+ }
+
+ outer();
+ outer();
+ %OptimizeFunctionOnNextCall(outer);
+ outer();
+ delete forceDeopt.deopt;
+ outer();
+}
+
+testArgumentsObjectwithInt32x4Field();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testArithmeticOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c;
+
+ c = a + b;
+ assertEquals('float32x4(0,0,0,0)float32x4(0,0,0,0)', c);
+ c = a++;
+ assertEquals(NaN, c);
+ c = a - b;
+ assertEquals(NaN, c);
+ c = a--;
+ assertEquals(NaN, c);
+ c = a * b;
+ assertEquals(NaN, c);
+ c = a / b;
+ assertEquals(NaN, c);
+ c = a % b;
+ assertEquals(NaN, c);
+}
+
+testArithmeticOperators();
+testArithmeticOperators();
+%OptimizeFunctionOnNextCall(testArithmeticOperators);
+testArithmeticOperators();
+
+
+function testBitwiseOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c;
+ c = a | b;
+ assertEquals(0, c);
+ c = a & b;
+ assertEquals(0, c);
+ c = a ^ b;
+ assertEquals(0, c);
+ c = ~a;
+ assertEquals(-1, c);
+ c = a << 0;
+ assertEquals(0, c);
+ c = a >> 0;
+ assertEquals(0, c);
+ c = a >>> 0;
+ assertEquals(0, c);
+}
+
+testBitwiseOperators();
+testBitwiseOperators();
+%OptimizeFunctionOnNextCall(testBitwiseOperators);
+testBitwiseOperators();
+
+
+function testAssignmentOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.zero();
+ var c = a;
+ c += b;
+ assertEquals('float32x4(0,0,0,0)float32x4(0,0,0,0)', c);
+ c -= b;
+ assertEquals(NaN, c);
+ c *= b;
+ assertEquals(NaN, c);
+ c /= b;
+ assertEquals(NaN, c);
+ c %= b;
+ assertEquals(NaN, c);
+
+ c &= b;
+ assertEquals(0, c);
+ c |= b;
+ assertEquals(0, c);
+ c ^= b;
+ assertEquals(0, c);
+ c <<= b;
+ assertEquals(0, c);
+ c >>= b;
+ assertEquals(0, c);
+ c >>>= b;
+ assertEquals(0, c);
+}
+
+testAssignmentOperators();
+testAssignmentOperators();
+%OptimizeFunctionOnNextCall(testAssignmentOperators);
+testAssignmentOperators();
+
+
+function testStringOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = "0";
+ var c = a;
+ c += b;
+ assertEquals("float32x4(0,0,0,0)0", c);
+ c = b + a;
+ assertEquals("0float32x4(0,0,0,0)", c);
+}
+
+testStringOperators();
+testStringOperators();
+%OptimizeFunctionOnNextCall(testStringOperators);
+testStringOperators();
+
+
+function testComparisonOperators() {
+  var a = SIMD.float32x4.zero();
+  var b = SIMD.float32x4.zero();
+  assertEquals(false, a == b);
+  assertEquals(true, a != b);
+  assertEquals(false, a === b);
+  assertEquals(true, a !== b);
+  assertEquals(false, a > b);
+  assertEquals(true, a >= b);
+  assertEquals(false, a < b);
+  assertEquals(true, a <= b);
+}
+
+testComparisonOperators();
+testComparisonOperators();
+// TODO(ningxin): optimized code will get opposite result.
+//%OptimizeFunctionOnNextCall(testComparisonOperators);
+testComparisonOperators();
+
+
+function testLogicalOperators() {
+ var a = SIMD.float32x4.zero();
+ var b = SIMD.float32x4.splat(1);
+ assertEquals(1, (a && b).x);
+ assertEquals(1, (a && b).y);
+ assertEquals(1, (a && b).z);
+ assertEquals(1, (a && b).w);
+ assertEquals(0, (a || b).x);
+ assertEquals(0, (a || b).y);
+ assertEquals(0, (a || b).z);
+ assertEquals(0, (a || b).w);
+ assertEquals(false, !a);
+}
+
+testLogicalOperators();
+testLogicalOperators();
+%OptimizeFunctionOnNextCall(testLogicalOperators);
+testLogicalOperators();
+
+
+function testConditionalOperators() {
+ var a = SIMD.int32x4.zero();
+ var c = a ? 1 : 0;
+ assertEquals(1, c);
+}
+
+testConditionalOperators();
+testConditionalOperators();
+%OptimizeFunctionOnNextCall(testConditionalOperators);
+testConditionalOperators();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testArithmeticOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c;
+
+ c = a + b;
+ assertEquals('float64x2(0,0)float64x2(0,0)', c);
+ c = a++;
+ assertEquals(NaN, c);
+ c = a - b;
+ assertEquals(NaN, c);
+ c = a--;
+ assertEquals(NaN, c);
+ c = a * b;
+ assertEquals(NaN, c);
+ c = a / b;
+ assertEquals(NaN, c);
+ c = a % b;
+ assertEquals(NaN, c);
+}
+
+testArithmeticOperators();
+testArithmeticOperators();
+%OptimizeFunctionOnNextCall(testArithmeticOperators);
+testArithmeticOperators();
+
+
+function testBitwiseOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c;
+ c = a | b;
+ assertEquals(0, c);
+ c = a & b;
+ assertEquals(0, c);
+ c = a ^ b;
+ assertEquals(0, c);
+ c = ~a;
+ assertEquals(-1, c);
+ c = a << 0;
+ assertEquals(0, c);
+ c = a >> 0;
+ assertEquals(0, c);
+ c = a >>> 0;
+ assertEquals(0, c);
+}
+
+testBitwiseOperators();
+testBitwiseOperators();
+%OptimizeFunctionOnNextCall(testBitwiseOperators);
+testBitwiseOperators();
+
+
+function testAssignmentOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.zero();
+ var c = a;
+ c += b;
+ assertEquals('float64x2(0,0)float64x2(0,0)', c);
+ c -= b;
+ assertEquals(NaN, c);
+ c *= b;
+ assertEquals(NaN, c);
+ c /= b;
+ assertEquals(NaN, c);
+ c %= b;
+ assertEquals(NaN, c);
+
+ c &= b;
+ assertEquals(0, c);
+ c |= b;
+ assertEquals(0, c);
+ c ^= b;
+ assertEquals(0, c);
+ c <<= b;
+ assertEquals(0, c);
+ c >>= b;
+ assertEquals(0, c);
+ c >>>= b;
+ assertEquals(0, c);
+}
+
+testAssignmentOperators();
+testAssignmentOperators();
+%OptimizeFunctionOnNextCall(testAssignmentOperators);
+testAssignmentOperators();
+
+
+function testStringOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = "0";
+ var c = a;
+ c += b;
+ assertEquals("float64x2(0,0)0", c);
+ c = b + a;
+ assertEquals("0float64x2(0,0)", c);
+}
+
+testStringOperators();
+testStringOperators();
+%OptimizeFunctionOnNextCall(testStringOperators);
+testStringOperators();
+
+
+function testComparisonOperators() {
+  var a = SIMD.float64x2.zero();
+  var b = SIMD.float64x2.zero();
+  assertEquals(false, a == b);
+  assertEquals(true, a != b);
+  assertEquals(false, a === b);
+  assertEquals(true, a !== b);
+  assertEquals(false, a > b);
+  assertEquals(true, a >= b);
+  assertEquals(false, a < b);
+  assertEquals(true, a <= b);
+}
+
+testComparisonOperators();
+testComparisonOperators();
+// TODO(ningxin): optimized code will get opposite result.
+//%OptimizeFunctionOnNextCall(testComparisonOperators);
+testComparisonOperators();
+
+
+function testLogicalOperators() {
+ var a = SIMD.float64x2.zero();
+ var b = SIMD.float64x2.splat(1);
+ assertEquals(1, (a && b).x);
+ assertEquals(1, (a && b).y);
+ assertEquals(0, (a || b).x);
+ assertEquals(0, (a || b).y);
+ assertEquals(false, !a);
+}
+
+testLogicalOperators();
+testLogicalOperators();
+%OptimizeFunctionOnNextCall(testLogicalOperators);
+testLogicalOperators();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax --use-escape-analysis
+
+function testCapturedObjectwithFloat32x4Field() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = 1.1;
+ this.y = SIMD.float32x4(1,2,3,4);
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt;
+ assertEquals(x, o.x);
+ assertEquals(o.y.x, 1);
+ assertEquals(o.y.y, 2);
+ assertEquals(o.y.z, 3);
+ assertEquals(o.y.w, 4);
+ }
+ field(1); field(2);
+ // TODO(ningxin): fails in x64 test.
+ //%OptimizeFunctionOnNextCall(field);
+ field(3); field(4);
+ delete deopt.deopt;
+ field(5); field(6);
+}
+
+testCapturedObjectwithFloat32x4Field();
+
+function testCapturedObjectwithInt32x4Field() {
+ var deopt = { deopt:false };
+ function constructor() {
+ this.x = 1.1;
+ this.y = SIMD.int32x4(1,2,3,4);
+ }
+ function field(x) {
+ var o = new constructor();
+ o.x = x;
+ deopt.deopt;
+ assertEquals(x, o.x);
+ assertEquals(o.y.x, 1);
+ assertEquals(o.y.y, 2);
+ assertEquals(o.y.z, 3);
+ assertEquals(o.y.w, 4);
+ }
+ field(1); field(2);
+ // TODO(ningxin): fix the failures.
+ //%OptimizeFunctionOnNextCall(field);
+ field(3); field(4);
+ delete deopt.deopt;
+ field(5); field(6);
+}
+
+testCapturedObjectwithInt32x4Field();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testObject() {
+ var a = SIMD.float32x4.zero();
+ var b = Object(a);
+ assertEquals(0, b.x);
+ assertEquals(0, b.y);
+ assertEquals(0, b.z);
+ assertEquals(0, b.w);
+ assertEquals(typeof(b), "object");
+ assertEquals(typeof(b.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(b), "[object Object]");
+}
+
+testObject();
+testObject();
+%OptimizeFunctionOnNextCall(testObject);
+testObject();
+
+
+function testNumber() {
+ var a = SIMD.float32x4.zero();
+ var b = Number(a);
+ assertEquals(NaN, b);
+}
+
+testNumber();
+testNumber();
+%OptimizeFunctionOnNextCall(testNumber);
+testNumber();
+
+
+function testString() {
+ var a = SIMD.float32x4.zero();
+ var b = String(a);
+ assertEquals("float32x4(0,0,0,0)", b);
+}
+
+testString();
+testString();
+%OptimizeFunctionOnNextCall(testString);
+testString();
+
+
+function testBoolean() {
+ var a = SIMD.float32x4.zero();
+ var b = Boolean(a);
+ assertEquals(true, b);
+}
+
+testBoolean();
+testBoolean();
+%OptimizeFunctionOnNextCall(testBoolean);
+testBoolean();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testdeopt(a, b) {
+ var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ if (a > 0) {
+ a = 0;
+ } else {
+ a += b; //deopt
+ }
+
+ assertEquals(1.0, b4.x);
+ assertEquals(2.0, b4.y);
+ assertEquals(3.0, b4.z);
+ assertEquals(4.0, b4.w);
+}
+
+testdeopt(1, 1);
+testdeopt(1, 1);
+%OptimizeFunctionOnNextCall(testdeopt);
+testdeopt(0, 1);
+
+function testdeopt2() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+
+ var new_a4 = new SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var new_b4 = SIMD.float32x4.abs(new_a4);
+
+ assertEquals(1.0, new_b4.x);
+ assertEquals(1.0, new_b4.y);
+ assertEquals(1.0, new_b4.z);
+ assertEquals(1.0, new_b4.w);
+
+ // Verifying deoptimization
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testdeopt2();
+testdeopt2();
+%OptimizeFunctionOnNextCall(testdeopt2);
+testdeopt2();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var f4 = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ assertEquals(1.0, f4.x);
+ assertEquals(2.0, f4.y);
+ assertEquals(3.0, f4.z);
+ assertEquals(4.0, f4.w);
+
+ f4 = SIMD.float32x4(1.1, 2.2, 3.3, 4.4);
+ assertEquals(1.100000023841858, f4.x);
+ assertEquals(2.200000047683716, f4.y);
+ assertEquals(3.299999952316284, f4.z);
+ assertEquals(4.400000095367432, f4.w);
+}
+
+testConstructor();
+testConstructor();
+%OptimizeFunctionOnNextCall(testConstructor);
+testConstructor();
+
+function test1ArgumentConstructor() {
+ var f4 = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var f4_new = SIMD.float32x4(f4);
+ assertEquals(f4_new.x, f4.x);
+ assertEquals(f4_new.y, f4.y);
+ assertEquals(f4_new.z, f4.z);
+ assertEquals(f4_new.w, f4.w);
+
+ f4 = SIMD.float32x4(1.1, 2.2, 3.3, 4.4);
+ f4_new = SIMD.float32x4(f4);
+ assertEquals(f4_new.x, f4.x);
+ assertEquals(f4_new.y, f4.y);
+ assertEquals(f4_new.z, f4.z);
+ assertEquals(f4_new.w, f4.w);
+}
+
+test1ArgumentConstructor();
+test1ArgumentConstructor();
+%OptimizeFunctionOnNextCall(test1ArgumentConstructor);
+test1ArgumentConstructor();
+
+function testZeroConstructor() {
+ var z4 = SIMD.float32x4.zero();
+ assertEquals(0.0, z4.x);
+ assertEquals(0.0, z4.y);
+ assertEquals(0.0, z4.z);
+ assertEquals(0.0, z4.w);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testSplatConstructor() {
+ var z4 = SIMD.float32x4.splat(5.0);
+ assertEquals(5.0, z4.x);
+ assertEquals(5.0, z4.y);
+ assertEquals(5.0, z4.z);
+ assertEquals(5.0, z4.w);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var z4 = SIMD.float32x4.zero();
+ assertEquals(typeof(z4), "object");
+
+ var new_z4 = new SIMD.float32x4(0, 0, 0, 0);
+ assertEquals(typeof(new_z4), "object");
+ assertEquals(typeof(new_z4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_z4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.float32x4(-1.0, -2.0, -3.0, -4.0);
+ assertEquals(0xf, a.signMask);
+ var b = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.float32x4(1.0, -2.0, -3.0, 4.0);
+ assertEquals(0x6, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+function testSIMDAbs() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testSIMDAbs();
+testSIMDAbs();
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+testSIMDAbs();
+
+function testSIMDNeg() {
+ var a4 = SIMD.float32x4(1.0, -1.0, 1.0, -1.0);
+ var b4 = SIMD.float32x4.neg(a4);
+
+ assertEquals(-1.0, b4.x);
+ assertEquals(1.0, b4.y);
+ assertEquals(-1.0, b4.z);
+ assertEquals(1.0, b4.w);
+}
+
+testSIMDNeg();
+testSIMDNeg();
+%OptimizeFunctionOnNextCall(testSIMDNeg);
+testSIMDNeg();
+
+function testSIMDAdd() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.add(a4, b4);
+
+ assertEquals(3.0, c4.x);
+ assertEquals(3.0, c4.y);
+ assertEquals(3.0, c4.z);
+ assertEquals(3.0, c4.w);
+}
+
+testSIMDAdd();
+testSIMDAdd();
+%OptimizeFunctionOnNextCall(testSIMDAdd);
+testSIMDAdd();
+
+function testSIMDSub() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.sub(a4, b4);
+
+ assertEquals(-1.0, c4.x);
+ assertEquals(-1.0, c4.y);
+ assertEquals(-1.0, c4.z);
+ assertEquals(-1.0, c4.w);
+}
+
+testSIMDSub();
+testSIMDSub();
+%OptimizeFunctionOnNextCall(testSIMDSub);
+testSIMDSub();
+
+function testSIMDMul() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.mul(a4, b4);
+
+ assertEquals(2.0, c4.x);
+ assertEquals(2.0, c4.y);
+ assertEquals(2.0, c4.z);
+ assertEquals(2.0, c4.w);
+}
+
+testSIMDMul();
+testSIMDMul();
+%OptimizeFunctionOnNextCall(testSIMDMul);
+testSIMDMul();
+
+function testSIMDDiv() {
+ var a4 = SIMD.float32x4(1.0, 1.0, 1.0, 1.0);
+ var b4 = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ var c4 = SIMD.float32x4.div(a4, b4);
+
+ assertEquals(0.5, c4.x);
+ assertEquals(0.5, c4.y);
+ assertEquals(0.5, c4.z);
+ assertEquals(0.5, c4.w);
+}
+
+testSIMDDiv();
+testSIMDDiv();
+%OptimizeFunctionOnNextCall(testSIMDDiv);
+testSIMDDiv();
+
+function testSIMDClamp() {
+ var m = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ var lo = SIMD.float32x4(0.0, 0.0, 0.0, 0.0);
+ var hi = SIMD.float32x4(2.0, 2.0, 2.0, 2.0);
+ m = SIMD.float32x4.clamp(m, lo, hi);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+ assertEquals(2.0, m.z);
+ assertEquals(0.0, m.w);
+}
+
+testSIMDClamp();
+testSIMDClamp();
+%OptimizeFunctionOnNextCall(testSIMDClamp);
+testSIMDClamp();
+
+function testSIMDMin() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(1.0, 0.0, 2.5, 5.0);
+ m = SIMD.float32x4.min(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+ assertEquals(2.5, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDMin();
+testSIMDMin();
+%OptimizeFunctionOnNextCall(testSIMDMin);
+testSIMDMin();
+
+function testSIMDMax() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(1.0, 0.0, 2.5, 5.0);
+ m = SIMD.float32x4.max(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(5.0, m.w);
+}
+
+testSIMDMax();
+testSIMDMax();
+%OptimizeFunctionOnNextCall(testSIMDMax);
+testSIMDMax();
+
+function testSIMDReciprocal() {
+ var m = SIMD.float32x4(1.0, 4.0, 9.0, 16.0);
+ m = SIMD.float32x4.reciprocal(m);
+ assertTrue(Math.abs(1.0 - m.x) <= 0.001);
+ assertTrue(Math.abs(0.25 - m.y) <= 0.001);
+ assertTrue(Math.abs(0.1111111 - m.z) <= 0.001);
+ assertTrue(Math.abs(0.0625 - m.w) <= 0.001);
+}
+
+testSIMDReciprocal();
+testSIMDReciprocal();
+%OptimizeFunctionOnNextCall(testSIMDReciprocal);
+testSIMDReciprocal();
+
+function testSIMDReciprocalSqrt() {
+ var m = SIMD.float32x4(1.0, 0.25, 0.111111, 0.0625);
+ m = SIMD.float32x4.reciprocalSqrt(m);
+ assertTrue(Math.abs(1.0 - m.x) <= 0.001);
+ assertTrue(Math.abs(2.0 - m.y) <= 0.001);
+ assertTrue(Math.abs(3.0 - m.z) <= 0.001);
+ assertTrue(Math.abs(4.0 - m.w) <= 0.001);
+}
+
+testSIMDReciprocalSqrt();
+testSIMDReciprocalSqrt();
+%OptimizeFunctionOnNextCall(testSIMDReciprocalSqrt);
+testSIMDReciprocalSqrt();
+
+function testSIMDScale() {
+ var m = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+ m = SIMD.float32x4.scale(m, 20.0);
+ assertEquals(20.0, m.x);
+ assertEquals(-40.0, m.y);
+ assertEquals(60.0, m.z);
+ assertEquals(-80.0, m.w);
+}
+
+testSIMDScale();
+testSIMDScale();
+%OptimizeFunctionOnNextCall(testSIMDScale);
+testSIMDScale();
+
+function testSIMDSqrt() {
+ var m = SIMD.float32x4(1.0, 4.0, 9.0, 16.0);
+ m = SIMD.float32x4.sqrt(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDSqrt();
+testSIMDSqrt();
+%OptimizeFunctionOnNextCall(testSIMDSqrt);
+testSIMDSqrt();
+
+function testSIMDShuffle() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var xxxx = SIMD.float32x4.shuffle(m, SIMD.XXXX);
+ assertEquals(1.0, xxxx.x);
+ assertEquals(1.0, xxxx.y);
+ assertEquals(1.0, xxxx.z);
+ assertEquals(1.0, xxxx.w);
+ var yyyy = SIMD.float32x4.shuffle(m, SIMD.YYYY);
+ assertEquals(2.0, yyyy.x);
+ assertEquals(2.0, yyyy.y);
+ assertEquals(2.0, yyyy.z);
+ assertEquals(2.0, yyyy.w);
+ var zzzz = SIMD.float32x4.shuffle(m, SIMD.ZZZZ);
+ assertEquals(3.0, zzzz.x);
+ assertEquals(3.0, zzzz.y);
+ assertEquals(3.0, zzzz.z);
+ assertEquals(3.0, zzzz.w);
+ var wwww = SIMD.float32x4.shuffle(m, SIMD.WWWW);
+ assertEquals(4.0, wwww.x);
+ assertEquals(4.0, wwww.y);
+ assertEquals(4.0, wwww.z);
+ assertEquals(4.0, wwww.w);
+ var wzyx = SIMD.float32x4.shuffle(m, SIMD.WZYX);
+ assertEquals(4.0, wzyx.x);
+ assertEquals(3.0, wzyx.y);
+ assertEquals(2.0, wzyx.z);
+ assertEquals(1.0, wzyx.w);
+ var wwzz = SIMD.float32x4.shuffle(m, SIMD.WWZZ);
+ assertEquals(4.0, wwzz.x);
+ assertEquals(4.0, wwzz.y);
+ assertEquals(3.0, wwzz.z);
+ assertEquals(3.0, wwzz.w);
+ var xxyy = SIMD.float32x4.shuffle(m, SIMD.XXYY);
+ assertEquals(1.0, xxyy.x);
+ assertEquals(1.0, xxyy.y);
+ assertEquals(2.0, xxyy.z);
+ assertEquals(2.0, xxyy.w);
+ var yyww = SIMD.float32x4.shuffle(m, SIMD.YYWW);
+ assertEquals(2.0, yyww.x);
+ assertEquals(2.0, yyww.y);
+ assertEquals(4.0, yyww.z);
+ assertEquals(4.0, yyww.w);
+}
+
+testSIMDShuffle();
+testSIMDShuffle();
+%OptimizeFunctionOnNextCall(testSIMDShuffle);
+testSIMDShuffle();
+
+function testSIMDShuffleMix() {
+ var a = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var b = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ var xxxx = SIMD.float32x4.shuffleMix(a, b, SIMD.XXXX);
+ assertEquals(1.0, xxxx.x);
+ assertEquals(1.0, xxxx.y);
+ assertEquals(5.0, xxxx.z);
+ assertEquals(5.0, xxxx.w);
+ var yyyy = SIMD.float32x4.shuffleMix(a, b, SIMD.YYYY);
+ assertEquals(2.0, yyyy.x);
+ assertEquals(2.0, yyyy.y);
+ assertEquals(6.0, yyyy.z);
+ assertEquals(6.0, yyyy.w);
+ var zzzz = SIMD.float32x4.shuffleMix(a, b, SIMD.ZZZZ);
+ assertEquals(3.0, zzzz.x);
+ assertEquals(3.0, zzzz.y);
+ assertEquals(7.0, zzzz.z);
+ assertEquals(7.0, zzzz.w);
+ var wwww = SIMD.float32x4.shuffleMix(a, b, SIMD.WWWW);
+ assertEquals(4.0, wwww.x);
+ assertEquals(4.0, wwww.y);
+ assertEquals(8.0, wwww.z);
+ assertEquals(8.0, wwww.w);
+ var wzyx = SIMD.float32x4.shuffleMix(a, b, SIMD.WZYX);
+ assertEquals(4.0, wzyx.x);
+ assertEquals(3.0, wzyx.y);
+ assertEquals(6.0, wzyx.z);
+ assertEquals(5.0, wzyx.w);
+ var wwzz = SIMD.float32x4.shuffleMix(a, b, SIMD.WWZZ);
+ assertEquals(4.0, wwzz.x);
+ assertEquals(4.0, wwzz.y);
+ assertEquals(7.0, wwzz.z);
+ assertEquals(7.0, wwzz.w);
+ var xxyy = SIMD.float32x4.shuffleMix(a, b, SIMD.XXYY);
+ assertEquals(1.0, xxyy.x);
+ assertEquals(1.0, xxyy.y);
+ assertEquals(6.0, xxyy.z);
+ assertEquals(6.0, xxyy.w);
+ var yyww = SIMD.float32x4.shuffleMix(a, b, SIMD.YYWW);
+ assertEquals(2.0, yyww.x);
+ assertEquals(2.0, yyww.y);
+ assertEquals(8.0, yyww.z);
+ assertEquals(8.0, yyww.w);
+}
+
+testSIMDShuffleMix();
+testSIMDShuffleMix();
+%OptimizeFunctionOnNextCall(testSIMDShuffleMix);
+testSIMDShuffleMix();
+
+function testSIMDSetters() {
+ var f = SIMD.float32x4.zero();
+ assertEquals(0.0, f.x);
+ assertEquals(0.0, f.y);
+ assertEquals(0.0, f.z);
+ assertEquals(0.0, f.w);
+ f = SIMD.float32x4.withX(f, 4.0);
+ assertEquals(4.0, f.x);
+ f = SIMD.float32x4.withY(f, 3.0);
+ assertEquals(3.0, f.y);
+ f = SIMD.float32x4.withZ(f, 2.0);
+ assertEquals(2.0, f.z);
+ f = SIMD.float32x4.withW(f, 1.0);
+ assertEquals(1.0, f.w);
+ f = SIMD.float32x4.zero();
+}
+
+testSIMDSetters();
+testSIMDSetters();
+%OptimizeFunctionOnNextCall(testSIMDSetters);
+testSIMDSetters();
+
+function testSIMDConversion() {
+ var m = SIMD.int32x4(0x3F800000, 0x40000000, 0x40400000, 0x40800000);
+ var n = SIMD.float32x4.fromInt32x4Bits(m);
+ assertEquals(1.0, n.x);
+ assertEquals(2.0, n.y);
+ assertEquals(3.0, n.z);
+ assertEquals(4.0, n.w);
+ n = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ m = SIMD.int32x4.fromFloat32x4Bits(n);
+ assertEquals(0x40A00000, m.x);
+ assertEquals(0x40C00000, m.y);
+ assertEquals(0x40E00000, m.z);
+ assertEquals(0x41000000, m.w);
+ // Flip sign using bit-wise operators.
+ n = SIMD.float32x4(9.0, 10.0, 11.0, 12.0);
+ m = SIMD.int32x4(0x80000000, 0x80000000, 0x80000000, 0x80000000);
+ var nMask = SIMD.int32x4.fromFloat32x4Bits(n);
+ nMask = SIMD.int32x4.xor(nMask, m); // flip sign.
+ n = SIMD.float32x4.fromInt32x4Bits(nMask);
+ assertEquals(-9.0, n.x);
+ assertEquals(-10.0, n.y);
+ assertEquals(-11.0, n.z);
+ assertEquals(-12.0, n.w);
+ nMask = SIMD.int32x4.fromFloat32x4Bits(n);
+ nMask = SIMD.int32x4.xor(nMask, m); // flip sign.
+ n = SIMD.float32x4.fromInt32x4Bits(nMask);
+ assertEquals(9.0, n.x);
+ assertEquals(10.0, n.y);
+ assertEquals(11.0, n.z);
+ assertEquals(12.0, n.w);
+}
+
+testSIMDConversion();
+testSIMDConversion();
+%OptimizeFunctionOnNextCall(testSIMDConversion);
+testSIMDConversion();
+
+function testSIMDConversion2() {
+ var m = SIMD.int32x4(1, 2, 3, 4);
+ var n = SIMD.float32x4.fromInt32x4(m);
+ assertEquals(1.0, n.x);
+ assertEquals(2.0, n.y);
+ assertEquals(3.0, n.z);
+ assertEquals(4.0, n.w);
+ n = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ m = SIMD.int32x4.fromFloat32x4(n);
+ assertEquals(5, m.x);
+ assertEquals(6, m.y);
+ assertEquals(7, m.z);
+ assertEquals(8, m.w);
+}
+
+testSIMDConversion2();
+testSIMDConversion2();
+%OptimizeFunctionOnNextCall(testSIMDConversion2);
+testSIMDConversion2();
+
+
+function testSIMDComparisons() {
+ var m = SIMD.float32x4(1.0, 2.0, 0.1, 0.001);
+ var n = SIMD.float32x4(2.0, 2.0, 0.001, 0.1);
+ var cmp;
+ cmp = SIMD.float32x4.lessThan(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.lessThanOrEqual(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.equal(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.float32x4.notEqual(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.float32x4.greaterThanOrEqual(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.float32x4.greaterThan(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+}
+
+testSIMDComparisons();
+testSIMDComparisons();
+%OptimizeFunctionOnNextCall(testSIMDComparisons);
+testSIMDComparisons();
+
+function testSIMDAnd() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+  var o = SIMD.float32x4.and(m,n); // and
+ assertEquals(0, o.x);
+ assertEquals(2, o.y);
+ assertEquals(3, o.z);
+ assertEquals(4, o.w);
+}
+
+testSIMDAnd();
+testSIMDAnd();
+%OptimizeFunctionOnNextCall(testSIMDAnd);
+testSIMDAnd();
+
+function testSIMDOr() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+ var o = SIMD.float32x4.or(m,n); // or
+ assertEquals(-Infinity, o.x);
+ assertEquals(2.0, o.y);
+ assertEquals(3.0, o.z);
+ assertEquals(4.0, o.w);
+}
+
+testSIMDOr();
+testSIMDOr();
+%OptimizeFunctionOnNextCall(testSIMDOr);
+testSIMDOr();
+
+function testSIMDXor() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var n = SIMD.float32x4(~1.0, 2.0, 3.0, 4.0);
+ var o = SIMD.float32x4.xor(m,n); // xor
+ assertEquals(-Infinity, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+}
+
+testSIMDXor();
+testSIMDXor();
+%OptimizeFunctionOnNextCall(testSIMDXor);
+testSIMDXor();
+
+function testSIMDNot() {
+ var m = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ m = SIMD.float32x4.not(m);
+ m = SIMD.float32x4.not(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+ assertEquals(3.0, m.z);
+ assertEquals(4.0, m.w);
+}
+
+testSIMDNot();
+testSIMDNot();
+%OptimizeFunctionOnNextCall(testSIMDNot);
+testSIMDNot();
+
+function testSIMDSelect() {
+ var m = SIMD.int32x4.bool(true, true, false, false);
+ var t = SIMD.float32x4(1.0, 2.0, 3.0, 4.0);
+ var f = SIMD.float32x4(5.0, 6.0, 7.0, 8.0);
+ var s = SIMD.float32x4.select(m, t, f);
+ assertEquals(1.0, s.x);
+ assertEquals(2.0, s.y);
+ assertEquals(7.0, s.z);
+ assertEquals(8.0, s.w);
+}
+
+testSIMDSelect();
+testSIMDSelect();
+%OptimizeFunctionOnNextCall(testSIMDSelect);
+testSIMDSelect();
+
+
+function testFloat32x4ArrayBasic() {
+ var a = new Float32x4Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Float32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Float32x4Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Float32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testFloat32x4ArrayBasic();
+
+function testFloat32x4ArrayGetAndSet() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ var b = new Float32x4Array(4);
+ b.setAt(0,SIMD.float32x4(1, 2, 3, 4));
+ b.setAt(1,SIMD.float32x4(5, 6, 7, 8));
+ b.setAt(2,SIMD.float32x4(9, 10, 11, 12));
+ b.setAt(3,SIMD.float32x4(13, 14, 15, 16));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+ assertEquals(b.getAt(0).z, 3);
+ assertEquals(b.getAt(0).w, 4);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+ assertEquals(b.getAt(1).z, 7);
+ assertEquals(b.getAt(1).w, 8);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+ assertEquals(b.getAt(2).z, 11);
+ assertEquals(b.getAt(2).w, 12);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+ assertEquals(b.getAt(3).z, 15);
+ assertEquals(b.getAt(3).w, 16);
+}
+
+testFloat32x4ArrayGetAndSet();
+testFloat32x4ArrayGetAndSet();
+%OptimizeFunctionOnNextCall(testFloat32x4ArrayGetAndSet);
+testFloat32x4ArrayGetAndSet();
+
+function testFloat32x4ArraySwap() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+ assertEquals(a[3].z, 3);
+ assertEquals(a[3].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+ assertEquals(a[0].z, 15);
+ assertEquals(a[0].w, 16);
+}
+
+testFloat32x4ArraySwap();
+
+function testFloat32x4ArrayCopy() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ var b = new Float32x4Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+ assertEquals(a[0].z, b[0].z);
+ assertEquals(a[0].w, b[0].w);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+ assertEquals(a[1].z, b[1].z);
+ assertEquals(a[1].w, b[1].w);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+ assertEquals(a[2].z, b[2].z);
+ assertEquals(a[2].w, b[2].w);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+ assertEquals(a[3].z, b[3].z);
+ assertEquals(a[3].w, b[3].w);
+
+ a[2] = SIMD.float32x4(17, 18, 19, 20);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+ assertEquals(a[2].z, 19);
+ assertEquals(a[2].w, 20);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+ assertTrue(a[2].z != b[2].z);
+ assertTrue(a[2].w != b[2].w);
+}
+
+testFloat32x4ArrayCopy();
+
+function testFloat32x4ArrayViewBasic() {
+ var a = new Float32Array(8);
+ // view with no offset.
+ var b = new Float32x4Array(a.buffer, 0);
+ // view with offset.
+ var c = new Float32x4Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Float32x4Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 2);
+ assertEquals(c.length, 1);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 32);
+ assertEquals(b.byteLength, 32);
+ assertEquals(c.byteLength, 16);
+  assertEquals(d.byteLength, 16);
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testFloat32x4ArrayViewBasic();
+
+function testFloat32x4ArrayViewValues() {
+ var a = new Float32Array(8);
+ var b = new Float32x4Array(a.buffer, 0);
+ var c = new Float32x4Array(a.buffer, 16);
+ var d = new Float32x4Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0.0, b[i].x);
+ assertEquals(0.0, b[i].y);
+ assertEquals(0.0, b[i].z);
+ assertEquals(0.0, b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0.0, c[i].x);
+ assertEquals(0.0, c[i].y);
+ assertEquals(0.0, c[i].z);
+ assertEquals(0.0, c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0.0, d[i].x);
+ assertEquals(0.0, d[i].y);
+ assertEquals(0.0, d[i].z);
+ assertEquals(0.0, d[i].w);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0.0 != b[i].x);
+ assertTrue(0.0 != b[i].y);
+ assertTrue(0.0 != b[i].z);
+ assertTrue(0.0 != b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0.0 != c[i].x);
+ assertTrue(0.0 != c[i].y);
+ assertTrue(0.0 != c[i].z);
+ assertTrue(0.0 != c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0.0 != d[i].x);
+ assertTrue(0.0 != d[i].y);
+ assertTrue(0.0 != d[i].z);
+ assertTrue(0.0 != d[i].w);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[0].z);
+ assertEquals(start+3, b[0].w);
+ assertEquals(start+4, b[1].x);
+ assertEquals(start+5, b[1].y);
+ assertEquals(start+6, b[1].z);
+ assertEquals(start+7, b[1].w);
+
+ assertEquals(start+4, c[0].x);
+ assertEquals(start+5, c[0].y);
+ assertEquals(start+6, c[0].z);
+ assertEquals(start+7, c[0].w);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+ assertEquals(start+2, d[0].z);
+ assertEquals(start+3, d[0].w);
+}
+
+testFloat32x4ArrayViewValues();
+
+function testViewOnFloat32x4Array() {
+ var a = new Float32x4Array(4);
+ a[0] = SIMD.float32x4(1, 2, 3, 4);
+ a[1] = SIMD.float32x4(5, 6, 7, 8);
+ a[2] = SIMD.float32x4(9, 10, 11, 12);
+ a[3] = SIMD.float32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ // Create view on a.
+ var b = new Float32Array(a.buffer);
+ assertEquals(b.length, 16);
+ assertEquals(b.byteLength, 64);
+ b[2] = 99.0;
+ b[6] = 1.0;
+
+ // Observe changes in "a"
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 99);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 1);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+}
+
+testViewOnFloat32x4Array();
+
+function testArrayOfFloat32x4() {
+ var a = [];
+ var a4 = new Float32x4Array(2);
+ for (var i = 0; i < a4.length; i++) {
+ a[i] = SIMD.float32x4(i, i + 1, i + 2, i + 3);
+ a4[i] = SIMD.float32x4(i, i + 1, i + 2, i + 3);
+ }
+
+ for (var i = 0; i < a4.length; i++) {
+ assertEquals(a[i].x, a4[i].x);
+ assertEquals(a[i].y, a4[i].y);
+ assertEquals(a[i].z, a4[i].z);
+ assertEquals(a[i].w, a4[i].w);
+ }
+}
+
+testArrayOfFloat32x4();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var f4 = SIMD.float64x2(1.0, 2.0);
+ assertEquals(1.0, f4.x);
+ assertEquals(2.0, f4.y);
+
+ f4 = SIMD.float64x2(1.1, 2.2);
+ assertEquals(1.1, f4.x);
+ assertEquals(2.2, f4.y);
+}
+
+testConstructor();
+testConstructor();
+%OptimizeFunctionOnNextCall(testConstructor);
+testConstructor();
+
+function test1ArgumentConstructor() {
+ var f2 = SIMD.float64x2(1.0, 2.0);
+ var f2_new = SIMD.float64x2(f2);
+ assertEquals(f2_new.x, f2.x);
+ assertEquals(f2_new.y, f2.y);
+
+ f2 = SIMD.float64x2(1.1, 2.2);
+ f2_new = SIMD.float64x2(f2);
+ assertEquals(f2_new.x, f2.x);
+ assertEquals(f2_new.y, f2.y);
+}
+
+test1ArgumentConstructor();
+test1ArgumentConstructor();
+%OptimizeFunctionOnNextCall(test1ArgumentConstructor);
+test1ArgumentConstructor();
+
+function testZeroConstructor() {
+ var z4 = SIMD.float64x2.zero();
+ assertEquals(0.0, z4.x);
+ assertEquals(0.0, z4.y);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testSplatConstructor() {
+ var z4 = SIMD.float64x2.splat(5.0);
+ assertEquals(5.0, z4.x);
+ assertEquals(5.0, z4.y);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var z4 = SIMD.float64x2.zero();
+ assertEquals(typeof(z4), "object");
+
+ var new_z4 = new SIMD.float64x2(0, 0);
+ assertEquals(typeof(new_z4), "object");
+ assertEquals(typeof(new_z4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_z4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.float64x2(-1.0, -2.0);
+ assertEquals(0x3, a.signMask);
+ var b = SIMD.float64x2(1.0, 2.0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.float64x2(1.0, -2.0);
+ assertEquals(0x2, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+function testSIMDAbs() {
+ var a4 = SIMD.float64x2(1.0, -1.0);
+ var b4 = SIMD.float64x2.abs(a4);
+
+ assertEquals(1.0, b4.x);
+ assertEquals(1.0, b4.y);
+}
+
+testSIMDAbs();
+testSIMDAbs();
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+testSIMDAbs();
+
+function testSIMDNeg() {
+ var a4 = SIMD.float64x2(1.0, -1.0);
+ var b4 = SIMD.float64x2.neg(a4);
+
+ assertEquals(-1.0, b4.x);
+ assertEquals(1.0, b4.y);
+}
+
+testSIMDNeg();
+testSIMDNeg();
+%OptimizeFunctionOnNextCall(testSIMDNeg);
+testSIMDNeg();
+
+function testSIMDAdd() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.add(a4, b4);
+
+ assertEquals(3.0, c4.x);
+ assertEquals(3.0, c4.y);
+}
+
+testSIMDAdd();
+testSIMDAdd();
+%OptimizeFunctionOnNextCall(testSIMDAdd);
+testSIMDAdd();
+
+function testSIMDSub() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.sub(a4, b4);
+
+ assertEquals(-1.0, c4.x);
+ assertEquals(-1.0, c4.y);
+}
+
+testSIMDSub();
+testSIMDSub();
+%OptimizeFunctionOnNextCall(testSIMDSub);
+testSIMDSub();
+
+function testSIMDMul() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.mul(a4, b4);
+
+ assertEquals(2.0, c4.x);
+ assertEquals(2.0, c4.y);
+}
+
+testSIMDMul();
+testSIMDMul();
+%OptimizeFunctionOnNextCall(testSIMDMul);
+testSIMDMul();
+
+function testSIMDDiv() {
+ var a4 = SIMD.float64x2(1.0, 1.0);
+ var b4 = SIMD.float64x2(2.0, 2.0);
+ var c4 = SIMD.float64x2.div(a4, b4);
+
+ assertEquals(0.5, c4.x);
+ assertEquals(0.5, c4.y);
+}
+
+testSIMDDiv();
+testSIMDDiv();
+%OptimizeFunctionOnNextCall(testSIMDDiv);
+testSIMDDiv();
+
+function testSIMDClamp() {
+ var m = SIMD.float64x2(1.0, -2.0);
+ var lo = SIMD.float64x2(0.0, 0.0);
+ var hi = SIMD.float64x2(2.0, 2.0);
+ m = SIMD.float64x2.clamp(m, lo, hi);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+}
+
+testSIMDClamp();
+testSIMDClamp();
+%OptimizeFunctionOnNextCall(testSIMDClamp);
+testSIMDClamp();
+
+function testSIMDMin() {
+ var m = SIMD.float64x2(1.0, 2.0);
+ var n = SIMD.float64x2(1.0, 0.0);
+ m = SIMD.float64x2.min(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(0.0, m.y);
+}
+
+testSIMDMin();
+testSIMDMin();
+%OptimizeFunctionOnNextCall(testSIMDMin);
+testSIMDMin();
+
+function testSIMDMax() {
+ var m = SIMD.float64x2(1.0, 2.0);
+ var n = SIMD.float64x2(1.0, 0.0);
+ m = SIMD.float64x2.max(m, n);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+}
+
+testSIMDMax();
+testSIMDMax();
+%OptimizeFunctionOnNextCall(testSIMDMax);
+testSIMDMax();
+
+function testSIMDScale() {
+ var m = SIMD.float64x2(1.0, -2.0);
+ m = SIMD.float64x2.scale(m, 20.0);
+ assertEquals(20.0, m.x);
+ assertEquals(-40.0, m.y);
+}
+
+testSIMDScale();
+testSIMDScale();
+%OptimizeFunctionOnNextCall(testSIMDScale);
+testSIMDScale();
+
+function testSIMDSqrt() {
+ var m = SIMD.float64x2(1.0, 4.0);
+ m = SIMD.float64x2.sqrt(m);
+ assertEquals(1.0, m.x);
+ assertEquals(2.0, m.y);
+}
+
+testSIMDSqrt();
+testSIMDSqrt();
+%OptimizeFunctionOnNextCall(testSIMDSqrt);
+testSIMDSqrt();
+
+function testSIMDSetters() {
+ var f = SIMD.float64x2.zero();
+ assertEquals(0.0, f.x);
+ assertEquals(0.0, f.y);
+ f = SIMD.float64x2.withX(f, 4.0);
+ assertEquals(4.0, f.x);
+ f = SIMD.float64x2.withY(f, 3.0);
+ assertEquals(3.0, f.y);
+}
+
+testSIMDSetters();
+testSIMDSetters();
+%OptimizeFunctionOnNextCall(testSIMDSetters);
+testSIMDSetters();
+
+function testFloat64x2ArrayBasic() {
+ var a = new Float64x2Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Float64x2Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Float64x2Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Float64x2Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testFloat64x2ArrayBasic();
+
+function testFloat64x2ArrayGetAndSet() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+
+ var b = new Float64x2Array(4);
+ b.setAt(0,SIMD.float64x2(1, 2));
+ b.setAt(1,SIMD.float64x2(5, 6));
+ b.setAt(2,SIMD.float64x2(9, 10));
+ b.setAt(3,SIMD.float64x2(13, 14));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+}
+
+testFloat64x2ArrayGetAndSet();
+
+function testFloat64x2ArraySwap() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+}
+
+testFloat64x2ArraySwap();
+
+function testFloat64x2ArrayCopy() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ var b = new Float64x2Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+
+ a[2] = SIMD.float64x2(17, 18);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+}
+
+testFloat64x2ArrayCopy();
+
+function testFloat64x2ArrayViewBasic() {
+ var a = new Float64Array(8);
+ // view with no offset.
+ var b = new Float64x2Array(a.buffer, 0);
+ // view with offset.
+ var c = new Float64x2Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Float64x2Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 4);
+ assertEquals(c.length, 3);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 64);
+ assertEquals(b.byteLength, 64);
+ assertEquals(c.byteLength, 48);
+  assertEquals(d.byteLength, 16);
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testFloat64x2ArrayViewBasic();
+
+function testFloat64x2ArrayViewValues() {
+ var a = new Float64Array(8);
+ var b = new Float64x2Array(a.buffer, 0);
+ var c = new Float64x2Array(a.buffer, 16);
+ var d = new Float64x2Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0.0, b[i].x);
+ assertEquals(0.0, b[i].y);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0.0, c[i].x);
+ assertEquals(0.0, c[i].y);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0.0, d[i].x);
+ assertEquals(0.0, d[i].y);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0.0 != b[i].x);
+ assertTrue(0.0 != b[i].y);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0.0 != c[i].x);
+ assertTrue(0.0 != c[i].y);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0.0 != d[i].x);
+ assertTrue(0.0 != d[i].y);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[1].x);
+ assertEquals(start+3, b[1].y);
+ assertEquals(start+4, b[2].x);
+ assertEquals(start+5, b[2].y);
+ assertEquals(start+6, b[3].x);
+ assertEquals(start+7, b[3].y);
+
+ assertEquals(start+2, c[0].x);
+ assertEquals(start+3, c[0].y);
+ assertEquals(start+4, c[1].x);
+ assertEquals(start+5, c[1].y);
+ assertEquals(start+6, c[2].x);
+ assertEquals(start+7, c[2].y);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+}
+
+testFloat64x2ArrayViewValues();
+
+function testViewOnFloat64x2Array() {
+ var a = new Float64x2Array(4);
+ a[0] = SIMD.float64x2(1, 2);
+ a[1] = SIMD.float64x2(5, 6);
+ a[2] = SIMD.float64x2(9, 10);
+ a[3] = SIMD.float64x2(13, 14);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+
+ // Create view on a.
+ var b = new Float64Array(a.buffer);
+ assertEquals(b.length, 8);
+ assertEquals(b.byteLength, 64);
+ b[2] = 99.0;
+ b[6] = 1.0;
+
+ // Observe changes in "a"
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+
+ assertEquals(a[1].x, 99.0);
+ assertEquals(a[1].y, 6);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+
+ assertEquals(a[3].x, 1.0);
+ assertEquals(a[3].y, 14);
+}
+
+testViewOnFloat64x2Array();
+
+function testArrayOfFloat64x2() {
+ var a = [];
+ var a4 = new Float64x2Array(2);
+ for (var i = 0; i < a4.length; i++) {
+ a[i] = SIMD.float64x2(i, i + 1);
+ a4[i] = SIMD.float64x2(i, i + 1);
+ }
+
+ for (var i = 0; i < a4.length; i++) {
+ assertEquals(a[i].x, a4[i].x);
+ assertEquals(a[i].y, a4[i].y);
+ }
+}
+
+testArrayOfFloat64x2();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+function testConstructor() {
+ var u4 = SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(1, u4.x);
+ assertEquals(2, u4.y);
+ assertEquals(3, u4.z);
+ assertEquals(4, u4.w);
+}
+
+testConstructor();
+
+function test1ArgumentConstructor() {
+ var u4 = SIMD.int32x4(1, 2, 3, 4);
+ var u4_new = SIMD.int32x4(u4);
+ assertEquals(u4_new.x, u4.x);
+ assertEquals(u4_new.y, u4.y);
+ assertEquals(u4_new.z, u4.z);
+ assertEquals(u4_new.w, u4.w);
+}
+
+test1ArgumentConstructor();
+test1ArgumentConstructor();
+%OptimizeFunctionOnNextCall(test1ArgumentConstructor);
+test1ArgumentConstructor();
+
+function testZeroConstructor() {
+ var u4 = SIMD.int32x4.zero();
+ assertEquals(0, u4.x);
+ assertEquals(0, u4.y);
+ assertEquals(0, u4.z);
+ assertEquals(0, u4.w);
+}
+
+testZeroConstructor();
+testZeroConstructor();
+%OptimizeFunctionOnNextCall(testZeroConstructor);
+testZeroConstructor();
+
+function testBoolConstructor() {
+ var u4 = SIMD.int32x4.bool(true, false, true, false);
+ assertEquals(-1, u4.x);
+ assertEquals(0, u4.y);
+ assertEquals(-1, u4.z);
+ assertEquals(0, u4.w);
+}
+
+testBoolConstructor();
+testBoolConstructor();
+%OptimizeFunctionOnNextCall(testBoolConstructor);
+testBoolConstructor();
+
+function testSplatConstructor() {
+ var u4 = SIMD.int32x4.splat(4);
+ assertEquals(4, u4.x);
+ assertEquals(4, u4.y);
+ assertEquals(4, u4.z);
+ assertEquals(4, u4.w);
+}
+
+testSplatConstructor();
+testSplatConstructor();
+%OptimizeFunctionOnNextCall(testSplatConstructor);
+testSplatConstructor();
+
+function testTypeof() {
+ var u4 = SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(typeof(u4), "object");
+
+ var new_u4 = new SIMD.int32x4(1, 2, 3, 4);
+ assertEquals(typeof(new_u4), "object");
+ assertEquals(typeof(new_u4.valueOf()), "object");
+ assertEquals(Object.prototype.toString.call(new_u4), "[object Object]");
+}
+
+testTypeof();
+
+function testSignMaskGetter() {
+ var a = SIMD.int32x4(0x80000000 - 0xFFFFFFFF - 1, 0x7000000, -1, 0x0);
+ assertEquals(0x5, a.signMask);
+ var b = SIMD.int32x4(0x0, 0x0, 0x0, 0x0);
+ assertEquals(0x0, b.signMask);
+ var c = SIMD.int32x4(-1, -1, -1, -1);
+ assertEquals(0xf, c.signMask);
+}
+
+testSignMaskGetter();
+testSignMaskGetter();
+%OptimizeFunctionOnNextCall(testSignMaskGetter);
+testSignMaskGetter();
+
+
+function testSIMDAnd() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.x);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.y);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.z);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, m.w);
+ assertEquals(0x55555555, n.x);
+ assertEquals(0x55555555, n.y);
+ assertEquals(0x55555555, n.z);
+ assertEquals(0x55555555, n.w);
+ assertEquals(true, n.flagX);
+ assertEquals(true, n.flagY);
+ assertEquals(true, n.flagZ);
+ assertEquals(true, n.flagW);
+ o = SIMD.int32x4.and(m,n); // and
+ assertEquals(0x0, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+ assertEquals(false, o.flagX);
+ assertEquals(false, o.flagY);
+ assertEquals(false, o.flagZ);
+ assertEquals(false, o.flagW);
+}
+
+testSIMDAnd();
+testSIMDAnd();
+%OptimizeFunctionOnNextCall(testSIMDAnd);
+testSIMDAnd();
+
+function testSIMDOr() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ var o = SIMD.int32x4.or(m,n); // or
+ assertEquals(-1, o.x);
+ assertEquals(-1, o.y);
+ assertEquals(-1, o.z);
+ assertEquals(-1, o.w);
+ assertEquals(true, o.flagX);
+ assertEquals(true, o.flagY);
+ assertEquals(true, o.flagZ);
+ assertEquals(true, o.flagW);
+}
+
+testSIMDOr();
+testSIMDOr();
+%OptimizeFunctionOnNextCall(testSIMDOr);
+testSIMDOr();
+
+function testSIMDInt32x4Or() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var o = SIMD.int32x4.xor(m,n); // xor
+ assertEquals(0x0, o.x);
+ assertEquals(0x0, o.y);
+ assertEquals(0x0, o.z);
+ assertEquals(0x0, o.w);
+ assertEquals(false, o.flagX);
+ assertEquals(false, o.flagY);
+ assertEquals(false, o.flagZ);
+ assertEquals(false, o.flagW);
+}
+
+testSIMDInt32x4Or();
+testSIMDInt32x4Or();
+%OptimizeFunctionOnNextCall(testSIMDInt32x4Or);
+testSIMDInt32x4Or();
+
+function testSIMDNot() {
+ var m = SIMD.int32x4(0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1,
+ 0xAAAAAAAA - 0xFFFFFFFF - 1, 0xAAAAAAAA - 0xFFFFFFFF - 1);
+ var n = SIMD.int32x4(0x55555555, 0x55555555, 0x55555555, 0x55555555);
+ m = SIMD.int32x4.not(m);
+ n = SIMD.int32x4.not(n);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.x);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.y);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.z);
+ assertEquals(0xAAAAAAAA - 0xFFFFFFFF - 1, n.w);
+ assertEquals(0x55555555, m.x);
+ assertEquals(0x55555555, m.y);
+ assertEquals(0x55555555, m.z);
+ assertEquals(0x55555555, m.w);
+}
+
+testSIMDNot();
+testSIMDNot();
+%OptimizeFunctionOnNextCall(testSIMDNot);
+testSIMDNot();
+
+function testSIMDNegu32() {
+ var m = SIMD.int32x4(-1, 1, -1, 1);
+ m = SIMD.int32x4.neg(m);
+ assertEquals(1, m.x);
+ assertEquals(-1, m.y);
+ assertEquals(1, m.z);
+ assertEquals(-1, m.w);
+}
+
+testSIMDNegu32();
+testSIMDNegu32();
+%OptimizeFunctionOnNextCall(testSIMDNegu32);
+testSIMDNegu32();
+
+function testSIMDSelect() {
+ var m = SIMD.int32x4.bool(true, true, false, false);
+ var t = SIMD.int32x4(1, 2, 3, 4);
+ var f = SIMD.int32x4(5, 6, 7, 8);
+ var s = SIMD.int32x4.select(m, t, f);
+ assertEquals(1, s.x);
+ assertEquals(2, s.y);
+ assertEquals(7, s.z);
+ assertEquals(8, s.w);
+}
+
+testSIMDSelect();
+testSIMDSelect();
+%OptimizeFunctionOnNextCall(testSIMDSelect);
+testSIMDSelect();
+
+
+function testSIMDWithXu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withX(a, 20);
+ assertEquals(20, c.x);
+ assertEquals(2, c.y);
+ assertEquals(3, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithXu32();
+testSIMDWithXu32();
+%OptimizeFunctionOnNextCall(testSIMDWithXu32);
+testSIMDWithXu32();
+
+function testSIMDWithYu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withY(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(20, c.y);
+ assertEquals(3, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithYu32();
+testSIMDWithYu32();
+%OptimizeFunctionOnNextCall(testSIMDWithYu32);
+testSIMDWithYu32();
+
+function testSIMDWithZu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withZ(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(2, c.y);
+ assertEquals(20, c.z);
+ assertEquals(4, c.w);
+}
+
+testSIMDWithZu32();
+testSIMDWithZu32();
+%OptimizeFunctionOnNextCall(testSIMDWithZu32);
+testSIMDWithZu32();
+
+function testSIMDWithWu32() {
+ var a = SIMD.int32x4(1, 2, 3, 4);
+ var c = SIMD.int32x4.withW(a, 20);
+ assertEquals(1, c.x);
+ assertEquals(2, c.y);
+ assertEquals(3, c.z);
+ assertEquals(20, c.w);
+}
+
+testSIMDWithWu32();
+testSIMDWithWu32();
+%OptimizeFunctionOnNextCall(testSIMDWithWu32);
+testSIMDWithWu32();
+
+function testSIMDWithFlagX() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+
+ // boolean
+ var c = SIMD.int32x4.withFlagX(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagX(a, false);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // smi
+ c = SIMD.int32x4.withFlagX(a, 2);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, 0);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // string
+ c = SIMD.int32x4.withFlagX(a, 'true');
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, '');
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // heap number
+ c = SIMD.int32x4.withFlagX(a, 3.14);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+ c = SIMD.int32x4.withFlagX(a, 0.0);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ // JS Array
+ var array = [1];
+ c = SIMD.int32x4.withFlagX(a, array);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+
+ c = SIMD.int32x4.withFlagX(a, undefined);
+ assertEquals(false, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(0x0, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagX();
+testSIMDWithFlagX();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagX);
+testSIMDWithFlagX();
+
+function testSIMDWithFlagY() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagY(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(true, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagY(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagY();
+testSIMDWithFlagY();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagY);
+testSIMDWithFlagY();
+
+function testSIMDWithFlagZ() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagZ(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ c = SIMD.int32x4.withFlagZ(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(false, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(0x0, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagZ();
+testSIMDWithFlagZ();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagZ);
+testSIMDWithFlagZ();
+
+function testSIMDWithFlagW() {
+ var a = SIMD.int32x4.bool(true, false, true, false);
+ var c = SIMD.int32x4.withFlagW(a, true);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(true, c.flagW);
+ c = SIMD.int32x4.withFlagW(a, false);
+ assertEquals(true, c.flagX);
+ assertEquals(false, c.flagY);
+ assertEquals(true, c.flagZ);
+ assertEquals(false, c.flagW);
+ assertEquals(-1, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(-1, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDWithFlagW();
+testSIMDWithFlagW();
+%OptimizeFunctionOnNextCall(testSIMDWithFlagW);
+testSIMDWithFlagW();
+
+function testSIMDAddu32() {
+ var a = SIMD.int32x4(-1, -1, 0x7fffffff, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x1, -1);
+ var c = SIMD.int32x4.add(a, b);
+ assertEquals(0x0, c.x);
+ assertEquals(-2, c.y);
+ assertEquals(0x80000000 - 0xFFFFFFFF - 1, c.z);
+ assertEquals(-1, c.w);
+}
+
+testSIMDAddu32();
+testSIMDAddu32();
+%OptimizeFunctionOnNextCall(testSIMDAddu32);
+testSIMDAddu32();
+
+function testSIMDSubu32() {
+ var a = SIMD.int32x4(-1, -1, 0x80000000 - 0xFFFFFFFF - 1, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x1, -1);
+ var c = SIMD.int32x4.sub(a, b);
+ assertEquals(-2, c.x);
+ assertEquals(0x0, c.y);
+ assertEquals(0x7FFFFFFF, c.z);
+ assertEquals(0x1, c.w);
+}
+
+testSIMDSubu32();
+testSIMDSubu32();
+%OptimizeFunctionOnNextCall(testSIMDSubu32);
+testSIMDSubu32();
+
+function testSIMDMulu32() {
+ var a = SIMD.int32x4(-1, -1, 0x80000000 - 0xFFFFFFFF - 1, 0x0);
+ var b = SIMD.int32x4(0x1, -1, 0x80000000 - 0xFFFFFFFF - 1, -1);
+ var c = SIMD.int32x4.mul(a, b);
+ assertEquals(-1, c.x);
+ assertEquals(0x1, c.y);
+ assertEquals(0x0, c.z);
+ assertEquals(0x0, c.w);
+}
+
+testSIMDMulu32();
+testSIMDMulu32();
+%OptimizeFunctionOnNextCall(testSIMDMulu32);
+testSIMDMulu32();
+
+function testSIMDShuffleu32() {
+ var m = SIMD.int32x4(1, 2, 3, 4);
+ var xxxx = SIMD.int32x4.shuffle(m, SIMD.XXXX);
+ assertEquals(1, xxxx.x);
+ assertEquals(1, xxxx.y);
+ assertEquals(1, xxxx.z);
+ assertEquals(1, xxxx.w);
+ var yyyy = SIMD.int32x4.shuffle(m, SIMD.YYYY);
+ assertEquals(2, yyyy.x);
+ assertEquals(2, yyyy.y);
+ assertEquals(2, yyyy.z);
+ assertEquals(2, yyyy.w);
+ var zzzz = SIMD.int32x4.shuffle(m, SIMD.ZZZZ);
+ assertEquals(3, zzzz.x);
+ assertEquals(3, zzzz.y);
+ assertEquals(3, zzzz.z);
+ assertEquals(3, zzzz.w);
+ var wwww = SIMD.int32x4.shuffle(m, SIMD.WWWW);
+ assertEquals(4, wwww.x);
+ assertEquals(4, wwww.y);
+ assertEquals(4, wwww.z);
+ assertEquals(4, wwww.w);
+ var wzyx = SIMD.int32x4.shuffle(m, SIMD.WZYX);
+ assertEquals(4, wzyx.x);
+ assertEquals(3, wzyx.y);
+ assertEquals(2, wzyx.z);
+ assertEquals(1, wzyx.w);
+ var wwzz = SIMD.int32x4.shuffle(m, SIMD.WWZZ);
+ assertEquals(4, wwzz.x);
+ assertEquals(4, wwzz.y);
+ assertEquals(3, wwzz.z);
+ assertEquals(3, wwzz.w);
+ var xxyy = SIMD.int32x4.shuffle(m, SIMD.XXYY);
+ assertEquals(1, xxyy.x);
+ assertEquals(1, xxyy.y);
+ assertEquals(2, xxyy.z);
+ assertEquals(2, xxyy.w);
+ var yyww = SIMD.int32x4.shuffle(m, SIMD.YYWW);
+ assertEquals(2, yyww.x);
+ assertEquals(2, yyww.y);
+ assertEquals(4, yyww.z);
+ assertEquals(4, yyww.w);
+}
+
+testSIMDShuffleu32();
+testSIMDShuffleu32();
+%OptimizeFunctionOnNextCall(testSIMDShuffleu32);
+testSIMDShuffleu32();
+
+function testSIMDComparisons() {
+ var m = SIMD.int32x4(1, 2, 100, 1);
+ var n = SIMD.int32x4(2, 2, 1, 100);
+ var cmp;
+ cmp = SIMD.int32x4.lessThan(m, n);
+ assertEquals(-1, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(-1, cmp.w);
+
+ cmp = SIMD.int32x4.equal(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(-1, cmp.y);
+ assertEquals(0x0, cmp.z);
+ assertEquals(0x0, cmp.w);
+
+ cmp = SIMD.int32x4.greaterThan(m, n);
+ assertEquals(0x0, cmp.x);
+ assertEquals(0x0, cmp.y);
+ assertEquals(-1, cmp.z);
+ assertEquals(0x0, cmp.w);
+}
+
+testSIMDComparisons();
+testSIMDComparisons();
+%OptimizeFunctionOnNextCall(testSIMDComparisons);
+testSIMDComparisons();
+
+function testSIMDShift() {
+ var m = SIMD.int32x4(1, 2, 100, 0);
+
+ var a = SIMD.int32x4.shiftLeft(m, 2);
+ assertEquals(4, a.x);
+ assertEquals(8, a.y);
+ assertEquals(400, a.z);
+ assertEquals(0, a.w);
+
+ var b = SIMD.int32x4.shiftRight(a, 2);
+ assertEquals(1, b.x);
+ assertEquals(2, b.y);
+ assertEquals(100, b.z);
+ assertEquals(0, b.w);
+
+ var n = SIMD.int32x4(-8, 2, 1, 100);
+
+ var c = SIMD.int32x4.shiftRightArithmetic(n, 2);
+ assertEquals(-2, c.x);
+ assertEquals(0, c.y);
+ assertEquals(0, c.z);
+ assertEquals(25, c.w);
+}
+
+testSIMDShift();
+testSIMDShift();
+%OptimizeFunctionOnNextCall(testSIMDShift);
+testSIMDShift();
+
+function testInt32x4ArrayBasic() {
+ var a = new Int32x4Array(1);
+ assertEquals(1, a.length);
+ assertEquals(16, a.byteLength);
+ assertEquals(16, a.BYTES_PER_ELEMENT);
+ assertEquals(16, Int32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, a.byteOffset);
+ assertTrue(undefined != a.buffer);
+ var b = new Int32x4Array(4);
+ assertEquals(4, b.length);
+ assertEquals(64, b.byteLength);
+ assertEquals(16, b.BYTES_PER_ELEMENT);
+ assertEquals(16, Int32x4Array.BYTES_PER_ELEMENT);
+ assertEquals(0, b.byteOffset);
+ assertTrue(undefined != b.buffer);
+}
+
+testInt32x4ArrayBasic();
+
+function testInt32x4ArrayGetAndSet() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+ assertEquals(a[0].x, 1);
+ assertEquals(a[0].y, 2);
+ assertEquals(a[0].z, 3);
+ assertEquals(a[0].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[3].x, 13);
+ assertEquals(a[3].y, 14);
+ assertEquals(a[3].z, 15);
+ assertEquals(a[3].w, 16);
+
+ var b = new Int32x4Array(4);
+ b.setAt(0,SIMD.int32x4(1, 2, 3, 4));
+ b.setAt(1,SIMD.int32x4(5, 6, 7, 8));
+ b.setAt(2,SIMD.int32x4(9, 10, 11, 12));
+ b.setAt(3,SIMD.int32x4(13, 14, 15, 16));
+
+ assertEquals(b.getAt(0).x, 1);
+ assertEquals(b.getAt(0).y, 2);
+ assertEquals(b.getAt(0).z, 3);
+ assertEquals(b.getAt(0).w, 4);
+
+ assertEquals(b.getAt(1).x, 5);
+ assertEquals(b.getAt(1).y, 6);
+ assertEquals(b.getAt(1).z, 7);
+ assertEquals(b.getAt(1).w, 8);
+
+ assertEquals(b.getAt(2).x, 9);
+ assertEquals(b.getAt(2).y, 10);
+ assertEquals(b.getAt(2).z, 11);
+ assertEquals(b.getAt(2).w, 12);
+
+ assertEquals(b.getAt(3).x, 13);
+ assertEquals(b.getAt(3).y, 14);
+ assertEquals(b.getAt(3).z, 15);
+ assertEquals(b.getAt(3).w, 16);
+}
+
+testInt32x4ArrayGetAndSet();
+
+function testInt32x4ArraySwap() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+
+ // Swap element 0 and element 3
+ var t = a[0];
+ a[0] = a[3];
+ a[3] = t;
+
+ assertEquals(a[3].x, 1);
+ assertEquals(a[3].y, 2);
+ assertEquals(a[3].z, 3);
+ assertEquals(a[3].w, 4);
+
+ assertEquals(a[1].x, 5);
+ assertEquals(a[1].y, 6);
+ assertEquals(a[1].z, 7);
+ assertEquals(a[1].w, 8);
+
+ assertEquals(a[2].x, 9);
+ assertEquals(a[2].y, 10);
+ assertEquals(a[2].z, 11);
+ assertEquals(a[2].w, 12);
+
+ assertEquals(a[0].x, 13);
+ assertEquals(a[0].y, 14);
+ assertEquals(a[0].z, 15);
+ assertEquals(a[0].w, 16);
+}
+
+testInt32x4ArraySwap();
+testInt32x4ArraySwap();
+%OptimizeFunctionOnNextCall(testInt32x4ArraySwap);
+testInt32x4ArraySwap();
+
+function testInt32x4ArrayCopy() {
+ var a = new Int32x4Array(4);
+ a[0] = SIMD.int32x4(1, 2, 3, 4);
+ a[1] = SIMD.int32x4(5, 6, 7, 8);
+ a[2] = SIMD.int32x4(9, 10, 11, 12);
+ a[3] = SIMD.int32x4(13, 14, 15, 16);
+ var b = new Int32x4Array(a);
+ assertEquals(a[0].x, b[0].x);
+ assertEquals(a[0].y, b[0].y);
+ assertEquals(a[0].z, b[0].z);
+ assertEquals(a[0].w, b[0].w);
+
+ assertEquals(a[1].x, b[1].x);
+ assertEquals(a[1].y, b[1].y);
+ assertEquals(a[1].z, b[1].z);
+ assertEquals(a[1].w, b[1].w);
+
+ assertEquals(a[2].x, b[2].x);
+ assertEquals(a[2].y, b[2].y);
+ assertEquals(a[2].z, b[2].z);
+ assertEquals(a[2].w, b[2].w);
+
+ assertEquals(a[3].x, b[3].x);
+ assertEquals(a[3].y, b[3].y);
+ assertEquals(a[3].z, b[3].z);
+ assertEquals(a[3].w, b[3].w);
+
+ a[2] = SIMD.int32x4(17, 18, 19, 20);
+
+ assertEquals(a[2].x, 17);
+ assertEquals(a[2].y, 18);
+ assertEquals(a[2].z, 19);
+ assertEquals(a[2].w, 20);
+
+ assertTrue(a[2].x != b[2].x);
+ assertTrue(a[2].y != b[2].y);
+ assertTrue(a[2].z != b[2].z);
+ assertTrue(a[2].w != b[2].w);
+}
+
+testInt32x4ArrayCopy();
+
+function testInt32x4ArrayViewBasic() {
+ var a = new Uint32Array(8);
+ // view with no offset.
+ var b = new Int32x4Array(a.buffer, 0);
+ // view with offset.
+ var c = new Int32x4Array(a.buffer, 16);
+ // view with no offset but shorter than original list.
+ var d = new Int32x4Array(a.buffer, 0, 1);
+ assertEquals(a.length, 8);
+ assertEquals(b.length, 2);
+ assertEquals(c.length, 1);
+ assertEquals(d.length, 1);
+ assertEquals(a.byteLength, 32);
+ assertEquals(b.byteLength, 32);
+ assertEquals(c.byteLength, 16);
+  assertEquals(d.byteLength, 16);
+ assertEquals(a.byteOffset, 0);
+ assertEquals(b.byteOffset, 0);
+ assertEquals(c.byteOffset, 16);
+ assertEquals(d.byteOffset, 0);
+}
+
+testInt32x4ArrayViewBasic();
+
+function testInt32x4ArrayViewValues() {
+ var a = new Uint32Array(8);
+ var b = new Int32x4Array(a.buffer, 0);
+ var c = new Int32x4Array(a.buffer, 16);
+ var d = new Int32x4Array(a.buffer, 0, 1);
+ var start = 100;
+ for (var i = 0; i < b.length; i++) {
+ assertEquals(0, b[i].x);
+ assertEquals(0, b[i].y);
+ assertEquals(0, b[i].z);
+ assertEquals(0, b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertEquals(0, c[i].x);
+ assertEquals(0, c[i].y);
+ assertEquals(0, c[i].z);
+ assertEquals(0, c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertEquals(0, d[i].x);
+ assertEquals(0, d[i].y);
+ assertEquals(0, d[i].z);
+ assertEquals(0, d[i].w);
+ }
+ for (var i = 0; i < a.length; i++) {
+ a[i] = i+start;
+ }
+ for (var i = 0; i < b.length; i++) {
+ assertTrue(0 != b[i].x);
+ assertTrue(0 != b[i].y);
+ assertTrue(0 != b[i].z);
+ assertTrue(0 != b[i].w);
+ }
+ for (var i = 0; i < c.length; i++) {
+ assertTrue(0 != c[i].x);
+ assertTrue(0 != c[i].y);
+ assertTrue(0 != c[i].z);
+ assertTrue(0 != c[i].w);
+ }
+ for (var i = 0; i < d.length; i++) {
+ assertTrue(0 != d[i].x);
+ assertTrue(0 != d[i].y);
+ assertTrue(0 != d[i].z);
+ assertTrue(0 != d[i].w);
+ }
+ assertEquals(start+0, b[0].x);
+ assertEquals(start+1, b[0].y);
+ assertEquals(start+2, b[0].z);
+ assertEquals(start+3, b[0].w);
+ assertEquals(start+4, b[1].x);
+ assertEquals(start+5, b[1].y);
+ assertEquals(start+6, b[1].z);
+ assertEquals(start+7, b[1].w);
+
+ assertEquals(start+4, c[0].x);
+ assertEquals(start+5, c[0].y);
+ assertEquals(start+6, c[0].z);
+ assertEquals(start+7, c[0].w);
+
+ assertEquals(start+0, d[0].x);
+ assertEquals(start+1, d[0].y);
+ assertEquals(start+2, d[0].z);
+ assertEquals(start+3, d[0].w);
+}
+
+testInt32x4ArrayViewValues();
+
+// Checks the reverse direction: writes through a Uint32Array view created
+// over an Int32x4Array's buffer must be visible through the SIMD array.
+function testViewOnInt32x4Array() {
+  var a = new Int32x4Array(4);
+  a[0] = SIMD.int32x4(1, 2, 3, 4);
+  a[1] = SIMD.int32x4(5, 6, 7, 8);
+  a[2] = SIMD.int32x4(9, 10, 11, 12);
+  a[3] = SIMD.int32x4(13, 14, 15, 16);
+  assertEquals(a[0].x, 1);
+  assertEquals(a[0].y, 2);
+  assertEquals(a[0].z, 3);
+  assertEquals(a[0].w, 4);
+
+  assertEquals(a[1].x, 5);
+  assertEquals(a[1].y, 6);
+  assertEquals(a[1].z, 7);
+  assertEquals(a[1].w, 8);
+
+  assertEquals(a[2].x, 9);
+  assertEquals(a[2].y, 10);
+  assertEquals(a[2].z, 11);
+  assertEquals(a[2].w, 12);
+
+  assertEquals(a[3].x, 13);
+  assertEquals(a[3].y, 14);
+  assertEquals(a[3].z, 15);
+  assertEquals(a[3].w, 16);
+
+  // Create view on a. 4 elements * 16 bytes = 64 bytes = 16 uint32 lanes.
+  var b = new Uint32Array(a.buffer);
+  assertEquals(b.length, 16);
+  assertEquals(b.byteLength, 64);
+  // b[2] aliases a[0].z and b[6] aliases a[1].z (4 uint32s per element).
+  b[2] = 99.0;
+  b[6] = 1.0;
+
+  // Observe changes in "a"
+  assertEquals(a[0].x, 1);
+  assertEquals(a[0].y, 2);
+  assertEquals(a[0].z, 99);
+  assertEquals(a[0].w, 4);
+
+  assertEquals(a[1].x, 5);
+  assertEquals(a[1].y, 6);
+  assertEquals(a[1].z, 1);
+  assertEquals(a[1].w, 8);
+
+  // Elements not covered by the writes stay untouched.
+  assertEquals(a[2].x, 9);
+  assertEquals(a[2].y, 10);
+  assertEquals(a[2].z, 11);
+  assertEquals(a[2].w, 12);
+
+  assertEquals(a[3].x, 13);
+  assertEquals(a[3].y, 14);
+  assertEquals(a[3].z, 15);
+  assertEquals(a[3].w, 16);
+}
+
+testViewOnInt32x4Array();
+
+// Stores identical int32x4 values into a plain JS array and an Int32x4Array
+// and verifies the two containers agree lane-by-lane on read-back.
+function testArrayOfInt32x4() {
+  var a = [];
+  var a4 = new Int32x4Array(2);
+  for (var i = 0; i < a4.length; i++) {
+    a[i] = SIMD.int32x4(i, i + 1, i + 2, i + 3);
+    a4[i] = SIMD.int32x4(i, i + 1, i + 2, i + 3);
+  }
+
+  for (var i = 0; i < a4.length; i++) {
+    assertEquals(a[i].x, a4[i].x);
+    assertEquals(a[i].y, a4[i].y);
+    assertEquals(a[i].z, a4[i].z);
+    assertEquals(a[i].w, a4[i].w);
+  }
+}
+
+testArrayOfInt32x4();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object
+
+// Checks SIMD.float32x4.abs: each negative lane is negated, positive lanes
+// pass through unchanged.
+function testSIMDAbs() {
+  var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+  var b4;
+  // Repeated many times so the operation is likely exercised by the
+  // optimizing compiler as well as the interpreter -- TODO confirm the
+  // optimization threshold this is meant to cross.
+  for (var i = 0; i < 100000; i++) {
+    b4 = SIMD.float32x4.abs(a4);
+  }
+
+  assertEquals(1.0, b4.x);
+  assertEquals(2.0, b4.y);
+  assertEquals(3.0, b4.z);
+  assertEquals(4.0, b4.w);
+}
+
+testSIMDAbs();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+// After replacing SIMD.float32x4.prototype with a plain object, reading a
+// lane getter (a4.x) must raise a TypeError. Run twice unoptimized, then
+// once more after forcing optimization via %OptimizeFunctionOnNextCall.
+function testFloat32x4Prototype() {
+  var a4 = SIMD.float32x4(1.0, -2.0, 3.0, -4.0);
+  SIMD.float32x4.prototype = {};
+  try {
+    var x = a4.x;
+  } catch (o) {
+    assertEquals(o instanceof TypeError, true);
+    assertEquals(o.message, "<unknown message this is not a float32x4 object.>");
+  }
+}
+
+testFloat32x4Prototype();
+testFloat32x4Prototype();
+%OptimizeFunctionOnNextCall(testFloat32x4Prototype);
+testFloat32x4Prototype();
+
+// Same prototype-replacement check as above, but for SIMD.int32x4: lane
+// access on an int32x4 whose prototype was swapped must throw TypeError,
+// both in unoptimized and optimized code.
+function testInt32x4Prototype() {
+  var a4 = SIMD.int32x4(1.0, -2.0, 3.0, -4.0);
+  SIMD.int32x4.prototype = {};
+  try {
+    var x = a4.x;
+  } catch (o) {
+    assertEquals(o instanceof TypeError, true);
+    assertEquals(o.message, "<unknown message this is not a int32x4 object.>");
+  }
+}
+
+testInt32x4Prototype();
+testInt32x4Prototype();
+%OptimizeFunctionOnNextCall(testInt32x4Prototype);
+testInt32x4Prototype();
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --simd_object --allow-natives-syntax
+
+// Type-check regression test: SIMD.float32x4.abs applied to an int32x4
+// must throw a TypeError even from optimized code. The first calls (i < 3)
+// warm up testSIMDAbs with float32x4 inputs; after forced optimization a
+// call with i == 3 feeds it an int32x4 instead.
+function testSIMDAbs(i) {
+  var a;
+  if (i < 3) {
+    a = SIMD.float32x4(1, 1, 1, 1);
+  } else {
+    a = SIMD.int32x4(2, 2, 2, 2);
+  }
+  return SIMD.float32x4.abs(a);
+}
+
+// Wrapper asserting that any failure is the expected TypeError.
+function tryTestSIMDAbs(i) {
+  var r = 0;
+  try {
+    r = testSIMDAbs(i);
+  } catch (o) {
+    assertEquals(o instanceof TypeError, true);
+    assertEquals(o.message, "<unknown message this is not a float32x4 object.>");
+  }
+}
+
+tryTestSIMDAbs(1);
+tryTestSIMDAbs(2);
+%OptimizeFunctionOnNextCall(testSIMDAbs);
+tryTestSIMDAbs(3);
'../../src/harmony-string.js',
'../../src/harmony-array.js',
'../../src/harmony-classes.js',
+ '../../src/simd128.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
if macro_filename:
(consts, macros) = ReadMacros(ReadFile(macro_filename))
filter_chain.append(lambda l: ExpandConstants(l, consts))
+ filter_chain.append(lambda l: ExpandInlineMacros(l))
filter_chain.append(lambda l: ExpandMacros(l, macros))
filter_chain.extend([
RemoveCommentsAndTrailingWhitespace,
- ExpandInlineMacros,
ExpandInlineConstants,
Validate,
jsmin.JavaScriptMinifier().JSMinify
# Edit these when rolling DEPS.xwalk.
# -----------------------------------
-chromium_crosswalk_rev = '4e78cd026ad6b9158f6a29bc15606c6a25d5cb2e'
+chromium_crosswalk_rev = '88fc58a654d73e2df3dffc946077486c450f3bdb'
blink_crosswalk_rev = 'ed2bae8ced284782cbd55f1597e02d6ee621621b'
-v8_crosswalk_rev = '83372d40b7585fbbce96bca7866c7a96a5e3a74f'
-ozone_wayland_rev = '6f1674ee4d554444cd5c48a3665b61d20be210b8'
+v8_crosswalk_rev = '390bd33f39ea5a12e403ebb52f8b553b0772aa2c'
+ozone_wayland_rev = '9a04e61a2c373bc02dce2b6dfac6f56d99981598'
crosswalk_git = 'https://github.com/crosswalk-project'
ozone_wayland_git = 'https://github.com/01org'
MAJOR=10
MINOR=39
-BUILD=226
+BUILD=233
PATCH=0
import android.content.Context;
import android.content.Intent;
import android.util.AttributeSet;
-import android.widget.FrameLayout;
+import android.widget.LinearLayout;
/**
* This class is to provide public APIs which are called by web application
// Implementation notes.
// Please be careful to change any public APIs for the backward compatibility
// is very important to us. Don't change any of them without permisson.
-public class XWalkRuntimeView extends FrameLayout {
+public class XWalkRuntimeView extends LinearLayout {
// The actual implementation to hide the internals to API users.
private XWalkRuntimeViewProvider mProvider;
private void init(Context context, Activity activity) {
mProvider = XWalkRuntimeViewProviderFactory.getProvider(context, activity);
+ setOrientation(LinearLayout.VERTICAL);
this.addView(mProvider.getView(),
- new FrameLayout.LayoutParams(
- FrameLayout.LayoutParams.MATCH_PARENT,
- FrameLayout.LayoutParams.MATCH_PARENT));
+ new LinearLayout.LayoutParams(
+ LinearLayout.LayoutParams.MATCH_PARENT,
+ LinearLayout.LayoutParams.MATCH_PARENT));
}
/**
import shutil
import stat
import sys
-import tempfile
# get xwalk absolute path so we can run this script from any location
xwalk_dir = os.path.dirname(os.path.abspath(__file__))
from handle_xml import EditElementAttribute
from handle_xml import EditElementValueByNodeName
from handle_permissions import HandlePermissions
-from util import CleanDir, CreateAndCopyDir
+from util import CleanDir, CreateAndCopyDir, GetBuildDir
from xml.dom import minidom
+
TEMPLATE_DIR_NAME = 'template'
+
def VerifyPackageName(value):
regex = r'^[a-z][a-z0-9_]*(\.[a-z][a-z0-9_]*)+$'
descrpt = 'Each part of package'
"""
# create new app_dir in temp dir
app_name = app_info.android_name
- app_dir = os.path.join(tempfile.gettempdir(), app_name)
+ app_dir = GetBuildDir(app_name)
app_package = app_info.package
app_root = app_info.app_root
template_app_dir = os.path.join(xwalk_dir, TEMPLATE_DIR_NAME)
def CustomizeStringXML(name, description):
- strings_path = os.path.join(tempfile.gettempdir(), name, 'res', 'values',
+ strings_path = os.path.join(GetBuildDir(name), 'res', 'values',
'strings.xml')
if not os.path.isfile(strings_path):
print ('Please make sure strings_xml'
def CustomizeThemeXML(name, fullscreen, manifest):
- theme_path = os.path.join(tempfile.gettempdir(), name, 'res', 'values-v14',
- 'theme.xml')
+ theme_path = os.path.join(GetBuildDir(name), 'res', 'values-v14', 'theme.xml')
if not os.path.isfile(theme_path):
print('Error: theme.xml is missing in the build tool.')
sys.exit(6)
orientation = app_info.orientation
package = app_info.package
app_name = app_info.app_name
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
# Chinese character with unicode get from 'manifest.json' will cause
# 'UnicodeEncodeError' when finally wrote to 'AndroidManifest.xml'.
app_name = EncodingUnicodeValue(app_name)
def CustomizeJava(app_info, app_url, app_local_path, keep_screen_on):
name = app_info.android_name
package = app_info.package
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
app_pkg_dir = os.path.join(app_dir, 'src', package.replace('.', os.path.sep))
dest_activity = os.path.join(app_pkg_dir, name + 'Activity.java')
ReplaceString(dest_activity, 'org.xwalk.app.template', package)
if not extensions:
return
name = app_info.android_name
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
apk_assets_path = os.path.join(app_dir, 'assets')
extensions_string = 'xwalk-extensions'
def GenerateCommandLineFile(app_info, xwalk_command_line):
if xwalk_command_line == '':
return
- assets_path = os.path.join(tempfile.gettempdir(), app_info.android_name,
- 'assets')
+ assets_path = os.path.join(GetBuildDir(app_info.android_name), 'assets')
file_path = os.path.join(assets_path, 'xwalk-command-line')
command_line_file = open(file_path, 'w')
command_line_file.write('xwalk ' + xwalk_command_line)
def CustomizeIconByDict(name, app_root, icon_dict):
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
icon_name = None
drawable_dict = {'ldpi': [1, 37], 'mdpi': [37, 72], 'hdpi': [72, 96],
'xhdpi': [96, 120], 'xxhdpi': [120, 144],
def CustomizeIconByOption(name, icon):
if os.path.isfile(icon):
- drawable_path = os.path.join(tempfile.gettempdir(), name, 'res', 'drawable')
+ drawable_path = os.path.join(GetBuildDir(name), 'res', 'drawable')
if not os.path.exists(drawable_path):
os.makedirs(drawable_path)
icon_file = os.path.basename(icon)
# build project is now in /tmp/<name>. Copy to project_dir
if options.project_dir:
- src_dir = os.path.join(tempfile.gettempdir(), app_info.android_name)
+ src_dir = GetBuildDir(app_info.android_name)
dest_dir = os.path.join(options.project_dir, app_info.android_name)
CreateAndCopyDir(src_dir, dest_dir, True)
print('Exiting with error code: %d' % ec.code)
return ec.code
finally:
- CleanDir(os.path.join(tempfile.gettempdir(), app_info.android_name))
+ CleanDir(GetBuildDir(app_info.android_name))
return 0
import os
import shutil
import sys
-import tempfile
+
+from util import GetBuildDir
def CopyToPathWithName(root, name, final_path, rename):
if name == '':
def CopyDrawables(image_dict, orientation, sanitized_name, name, app_root):
- drawable = os.path.join(tempfile.gettempdir(), sanitized_name, 'res',
+ drawable = os.path.join(GetBuildDir(sanitized_name), 'res',
'drawable')
if orientation == 'landscape':
drawable = drawable + '-land'
orientation,
sanitized_name,
app_root):
- background_path = os.path.join(tempfile.gettempdir(), sanitized_name, 'res',
+ background_path = os.path.join(GetBuildDir(sanitized_name), 'res',
'drawable', 'launchscreen_bg.xml')
if not os.path.isfile(background_path):
print('Error: launchscreen_bg.xml is missing in the build tool.')
from extension_manager import GetExtensionList, GetExtensionStatus
from handle_permissions import permission_mapping_table
from util import AllArchitectures, CleanDir, GetVersion, RunCommand, \
- CreateAndCopyDir
+ CreateAndCopyDir, GetBuildDir
from manifest_json_parser import HandlePermissionList
from manifest_json_parser import ManifestJsonParser
else:
print('Error: there is no app launch path defined in manifest.json.')
sys.exit(9)
+ options.icon_dict = {}
if parser.GetAppRoot():
options.app_root = parser.GetAppRoot()
options.icon_dict = parser.GetIcons()
def Execution(options, name):
arch_string = (' ('+options.arch+')' if options.arch else '')
print('\nStarting application build' + arch_string)
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
android_path = Which('android')
api_level = GetAndroidApiLevel(android_path)
target_string = 'android-%d' % api_level
CheckSystemRequirements()
Customize(options, app_info, manifest)
name = app_info.android_name
- app_dir = os.path.join(tempfile.gettempdir(), name)
+ app_dir = GetBuildDir(name)
packaged_archs = []
if options.mode == 'shared':
# For shared mode, it's not necessary to use the whole xwalk core library,
xpk_temp_dir = ''
if options.xpk:
xpk_name = os.path.splitext(os.path.basename(options.xpk))[0]
- xpk_temp_dir = os.path.join(tempfile.gettempdir(), xpk_name + '_xpk')
+ xpk_temp_dir = tempfile.mkdtemp(prefix="%s-" % xpk_name + '_xpk')
CleanDir(xpk_temp_dir)
ParseXPK(options, xpk_temp_dir)
+ if options.manifest:
+ options.manifest = os.path.abspath(options.manifest)
+ if not os.path.isfile(options.manifest):
+ print('Error: The manifest file does not exist.')
+ sys.exit(8)
+
if options.app_root and not options.manifest:
manifest_path = os.path.join(options.app_root, 'manifest.json')
if os.path.exists(manifest_path):
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
- if options.project_dir:
- if options.project_dir == tempfile.gettempdir():
- print('\nmake_apk.py error: Option --project-dir can not be '
- 'the system temporary\ndirectory.')
- sys.exit(8)
if options.project_only and not options.project_dir:
print('\nmake_apk.py error: Option --project-only must be used '
'with --project-dir')
except SystemExit as ec:
return ec.code
finally:
- CleanDir(os.path.join(tempfile.gettempdir(), app_info.android_name))
+ CleanDir(GetBuildDir(app_info.android_name))
CleanDir(xpk_temp_dir)
return 0
import shutil
import subprocess
import sys
+import tempfile
+
+# Lazily-created scratch directory shared by all build steps of one run.
+build_dir = None
+
+def GetBuildDir(name):
+  # Returns the per-run temporary build directory, creating it on first use
+  # via tempfile.mkdtemp (unique, race-free, unlike the old gettempdir()
+  # scheme this replaces).
+  # NOTE(review): 'name' only influences the directory prefix on the first
+  # call; later calls return the same directory regardless of name --
+  # presumably one app is built per invocation. Confirm against callers.
+  global build_dir
+  if not build_dir:
+    build_dir = tempfile.mkdtemp(prefix="%s-" % name)
+  return build_dir
def CleanDir(path):
const char kAccessSubdomainsKey[] = "@subdomains";
#if defined(OS_TIZEN)
+const char kTizenWidgetKey[] = "widget";
const char kIcon128Key[] = "widget.icon.@src";
const char kTizenApplicationKey[] = "widget.application";
// Child keys inside 'kTizenApplicationKey'
const char kTizenSplashScreenSrcKey[] = "@src";
const char kContentNamespace[] = "widget.content.@namespace";
const char kTizenScreenOrientationKey[] = "widget.setting.@screen-orientation";
+const char kTizenAppWidgetFullKey[] = "widget.app-widget";
+const char kTizenAppWidgetKey[] = "app-widget";
+const char kTizenAppWidgetIdKey[] = "@id";
+const char kTizenAppWidgetPrimaryKey[] = "@primary";
+const char kTizenAppWidgetUpdatePeriodKey[] = "@update-period";
+const char kTizenAppWidgetAutoLaunchKey[] = "@auto-launch";
+const char kTizenAppWidgetBoxLabelKey[] = "box-label";
+const char kTizenAppWidgetBoxLabelLangKey[] = "@lang";
+const char kTizenAppWidgetBoxLabelTextKey[] = "#text";
+const char kTizenAppWidgetBoxIconKey[] = "box-icon";
+const char kTizenAppWidgetBoxIconSrcKey[] = "@src";
+const char kTizenAppWidgetBoxContentKey[] = "box-content";
+const char kTizenAppWidgetBoxContentSrcKey[] = "@src";
+const char kTizenAppWidgetBoxContentMouseEventKey[] = "@mouse-event";
+const char kTizenAppWidgetBoxContentTouchEffectKey[] = "@touch-effect";
+const char kTizenAppWidgetBoxContentSizeKey[] = "box-size";
+const char kTizenAppWidgetBoxContentSizeTextKey[] = "#text";
+const char kTizenAppWidgetBoxContentSizePreviewKey[] = "@preview";
+const char kTizenAppWidgetBoxContentSizeUseDecorationKey[] = "@use-decoration";
+const char kTizenAppWidgetBoxContentDropViewKey[] = "pd";
+const char kTizenAppWidgetBoxContentDropViewSrcKey[] = "@src";
+const char kTizenAppWidgetBoxContentDropViewWidthKey[] = "@width";
+const char kTizenAppWidgetBoxContentDropViewHeightKey[] = "@height";
#endif
} // namespace application_widget_keys
extern const char kPreferencesValueKey[];
extern const char kPreferencesReadonlyKey[];
#if defined(OS_TIZEN)
+ extern const char kTizenWidgetKey[];
extern const char kTizenApplicationKey[];
extern const char kTizenApplicationIdKey[];
extern const char kTizenApplicationPackageKey[];
extern const char kTizenSplashScreenSrcKey[];
extern const char kContentNamespace[];
extern const char kTizenScreenOrientationKey[];
+ extern const char kTizenAppWidgetFullKey[];
+ extern const char kTizenAppWidgetKey[];
+ extern const char kTizenAppWidgetIdKey[];
+ extern const char kTizenAppWidgetPrimaryKey[];
+ extern const char kTizenAppWidgetUpdatePeriodKey[];
+ extern const char kTizenAppWidgetAutoLaunchKey[];
+ extern const char kTizenAppWidgetBoxLabelKey[];
+ extern const char kTizenAppWidgetBoxLabelLangKey[];
+ extern const char kTizenAppWidgetBoxLabelTextKey[];
+ extern const char kTizenAppWidgetBoxIconKey[];
+ extern const char kTizenAppWidgetBoxIconSrcKey[];
+ extern const char kTizenAppWidgetBoxContentKey[];
+ extern const char kTizenAppWidgetBoxContentSrcKey[];
+ extern const char kTizenAppWidgetBoxContentMouseEventKey[];
+ extern const char kTizenAppWidgetBoxContentTouchEffectKey[];
+ extern const char kTizenAppWidgetBoxContentSizeKey[];
+ extern const char kTizenAppWidgetBoxContentSizeTextKey[];
+ extern const char kTizenAppWidgetBoxContentSizePreviewKey[];
+ extern const char kTizenAppWidgetBoxContentSizeUseDecorationKey[];
+ extern const char kTizenAppWidgetBoxContentDropViewKey[];
+ extern const char kTizenAppWidgetBoxContentDropViewSrcKey[];
+ extern const char kTizenAppWidgetBoxContentDropViewWidthKey[];
+ extern const char kTizenAppWidgetBoxContentDropViewHeightKey[];
#endif
} // namespace application_widget_keys
#if defined(OS_TIZEN)
#include "xwalk/application/common/manifest_handlers/navigation_handler.h"
#include "xwalk/application/common/manifest_handlers/tizen_application_handler.h"
+#include "xwalk/application/common/manifest_handlers/tizen_appwidget_handler.h"
#include "xwalk/application/common/manifest_handlers/tizen_metadata_handler.h"
#include "xwalk/application/common/manifest_handlers/tizen_setting_handler.h"
#include "xwalk/application/common/manifest_handlers/tizen_splash_screen_handler.h"
handlers.push_back(new CSPHandler(Manifest::TYPE_WIDGET));
handlers.push_back(new NavigationHandler);
handlers.push_back(new TizenApplicationHandler);
+ handlers.push_back(new TizenAppWidgetHandler);
handlers.push_back(new TizenSettingHandler);
handlers.push_back(new TizenMetaDataHandler);
handlers.push_back(new TizenSplashScreenHandler);
#if defined(OS_TIZEN)
TEST_F(CSPHandlerTest, WGTEmptyCSP) {
manifest.SetString(widget_keys::kNameKey, "no name");
- manifest.SetString(widget_keys::kXWalkVersionKey, "0");
+ manifest.SetString(widget_keys::kVersionKey, "0");
manifest.SetString(widget_keys::kCSPKey, "");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
TEST_F(CSPHandlerTest, WGTCSP) {
manifest.SetString(widget_keys::kNameKey, "no name");
- manifest.SetString(widget_keys::kXWalkVersionKey, "0");
+ manifest.SetString(widget_keys::kVersionKey, "0");
manifest.SetString(widget_keys::kCSPKey, "default-src 'self' ");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
#include <string>
#include <vector>
#include "xwalk/application/common/application_manifest_constants.h"
+#include "xwalk/application/common/manifest.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace xwalk {
public:
virtual void SetUp() OVERRIDE {
manifest.SetString(keys::kNameKey, "no name");
- manifest.SetString(keys::kXWalkVersionKey, "0");
+ manifest.SetString(keys::kVersionKey, "0");
}
scoped_refptr<ApplicationData> CreateApplication() {
std::string error;
scoped_refptr<ApplicationData> application = ApplicationData::Create(
- base::FilePath(), ApplicationData::LOCAL_DIRECTORY,
- manifest, "", &error);
+ base::FilePath(), std::string(), ApplicationData::LOCAL_DIRECTORY,
+ make_scoped_ptr(new Manifest(make_scoped_ptr(manifest.DeepCopy()))),
+ &error);
return application;
}
manifest.SetString(keys::kAllowNavigationKey, "http://www.sample.com");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
- EXPECT_EQ(application->GetPackageType(), Manifest::TYPE_WGT);
+ EXPECT_EQ(application->GetManifest()->type(), Manifest::TYPE_WIDGET);
const NavigationInfo* info = GetNavigationInfo(application);
EXPECT_TRUE(info);
const std::vector<std::string>& list = info->GetAllowedDomains();
"http://www.sample1.com www.sample2.com");
scoped_refptr<ApplicationData> application = CreateApplication();
EXPECT_TRUE(application.get());
- EXPECT_EQ(application->GetPackageType(), Manifest::TYPE_WGT);
+ EXPECT_EQ(application->GetManifest()->type(), Manifest::TYPE_WIDGET);
const NavigationInfo* info = GetNavigationInfo(application);
EXPECT_TRUE(info);
const std::vector<std::string>& list = info->GetAllowedDomains();
--- /dev/null
+// Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "xwalk/application/common/manifest_handlers/tizen_appwidget_handler.h"
+
+#include <limits>
+#include <set>
+
+#include "base/macros.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/values.h"
+#include "third_party/re2/re2/re2.h"
+#include "xwalk/application/common/application_manifest_constants.h"
+#include "xwalk/application/common/manifest_handlers/tizen_application_handler.h"
+
+namespace xwalk {
+
+namespace keys = application_widget_keys;
+
+namespace application {
+
+namespace {
+
+const char kErrMsgInvalidDictionary[] =
+ "Cannot get key value as a dictionary. Key name: ";
+const char kErrMsgInvalidList[] =
+ "Cannot get key value as a list. Key name: ";
+const char kErrMsgNoMandatoryKey[] =
+ "Cannot find mandatory key. Key name: ";
+const char kErrMsgInvalidKeyValue[] =
+ "Invalid key value. Key name: ";
+const char kErrMsgMultipleKeys[] =
+ "Too many keys found. Key name: ";
+const char kErrMsgAppWidgetInfoNotFound[] =
+ "Cannot access app-widget info object.";
+const char kErrMsgApplicationInfoNotFound[] =
+ "Cannot access application info object.";
+const char kErrMsgDuplicatedAppWidgetId[] =
+ "Duplicated value of an id attribute in app-widget element. The value: ";
+const char kErrMsgInvalidAppWidgetIdBeginning[] =
+ "Invalid beginning of an id attribute value in app-widget element."
+ " The value: ";
+const char kErrMsgInvalidAppWidgetIdFormat[] =
+ "Invalid format of an id attribute value in app-widget element."
+ " The value: ";
+const char kErrMsgNoPrimaryAppWidget[] =
+ "No primary app-widget element (primary='true').";
+const char kErrMsgToManyPrimaryAppWidgets[] =
+ "Too many primary app-widget elements (primary='true').";
+const char kErrMsgUpdatePeriodOutOfDomain[] =
+ "Value of an update-period attribute in app-widget element out of domain."
+ " The value: ";
+const char kErrMsgNoLabel[] =
+ "No box-label element in app-widget element.";
+const char kErrMsgInvalidIconSrc[] =
+ "Invalid path in a src attribute of box-icon element. The value: ";
+const char kErrMsgInvalidContentSrc[] =
+ "Invalid path or url in a src attribute of box-content element."
+ " The value: ";
+const char kErrMsgInvalidContentSizePreview[] =
+ "Invalid path in a preview attribute of box-size element. The value: ";
+const char kErrMsgNoMandatoryContentSize1x1[] =
+ "No mandatory box-size element (1x1) in box-content element.";
+const char kErrMsgInvalidContentDropViewSrc[] =
+ "Invalid path or url in a src attribute of pd element. The value: ";
+const char kErrMsgContentDropViewHeightOutOfDomain[] =
+ "Value of a height attribute in box-content element out of domain."
+ " The value: ";
+
+// Composes |message| + |arg| into |error|, if |error| is non-null.
+void SetError(const std::string& message,
+    const std::string& arg, std::string* error) {
+  if (error)
+    *error = message + arg;
+}
+
+// As above, but converts the composed ASCII message to UTF-16 for callers
+// reporting through base::string16.
+void SetError(const std::string& message,
+    const std::string& arg, base::string16* error) {
+  if (error)
+    *error = base::ASCIIToUTF16(message + arg);
+}
+
+// Converts given text value to a value of specific type. Returns true if
+// the conversion succeeds or false otherwise. The primary template is never
+// meant to be instantiated; only the specializations below are valid.
+template <typename ValueType>
+bool ConvertValue(const std::string& str_value, ValueType* value) {
+  NOTREACHED() << "Use one of already defined template specializations"
+      " or define a new one.";
+  return false;
+}
+
+// Converts given text value to a string value. Trivial pass-through;
+// always succeeds.
+template <>
+bool ConvertValue(const std::string& str_value, std::string* value) {
+  DCHECK(value);
+  *value = str_value;
+  return true;
+}
+
+// Converts given text value to a boolean value. Only the exact literals
+// "true" and "false" are accepted; anything else fails.
+template <>
+bool ConvertValue(const std::string& str_value, bool* value) {
+  DCHECK(value);
+  if (str_value == "true") {
+    *value = true;
+    return true;
+  }
+  if (str_value == "false") {
+    *value = false;
+    return true;
+  }
+  return false;
+}
+
+// Converts given text value to an integer value. Returns true if the
+// conversion succeeds or false otherwise (delegates to base::StringToInt).
+template <>
+bool ConvertValue(const std::string& str_value, int* value) {
+  DCHECK(value);
+  return base::StringToInt(str_value, value);
+}
+
+// Converts given text value to a floating point value. Returns true if the
+// conversion succeeds or false otherwise (delegates to base::StringToDouble).
+template <>
+bool ConvertValue(const std::string& str_value, double* value) {
+  DCHECK(value);
+  return base::StringToDouble(str_value, value);
+}
+
+// Retrieves a mandatory value from specified dictionary and specified key.
+// The raw value is read as a string and then converted via ConvertValue<>.
+// Returns true if the value is found and converts; false otherwise. If the
+// error parameter is specified it is also filled with a proper message
+// (missing key vs. unconvertible value).
+template <typename ValueType>
+bool GetMandatoryValue(const base::DictionaryValue& dict,
+    const std::string& key, ValueType* value, base::string16* error) {
+  DCHECK(value);
+
+  std::string tmp;
+  if (!dict.GetString(key, &tmp)) {
+    SetError(kErrMsgNoMandatoryKey, key, error);
+    return false;
+  }
+
+  bool result = ConvertValue(tmp, value);
+  if (!result)
+    SetError(kErrMsgInvalidKeyValue, key, error);
+  return result;
+}
+
+// Retrieves an optional value from specified dictionary and specified key.
+// If the key is present the value is converted and stored in |value|; if it
+// is absent |value| receives |default_value| and the call still succeeds.
+// Returns false (filling |error| if set) only when a present value cannot
+// be converted -- a bad value is an error, a missing one is not.
+template <typename ValueType>
+bool GetOptionalValue(const base::DictionaryValue& dict,
+    const std::string& key, ValueType default_value, ValueType* value,
+    base::string16* error) {
+  DCHECK(value);
+
+  std::string tmp;
+  if (!dict.GetString(key, &tmp)) {
+    *value = default_value;
+    return true;
+  }
+
+  bool result = ConvertValue(tmp, value);
+  if (!result)
+    SetError(kErrMsgInvalidKeyValue, key, error);
+  return result;
+}
+
+// Helper function for ParseEach. Do not use directly. Casts |value| to a
+// dictionary (failing with kErrMsgInvalidDictionary otherwise) and forwards
+// it to |parse_single|.
+template <typename ParseSingleType, typename DataContainerType>
+bool ParseEachInternal(const base::Value& value, const std::string& key,
+    ParseSingleType parse_single, DataContainerType* data_container,
+    base::string16* error) {
+  DCHECK(data_container);
+
+  const base::DictionaryValue* inner_dict;
+  if (!value.GetAsDictionary(&inner_dict)) {
+    SetError(kErrMsgInvalidDictionary, key, error);
+    return false;
+  }
+  if (!parse_single(*inner_dict, key, data_container, error))
+    return false;
+
+  return true;
+}
+
+// Parsing helper function calling 'parse_single' for each dictionary
+// contained in 'dict' under 'key'. The manifest parser stores a repeated
+// element as a single dictionary when it occurs once and as a list when it
+// occurs multiple times; both shapes are handled here. Takes two template
+// arguments:
+//  - a function with the following prototype:
+//    bool ParseSingle(const base::DictionaryValue& dict,
+//        const std::string& key, DataContainerType* data_container,
+//        base::string16* error);
+//  - a DataContainerType object where the above function stores data.
+// A missing key fails only when |mandatory| is true. Note: a present key
+// of any type other than dictionary or list is silently accepted.
+template <typename ParseSingleType, typename DataContainerType>
+bool ParseEach(const base::DictionaryValue& dict, const std::string& key,
+    bool mandatory, ParseSingleType parse_single,
+    DataContainerType* data_container, base::string16* error) {
+  DCHECK(data_container);
+
+  const base::Value* value = nullptr;
+  if (!dict.Get(key, &value) || !value) {
+    if (mandatory) {
+      SetError(kErrMsgNoMandatoryKey, key, error);
+      return false;
+    }
+    return true;
+  }
+
+  if (value->IsType(base::Value::TYPE_DICTIONARY)) {
+    if (!ParseEachInternal(*value, key, parse_single, data_container, error))
+      return false;
+  } else if (value->IsType(base::Value::TYPE_LIST)) {
+    const base::ListValue* list;
+    if (!value->GetAsList(&list)) {
+      SetError(kErrMsgInvalidList, key, error);
+      return false;
+    }
+    // NOTE(review): the loop variable shadows the outer |value|; consider
+    // renaming (e.g. |item|) for clarity.
+    for (const base::Value* value : *list)
+      if (!ParseEachInternal(*value, key, parse_single, data_container, error))
+        return false;
+  }
+
+  return true;
+}
+
+// Parses box-label part.
+// A label entry has an optional 'lang' attribute and a mandatory text value.
+// The 'key' parameter is only used for error reporting by the Get* helpers.
+bool ParseLabel(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidget* app_widget, base::string16* error) {
+  DCHECK(app_widget);
+
+  std::string lang;
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetBoxLabelLangKey,
+      std::string(), &lang, error))
+    return false;
+
+  std::string text;
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetBoxLabelTextKey,
+      &text, error))
+    return false;
+
+  if (lang.empty()) {
+    // Note: Tizen 2.2 WRT Core Spec does not determine how many times the value
+    // without lang attribute can appear in one app-widget, so overwrite.
+    app_widget->label.default_value = text;
+  } else {
+    // Note: Tizen 2.2 WRT Core Spec does not determine how many times the value
+    // with specific lang attribute can appear in one app-widget, so overwrite.
+    app_widget->label.lang_value_map[lang] = text;
+  }
+
+  return true;
+}
+
+// Parses box-icon part.
+// At most one box-icon is allowed per app-widget; a second occurrence is
+// reported as kErrMsgMultipleKeys.
+bool ParseIcon(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidget* app_widget, base::string16* error) {
+  DCHECK(app_widget);
+
+  // A non-empty icon_src means this parser already ran for this app-widget.
+  if (!app_widget->icon_src.empty()) {
+    SetError(kErrMsgMultipleKeys, key, error);
+    return false;
+  }
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetBoxIconSrcKey,
+      &app_widget->icon_src, error))
+    return false;
+
+  return true;
+}
+
+// Converts size type from text to enum representation.
+// Returns false when 'str_type' is not one of the recognized sizes
+// ("1x1", "2x1", "2x2"); 'enum_type' is left untouched in that case.
+bool StringToSizeType(const std::string& str_type,
+    TizenAppWidgetSizeType* enum_type) {
+  DCHECK(enum_type);
+  if (str_type == "1x1") {
+    *enum_type = TizenAppWidgetSizeType::k1x1;
+    return true;
+  }
+  if (str_type == "2x1") {
+    *enum_type = TizenAppWidgetSizeType::k2x1;
+    return true;
+  }
+  if (str_type == "2x2") {
+    *enum_type = TizenAppWidgetSizeType::k2x2;
+    return true;
+  }
+  return false;
+}
+
+// Parses box-size part.
+// Each entry carries a mandatory size type ("1x1"/"2x1"/"2x2"), an optional
+// preview image path, and an optional use-decoration flag (default: true).
+// Successfully parsed sizes are appended to app_widget->content_size.
+bool ParseContentSizes(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidget* app_widget, base::string16* error) {
+  DCHECK(app_widget);
+
+  TizenAppWidgetSize size;
+
+  std::string str_type;
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetBoxContentSizeTextKey,
+      &str_type, error))
+    return false;
+
+  TizenAppWidgetSizeType type;
+  if (!StringToSizeType(str_type, &type)) {
+    // Report against the size-text key rather than 'key' so the message
+    // points at the offending attribute.
+    SetError(kErrMsgInvalidKeyValue,
+        keys::kTizenAppWidgetBoxContentSizeTextKey, error);
+    return false;
+  }
+  size.type = type;
+
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetBoxContentSizePreviewKey,
+      std::string(), &size.preview, error))
+    return false;
+
+  if (!GetOptionalValue(dict,
+      keys::kTizenAppWidgetBoxContentSizeUseDecorationKey,
+      true, &size.use_decoration, error))
+    return false;
+
+  app_widget->content_size.push_back(size);
+
+  return true;
+}
+
+// Parses pd (drop view) part.
+// At most one drop view is allowed per app-widget; src, width and height
+// are all mandatory attributes.
+bool ParseContentDropView(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidget* app_widget, base::string16* error) {
+  DCHECK(app_widget);
+
+  // A non-empty vector means this parser already ran for this app-widget.
+  if (!app_widget->content_drop_view.empty()) {
+    SetError(kErrMsgMultipleKeys, key, error);
+    return false;
+  }
+
+  TizenAppWidgetDropView drop_view;
+
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetBoxContentDropViewSrcKey,
+      &drop_view.src, error))
+    return false;
+
+  if (!GetMandatoryValue(dict,
+      keys::kTizenAppWidgetBoxContentDropViewWidthKey,
+      &drop_view.width, error))
+    return false;
+
+  if (!GetMandatoryValue(dict,
+      keys::kTizenAppWidgetBoxContentDropViewHeightKey,
+      &drop_view.height, error))
+    return false;
+
+  app_widget->content_drop_view.push_back(drop_view);
+
+  return true;
+}
+
+// Parses box-content part.
+// Exactly one box-content is allowed per app-widget. It carries a mandatory
+// src, two optional flags (mouse-event, default false; touch-effect, default
+// true), a mandatory set of box sizes and an optional drop view.
+bool ParseContent(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidget* app_widget, base::string16* error) {
+  DCHECK(app_widget);
+
+  // A non-empty content_src means this parser already ran for this app-widget.
+  if (!app_widget->content_src.empty()) {
+    SetError(kErrMsgMultipleKeys, key, error);
+    return false;
+  }
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetBoxContentSrcKey,
+      &app_widget->content_src, error))
+    return false;
+
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetBoxContentMouseEventKey,
+      false, &app_widget->content_mouse_event, error))
+    return false;
+
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetBoxContentTouchEffectKey,
+      true, &app_widget->content_touch_effect, error))
+    return false;
+
+  // Box sizes are mandatory (at least one must be present).
+  if (!ParseEach(dict, keys::kTizenAppWidgetBoxContentSizeKey,
+      true, ParseContentSizes, app_widget, error))
+    return false;
+
+  // The drop view is optional.
+  if (!ParseEach(dict, keys::kTizenAppWidgetBoxContentDropViewKey,
+      false, ParseContentDropView, app_widget, error))
+    return false;
+
+  return true;
+}
+
+// Parses app-widget part.
+// Fills one TizenAppWidget from 'dict' and appends it to 'app_widgets' on
+// success. Label, icon and content sub-elements are parsed via ParseEach.
+bool ParseAppWidget(const base::DictionaryValue& dict,
+    const std::string& key, TizenAppWidgetVector* app_widgets,
+    base::string16* error) {
+  DCHECK(app_widgets);
+
+  TizenAppWidget app_widget;
+
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetIdKey,
+      &app_widget.id, error))
+    return false;
+
+  if (!GetMandatoryValue(dict, keys::kTizenAppWidgetPrimaryKey,
+      &app_widget.primary, error))
+    return false;
+
+  // std::numeric_limits<double>::min() (smallest positive double) is used as
+  // a sentinel for "attribute absent"; it can never be a real update period
+  // since valid periods are validated elsewhere to be >= 1800.
+  double update_period;
+  double no_update_period = std::numeric_limits<double>::min();
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetUpdatePeriodKey,
+      no_update_period, &update_period, error))
+    return false;
+  if (update_period != no_update_period)
+    app_widget.update_period.push_back(update_period);
+
+  if (!GetOptionalValue(dict, keys::kTizenAppWidgetAutoLaunchKey,
+      false, &app_widget.auto_launch, error))
+    return false;
+
+  // At least one label is mandatory.
+  if (!ParseEach(dict, keys::kTizenAppWidgetBoxLabelKey,
+      true, ParseLabel, &app_widget, error))
+    return false;
+
+  if (!ParseEach(dict, keys::kTizenAppWidgetBoxIconKey,
+      true, ParseIcon, &app_widget, error))
+    return false;
+
+  if (!ParseEach(dict, keys::kTizenAppWidgetBoxContentKey,
+      true, ParseContent, &app_widget, error))
+    return false;
+
+  app_widgets->push_back(app_widget);
+
+  return true;
+}
+
+// Validates all app-widget ids.
+// Each id must be unique, must start with the application id, and the
+// remainder must be a dot followed by one or more alphanumeric characters.
+bool ValidateEachId(const TizenAppWidgetVector& app_widgets,
+    const std::string& app_id, std::string* error) {
+  std::set<std::string> unique_values;
+
+  // Loop-invariant data hoisted out of the loop.
+  const size_t app_id_len = app_id.length();
+  const char kStringPattern[] = "[.][0-9a-zA-Z]+";
+
+  for (const TizenAppWidget& app_widget : app_widgets) {
+    // std::set::insert returns {iterator, false} for duplicates.
+    if (!unique_values.insert(app_widget.id).second) {
+      SetError(kErrMsgDuplicatedAppWidgetId, app_widget.id, error);
+      return false;
+    }
+
+    // find() == 0 means the id starts with the application id.
+    if (app_widget.id.find(app_id) != 0) {
+      SetError(kErrMsgInvalidAppWidgetIdBeginning, app_widget.id, error);
+      return false;
+    }
+
+    if (!RE2::FullMatch(app_widget.id.substr(app_id_len), kStringPattern)) {
+      SetError(kErrMsgInvalidAppWidgetIdFormat, app_widget.id, error);
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Validates all app-widget primary attributes.
+// Exactly one app-widget must be marked primary; zero or more than one is
+// an error.
+bool ValidateEachPrimary(const TizenAppWidgetVector& app_widgets,
+    std::string* error) {
+  int primary_count = 0;
+
+  for (const TizenAppWidget& app_widget : app_widgets)
+    if (app_widget.primary)
+      ++primary_count;
+
+  if (!primary_count) {
+    SetError(kErrMsgNoPrimaryAppWidget, "", error);
+    return false;
+  }
+
+  if (primary_count > 1) {
+    SetError(kErrMsgToManyPrimaryAppWidgets, "", error);
+    return false;
+  }
+
+  return true;
+}
+
+// Tests if specified string represents valid remote url.
+// Currently a stub that accepts everything; see TODO below.
+bool IsValidUrl(const std::string& value) {
+  // TODO(tweglarski): implement me (it's not crucial atm)
+  return true;
+}
+
+// Tests if specified string represents valid path.
+// Currently a stub that accepts everything; see TODO below.
+bool IsValidPath(const std::string& value) {
+  // TODO(tweglarski): implement me (it's not crucial atm)
+  return true;
+}
+
+// Tests if specified string represents valid path or remote url.
+// Note: always true while the two helpers above remain stubs.
+bool IsValidPathOrUrl(const std::string& value) {
+  return IsValidPath(value) || IsValidUrl(value);
+}
+
+// Validates all content sizes in an app-widget.
+// The 1x1 size must be present, and every non-empty preview must be a
+// valid path.
+bool ValidateContentSize(const TizenAppWidgetSizeVector& content_size,
+    std::string* error) {
+  bool mandatory_1x1_found = false;
+
+  for (const TizenAppWidgetSize& size : content_size) {
+    // Latches to true once any 1x1 entry is seen.
+    mandatory_1x1_found |= size.type == TizenAppWidgetSizeType::k1x1;
+
+    if (!size.preview.empty() && !IsValidPath(size.preview)) {
+      SetError(kErrMsgInvalidContentSizePreview, size.preview, error);
+      return false;
+    }
+  }
+
+  if (!mandatory_1x1_found) {
+    SetError(kErrMsgNoMandatoryContentSize1x1, "", error);
+    return false;
+  }
+
+  return true;
+}
+
+} // namespace
+
+// Stores a copy of the parsed app-widget list.
+TizenAppWidgetInfo::TizenAppWidgetInfo(const TizenAppWidgetVector& app_widgets)
+    : app_widgets_(app_widgets) {
+}
+
+TizenAppWidgetInfo::~TizenAppWidgetInfo() {
+}
+
+TizenAppWidgetHandler::TizenAppWidgetHandler() {
+}
+
+TizenAppWidgetHandler::~TizenAppWidgetHandler() {
+}
+
+// Parses the tizen widget section of the manifest into a TizenAppWidgetInfo
+// instance and attaches it to the application under kTizenAppWidgetFullKey.
+// Returns false and fills 'error' when the widget key is missing or is not
+// a dictionary, or when any app-widget entry fails to parse.
+bool TizenAppWidgetHandler::Parse(scoped_refptr<ApplicationData> application,
+    base::string16* error) {
+  const Manifest* manifest = application->GetManifest();
+  DCHECK(manifest);
+
+  if (!manifest->HasPath(keys::kTizenWidgetKey)) {
+    SetError(kErrMsgNoMandatoryKey, keys::kTizenWidgetKey, error);
+    return false;
+  }
+
+  const base::DictionaryValue* dict = nullptr;
+  if (!manifest->GetDictionary(keys::kTizenWidgetKey, &dict) || !dict) {
+    SetError(kErrMsgInvalidDictionary, keys::kTizenWidgetKey, error);
+    return false;
+  }
+
+  TizenAppWidgetVector app_widgets;
+
+  // app-widget entries themselves are optional (mandatory == false).
+  if (!ParseEach(*dict, keys::kTizenAppWidgetKey,
+      false, ParseAppWidget, &app_widgets, error))
+    return false;
+
+  // Ownership of the info object is transferred to the application data.
+  scoped_ptr<TizenAppWidgetInfo> info(new TizenAppWidgetInfo(app_widgets));
+  application->SetManifestData(keys::kTizenAppWidgetFullKey, info.release());
+
+  return true;
+}
+
+// Validates the parsed app-widget data attached by Parse():
+// id format/uniqueness, exactly one primary widget, update period domain,
+// presence of a label, and path/url validity of icon, content and drop view.
+bool TizenAppWidgetHandler::Validate(
+    scoped_refptr<const ApplicationData> application,
+    std::string* error) const {
+  const TizenAppWidgetInfo* app_widget_info =
+      static_cast<const TizenAppWidgetInfo*>(
+          application->GetManifestData(keys::kTizenAppWidgetFullKey));
+  const TizenApplicationInfo* app_info =
+      static_cast<const TizenApplicationInfo*>(
+          application->GetManifestData(keys::kTizenApplicationKey));
+
+  if (!app_widget_info) {
+    SetError(kErrMsgAppWidgetInfoNotFound, "", error);
+    return false;
+  }
+  if (!app_info) {
+    SetError(kErrMsgApplicationInfoNotFound, "", error);
+    return false;
+  }
+
+  const TizenAppWidgetVector& app_widgets = app_widget_info->app_widgets();
+
+  if (!ValidateEachId(app_widgets, app_info->id(), error))
+    return false;
+
+  if (!ValidateEachPrimary(app_widgets, error))
+    return false;
+
+  for (const TizenAppWidget& app_widget : app_widgets) {
+    // update_period holds at most one element (see ParseAppWidget);
+    // when present it must be at least 1800.
+    if (!app_widget.update_period.empty()
+        && app_widget.update_period.front() < 1800) {
+      SetError(kErrMsgUpdatePeriodOutOfDomain,
+          base::DoubleToString(app_widget.update_period.front()), error);
+      return false;
+    }
+
+    // At least one label (default or localized) is required.
+    if (app_widget.label.default_value.empty()
+        && app_widget.label.lang_value_map.empty()) {
+      SetError(kErrMsgNoLabel, "", error);
+      return false;
+    }
+
+    if (!app_widget.icon_src.empty()
+        && !IsValidPathOrUrl(app_widget.icon_src)) {
+      SetError(kErrMsgInvalidIconSrc, app_widget.icon_src, error);
+      return false;
+    }
+
+    if (!IsValidPathOrUrl(app_widget.content_src)) {
+      SetError(kErrMsgInvalidContentSrc, app_widget.content_src, error);
+      return false;
+    }
+
+    if (!ValidateContentSize(app_widget.content_size, error))
+      return false;
+
+    if (!app_widget.content_drop_view.empty()) {
+      const TizenAppWidgetDropView& drop_view
+          = app_widget.content_drop_view.front();
+
+      if (!IsValidPathOrUrl(drop_view.src)) {
+        SetError(kErrMsgInvalidContentDropViewSrc, drop_view.src, error);
+        return false;
+      }
+
+      // Height must be within <1, 380>.
+      // NOTE(review): drop_view.width is never validated here — confirm
+      // whether the spec constrains it.
+      if (drop_view.height < 1 || drop_view.height > 380) {
+        SetError(kErrMsgContentDropViewHeightOutOfDomain,
+            base::IntToString(drop_view.height), error);
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Returns the single manifest key this handler is registered for.
+std::vector<std::string> TizenAppWidgetHandler::Keys() const {
+  return std::vector<std::string>(1, keys::kTizenAppWidgetFullKey);
+}
+
+} // namespace application
+} // namespace xwalk
--- /dev/null
+// Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef XWALK_APPLICATION_COMMON_MANIFEST_HANDLERS_TIZEN_APPWIDGET_HANDLER_H_
+#define XWALK_APPLICATION_COMMON_MANIFEST_HANDLERS_TIZEN_APPWIDGET_HANDLER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "xwalk/application/common/application_data.h"
+#include "xwalk/application/common/manifest_handler.h"
+
+namespace xwalk {
+namespace application {
+
+// Maps a language tag to its localized label text.
+typedef std::map<std::string, std::string> TizenAppWidgetLabelLangValueMap;
+
+// Label of an app-widget box; at least one of the two members is set.
+struct TizenAppWidgetLabel {
+  // may be empty
+  std::string default_value;
+
+  // may be empty if default is set
+  TizenAppWidgetLabelLangValueMap lang_value_map;
+};
+
+// Supported box sizes; see StringToSizeType for the textual forms.
+enum TizenAppWidgetSizeType {
+  k1x1, k2x1, k2x2
+};
+
+// One box-size entry of an app-widget.
+struct TizenAppWidgetSize {
+  // mandatory
+  TizenAppWidgetSizeType type;
+
+  // optional, relative to web app directory
+  std::string preview;
+
+  // optional, default: true
+  bool use_decoration;
+};
+
+typedef std::vector<TizenAppWidgetSize> TizenAppWidgetSizeVector;
+
+// Drop view (pd) of an app-widget box.
+struct TizenAppWidgetDropView {
+  // mandatory, relative to web app directory or remote URL
+  std::string src;
+
+  // mandatory
+  int width;
+
+  // mandatory, <1, 380>
+  int height;
+};
+
+typedef std::vector<TizenAppWidgetDropView> TizenAppWidgetDropViewVector;
+
+// One app-widget entry parsed from the manifest.
+struct TizenAppWidget {
+  // mandatory, unique, must start with application id and end with label
+  // separated with dot, the label can contain only 0-9, a-z, A-Z
+  std::string id;
+
+  // mandatory, only one may contain true
+  bool primary;
+
+  // optional(0-1), min: 1800.0, default: no update
+  std::vector<double> update_period;
+
+  // optional, default: false
+  bool auto_launch;
+
+  // box label, multiple(1+)
+  TizenAppWidgetLabel label;
+
+  // box icon, optional(0-1), src, mandatory, relative to web app directory
+  std::string icon_src;
+
+  // box content, mandatory(1) -[
+
+  // mandatory, relative to web app directory or remote URL
+  std::string content_src;
+
+  // optional, default: false
+  bool content_mouse_event;
+
+  // optional, default: true
+  bool content_touch_effect;
+
+  // box size, mandatory(1-3), 1x1 must exist
+  TizenAppWidgetSizeVector content_size;
+
+  // drop view, optional(0-1)
+  TizenAppWidgetDropViewVector content_drop_view;
+
+  // ]- box content
+};
+
+typedef std::vector<TizenAppWidget> TizenAppWidgetVector;
+
+// Manifest data holding all parsed app-widgets of an application.
+class TizenAppWidgetInfo : public ApplicationData::ManifestData {
+ public:
+  explicit TizenAppWidgetInfo(const TizenAppWidgetVector& app_widgets);
+  virtual ~TizenAppWidgetInfo();
+
+  // Read-only access to the parsed app-widget list.
+  const TizenAppWidgetVector& app_widgets() const {
+    return app_widgets_;
+  }
+
+ private:
+  // multiple(0+)
+  TizenAppWidgetVector app_widgets_;
+};
+
+// Manifest handler parsing and validating the Tizen app-widget section.
+class TizenAppWidgetHandler : public ManifestHandler {
+ public:
+  TizenAppWidgetHandler();
+  virtual ~TizenAppWidgetHandler();
+
+  bool Parse(scoped_refptr<ApplicationData> application,
+             base::string16* error) override;
+  bool Validate(scoped_refptr<const ApplicationData> application,
+                std::string* error) const override;
+  std::vector<std::string> Keys() const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TizenAppWidgetHandler);
+};
+
+} // namespace application
+} // namespace xwalk
+
+#endif // XWALK_APPLICATION_COMMON_MANIFEST_HANDLERS_TIZEN_APPWIDGET_HANDLER_H_
#define XWALK_APPLICATION_COMMON_PACKAGE_PACKAGE_H_
#include <string>
-#include <vector>
#include "base/files/file_path.h"
#include "base/files/scoped_file.h"
#include "xwalk/application/common/package/wgt_package.h"
+#include <string>
+
#include "base/file_util.h"
#include "base/files/scoped_file.h"
#include "third_party/libxml/chromium/libxml_utils.h"
namespace xwalk {
namespace application {
+
namespace {
+
#if defined(OS_TIZEN)
const char kIdNodeName[] = "application";
#else
const char kIdNodeName[] = "widget";
#endif
-}
+
+} // namespace
WGTPackage::~WGTPackage() {
}
WGTPackage::WGTPackage(const base::FilePath& path)
- : Package(path) {
+ : Package(path) {
if (!base::PathExists(path))
return;
manifest_type_ = Manifest::TYPE_WIDGET;
if (!value.empty()) {
#if defined(OS_TIZEN)
id_ = value;
- is_valid_ =
- SignatureValidator::Check(extracted_path) != SignatureValidator::INVALID;
+ is_valid_ = SignatureValidator::Check(extracted_path) !=
+ SignatureValidator::INVALID;
#else
id_ = GenerateId(value);
is_valid_ = true;
#endif
}
scoped_ptr<base::ScopedFILE> file(
- new base::ScopedFILE(base::OpenFile(path, "rb")));
+ new base::ScopedFILE(base::OpenFile(path, "rb")));
file_ = file.Pass();
}
+
} // namespace application
} // namespace xwalk
#ifndef XWALK_APPLICATION_COMMON_PACKAGE_WGT_PACKAGE_H_
#define XWALK_APPLICATION_COMMON_PACKAGE_WGT_PACKAGE_H_
-#include <string>
-#include <vector>
-
#include "base/files/file_path.h"
-#include "base/memory/scoped_ptr.h"
#include "xwalk/application/common/package/package.h"
namespace xwalk {
#include "xwalk/application/common/package/xpk_package.h"
+#include <string>
+
#include "base/file_util.h"
#include "base/files/scoped_file.h"
#include "crypto/signature_verifier.h"
}
XPKPackage::XPKPackage(const base::FilePath& path)
- : Package(path) {
+ : Package(path) {
if (!base::PathExists(path))
return;
manifest_type_ = Manifest::TYPE_MANIFEST;
is_valid_ = false;
if (len < sizeof(header_))
return;
- if (!strncmp(XPKPackage::kXPKPackageHeaderMagic,
- header_.magic,
+ if (!strncmp(XPKPackage::kXPKPackageHeaderMagic, header_.magic,
sizeof(header_.magic)) &&
header_.key_size > 0 &&
header_.key_size <= XPKPackage::kMaxPublicKeySize &&
header_.signature_size > 0 &&
header_.signature_size <= XPKPackage::kMaxSignatureKeySize) {
- is_valid_ = true;
- zip_addr_ = sizeof(header_) + header_.key_size + header_.signature_size;
- fseek(file_->get(), sizeof(header_), SEEK_SET);
- key_.resize(header_.key_size);
- size_t len = fread(
- &key_.front(), sizeof(uint8), header_.key_size, file_->get());
- if (len < header_.key_size)
- is_valid_ = false;
-
- signature_.resize(header_.signature_size);
- len = fread(&signature_.front(),
- sizeof(uint8),
- header_.signature_size,
- file_->get());
- if (len < header_.signature_size)
- is_valid_ = false;
-
- if (!VerifySignature())
- is_valid_ = false;
-
- std::string public_key =
- std::string(reinterpret_cast<char*>(&key_.front()), key_.size());
- id_ = GenerateId(public_key);
+ is_valid_ = true;
+ zip_addr_ = sizeof(header_) + header_.key_size + header_.signature_size;
+ fseek(file_->get(), sizeof(header_), SEEK_SET);
+ key_.resize(header_.key_size);
+ size_t len = fread(&key_.front(), sizeof(uint8), header_.key_size,
+ file_->get());
+ if (len < header_.key_size)
+ is_valid_ = false;
+
+ signature_.resize(header_.signature_size);
+ len = fread(&signature_.front(), sizeof(uint8), header_.signature_size,
+ file_->get());
+ if (len < header_.signature_size)
+ is_valid_ = false;
+
+ if (!VerifySignature())
+ is_valid_ = false;
+
+ std::string public_key =
+ std::string(reinterpret_cast<char*>(&key_.front()), key_.size());
+ id_ = GenerateId(public_key);
}
}
#ifndef XWALK_APPLICATION_COMMON_PACKAGE_XPK_PACKAGE_H_
#define XWALK_APPLICATION_COMMON_PACKAGE_XPK_PACKAGE_H_
-#include <string>
#include <vector>
#include "base/files/file_path.h"
-#include "base/memory/scoped_ptr.h"
#include "xwalk/application/common/package/package.h"
namespace xwalk {
typedef ail_cb_ret_e (*PropertyCallback)(const ail_appinfo_h, void*, uid_t);
-ail_cb_ret_e callback_x_slp_exe_path(const ail_appinfo_h appinfo,
- void* user_data, uid_t /*uid*/) {
- char* package_exec;
- ail_appinfo_get_str(appinfo, AIL_PROP_X_SLP_EXE_PATH, &package_exec);
- if (!package_exec)
- return AIL_CB_RET_CONTINUE;
-
- std::string* x_slp_exe_path = static_cast<std::string*>(user_data);
- *x_slp_exe_path = package_exec;
- return AIL_CB_RET_CANCEL;
-}
-
-ail_cb_ret_e callback_installed_time(const ail_appinfo_h appinfo,
- void* user_data, uid_t /*uid*/) {
- int* installed_time = static_cast<int*>(user_data);
- ail_appinfo_get_int(appinfo, AIL_PROP_X_SLP_INSTALLEDTIME_INT,
- installed_time);
- return AIL_CB_RET_CANCEL;
-}
+char kFieldInstalledTime[] = AIL_PROP_X_SLP_INSTALLEDTIME_INT;
+char kFieldPackageType[] = AIL_PROP_X_SLP_PACKAGETYPE_STR;
+char kFieldExePath[] = AIL_PROP_X_SLP_EXE_PATH;
+
+template<const char* field> struct CallbackForStr {
+ static ail_cb_ret_e callback(const ail_appinfo_h appinfo,
+ void* user_data, uid_t /*uid*/) {
+ char* str_name;
+ ail_appinfo_get_str(appinfo, field, &str_name);
+ if (!str_name)
+ return AIL_CB_RET_CONTINUE;
+
+ std::string* data = static_cast<std::string*>(user_data);
+ *data = str_name;
+ return AIL_CB_RET_CANCEL;
+ }
+};
+
+template<const char* field> struct CallbackForInt {
+ static ail_cb_ret_e callback(const ail_appinfo_h appinfo,
+ void* user_data, uid_t /*uid*/) {
+ int* data = static_cast<int*>(user_data);
+ ail_appinfo_get_int(appinfo, field, data);
+ return AIL_CB_RET_CANCEL;
+ }
+};
void GetProperty(const std::string& id,
const char* type,
return;
}
-
if (uid != GLOBAL_USER)
ail_filter_list_usr_appinfo_foreach(filter, callback,
user_data, uid);
std::string x_slp_exe_path;
GetProperty(app_id,
type,
- callback_x_slp_exe_path,
- static_cast<void*>(&x_slp_exe_path));
+ CallbackForStr<kFieldExePath>::callback,
+ &x_slp_exe_path);
if (x_slp_exe_path.empty()) {
return base::FilePath();
return GetPath(pkg_id, AIL_PROP_X_SLP_PKGID_STR);
}
+std::string GetPackageType(const std::string& pkg_id) {
+ std::string type;
+ GetProperty(pkg_id,
+ AIL_PROP_X_SLP_PKGID_STR,
+ CallbackForStr<kFieldPackageType>::callback,
+ &type);
+ return type;
+}
+
base::Time GetApplicationInstallationTime(const std::string& app_id) {
int installed_time = 0; // seconds since epoch
GetProperty(app_id,
AIL_PROP_X_SLP_APPID_STR,
- callback_installed_time,
- static_cast<void*>(&installed_time));
+ CallbackForInt<kFieldInstalledTime>::callback,
+ &installed_time);
return base::Time::FromTimeT(installed_time);
}
base::FilePath GetApplicationPath(const std::string& app_id);
base::FilePath GetPackagePath(const std::string& pkg_id);
+std::string GetPackageType(const std::string& pkg_id);
+
base::Time GetApplicationInstallationTime(const std::string& app_id);
-} // application
-} // xwalk
+} // namespace application
+} // namespace xwalk
#endif // XWALK_APPLICATION_COMMON_TIZEN_PACKAGE_QUERY_H_
'manifest_handlers/navigation_handler.h',
'manifest_handlers/tizen_application_handler.cc',
'manifest_handlers/tizen_application_handler.h',
+ 'manifest_handlers/tizen_appwidget_handler.cc',
+ 'manifest_handlers/tizen_appwidget_handler.h',
'manifest_handlers/tizen_metadata_handler.cc',
'manifest_handlers/tizen_metadata_handler.h',
'manifest_handlers/tizen_setting_handler.cc',
}
this.getItem = function(itemKey) {
- return _itemStorage[String(itemKey)];
+ var item = _itemStorage[String(itemKey)];
+ return item !== undefined ? item : null;
}
this.setItem = function(itemKey, itemValue) {
}
}
- var props = {};
var newException;
try {
- document.removeChild({})
+ document.removeChild(document.createTextNode(""));
} catch (e) {
- newException = Object.create(e)
+ newException = Object.create(Object.getPrototypeOf(e));
}
- var proto = newException.__proto__;
+ var props = {
+ value: null,
+ writable: true,
+ enumerable: true,
+ configurable: true
+ };
- props = Object.getOwnPropertyDescriptor(proto, "name");
props.value = _name;
Object.defineProperty(newException, "name", props);
-
- props = Object.getOwnPropertyDescriptor(proto, "code");
props.value = _code;
Object.defineProperty(newException, "code", props);
-
- props = Object.getOwnPropertyDescriptor(proto, "message");
props.value = _message;
Object.defineProperty(newException, "message", props);
props.value = function() {
- return _name + ": " + _message;
+ return this.name + ": " + this.code;
}
Object.defineProperty(newException, "toString", props);
'../../../build/system.gyp:tizen',
],
'sources': [
+ '../tizen/xwalk_tizen_user.cc',
+ '../tizen/xwalk_tizen_user.h',
'../../../runtime/common/xwalk_paths.cc',
'../../../runtime/common/xwalk_paths.h',
'../../../runtime/common/xwalk_system_locale.cc',
#include "base/path_service.h"
#include "xwalk/application/common/tizen/application_storage.h"
+#include "xwalk/application/tools/tizen/xwalk_tizen_user.h"
#include "xwalk/runtime/common/xwalk_paths.h"
#include "dbus/bus.h"
} // namespace
int main(int argc, char* argv[]) {
+#if defined(OS_TIZEN)
+ if (xwalk_tizen_check_user_for_xwalkctl())
+ exit(1);
+#endif
GOptionContext* context = g_option_context_new("- Crosswalk Setter");
g_option_context_add_main_entries(context, entries, NULL);
GError* error = nullptr;
int PkgmgrBackendPlugin::DetailedInfoPkg(
const std::string& pkg_path,
package_manager_pkg_detail_info_t* pkg_detail_info) {
- if (!base::PathExists(base::FilePath(pkg_path))) {
+ base::FilePath path(pkg_path);
+ if (!base::PathExists(path)) {
return kPkgmgrPluginFalse;
}
return kPkgmgrPluginFalse;
}
- SaveDetailInfo(app_data, pkg_detail_info);
+ SaveDetailInfo(app_data, pkg_detail_info,
+ !path.Extension().empty() ? path.Extension() : std::string("unknown"));
return kPkgmgrPluginTrue;
}
int PkgmgrBackendPlugin::IsAppInstalled(const std::string& pkgid) {
// this will fetch app_id if exists
std::string app_id = xwalk::application::PkgIdToAppId(pkgid);
- return app_id.empty() ? kPkgmgrPluginFalse : kPkgmgrPluginTrue;
+
+ if (app_id.empty())
+ return kPkgmgrPluginFalse;
+
+ // backendlib handles both xpk and wgt
+ // check if plugin was loaded for given type of package
+ std::string type_from_db = xwalk::application::GetPackageType(pkgid);
+
+ return type() == type_from_db ? kPkgmgrPluginTrue : kPkgmgrPluginFalse;
}
int PkgmgrBackendPlugin::AppsList(package_manager_pkg_info_t** list,
return kPkgmgrPluginTrue;
}
+void PkgmgrBackendPlugin::SetLoadSet(pkg_plugin_set* set) {
+ set_ = set;
+}
+
+std::string PkgmgrBackendPlugin::type() const {
+ return std::string(set_->pkg_type);
+}
+
PkgmgrBackendPlugin::PkgmgrBackendPlugin() {
base::FilePath data_path;
xwalk::RegisterPathProvider();
void PkgmgrBackendPlugin::SaveInfo(
scoped_refptr<xwalk::application::ApplicationData> app_data,
- package_manager_pkg_info_t* pkg_detail_info) {
- strncpy(pkg_detail_info->pkg_type, "xpk", PKG_TYPE_STRING_LEN_MAX - 1);
- strncpy(pkg_detail_info->pkg_name, app_data->GetPackageID().c_str(),
- PKG_NAME_STRING_LEN_MAX - 1);
- strncpy(pkg_detail_info->pkgid, app_data->GetPackageID().c_str(),
- PKG_NAME_STRING_LEN_MAX - 1);
+ package_manager_pkg_info_t* pkg_detail_info,
+ const std::string& force_type) {
+ std::string pkg_id = app_data->GetPackageID();
+ if (force_type.empty())
+ strncpy(pkg_detail_info->pkg_type,
+ xwalk::application::GetPackageType(pkg_id).c_str(),
+ PKG_TYPE_STRING_LEN_MAX - 1);
+ else // force package type
+ strncpy(pkg_detail_info->pkg_type,
+ force_type.c_str(),
+ PKG_TYPE_STRING_LEN_MAX - 1);
+ strncpy(pkg_detail_info->pkg_name, pkg_id.c_str(),
+ PKG_NAME_STRING_LEN_MAX - 1);
+ strncpy(pkg_detail_info->pkgid, pkg_id.c_str(),
+ PKG_NAME_STRING_LEN_MAX - 1);
if (app_data->Version() != NULL) {
strncpy(pkg_detail_info->version, app_data->Version()->GetString().c_str(),
PKG_VERSION_STRING_LEN_MAX - 1);
void PkgmgrBackendPlugin::SaveDetailInfo(
scoped_refptr<xwalk::application::ApplicationData> app_data,
- package_manager_pkg_detail_info_t* pkg_detail_info) {
- strncpy(pkg_detail_info->pkg_type, "xpk", PKG_TYPE_STRING_LEN_MAX - 1);
- strncpy(pkg_detail_info->pkg_name, app_data->GetPackageID().c_str(),
- PKG_NAME_STRING_LEN_MAX - 1);
- strncpy(pkg_detail_info->pkgid, app_data->GetPackageID().c_str(),
- PKG_NAME_STRING_LEN_MAX - 1);
+ package_manager_pkg_detail_info_t* pkg_detail_info,
+ const std::string& force_type) {
+ std::string pkg_id = app_data->GetPackageID();
+ if (force_type.empty())
+ strncpy(pkg_detail_info->pkg_type,
+ xwalk::application::GetPackageType(pkg_id).c_str(),
+ PKG_TYPE_STRING_LEN_MAX - 1);
+ else // force package type
+ strncpy(pkg_detail_info->pkg_type,
+ force_type.c_str(),
+ PKG_TYPE_STRING_LEN_MAX - 1);
+ strncpy(pkg_detail_info->pkg_name, pkg_id.c_str(),
+ PKG_NAME_STRING_LEN_MAX - 1);
+ strncpy(pkg_detail_info->pkgid, pkg_id.c_str(),
+ PKG_NAME_STRING_LEN_MAX - 1);
if (app_data->Version() != NULL) {
strncpy(pkg_detail_info->version, app_data->Version()->GetString().c_str(),
PKG_VERSION_STRING_LEN_MAX - 1);
}
strncpy(pkg_detail_info->pkg_description, app_data->Description().c_str(),
- PKG_VALUE_STRING_LEN_MAX - 1);
+ PKG_VALUE_STRING_LEN_MAX - 1);
// xpk do not have this key in manifest
if (app_data->manifest_type() == Manifest::TYPE_WIDGET) {
pkg_detail_info->data_size = data_size;
strncpy(pkg_detail_info->optional_id, app_data->GetPackageID().c_str(),
- PKG_NAME_STRING_LEN_MAX - 1);
+ PKG_NAME_STRING_LEN_MAX - 1);
pkg_detail_info->pkg_optional_info = NULL;
}
int IsAppInstalled(const std::string& pkgid);
int AppsList(package_manager_pkg_info_t** list, int* count);
+ void SetLoadSet(pkg_plugin_set* set);
+
+ std::string type() const;
+
private:
PkgmgrBackendPlugin();
void SaveInfo(scoped_refptr<xwalk::application::ApplicationData> app_data,
- package_manager_pkg_info_t* pkg_detail_info);
+ package_manager_pkg_info_t* pkg_detail_info,
+ const std::string& force_type = std::string());
void SaveDetailInfo(
scoped_refptr<xwalk::application::ApplicationData> app_data,
- package_manager_pkg_detail_info_t* pkg_detail_info);
+ package_manager_pkg_detail_info_t* pkg_detail_info,
+ const std::string& force_type = std::string());
scoped_refptr<xwalk::application::ApplicationData> GetApplicationDataFromPkg(
const std::string& pkg_path, base::ScopedTempDir* dir);
friend struct DefaultSingletonTraits<PkgmgrBackendPlugin>;
scoped_ptr<xwalk::application::ApplicationStorage> storage_;
+ pkg_plugin_set* set_;
DISALLOW_COPY_AND_ASSIGN(PkgmgrBackendPlugin);
};
set->get_pkg_detail_info_from_package =
pkg_plugin_get_app_detail_info_from_package;
+  // FIXME: pkgmgr fills in pkg_type only after 'pkg_plugin_on_load' returns,
+  // so we store the load set here in order to later recover the package type
+  // (wgt or xpk) this backendlib was loaded for.
+ PkgmgrBackendPlugin::GetInstance()->SetLoadSet(set);
+
return 0;
}
void pkg_plugin_on_unload() {
- LOG(INFO) << "Crosswalk backend plugin - unload";
+ LOG(INFO) << "Crosswalk backend plugin ("
+ << PkgmgrBackendPlugin::GetInstance()->type()
+ << ") - unload";
}
int pkg_plugin_get_app_detail_info(
const char *pkg_name, package_manager_pkg_detail_info_t *pkg_detail_info) {
- LOG(INFO) << "Crosswalk backend plugin - pkg_plugin_get_app_detail_info";
+ LOG(INFO) << "Crosswalk backend plugin ("
+ << PkgmgrBackendPlugin::GetInstance()->type()
+ << ") - pkg_plugin_get_app_detail_info";
return PkgmgrBackendPlugin::GetInstance()->DetailedInfo(pkg_name,
pkg_detail_info);
int pkg_plugin_get_app_detail_info_from_package(
const char *pkg_path, package_manager_pkg_detail_info_t *pkg_detail_info) {
- LOG(INFO)
- << "Crosswalk backend plugin - pkg_plugin_get_app_detail_info_from_package";
+ LOG(INFO) << "Crosswalk backend plugin ("
+ << PkgmgrBackendPlugin::GetInstance()->type()
+ << ") - pkg_plugin_get_app_detail_info_from_package";
return PkgmgrBackendPlugin::GetInstance()->DetailedInfoPkg(pkg_path,
pkg_detail_info);
}
int pkg_plugin_app_is_installed(const char *pkg_name) {
- LOG(INFO) << "Crosswalk backend plugin - pkg_plugin_app_is_installed";
+ LOG(INFO) << "Crosswalk backend plugin ("
+ << PkgmgrBackendPlugin::GetInstance()->type()
+ << ") - pkg_plugin_app_is_installed";
return PkgmgrBackendPlugin::GetInstance()->IsAppInstalled(pkg_name);
}
const char* option,
package_manager_pkg_info_t** list,
int* count) {
- LOG(INFO) << "Crosswalk backend plugin - pkg_plugin_get_installed_apps_list";
+ LOG(INFO) << "Crosswalk backend plugin ("
+ << PkgmgrBackendPlugin::GetInstance()->type()
+ << ") - pkg_plugin_get_installed_apps_list";
return PkgmgrBackendPlugin::GetInstance()->AppsList(list, count);
}
xml_writer.StartElement("manifest");
xml_writer.AddAttribute("xmlns", "http://tizen.org/ns/packages");
xml_writer.AddAttribute("package", package_id);
- xml_writer.AddAttribute("type", "wgt");
+ xml_writer.AddAttribute("type",
+ application->GetManifest()->type() == Manifest::TYPE_MANIFEST
+ ? "xpk" : "wgt");
xml_writer.AddAttribute("version", application->VersionString());
xml_writer.WriteElement("label", application->Name());
xml_writer.WriteElement("description", application->Description());
base::FilePath icon =
icon_name.empty() ? kDefaultIcon : app_dir.AppendASCII(icon_name);
- // args for pkgmgr
- const char* pkgmgr_argv[5];
- pkgmgr_argv[2] = "-k";
- pkgmgr_argv[3] = key_.c_str();
- pkgmgr_argv[4] = "-q";
-
- PlatformInstaller platform_installer(app_id);
-
if (xml_path.empty() || icon.empty()) {
LOG(ERROR) << "Xml or icon path is empty";
return false;
}
- if (!key_.empty()) {
- pkgmgr_argv[0] = "-i";
- pkgmgr_argv[1] = app_id.c_str(); // this value is ignored by pkgmgr
- platform_installer.InitializePkgmgrSignal((quiet_ ? 5 : 4), pkgmgr_argv);
- }
+ PlatformInstaller platform_installer(app_id);
+
+ InitializePkgmgrSignal(&platform_installer, "-i", app_id);
if (!platform_installer.InstallApplication(xml_path, icon))
return false;
}
bool PackageInstaller::PlatformUninstall(const std::string& app_id) {
- base::FilePath data_dir;
- CHECK(PathService::Get(xwalk::DIR_DATA_PATH, &data_dir));
-
- // args for pkgmgr
- const char* pkgmgr_argv[5];
- pkgmgr_argv[2] = "-k";
- pkgmgr_argv[3] = key_.c_str();
- pkgmgr_argv[4] = "-q";
-
PlatformInstaller platform_installer(app_id);
- if (!key_.empty()) {
- pkgmgr_argv[0] = "-d";
- pkgmgr_argv[1] = app_id.c_str(); // this value is ignored by pkgmgr
- platform_installer.InitializePkgmgrSignal((quiet_ ? 5 : 4), pkgmgr_argv);
- }
+ InitializePkgmgrSignal(&platform_installer, "-d", app_id);
return platform_installer.UninstallApplication();
}
base::FilePath icon =
icon_name.empty() ? kDefaultIcon : app_dir.AppendASCII(icon_name);
- // args for pkgmgr
- const char* pkgmgr_argv[5];
- pkgmgr_argv[2] = "-k";
- pkgmgr_argv[3] = key_.c_str();
- pkgmgr_argv[4] = "-q";
-
PlatformInstaller platform_installer(app_id);
- if (!key_.empty()) {
- pkgmgr_argv[0] = "-i";
- pkgmgr_argv[1] = app_id.c_str(); // this value is ignored by pkgmgr
- platform_installer.InitializePkgmgrSignal((quiet_ ? 5 : 4), pkgmgr_argv);
- }
+ InitializePkgmgrSignal(&platform_installer, "-i", app_id);
- if (!platform_installer.InstallApplication(new_xml_path, icon))
+ if (!platform_installer.UpdateApplication(new_xml_path, icon))
return false;
app_dir_cleaner.Dismiss();
}
bool PackageInstaller::PlatformReinstall(const base::FilePath& path) {
- // args for pkgmgr
- const char* pkgmgr_argv[5];
- pkgmgr_argv[2] = "-k";
- pkgmgr_argv[3] = key_.c_str();
- pkgmgr_argv[4] = "-q";
-
PlatformInstaller platform_installer;
- if (!key_.empty()) {
- pkgmgr_argv[0] = "-r";
- pkgmgr_argv[1] = path.value().c_str(); // this value is ignored by pkgmgr
- platform_installer.InitializePkgmgrSignal((quiet_ ? 5 : 4), pkgmgr_argv);
- }
+ InitializePkgmgrSignal(&platform_installer, "-r", path.value());
return platform_installer.ReinstallApplication();
}
}
}
}
+
+// Builds the pkgmgr argv for |action| (e.g. "-i" install/update, "-d"
+// uninstall, "-r" reinstall) and forwards it to |platform_installer|.
+// No-op when |key_| is empty, i.e. when this installer was not launched
+// through pkgmgr and thus has no signal key to report back with.
+void PackageInstaller::InitializePkgmgrSignal(
+    PlatformInstaller* platform_installer, const std::string& action,
+    const std::string& action_arg) {
+  DCHECK(platform_installer);
+  DCHECK(!action.empty());
+  DCHECK(!action_arg.empty());
+
+  if (key_.empty())
+    return;
+
+  // The c_str() pointers below stay valid for the duration of the call
+  // because |action|, |action_arg| and |key_| outlive it.
+  const char* pkgmgr_argv[5];
+  pkgmgr_argv[0] = action.c_str();
+  pkgmgr_argv[1] = action_arg.c_str(); // this value is ignored by pkgmgr
+  pkgmgr_argv[2] = "-k";
+  pkgmgr_argv[3] = key_.c_str();
+  pkgmgr_argv[4] = "-q";
+
+  // Pass all 5 args (including trailing "-q") in quiet mode, 4 otherwise.
+  platform_installer->InitializePkgmgrSignal((quiet_ ? 5 : 4), pkgmgr_argv);
+}
// Copyright (c) 2014 Intel Corporation. All rights reserved.
+// Copyright (c) 2014 Samsung Electronics Co., Ltd All Rights Reserved
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
} // namespace application
} // namespace xwalk
+class PlatformInstaller;
+
class PackageInstaller {
public:
static scoped_ptr<PackageInstaller> Create(
xwalk::application::ApplicationStorage* storage_;
bool quiet_;
std::string key_;
-};
-
+ private:
+ void InitializePkgmgrSignal(PlatformInstaller* platform_installer,
+ const std::string& action, const std::string& action_arg);
+};
#endif // XWALK_APPLICATION_TOOLS_TIZEN_XWALK_PACKAGE_INSTALLER_H_
from common_function import RemoveUnusedFilesInReleaseMode
from xml.dom.minidom import Document
-LIBRARY_PROJECT_NAME = 'xwalk_core_library'
XWALK_CORE_SHELL_APK = 'xwalk_core_shell_apk'
def AddGeneratorOptions(option_parser):
option_parser.add_option('-t', dest='target',
help='Product out target directory.',
type='string')
+ option_parser.add_option('--src-package', action='store_true',
+ default=False,
+ help='Use java sources instead of java libs.')
-def CleanLibraryProject(out_dir):
- out_project_path = os.path.join(out_dir, LIBRARY_PROJECT_NAME)
- if os.path.exists(out_project_path):
- for item in os.listdir(out_project_path):
- sub_path = os.path.join(out_project_path, item)
+def CleanLibraryProject(out_project_dir):
+ if os.path.exists(out_project_dir):
+ for item in os.listdir(out_project_dir):
+ sub_path = os.path.join(out_project_dir, item)
if os.path.isdir(sub_path):
shutil.rmtree(sub_path)
elif os.path.isfile(sub_path):
os.remove(sub_path)
-def CopyProjectFiles(project_source, out_dir):
+def CopyProjectFiles(project_source, out_project_dir):
"""cp xwalk/build/android/xwalkcore_library_template/<file>
out/Release/xwalk_core_library/<file>
"""
]
for f in files_to_copy:
source_file = os.path.join(template_dir, f)
- target_file = os.path.join(out_dir, LIBRARY_PROJECT_NAME, f)
+ target_file = os.path.join(out_project_dir, f)
shutil.copy2(source_file, target_file)
-def CopyJSBindingFiles(project_source, out_dir):
+def CopyJSBindingFiles(project_source, out_project_dir):
print 'Copying js binding files...'
- jsapi_dir = os.path.join(out_dir,
- LIBRARY_PROJECT_NAME,
- 'res',
- 'raw')
+ jsapi_dir = os.path.join(out_project_dir, 'res', 'raw')
if not os.path.exists(jsapi_dir):
os.makedirs(jsapi_dir)
shutil.copyfile(source_file, target_file)
-def CopyBinaries(out_dir):
+def CopyBinaries(out_dir, out_project_dir, src_package):
"""cp out/Release/<pak> out/Release/xwalk_core_library/res/raw/<pak>
cp out/Release/lib.java/<lib> out/Release/xwalk_core_library/libs/<lib>
cp out/Release/xwalk_core_shell_apk/libs/*
print 'Copying binaries...'
# Copy assets.
- res_raw_dir = os.path.join(
- out_dir, LIBRARY_PROJECT_NAME, 'res', 'raw')
- res_value_dir = os.path.join(
- out_dir, LIBRARY_PROJECT_NAME, 'res', 'values')
+ res_raw_dir = os.path.join(out_project_dir, 'res', 'raw')
+ res_value_dir = os.path.join(out_project_dir, 'res', 'values')
if not os.path.exists(res_raw_dir):
os.mkdir(res_raw_dir)
if not os.path.exists(res_value_dir):
pak_list_xml.writexml(pak_list_file, newl='\n', encoding='utf-8')
pak_list_file.close()
- # Copy jar files to libs.
- libs_dir = os.path.join(out_dir, LIBRARY_PROJECT_NAME, 'libs')
+ libs_dir = os.path.join(out_project_dir, 'libs')
if not os.path.exists(libs_dir):
os.mkdir(libs_dir)
- libs_to_copy = [
- 'xwalk_core_library_java_app_part.jar',
- 'xwalk_core_library_java_library_part.jar',
- ]
+ # Copy jar files to libs.
+ if src_package:
+ libs_to_copy = [
+ 'eyesfree_java.jar',
+ 'jsr_305_javalib.jar',
+ ]
+ else:
+ libs_to_copy = [
+ 'xwalk_core_library_java_app_part.jar',
+ 'xwalk_core_library_java_library_part.jar',
+ ]
for lib in libs_to_copy:
source_file = os.path.join(out_dir, 'lib.java', lib)
source_file = os.path.abspath(os.path.join(dirname, filename))
target_file = os.path.join(filepath, filename)
shutil.copyfile(source_file, target_file)
- return
-
+ return
+
-def CopyResources(project_source, out_dir):
+def CopyResources(project_source, out_dir, out_project_dir):
print 'Copying resources...'
- res_dir = os.path.join(out_dir, LIBRARY_PROJECT_NAME, 'res')
- temp_dir = os.path.join(out_dir, LIBRARY_PROJECT_NAME, 'temp')
+ res_dir = os.path.join(out_project_dir, 'res')
+ temp_dir = os.path.join(out_project_dir, 'temp')
if os.path.exists(res_dir):
shutil.rmtree(res_dir)
if os.path.exists(temp_dir):
ReplaceCrunchedImage(project_source, filename, dirname)
-def PostCopyLibraryProject(out_dir):
- print 'Post Copy Library Project...'
- aidls_to_remove = [
- 'org/chromium/content/common/common.aidl',
- 'org/chromium/net/IRemoteAndroidKeyStoreInterface.aidl',
- ]
- for aidl in aidls_to_remove:
- aidl_file = os.path.join(out_dir, LIBRARY_PROJECT_NAME, 'src', aidl)
- if os.path.exists(aidl_file):
- os.remove(aidl_file)
-
-
def main(argv):
print 'Generating XWalkCore Library Project...'
option_parser = optparse.OptionParser()
print 'Source project does not exist, please provide correct directory.'
sys.exit(1)
out_dir = options.target
+ if options.src_package:
+ out_project_dir = os.path.join(out_dir, 'xwalk_core_library_src')
+ else:
+ out_project_dir = os.path.join(out_dir, 'xwalk_core_library')
# Clean directory for project first.
- CleanLibraryProject(out_dir)
+ CleanLibraryProject(out_project_dir)
- out_project_dir = os.path.join(out_dir, LIBRARY_PROJECT_NAME)
if not os.path.exists(out_project_dir):
os.mkdir(out_project_dir)
# Copy Eclipse project files of library project.
- CopyProjectFiles(options.source, out_dir)
+ CopyProjectFiles(options.source, out_project_dir)
# Copy binaries and resources.
- CopyResources(options.source, out_dir)
- CopyBinaries(out_dir)
+ CopyResources(options.source, out_dir, out_project_dir)
+ CopyBinaries(out_dir, out_project_dir, options.src_package)
# Copy JS API binding files.
- CopyJSBindingFiles(options.source, out_dir)
- # Post copy library project.
- PostCopyLibraryProject(out_dir)
+ CopyJSBindingFiles(options.source, out_project_dir)
# Remove unused files.
mode = os.path.basename(os.path.normpath(out_dir))
RemoveUnusedFilesInReleaseMode(mode,
- os.path.join(out_dir, LIBRARY_PROJECT_NAME, 'libs'))
+ os.path.join(out_project_dir, 'libs'))
# Create empty src directory
src_dir = os.path.join(out_project_dir, 'src')
if not os.path.isdir(src_dir):
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Intel Corporation. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import optparse
+import os
+import re
+import sys
+import shutil
+
+
+def DoCopy(path, target_path):
+  """Copy Java/AIDL sources from |path| into |target_path|.
+
+  If |path| is a single file, its Java 'package' declaration is parsed
+  and the file is copied into the matching package sub-directory of
+  |target_path|.  If |path| is a directory, it is walked recursively and
+  the contained .java/.aidl files are copied, preserving the directory
+  layout relative to |path|.
+  """
+  if os.path.isfile(path):
+    package = ''
+    # NOTE(review): the '.' between package segments is an unescaped
+    # regex dot, so it matches any single character; presumably a literal
+    # '\.' was intended -- confirm before tightening.
+    package_re = re.compile(
+        '^package (?P<package>([a-zA-Z0-9_]+.)*[a-zA-Z0-9_]+);$')
+    for line in open(path).readlines():
+      match = package_re.match(line)
+      if match:
+        package = match.group('package')
+        break
+    # Map 'com.example.foo' to 'com/example/foo' (platform separator).
+    sub_path = os.path.sep.join(package.split('.'))
+    shutil.copy(path, os.path.join(target_path, sub_path))
+    return
+
+  for dirpath, _, files in os.walk(path):
+    if not files:
+      continue
+    # Recreate the directory structure of |path| under |target_path|.
+    sub_path = os.path.relpath(dirpath, path)
+    target_dirpath = os.path.join(target_path, sub_path)
+    if not os.path.isdir(target_dirpath):
+      os.makedirs(target_dirpath)
+    for f in files:
+      fpath = os.path.join(dirpath, f)
+      # "interface type;" lines are invalid in a normal Android project;
+      # they exist only for Chromium's build system, so skip any .aidl
+      # file containing them.
+      if f.endswith('.aidl'):
+        invalid_lines = []
+        for line in open(fpath).readlines():
+          if re.match('^interface .*;$', line):
+            invalid_lines.append(line)
+        if invalid_lines:
+          continue
+      elif not f.endswith('.java'):
+        continue
+      shutil.copy(fpath, target_dirpath)
+
+
+def main():
+  """Merge the Java source dirs given via --dirs into --target-path."""
+  parser = optparse.OptionParser()
+  info = ('The java source dirs to merge.')
+  parser.add_option('--dirs', help=info)
+  info = ('The target to place all the sources.')
+  parser.add_option('--target-path', help=info)
+  options, _ = parser.parse_args()
+
+  # Start from a clean target directory on every run.
+  if os.path.isdir(options.target_path):
+    shutil.rmtree(options.target_path)
+  os.makedirs(options.target_path)
+
+  for path in options.dirs.split(' '):
+    # Strip surrounding quotes from gyp-generated path tokens.
+    # NOTE(review): eval() evaluates the token as an arbitrary Python
+    # expression; ast.literal_eval (or simple slicing) would be safer --
+    # confirm the inputs are always build-generated before relying on it.
+    if path.startswith('"') and path.endswith('"'):
+      path = eval(path)
+    DoCopy(path, options.target_path)
+
+
+if __name__ == '__main__':
+  # main() returns None, so sys.exit() yields exit code 0 on success.
+  sys.exit(main())
<assign>
<filesystem path="/usr/bin/xwalkctl" exec_label="User" />
<filesystem path="/usr/bin/xwalk-launcher" exec_label="User" />
+ <filesystem path="/usr/lib/xwalk/xwalk_backend" exec_label="User" />
<filesystem path="/usr/lib64/xwalk/xwalk_backend" exec_label="User" />
</assign>
</manifest>
%define _binary_payload w3.gzdio
Name: crosswalk
-Version: 10.39.226.0
+Version: 10.39.233.0
Release: 0
Summary: Chromium-based app runtime
License: (BSD-3-Clause and LGPL-2.1+)
mContentsClientBridge.onGeolocationPermissionsHidePrompt();
}
- public String enableRemoteDebugging(int allowedUid) {
+ public void enableRemoteDebugging(int allowedUid) {
// Chrome looks for "devtools_remote" pattern in the name of a unix domain socket
// to identify a debugging page
final String socketName = getContext().getApplicationContext().getPackageName() + "_devtools_remote";
mDevToolsServer.allowConnectionFromUid(allowedUid);
mDevToolsServer.setRemoteDebuggingEnabled(true);
}
- // devtools/page is hardcoded in devtools_http_handler_impl.cc (kPageUrlPrefix)
- return "ws://" + socketName + "/devtools/page/" + devToolsAgentId();
}
// Enables remote debugging and returns the URL at which the dev tools server is listening
// for commands. Only the current process is allowed to connect to the server.
- String enableRemoteDebugging() {
- return enableRemoteDebugging(getContext().getApplicationInfo().uid);
+ void enableRemoteDebugging() {
+ enableRemoteDebugging(getContext().getApplicationInfo().uid);
}
void disableRemoteDebugging() {
mDevToolsServer = null;
}
+ public String getRemoteDebuggingUrl() {
+ if (mDevToolsServer == null) return "";
+ // devtools/page is hardcoded in devtools_http_handler_impl.cc (kPageUrlPrefix)
+ return "ws://" + mDevToolsServer.getSocketName() + "/devtools/page/" + devToolsAgentId();
+ }
+
@Override
public void onKeyValueChanged(String key, XWalkPreferencesInternal.PreferenceValue value) {
if (key == null) return;
private static final String DEBUG_PERMISSION_SIFFIX = ".permission.DEBUG";
private long mNativeDevToolsServer = 0;
+ private String mSocketName = null;
// Defines what processes may access to the socket.
public enum Security {
public XWalkDevToolsServer(String socketName) {
mNativeDevToolsServer = nativeInitRemoteDebugging(socketName);
+ mSocketName = socketName;
}
public void destroy() {
nativeAllowConnectionFromUid(mNativeDevToolsServer, uid);
}
+ public String getSocketName() {
+ return mSocketName;
+ }
+
private native long nativeInitRemoteDebugging(String socketName);
private native void nativeDestroyRemoteDebugging(long devToolsServer);
private native boolean nativeIsRemoteDebuggingEnabled(long devToolsServer);
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.ref.WeakReference;
+import java.net.MalformedURLException;
+import java.net.URL;
import org.chromium.base.ActivityState;
import org.chromium.base.ApplicationStatus;
* server is listening for commands.
* The allowedUid argument can be used to specify the uid of the process
* that is permitted to connect.
- * TODO(wang16): Hide or remove this API after new API for getting remote
- * debugging url available.
- *
- * @hide
+ */
+ public void enableRemoteDebugging(int allowedUid) {
+ if (mContent == null) return;
+ checkThreadSafety();
+ mContent.enableRemoteDebugging(allowedUid);
+ }
+
+ /**
+ * Get the WebSocket URL for remote debugging.
+ * @return the WebSocket URL used to remotely debug this XWalkView,
+ * or null if remote debugging is not enabled.
+ * @since 4.0
*/
@XWalkAPI
- public String enableRemoteDebugging(int allowedUid) {
+ public URL getRemoteDebuggingUrl() {
if (mContent == null) return null;
checkThreadSafety();
- return mContent.enableRemoteDebugging(allowedUid);
+ String wsUrl = mContent.getRemoteDebuggingUrl();
+ if (wsUrl == null || wsUrl.isEmpty()) return null;
+
+ try {
+ return new URL(wsUrl);
+ } catch (MalformedURLException e) {
+ return null;
+ }
}
/**
// Enables remote debugging and returns the URL at which the dev tools server is listening
// for commands. Only the current process is allowed to connect to the server.
- String enableRemoteDebugging() {
- return enableRemoteDebugging(mContext.getApplicationInfo().uid);
+ void enableRemoteDebugging() {
+ enableRemoteDebugging(mContext.getApplicationInfo().uid);
}
void disableRemoteDebugging() {
import android.net.Uri;
import android.util.Log;
import android.view.KeyEvent;
+import org.chromium.content.browser.ContentVideoView;
class XWalkWebContentsDelegateAdapter extends XWalkWebContentsDelegate {
@Override
public void toggleFullscreen(boolean enterFullscreen) {
+ if (!enterFullscreen) {
+ ContentVideoView videoView = ContentVideoView.getContentVideoView();
+ if (videoView != null) videoView.exitFullscreen(false);
+ }
if (mXWalkContentsClient != null) mXWalkContentsClient.onToggleFullscreen(enterFullscreen);
}
net::UnixDomainServerSocket::AuthCallback auth_callback =
allow_debug_permission ?
base::Bind(&AuthorizeSocketAccessWithDebugPermission) :
- base::Bind(&content::CanUserConnectToDevTools);
+ base::Bind(&XWalkDevToolsServer::CanUserConnectToDevTools,
+ base::Unretained(this));
scoped_ptr<content::DevToolsHttpHandler::ServerSocketFactory> factory(
new UnixDomainServerSocketFactory(socket_name_));
command_line->AppendSwitch(switches::kAllowFileAccessFromFiles);
// Enable SIMD.JS API by default.
- /*std::string js_flags("--simd_object");
+ std::string js_flags("--simd_object");
if (command_line->HasSwitch(switches::kJavaScriptFlags)) {
js_flags += " ";
js_flags +=
command_line->GetSwitchValueASCII(switches::kJavaScriptFlags);
}
- command_line->AppendSwitchASCII(switches::kJavaScriptFlags, js_flags);*/
+ command_line->AppendSwitchASCII(switches::kJavaScriptFlags, js_flags);
startup_url_ = GetURLFromCommandLine(*command_line);
}
@Override
public void setUp() throws Exception {
super.setUp();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
public void tearDown() throws Exception {
- if (mWebServer != null) {
- mWebServer.shutdown();
- }
+ mWebServer.shutdown();
super.tearDown();
}
@MediumTest
@Feature({"AcceptCookie"})
public void testAcceptCookie() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
String path = "/cookie_test.html";
String responseStr =
"<html><head><title>TEST!</title></head><body>HELLO!</body></html>";
// Clean up all cookies.
mCookieManager.removeAllCookie();
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
TestCallbackHelperContainer.OnPageFinishedHelper onPageFinishedHelper =
mTestHelperBridge.getOnPageFinishedHelper();
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String testHtml = "<html><head>Header</head><body>Body</body></html>";
final String testPath = "/test.html";
assertEquals(syncUrl, onPageFinishedHelper.getUrl());
assertEquals(2, onPageFinishedHelper.getCallCount());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
@MediumTest
@Feature({"OnPageLoadStarted"})
public void testOnPageLoadStartedWithServer() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String testHtml = "<html><head>Header</head><body>Body</body></html>";
final String testPath = "/test.html";
mOnPageStartedHelper.waitForCallback(currentCallCount);
assertEquals(testUrl, mOnPageStartedHelper.getUrl());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
*/
public class OnPageLoadStoppedTest extends XWalkViewTestBase {
private static final long WAIT_TIMEOUT_MS = scaleTimeout(2000);
- TestWebServer mWebServer = null;
+ TestWebServer mWebServer;
TestCallbackHelperContainer.OnPageFinishedHelper mOnPageFinishedHelper;
TestCallbackHelperContainer.OnReceivedErrorHelper mOnReceivedErrorHelper;
TestCallbackHelperContainer.OnPageStartedHelper mOnPageStartedHelper;
public void setUp() throws Exception {
super.setUp();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
mOnPageFinishedHelper = mTestHelperBridge.getOnPageFinishedHelper();
mOnReceivedErrorHelper = mTestHelperBridge.getOnReceivedErrorHelper();
mOnPageStartedHelper = mTestHelperBridge.getOnPageStartedHelper();
super.setUp();
mOnProgressChangedHelper = mTestHelperBridge.getOnProgressChangedHelper();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
public void tearDown() throws Exception {
- if (mWebServer != null) {
- mWebServer.shutdown();
- }
+ mWebServer.shutdown();
super.tearDown();
}
super.setUp();
mOnTitleUpdatedHelper = mTestHelperBridge.getOnTitleUpdatedHelper();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
@SmallTest
@Feature({"Profile"})
public void testCustomizeProfile() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
final String testHtml = getFileContent("profile.html");
final String testPath = "/profile.html";
final String testUrl = webServer.setResponse(testPath, testHtml, null);
assertTrue(userDataDir.isDirectory());
assertFalse(defaultUserDataDir.exists());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
public void setUp() throws Exception {
super.setUp();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
public void tearDown() throws Exception {
- if (mWebServer != null) {
- mWebServer.shutdown();
- }
+ mWebServer.shutdown();
super.tearDown();
}
});
mUrls = new String[NUM_NAVIGATIONS];
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
public void tearDown() throws Exception {
- if (mWebServer != null) {
- mWebServer.shutdown();
- }
+ mWebServer.shutdown();
super.tearDown();
}
}
});
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
protected void setUp() throws Exception {
super.setUp();
mShouldOverrideUrlLoadingHelper = mTestHelperBridge.getShouldOverrideUrlLoadingHelper();
- mWebServer = new TestWebServer(false);
+ mWebServer = TestWebServer.start();
}
@Override
public void clickOnElementId(final String id, String frameName) throws Exception {
String str;
if (frameName != null) {
- str = "top.window." + "LeftFrame" + ".document.getElementById('" + id + "')";
+ str = "top.window." + frameName + ".document.getElementById('" + id + "')";
} else {
str = "document.getElementById('" + id + "')";
}
}, WAIT_TIMEOUT_MS, CHECK_INTERVAL));
try {
- String result = executeJavaScriptAndWaitForResult(
- "var evObj = document.createEvent('Events'); " +
+ loadJavaScriptUrl("javascript:var evObj = document.createEvent('Events'); " +
"evObj.initEvent('click', true, false); " +
script2 +
"console.log('element with id [" + id + "] clicked');");
settings.setJavaScriptEnabled(true);
settings.setAppCacheEnabled(false);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
ManifestTestHelper helper = new ManifestTestHelper(
webServer, "testAppCache.html", "appcache.manifest");
loadUrlSyncByContent(
helper.getHtmlUrl());
helper.waitUntilManifestIsRequested(0);
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
// AppCachePath setting is global, no need to set it for the second view.
settings1.setAppCacheEnabled(true);
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
try {
- webServer = new TestWebServer(false);
ManifestTestHelper helper0 = new ManifestTestHelper(
webServer, "testAppCache_0.html", "appcache.manifest_0");
mContentClient = views.getClient0();
assertEquals(
prevManifestRequestCount, webServer.getRequestCount(helper1.getManifestPath()));
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
final String customUserAgentString =
"testUserAgentWithTestServerUserAgent";
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start();
String fileName = null;
try {
- webServer = new TestWebServer(false);
final String httpPath = "/testUserAgentWithTestServer.html";
final String url = webServer.setResponse(httpPath, "foo", null);
Header header = matchingHeaders[0];
assertEquals(customUserAgentString, header.getValue());
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
}
// For Cross-Origin XHR.
public void testCrossOriginXhr() throws Throwable {
- TestWebServer webServer = null;
+ TestWebServer webServer = TestWebServer.start(4444);
try {
// The server will be accessed by XMLHttpRequest from js.
- webServer = new TestWebServer(false);
final String path = "/cross_origin_xhr_test.html";
final String responseStr = "Cross-Origin XHR";
final String url = webServer.setResponse(path, responseStr, null);
}
});
} finally {
- if (webServer != null) webServer.shutdown();
+ webServer.shutdown();
}
}
// Used when running in single-process mode.
#if defined(OS_TIZEN)
-base::LazyInstance<XWalkContentRendererClientTizen>::Leaky
+base::LazyInstance<xwalk::XWalkContentRendererClientTizen>::Leaky
g_xwalk_content_renderer_client = LAZY_INSTANCE_INITIALIZER;
#else
base::LazyInstance<XWalkContentRendererClient>::Leaky
'target_name': 'pack_xwalk_core_library',
'type': 'none',
'dependencies': [
- 'xwalk_core_library'
+ 'xwalk_core_library',
+ 'xwalk_core_library_src',
],
'actions': [
{
'<(PRODUCT_DIR)/xwalk_core_library'
],
},
+ {
+ 'action_name': 'pack_xwalk_core_library_src',
+ 'message': 'Packaging XwalkCore Library Project Source.',
+ 'inputs': [
+ '<(DEPTH)/xwalk/tools/tar.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/xwalk_core_library_src.tar.gz',
+ '<(PRODUCT_DIR)/pack_xwalk_core_library_src_intermediate/always_run',
+ ],
+ 'action': [
+ 'python', 'tools/tar.py',
+ '<(PRODUCT_DIR)/xwalk_core_library_src'
+ ],
+ },
],
},
{
],
},
{
+ 'target_name': 'xwalk_core_library_src',
+ 'type': 'none',
+ 'dependencies': [
+ 'xwalk_core_library',
+ ],
+ 'variables': {
+ # TODO(wang16): This list is hard-coded for now. It might be broken by a
+ # rebase onto a new Chromium base; check it manually after each rebase.
+ 'java_source_dirs': [
+ '<(DEPTH)/base/android/java/src',
+ '<(DEPTH)/components/web_contents_delegate_android/android/java/src',
+ '<(DEPTH)/components/navigation_interception/android/java/src',
+ '<(DEPTH)/content/public/android/java/src',
+ '<(DEPTH)/media/base/android/java/src',
+ '<(DEPTH)/net/android/java/src',
+ '<(DEPTH)/ui/android/java/src',
+ '<(DEPTH)/xwalk/extensions/android/java/src',
+ '<(DEPTH)/xwalk/runtime/android/core/src',
+ '<(DEPTH)/xwalk/runtime/android/core_internal/src',
+ '<(PRODUCT_DIR)/gen/enums/bitmap_format_java',
+ '<(PRODUCT_DIR)/gen/enums/window_open_disposition_java',
+ '<(PRODUCT_DIR)/gen/templates',
+ '<(PRODUCT_DIR)/resource_map',
+ # NativeLibraries.java must be copied later than gen/templates to override the empty
+ # NativeLibraries.java in gen/templates.
+ '<(PRODUCT_DIR)/xwalk_core_internal_empty_embedder_apk/native_libraries_java/NativeLibraries.java',
+ '>(reflection_gen_dir)/bridge',
+ '>(reflection_gen_dir)/wrapper',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'generate_xwalk_core_library_src_package',
+ 'message': 'Generating Source Package of XwalkCore Library Project.',
+ 'inputs': [
+ '<(DEPTH)/xwalk/build/android/common_function.py',
+ '<(DEPTH)/xwalk/build/android/generate_xwalk_core_library.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/xwalk_core_library_src/always_run',
+ '<(PRODUCT_DIR)/xwalk_core_library_src/src/README.md',
+ ],
+ 'action': [
+ 'python', '<(DEPTH)/xwalk/build/android/generate_xwalk_core_library.py',
+ '-s', '<(DEPTH)',
+ '-t', '<(PRODUCT_DIR)',
+ '--src-package',
+ ],
+ },
+ {
+ 'action_name': 'copy_xwalk_core_library_src',
+ 'message': 'Copy java sources of xwalk core library',
+ 'inputs': [
+ 'build/android/merge_java_srcs.py',
+ '<(PRODUCT_DIR)/xwalk_core_library_src/src/README.md',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/xwalk_core_library_src/copy_src_always_run',
+ ],
+ 'action': [
+ 'python', 'build/android/merge_java_srcs.py',
+ '--dirs=>(java_source_dirs)',
+ '--target-path=<(PRODUCT_DIR)/xwalk_core_library_src/src',
+ ],
+ },
+ ],
+ },
+ {
'target_name': 'xwalk_core_library_aar',
'type': 'none',
'dependencies': [