diff --git a/.gitattributes b/.gitattributes
index 37ce5f1..3b1db11 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -2,4 +2,3 @@
.gitattributes export-ignore
README export-ignore
.info export-ignore
-tests/** export-ignore
diff --git a/tests/actionscript/RayTracer.as b/tests/actionscript/RayTracer.as
deleted file mode 100644
index e13635a..0000000
--- a/tests/actionscript/RayTracer.as
+++ /dev/null
@@ -1,1113 +0,0 @@
- //print("sanity check");
- var size;
- //Scene scene;
- var scene;
- /**
- * Lights for the rendering scene
- * Light lights[];
- */
- var lights;
-
- /**
- * Objects (spheres) for the rendering scene
- * Primitive prim[];
- */
- var prim;
-
- /**
- * The view for the rendering scene
- * View view;
- */
- var view;
- /**
- * Temporary ray
- * Ray tRay = new Ray();
- */
- //print("point 1");
- var tRay = new Ray();
- //print("point2");
- /**
- * Alpha channel
- */
- var alpha = 255 << 24;
-
- /**
- * Null vector (for speedup, instead of new Vec(0,0,0))
- */
- var voidVec = new Vec(0.0,0.0,0.0);
-
- /**
- * Temporary vect
- */
- var L = new Vec(0.0,0.0,0.0);
-
- /**
- * Current intersection instance (only one is needed!)
- */
- var inter = new Isect();
-
- /**
- * Height of the Image to be rendered
- */
- var height;
-
- /**
- * Width of the Image to be rendered
- */
- var width;
-
- var datasizes = new Array(4);
- datasizes[0] = 50;
- datasizes[1] = 150;
- datasizes[2] = 500;
- datasizes[3] = 7;
-
- var checksum = 0;
- var lastValue = 0;
- var size;
-
- var numobjects;
-
- var start = new Date();
- JGFrun(3);
- var elapsed = new Date() - start;
-
- function JGFsetsize(sizel) {
- size = sizel;
- }
-
- function JGFinitialise() {
- //print("Entering JGFinitialise");
- // set image size
- width = height = datasizes[size];
-
- // create the objects to be rendered
- scene = createScene();
-
- // get lights, objects etc. from scene.
- setScene(scene);
-
- numobjects = scene.getObjects();
-
- }
-
- function JGFapplication() {
- // Set interval to be rendered to the whole picture
- // (overkill, but will be useful to retain this for parallel versions)
- var interval = new Interval(0, width, height, 0, height, 1);
-
- // Do the business!
- render(interval);
-
- }
-
- function JGFvalidate() {
- //print("entering JFGvalidate");
- var refval = new Array(4);
- refval[0] = 2676692;
- refval[1] = 29827635;
- refval[2] = 29827635;
- refval[3] = 5158;
- var dev = checksum - refval[size];
- if (dev != 0) {
- print("Validation failed");
- print("Pixel checksum = " + checksum);
- print("Reference value = " + refval[size]);
- }else
- {
- print("Validation successfull " + checksum);
- }
- }
-
- function JGFtidyup() {
- scene = null;
- lights = null;
- prim = null;
- tRay = null;
- inter = null;
- }
-
- function JGFrun(size) {
- //print("entering JFGrun");
- JGFsetsize(size);
- JGFinitialise();
- JGFapplication();
- JGFvalidate();
- //JGFtidyup();
- }
-
- /****************Start Class RayTracer*************/
- /**
- * Create and initialize the scene for the rendering picture.
- *
- * @return The scene just created
- */
-
- function createScene() {
- //print("entering createScene");
- var x = 0;
- var y = 0;
-
- var scene = new Scene();
-
- /* create spheres */
-
- var p;
- var nx = 4;
- var ny = 4;
- var nz = 4;
- for (var i = 0; i < nx; i++) {
- for (var j = 0; j < ny; j++) {
- for (var k = 0; k < nz; k++) {
- var xx = 20.0 / (nx - 1) * i - 10.0;
- var yy = 20.0 / (ny - 1) * j - 10.0;
- var zz = 20.0 / (nz - 1) * k - 10.0;
- p = new Sphere(new Vec(xx, yy, zz), 3);
-
- // p.setColor(i/(double) (nx-1), j/(double)(ny-1),
- // k/(double) (nz-1));
- p.setColor(0, 0, (i + j) / (nx + ny - 2));
- p.surf.shine = 15.0;
- p.surf.ks = 1.5 - 1.0;
- p.surf.kt = 1.5 - 1.0;
- scene.addObject(p);
- }
- }
- }
-
- /* Creates five lights for the scene */
- scene.addLight(new Light(100, 100, -50, 1.0));
- scene.addLight(new Light(-100, 100, -50, 1.0));
- scene.addLight(new Light(100, -100, -50, 1.0));
- scene.addLight(new Light(-100, -100, -50, 1.0));
- scene.addLight(new Light(200, 200, 0, 1.0));
-
- /* Creates a View (viewing point) for the rendering scene */
- var v = new View(new Vec(x, 20, -30), new Vec(x, y, 0), new Vec(0, 1,0), 1.0, 35.0 * 3.14159265 / 180.0, 1.0);
- /*
- * v.from = new Vec(x, y, -30); v.at = new Vec(x, y, -15); v.up = new
- * Vec(0, 1, 0); v.angle = 35.0 * 3.14159265 / 180.0; v.aspect = 1.0;
- * v.dist = 1.0;
- *
- */
- scene.setView(v);
-
- return scene;
- }
-
- function setScene(scene) {
- //print("entering setScene");
- // Get the objects count
- var nLights = scene.getLights();
- var nObjects = scene.getObjects();
-
- lights = new Array(nLights);
- prim = new Array(nObjects);
-
- // Get the lights
- for (var l = 0; l < nLights; l++) {
- lights[l] = scene.getLight(l);
- }
-
- // Get the primitives
- for (var o = 0; o < nObjects; o++) {
- prim[o] = scene.getObject(o);
- }
-
- // Set the view
- view = scene.getView();
- }
-
- function render(interval) {
- //print("entering render");
- // Screen variables
- var row = new Array(interval.width * (interval.yto - interval.yfrom));
- var pixCounter = 0; // iterator
-
- // Rendering variables
- var x, y, red, green, blue;
- var xlen, ylen;
- var viewVec;
-
- viewVec = Vec.sub(view.at, view.from);
-
- viewVec.normalize();
-
- var tmpVec = new Vec(0.0,0.0,0.0);
- tmpVec.setVec(viewVec);
- tmpVec.scale(Vec.dot(view.up, viewVec));
-
- var upVec = Vec.sub(view.up, tmpVec);
- upVec.normalize();
-
- var leftVec = Vec.cross(view.up, viewVec);
- leftVec.normalize();
-
- var frustrumwidth = view.dist * Math.tan(view.angle);
-
- upVec.scale(-frustrumwidth);
- leftVec.scale(view.aspect * frustrumwidth);
-
- var r = new Ray();
- r.setRay(view.from, voidVec);
- var col = new Vec(0.0,0.0,0.0);
-
- // Header for .ppm file
- // System.out.println("P3");
- // System.out.println(width + " " + height);
- // System.out.println("255");
-
- // All loops are reversed for 'speedup' (cf. thinking in java p331)
-
- // For each line
- for (y = interval.yfrom; y < interval.yto; y++) {
- //print("outer loop in render :"+y);
- ylen = (2.0 * y) / interval.width - 1.0;
- // System.out.println("Doing line " + y);
- // For each pixel of the line
- for (x = 0; x < interval.width; x++) {
- //print("innter loop in render: "+x);
- xlen = (2.0 * x) / interval.width - 1.0;
- r.D = Vec.comb(xlen, leftVec, ylen, upVec);
- r.D.add(viewVec);
- r.D.normalize();
- //print("executing trace");
- col = trace2(0, 1.0, r);
- if(col == undefined)
- {
- print("col is set: "+col);
- print("r is: "+r);
- }
- // computes the color of the ray
- red = (col.x * 255.0);
- if (red > 255)
- red = 255;
- green = (col.y * 255.0);
- //print("green is set");
- if (green > 255)
- green = 255;
- blue = (col.z * 255.0);
- //print("blue is set");
- if (blue > 255)
- blue = 255;
- //print("adding checksum");
- red = Math.floor(red);
- green = Math.floor(green);
- blue = Math.floor(blue);
- checksum += red;
- checksum += green;
- checksum += blue;
-
- // RGB values for .ppm file
- // System.out.println(red + " " + green + " " + blue);
- // Sets the pixels
- row[pixCounter++] = alpha | (red << 16) | (green << 8) | (blue);
- } // end for (x)
- } // end for (y)
-
- }
-
- function intersect( r, maxt) {
- var tp;
- var i, nhits;
- //print("entering intersect");
- nhits = 0;
- inter.t = 1e9;
- for (i = 0; i < prim.length; i++) {
- // uses global temporary Prim (tp) as temp.object for speedup
- tp = prim[i].intersect(r);
- if (tp != null && tp.t < inter.t) {
- inter.t = tp.t;
- inter.prim = tp.prim;
- inter.surf = tp.surf;
- inter.enter = tp.enter;
- nhits++;
- }
- }
- return nhits > 0 ? true : false;
- }
-
- /**
- * Checks if there is a shadow
- *
- * @param r
- * The ray
- * @return Returns 0 if the ray hits an object (there is a shadow), 1 if it doesn't
- */
- function Shadow( r, tmax) {
- if (intersect(r, tmax))
- return 0;
- return 1;
- }
-
- /**
- * Return the Vector's reflection direction
- *
- * @return The specular direction
- */
- function SpecularDirection( I, N) {
- var r;
- r = Vec.comb(1.0 / Math.abs(Vec.dot(I, N)), I, 2.0, N);
- r.normalize();
- return r;
- }
-
- /**
- * Return the Vector's transmission direction
- */
- function TransDir( m1, m2, I, N) {
- var n1, n2, eta, c1, cs2;
- var r;
- n1 = m1 == null ? 1.0 : m1.ior;
- n2 = m2 == null ? 1.0 : m2.ior;
- eta = n1 / n2;
- c1 = -Vec.dot(I, N);
- cs2 = 1.0 - eta * eta * (1.0 - c1 * c1);
- if (cs2 < 0.0)
- return null;
- r = Vec.comb(eta, I, eta * c1 - Math.sqrt(cs2), N);
- r.normalize();
- return r;
- }
-
- /**
- * Returns the shaded color
- *
- * @return The color in Vec form (rgb)
- */
- function shade( level, weight, P, N, I, hit) {
- var n1, n2, eta, c1, cs2;
- var r;
- var tcol;
- var R;
- var t, diff, spec;
- var surf;
- var col;
- var l;
-
- col = new Vec(0.0,0.0,0.0);
- surf = hit.surf;
- R = new Vec(0.0,0.0,0.0);
- if (surf.shine > 1e-6) {
- R = SpecularDirection(I, N);
- }
-
-		// Computes the effect of each light
- for (l = 0; l < lights.length; l++) {
- L.sub2(lights[l].pos, P);
- if (Vec.dot(N, L) >= 0.0) {
- t = L.normalize();
-
- tRay.P = P;
- tRay.D = L;
-
- // Checks if there is a shadow
- if (Shadow(tRay, t) > 0) {
- diff = Vec.dot(N, L) * surf.kd * lights[l].brightness;
-
- col.adds_two(diff, surf.color);
- if (surf.shine > 1e-6) {
- spec = Vec.dot(R, L);
- if (spec > 1e-6) {
- spec = Math.pow(spec, surf.shine);
- col.x += spec;
- col.y += spec;
- col.z += spec;
- }
- }
- }
- } // if
- } // for
-
- tRay.P = P;
- if (surf.ks * weight > 1e-3) {
- tRay.D = SpecularDirection(I, N);
- tcol = trace2(level + 1, surf.ks * weight, tRay);
- col.adds_two(surf.ks, tcol);
- }
- if (surf.kt * weight > 1e-3) {
- if (hit.enter > 0)
- tRay.D = TransDir(null, surf, I, N);
- else
- tRay.D = TransDir(surf, null, I, N);
- tcol = trace2(level + 1, surf.kt * weight, tRay);
- col.adds_two(surf.kt, tcol);
- }
-
- // garbaging...
- tcol = null;
- surf = null;
-
- return col;
- }
-
- /**
- * Launches a ray
- */
- function trace2( level, weight, r) {
- //print("entering trace");
- var P, N;
- var hit;
- //print("checking recursion in trace");
- // Checks the recursion level
- if (level > 6) {
- return new Vec(0.0,0.0,0.0);
- }
-
- hit = intersect(r, 1e6);
- //print("hit is: "+hit);
- if (hit) {
- P = r.point(inter.t);
- N = inter.prim.normal(P);
- if (Vec.dot(r.D, N) >= 0.0) {
- N.negate();
- }
- return shade(level, weight, P, N, r.D, inter);
- }
- // no intersection --> col = 0,0,0
- return voidVec;
- }
- /****************End Class RayTracer***************/
-
-class Interval {
- /*
- * public int number; public int width; public int height; public int yfrom;
- * public int yto; public int total;
- */
- var number;
-
- var width;
-
- var height;
-
- var yfrom;
-
- var yto;
-
- var total;
-
- function Interval( number, width, height, yfrom, yto, total) {
- this.number = number;
- this.width = width;
- this.height = height;
- this.yfrom = yfrom;
- this.yto = yto;
- this.total = total;
- }
-}
-class Isect {
- //public double t;
- var t;
- //public int enter;
- var enter;
- //public Primitive prim;
- var prim;
- //public Surface surf;
- var surf;
-}
-
-class Light {
- //public Vec pos;
- var pos;
- //public double brightness;
- var brightness;
- function Light( x, y, z, brightnessl) {
- this.pos = new Vec(x, y, z);
- this.brightness = brightnessl;
- }
-}
-
-class Primitive {
- var surf = new Surface();
-
- function setColor( r, g, b) {
- surf.color = new Vec(r, g, b);
- }
- //abstract
- function normal(pnt){}
- //abstract
- function intersect(ry){}
-
- //abstract, override
- function toString(){}
- //abstract
- function getCenter(){}
- //abstract
-
- function setCenter(c){}
-}
-
-class Ray {
- var P, D;
-
- function setRay(pnt, dir) {
- //print("set ray start");
- P = new Vec(pnt.x, pnt.y, pnt.z);
- D = new Vec(dir.x, dir.y, dir.z);
- //print("set ray after init");
- D.normalize();
- //print("set ray after normalize");
- }
- function Ray()
- {
- //print("start Ray()");
- P = new Vec(0.0,0.0,0.0);
- D = new Vec(0.0,0.0,0.0);
- //print("end Ray()");
- }
-
- function point(t) {
- return new Vec(P.x + D.x * t, P.y + D.y * t, P.z + D.z * t);
- }
-
- //@Override
- function toString() {
- return "{" + P.toString() + " -> " + D.toString() + "}";
- }
-}
-class Scene {
- var lights;
-
- var objects;
-
- var view;
-
- function Scene() {
- this.lights = new Array();
- this.objects = new Array();
- }
-
- function addLight(l) {
- this.lights.push(l);
- }
-
- function addObject(object) {
- this.objects.push(object);
- }
-
- function setView(view) {
- this.view = view;
- }
-
- function getView() {
- return this.view;
- }
-
- function getLight(number) {
- return this.lights[number];
- }
-
- function getObject( number) {
- return objects[number];
- }
-
- function getLights() {
- //print("start getLights");
- return this.lights.length;
- }
-
- function getObjects() {
- return this.objects.length;
- }
-
- function setObject(object, pos) {
- this.objects[pos] = object;
- }
-}
-class Sphere extends Primitive {
- var c;
-
- var r, r2;
-
- var v, b; // temporary vecs used to minimize the memory load
-
- function Sphere( center, radius) {
- c = center;
- r = radius;
- r2 = r * r;
- v = new Vec(0.0,0.0,0.0);
- b = new Vec(0.0,0.0,0.0);
- }
-
- //@Override
- override function intersect(ry) {
- var b, disc, t;
- var ip;
- v.sub2(c, ry.P);
- b = Vec.dot(v, ry.D);
- disc = b * b - Vec.dot(v, v) + r2;
- if (disc < 0.0) {
- return null;
- }
- disc = Math.sqrt(disc);
- t = (b - disc < 1e-6) ? b + disc : b - disc;
- if (t < 1e-6) {
- return null;
- }
- ip = new Isect();
- ip.t = t;
- ip.enter = Vec.dot(v, v) > r2 + 1e-6 ? 1 : 0;
- ip.prim = this;
- ip.surf = surf;
- return ip;
- }
-
- //@Override
- override function normal(p) {
- var r;
- r = Vec.sub(p, c);
- r.normalize();
- return r;
- }
-
- //@Override
- override function toString() {
- return "Sphere {" + c.toString() + "," + r + "}";
- }
-
- //@Override
- override function getCenter() {
- return c;
- }
-
- //@Override
- override function setCenter(c) {
- this.c = c;
- }
-}
-
-class Surface {
- var color;
-
- var kd;
-
- var ks;
-
- var shine;
-
- var kt;
-
- var ior;
-
- function Surface() {
- color = new Vec(1, 0, 0);
- kd = 1.0;
- ks = 0.0;
- shine = 0.0;
- kt = 0.0;
- ior = 1.0;
- }
-
- //@Override
- function toString() {
- return "Surface { color=" + color + "}";
- }
-}
-/**
- * This class reflects the 3d vectors used in 3d computations
- */
-class Vec {
-
- /**
- * The x coordinate
- */
- var x;
-
- /**
- * The y coordinate
- */
- var y;
-
- /**
- * The z coordinate
- */
- var z;
-
- x = 0.0;
- y = 0.0;
- z = 0.0;
-
- /**
- * Constructor
- *
- * @param a
- * the x coordinate
- * @param b
- * the y coordinate
- * @param c
- * the z coordinate
- */
- function Vec( a, b, c) {
- x = a;
- y = b;
- z = c;
- }
- /**
- * Copy constructor
- */
- function setVec(a) {
- x = a.x;
- y = a.y;
- z = a.z;
- }
- /**
- * Add a vector to the current vector
- *
- * @param a The vector to be added
- */
- function add( a) {
- x += a.x;
- y += a.y;
- z += a.z;
- }
-
- /**
- * adds: Returns a new vector such as new = sA + B
- */
- function adds( s, a, b) {
- return new Vec(s * a.x + b.x, s * a.y + b.y, s * a.z + b.z);
- }
-
- /**
- * Adds vector such as: this+=sB
- *
- * @param s The multiplier
- * @param b The vector to be added
- */
- function adds_two( s, b) {
- x += s * b.x;
- y += s * b.y;
- z += s * b.z;
- }
-
- /**
- * Subtracts two vectors
- */
- static function sub( a, b) {
- return new Vec(a.x - b.x, a.y - b.y, a.z - b.z);
- }
-
- /**
- * Subtracts two vectors and places the result in the current vector. Used
- * for speedup with local variables - there were too many Vecs to be gc'ed.
- * Consumes about 10 units, whereas sub consumes nearly 999 units! (cf.
- * Thinking in Java, pp. 831-832)
- */
- function sub2( a, b) {
- this.x = a.x - b.x;
- this.y = a.y - b.y;
- this.z = a.z - b.z;
- }
-
- static function mult( a, b) {
- return new Vec(a.x * b.x, a.y * b.y, a.z * b.z);
- }
-
- static function cross( a, b) {
- return new Vec(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y
- - a.y * b.x);
- }
-
- static function dot( a, b) {
- return a.x * b.x + a.y * b.y + a.z * b.z;
- }
-
- static function comb( a, A, b, B) {
- return new Vec(a * A.x + b * B.x, a * A.y + b * B.y, a * A.z + b * B.z);
- }
-
- function comb2( a, A, b, B) {
- x = a * A.x + b * B.x;
- y = a * A.y + b * B.y;
- z = a * A.z + b * B.z;
- }
-
- function scale( t) {
- x *= t;
- y *= t;
- z *= t;
- }
-
- function negate() {
- x = -x;
- y = -y;
- z = -z;
- }
-
- function normalize() {
- var len;
- len = Math.sqrt(x * x + y * y + z * z);
- if (len > 0.0) {
- x /= len;
- y /= len;
- z /= len;
- }
- return len;
- }
-
- //@Override
- function toString() {
- return "<" + x + "," + y + "," + z + ">";
- }
-}
-
-class View {
- /*
- * public Vec from; public Vec at; public Vec up; public double dist; public
- * double angle; public double aspect;
- */
- var from;
-
- var at;
-
- var up;
-
- var dist;
-
- var angle;
-
- var aspect;
-
- function View( froml, atl, upl, distl, anglel, aspectl) {
- this.from = froml;
- this.at = atl;
- this.up = upl;
- this.dist = distl;
- this.angle = anglel;
- this.aspect = aspectl;
- }
-}
-/*****************Start Vector class*****************/
-//http://sourceforge.net/projects/jsvector/
-// Vector Constructor -- constructs the object
-// Vector Constructor -- constructs the object
-
-// Vector is now a builtin class - using that instead
-/*
-function Vector(inc) {
- if (inc == 0) {
- inc = 100;
- }
-
- // Properties
- this.data = new Array(inc);
- this.increment = inc;
- this.size = 0;
-
- // Methods
- this.getCapacity = getCapacity;
- this.getSize = getSize;
- this.isEmpty = isEmpty;
- this.getLastElement = getLastElement;
- this.getFirstElement = getFirstElement;
- this.getElementAt = getElementAt;
- this.addElement = addElement;
- this.insertElementAt = insertElementAt;
- this.removeElementAt = removeElementAt;
- this.removeAllElements = removeAllElements;
- this.indexOf = indexOf;
- this.contains = contains
- this.resize = resize;
- this.toString = toString;
- this.sort = sort;
- this.trimToSize = trimToSize;
- this.clone = clone;
- this.overwriteElementAt;
-}
-
-// getCapacity() -- returns the number of elements the vector can hold
-function getCapacity() {
- return this.data.length;
-}
-
-// getSize() -- returns the current size of the vector
-function getSize() {
- return this.size;
-}
-
-// isEmpty() -- checks to see if the Vector has any elements
-function isEmpty() {
- return this.getSize() == 0;
-}
-
-// getLastElement() -- returns the last element
-function getLastElement() {
- if (this.data[this.getSize() - 1] != null) {
- return this.data[this.getSize() - 1];
- }
-}
-
-// getFirstElement() -- returns the first element
-function getFirstElement() {
- if (this.data[0] != null) {
- return this.data[0];
- }
-}
-
-// getElementAt() -- returns an element at a specified index
-function getElementAt(i) {
- try {
- return this.data[i];
- }
- catch (e) {
- return "Exception " + e + " occured when accessing " + i;
- }
-}
-
-// addElement() -- adds a element at the end of the Vector
-function addElement(obj) {
- if(this.getSize() == this.data.length) {
- this.resize();
- }
- this.data[this.size++] = obj;
-}
-
-// insertElementAt() -- inserts an element at a given position
-function insertElementAt(obj, index) {
- try {
- if (this.size == this.capacity) {
- this.resize();
- }
-
- for (var i=this.getSize(); i > index; i--) {
- this.data[i] = this.data[i-1];
- }
- this.data[index] = obj;
- this.size++;
- }
- catch (e) {
- return "Invalid index " + i;
- }
-}
-
-// removeElementAt() -- removes an element at a specific index
-function removeElementAt(index) {
- try {
- var element = this.data[index];
-
- for(var i=index; i<(this.getSize()-1); i++) {
- this.data[i] = this.data[i+1];
- }
-
- this.data[getSize()-1] = null;
- this.size--;
- return element;
- }
- catch(e) {
- return "Invalid index " + index;
- }
-}
-
-// removeAllElements() -- removes all elements in the Vector
-function removeAllElements() {
- this.size = 0;
-
-	for (var i=0; i<this.data.length; i++) {
-		this.data[i] = null;
-	}
-}
-
-// sort() -- sorts the Vector on field f using insertion sort
-function sort(f) {
-	for (var i=1; i<this.getSize(); i++) {
-		var currentObj = this.data[i];
-		var currentValue = currentObj[f];
-		var j = i - 1;
-		var compareObj = this.data[j];
-		var compareValue = compareObj[f];
-		while (j >= 0 && compareValue > currentValue) {
- this.data[j+1] = this.data[j];
- j--;
- if (j >=0) {
- compareObj = this.data[j];
- compareValue = compareObj[f];
- }
- }
- this.data[j+1] = currentObj;
- }
-}
-
-// clone() -- copies the contents of a Vector to another Vector returning the new Vector.
-function clone() {
- var newVector = new Vector(this.size);
-
-	for (var i=0; i<this.size; i++) {
-		newVector.addElement(this.data[i]);
-	}
-	return newVector;
-}
-*/
-
-<!-- simple example build file -->
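The reflection and refraction directions the deleted raytracer computes in SpecularDirection and TransDir follow the textbook construction; a minimal Python sketch of the same formulas (the helper names are mine, not from the file):

```python
import math

def dot(a, b):
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]

def comb(s, a, t, b):
    # s*a + t*b, the linear combination Vec.comb implements above
    return tuple(s * ai + t * bi for ai, bi in zip(a, b))

def normalize(v):
    n = math.sqrt(dot(v, v))
    return tuple(c / n for c in v) if n > 0.0 else v

def specular_direction(i, n):
    # mirror direction, as in SpecularDirection: (1/|I.N|)*I + 2*N
    return normalize(comb(1.0 / abs(dot(i, n)), i, 2.0, n))

def trans_dir(n1, n2, i, n):
    # Snell's law, as in TransDir; None means total internal reflection
    eta = n1 / n2
    c1 = -dot(i, n)
    cs2 = 1.0 - eta * eta * (1.0 - c1 * c1)
    if cs2 < 0.0:
        return None
    return normalize(comb(eta, i, eta * c1 - math.sqrt(cs2), n))
```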
diff --git a/tests/asm/gameport.asm b/tests/asm/gameport.asm
deleted file mode 100644
index fb920d9..0000000
--- a/tests/asm/gameport.asm
+++ /dev/null
@@ -1,39 +0,0 @@
-; Game Port
-
-; GAMEPORT.ASM
-;
-
- .MODEL TINY
-
- .DATA
-
- yes DB 13,10,"Game port is installed.",13,10,"$"
- no DB 13,10,"Game port is not installed.",13,10,"$"
-
- .CODE
- ORG 100h
-
-start: mov al, 1 ;value to write to port
- mov dx, 201h ;port number
- out dx, al ;write to port
- mov cx, 0F00h ;# of loops
-
-port_loop:
- in al, dx ;read from port
- and al, 0Fh ;if jstick present, then AL should be
- cmp al, 0Fh ; 0Fh after ANDing with 0Fh.
- je jstick_exists
- loop port_loop
- mov dx, OFFSET no ;gameport not installed
- jmp SHORT done
-
-jstick_exists:
- mov dx, OFFSET yes ;gameport installed
-
-done: mov ah, 9h
- int 21h
-
- mov ax, 4c00h
- int 21h
-
-END start
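The probe above writes to port 201h to fire the joystick one-shots, then polls until the low nibble reads 0Fh; the same loop sketched in Python, with outb/inb as hypothetical stand-ins for the OUT/IN instructions:

```python
def gameport_present(outb, inb, tries=0x0F00):
    # outb(port, value) and inb(port) are hypothetical port-I/O
    # callables; 0x201 is the game port, 0x0F00 the CX loop count above.
    outb(0x201, 1)                      # start the one-shot timers
    for _ in range(tries):
        if inb(0x201) & 0x0F == 0x0F:   # all four axis bits high
            return True                 # game port present
    return False
```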
diff --git a/tests/asm/queueservers.asm b/tests/asm/queueservers.asm
deleted file mode 100644
index 8d34da8..0000000
--- a/tests/asm/queueservers.asm
+++ /dev/null
@@ -1,164 +0,0 @@
-; Gets a list of Queue servers under Novell Netware 3.11
-
-%PAGESIZE 55,200
-%SUBTTL "Get List of Queue Servers under Netware 3.11"
-; Net_Q.Asm
-;
-
- .MODEL SMALL
-
-
- .STACK 100h
-
-DOSint macro function
- mov ah,function
- int 21h
-ENDM
-
- .DATA
- STDOUT = 1 ; the stdout device handle
-
- DOS_WRITE_TO_HANDLE = 040h ; Write to File Handle
- DOS_TERMINATE_EXE = 04Ch ; Terminate Program
-
- NOVELL_FUNCTION = 0E3h
-;
-; Object Types
-; note that they're all big endian
-;
- OT_USER = 0100h
- OT_USER_GROUP = 0200h
- OT_PRINT_QUEUE = 0300h ; Print Queue object type
- OT_FILE_SERVER = 0400h
-
-
-BragMsg DB 0dh,0ah,"NET_Q.EXE",9,"WWW"
- DB 9,"Version 1.00",0dh,0ah
- DB 9,9,"released to the public domain by the author",0dh,0ah,0dh,0ah
-BragLen = $ - BragMsg
-
-Crlf DB 0dh,0ah,0
-
- SCAN_REQ STRUC ; bindery ScanObject request packet structure
- MyLength DW 55 ; the length of this buffer
- Function DB 37h ; scan object subfunction number
- ObjectID DD -1 ; all ones for initial object search
- ObjectType DW -1 ; wild card -- looks for all objects
- ObjNameLen DB 1 ; at least one character
- ObjName DB 47 DUP ('*') ; fill with wildcards to start
- SCAN_REQ ENDS
-
- SCAN_REP STRUC ; bindery ScanObject request packet structure
- MyLength DW 57
- RObjectID DD 0 ; all ones for initial object search
- RObjectType DW 0 ; wild card -- looks for all objects
- RObjName DB 48 DUP (0) ; fill with wildcards to start
- ObjFlag DB 0
- ObjSecurty DB 0
- ObjHasProp DB 0
- ENDS
-
- ScanObjReq SCAN_REQ <>
- ScanObjRep SCAN_REP <>
-
- .CODE
-
-;
-; This is the main part of the code
-;
-; Test code gets and prints the name of all print queues from the
-; logged server -- NO ERROR CHECKING IS DONE, so be careful!
-;
-
-Start:
- mov ax,@data
- mov ds,ax ; set up the data segment
- mov dx,OFFSET BragMsg ; prepare to print out brag line(s)
- mov cx,BragLen
- mov bx,STDOUT ; print to STDOUT
- DOSint DOS_WRITE_TO_HANDLE
- jc Exit ; if carry is set, there was an error
-
- mov [ScanObjReq.ObjectType],OT_PRINT_QUEUE
- ;
- ; in this case the name is already set up, (a wildcard) but if a
- ; specific name were desired, it would be moved to
- ; ScanObjReq.ObjName, with the appropriate length (not including
- ; optional terminating NULL char set up in ScanObjReq.ObjNameLen.
- ;
-@@MoreQueues:
- call BindScan
- jc Exit
-
- lea dx,[ScanObjRep.ObjName]
- call Puts
- lea dx,[Crlf]
- call Puts
- jmp @@MoreQueues
-
-Exit:
- DOSint DOS_TERMINATE_EXE ; return with error code preset in AL
-
-;
-; BindScan
-;
-; scans the bindery for the object name set in the request buffer
-;
-BindScan proc
- push ds si di es dx ax
-
-	lea	si,[ScanObjReq]		; point DS:SI to request buffer
- mov dx,ds
- mov es,dx
-	lea	di,[ScanObjRep]		; point ES:DI to reply buffer
- DOSint NOVELL_FUNCTION
- jb @@Exit
-
- cld ; make sure to count up
- mov si,OFFSET ScanObjRep.ObjectID
- mov di,OFFSET ScanObjReq.ObjectID
- movsw
- movsw
-
- clc
-
-@@Exit:
- pop ax dx es di si ds
- ret
-
-BindScan endp
-
-; Puts
-;
-; prints a NUL terminated string to stdout
-;
-; INPUTS: ds:dx points to ASCIIZ string
-;
-; OUTPUTS: prints string to stdout
-;
-; RETURNS: ax = number of bytes actually printed
-; carry set on error
-;
-; DESTROYED: ax
-;
-Puts proc
- push bx cx di es
-
- push ds
- pop es
- mov cx,0ffffh ; maximum length of string
- mov di,dx
- cld
- mov al,0 ; we're looking for NUL
- repne scasb
- dec di
- mov cx,di
- sub cx,dx
- mov bx,STDOUT ; write to this device
- DOSint DOS_WRITE_TO_HANDLE
-
- pop es di cx bx
- ret
-Puts endp
-
- END Start
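BindScan iterates the bindery as a cursor: each reply's ObjectID is copied back into the next request (the movsw pair), starting from all ones. The same pagination pattern in Python, with scan_object as a hypothetical stand-in for INT 21h function E3h/37h:

```python
OT_PRINT_QUEUE = 0x0300   # big-endian object type, as in the .DATA segment

def list_queues(scan_object):
    # scan_object(object_id, object_type, name) is a hypothetical call
    # returning a reply dict, or None when the scan is exhausted
    # (carry set in the real INT 21h call).
    object_id = 0xFFFFFFFF              # all ones: start of the scan
    while True:
        reply = scan_object(object_id, OT_PRINT_QUEUE, "*")
        if reply is None:
            return
        yield reply["name"]
        object_id = reply["object_id"]  # cursor for the next request
```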
diff --git a/tests/asp/serialnumber.asp b/tests/asp/serialnumber.asp
deleted file mode 100644
index edaef7c..0000000
--- a/tests/asp/serialnumber.asp
+++ /dev/null
@@ -1,14 +0,0 @@
-<html>
-<body>
-
-<%
-dim fs,d
-set fs=Server.CreateObject("Scripting.FileSystemObject")
-set d=fs.GetDrive("c:")
-Response.Write("The serialnumber is " & d.SerialNumber)
-set d=nothing
-set fs=nothing
-%>
-
-</body>
-</html>
diff --git a/tests/awk/ctime.awk b/tests/awk/ctime.awk
deleted file mode 100644
index 0a50d26..0000000
--- a/tests/awk/ctime.awk
+++ /dev/null
@@ -1,11 +0,0 @@
-# ctime.awk
-#
-# awk version of C ctime(3) function
-
-function ctime(ts, format)
-{
- format = "%a %b %d %H:%M:%S %Z %Y"
- if (ts == 0)
- ts = systime() # use current time as default
- return strftime(format, ts)
-}
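Two things worth noting: the unused format parameter is the standard awk idiom for declaring a local variable, and strftime/systime are gawk extensions. For comparison, the same function in Python's standard library:

```python
import time

def ctime(ts=0):
    # ts == 0 means "use the current time", matching the awk default
    fmt = "%a %b %d %H:%M:%S %Z %Y"
    return time.strftime(fmt, time.localtime(ts if ts else None))
```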
diff --git a/tests/basic/threads.bas b/tests/basic/threads.bas
deleted file mode 100644
index 92b68f4..0000000
--- a/tests/basic/threads.bas
+++ /dev/null
@@ -1,30 +0,0 @@
-SuperStrict
-
-' Threading tutorial 1:
-' A basic loading thread
-
-
-' a threadable function
-' threadable functions must return an Object and take one Object as input; the values don't need to be used
-Function loadResources:Object(in:Object)
- Print "Starting a child thread..."
- For Local counter:Int = 0 Until 20 ' just a loop to make stuff happen
- Print "Pretending to load resource " + counter
- Delay(300) ' just to make this take some time like loading a real resource would
- Next
- Print "Child thread complete."
-End Function
-
-
-
-'####### Main code starts here
-
-' Create a thread with loadResources() and Null as its input object value
-Local loadingThread:TThread = CreateThread(loadResources, Null)
-
-Print "Starting the main loop..."
-While(ThreadRunning(loadingThread)) ' as long as that child thread is still running...
- Print "Waiting on our resources..."
- Delay(100) ' we could do whatever we want here...
-Wend
-Print "Main loop complete."
diff --git a/tests/cobol/Acme99.cbl b/tests/cobol/Acme99.cbl
deleted file mode 100644
index 3dca945..0000000
--- a/tests/cobol/Acme99.cbl
+++ /dev/null
@@ -1,200 +0,0 @@
- $ SET SOURCEFORMAT"FREE"
-IDENTIFICATION DIVISION.
-PROGRAM-ID. ACME99.
-AUTHOR. Michael Coughlan.
-*CS431399R-EXAM.
-
-ENVIRONMENT DIVISION.
-INPUT-OUTPUT SECTION.
-FILE-CONTROL.
- SELECT ORDER-FILE ASSIGN TO "ORDERS.DAT"
- ORGANIZATION IS LINE SEQUENTIAL.
-
- SELECT STOCK-FILE ASSIGN TO "STOCK.DAT"
- ORGANIZATION IS RELATIVE
- ACCESS MODE IS DYNAMIC
- RELATIVE KEY IS STOCK-REC-POINTER-WB
- FILE STATUS IS STOCK-STATUS-WB.
-
- SELECT MANF-FILE ASSIGN TO "MANF.DAT"
- ORGANIZATION IS INDEXED
- ACCESS MODE IS RANDOM
- RECORD KEY IS MANF-CODE-FC
- ALTERNATE RECORD KEY IS MANF-NAME-FC
- WITH DUPLICATES
- FILE STATUS IS MANF-STATUS-WB.
-
-
-
-DATA DIVISION.
-FILE SECTION.
-FD ORDER-FILE.
-01 ORDER-REC-FA.
- 02 ITEM-DESC-FA PIC X(30).
- 02 MANF-NAME-FA PIC X(30).
- 02 QTY-REQUIRED-FA PIC 9(6).
- 02 COST-OF-ITEMS-FA PIC 9(5)V99.
- 02 POSTAGE-FA PIC 99V99.
-
-FD STOCK-FILE.
-01 STOCK-REC-FB.
- 02 STOCK-NUM-FB PIC 9(5).
- 02 MANF-CODE-FB PIC X(4).
- 02 ITEM-DESC-FB PIC X(30).
- 02 QTY-IN-STOCK-FB PIC 9(6).
- 02 REORDER-LEVEL-FB PIC 999.
- 02 REORDER-QTY-FB PIC 9(6).
- 02 ITEM-COST-FB PIC 9(5).
- 02 ITEM-WEIGHT-FB PIC 9(5).
- 02 ON-ORDER-FB PIC X.
- 88 NOT-ON-ORDER VALUE "N".
- 88 ON-ORDER VALUE "Y".
-
-FD MANF-FILE.
-01 MANF-REC-FC.
- 02 MANF-CODE-FC PIC X(4).
- 02 MANF-NAME-FC PIC X(30).
- 02 MANF-ADDRESS-FC PIC X(70).
-
-
-
-
-WORKING-STORAGE SECTION.
-01 CALL-ITEMS-WA.
- 02 POST-CHARGE-WA PIC 99V99.
- 02 POST-NUM-WA PIC 99.
-
-01 FILE-DATA-WB.
- 02 STOCK-REC-POINTER-WB PIC 9(5).
- 02 STOCK-STATUS-WB PIC XX.
- 02 MANF-STATUS-WB PIC XX.
- 02 FILLER PIC 9 VALUE 0.
- 88 END-OF-FILE VALUE 1.
-
-01 UNSTRING-DATA-WC.
- 02 UNSTRING-POINTER-WC PIC 99.
- 88 END-OF-ADDRESS VALUE 71.
- 02 HOLD-STRING-WC PIC X(10).
- 02 COUNTY-WC PIC X(9).
- 88 NORTHERN-COUNTY
- VALUE "ANTRIM", "ARMAGH", "DERRY", "DOWN",
- "FERMANAGH", "TYRONE".
- 02 COUNTRY-WC PIC X(10).
- 88 EEC-COUNTRY
- VALUE "AUSTRIA", "BELGIUM", "DENMARK", "ENGLAND", "FINLAND",
- "FRANCE", "GERMANY", "GREECE", "IRELAND", "ITALY",
- "LUXEMBOURG", "PORTUGAL", "SCOTLAND", "SPAIN",
- "SWEDEN", "WALES".
- 88 IRELAND VALUE "IRELAND".
-
- 02 COUNTRY-FLAGS-WC PIC 9.
- 88 OTHER-EEC VALUE 1.
- 88 REPUBLIC VALUE 0.
-
-01 POSTAGE-DATA-WD.
- 02 TOTAL-WEIGHT-WD PIC 9(5).
- 88 OVER-WEIGHT VALUE 50001 THRU 99999.
-
-
-
-PROCEDURE DIVISION.
-CREATE-REORDER-FILE.
- OPEN I-O STOCK-FILE.
- OPEN INPUT MANF-FILE.
- OPEN OUTPUT ORDER-FILE.
- READ STOCK-FILE NEXT RECORD
- AT END SET END-OF-FILE TO TRUE
- END-READ.
- PERFORM UNTIL END-OF-FILE
- IF (QTY-IN-STOCK-FB NOT GREATER THAN REORDER-LEVEL-FB)
- AND (NOT-ON-ORDER)
- PERFORM CREATE-REORDER-RECORD
- PERFORM UPDATE-STOCK-RECORD
- END-IF
- READ STOCK-FILE NEXT RECORD
- AT END SET END-OF-FILE TO TRUE
- END-READ
- END-PERFORM.
- CLOSE STOCK-FILE, MANF-FILE, ORDER-FILE.
- STOP RUN.
-
-CREATE-REORDER-RECORD.
- MOVE MANF-CODE-FB TO MANF-CODE-FC.
- READ MANF-FILE
- KEY IS MANF-CODE-FC
- INVALID KEY DISPLAY "CRR MANF STATUS = "
- MANF-STATUS-WB "CODE = " MANF-CODE-FC
- END-READ.
- PERFORM EXTRACT-ADDRESS-ITEMS.
-
- MOVE ZEROS TO POSTAGE-FA, COST-OF-ITEMS-FA.
- IF EEC-COUNTRY
- PERFORM GET-POSTAGE
- MULTIPLY ITEM-COST-FB BY REORDER-QTY-FB
- GIVING COST-OF-ITEMS-FA
- MOVE POST-CHARGE-WA TO POSTAGE-FA
- END-IF.
-
- MOVE ITEM-DESC-FB TO ITEM-DESC-FA.
- MOVE MANF-NAME-FC TO MANF-NAME-FA.
- MOVE REORDER-QTY-FB TO QTY-REQUIRED-FA.
- WRITE ORDER-REC-FA.
-
-GET-POSTAGE.
- IF IRELAND AND NOT NORTHERN-COUNTY
- SET REPUBLIC TO TRUE
- ELSE
- SET OTHER-EEC TO TRUE
- END-IF.
- MULTIPLY ITEM-WEIGHT-FB BY REORDER-QTY-FB
- GIVING TOTAL-WEIGHT-WD
- ON SIZE ERROR MOVE 99999 TO TOTAL-WEIGHT-WD.
-
- EVALUATE TOTAL-WEIGHT-WD ALSO REPUBLIC ALSO OTHER-EEC
- WHEN 1 THRU 500 ALSO TRUE ALSO FALSE MOVE 1 TO POST-NUM-WA
- WHEN 1 THRU 500 ALSO FALSE ALSO TRUE MOVE 2 TO POST-NUM-WA
- WHEN 501 THRU 1000 ALSO TRUE ALSO FALSE MOVE 3 TO POST-NUM-WA
- WHEN 501 THRU 1000 ALSO FALSE ALSO TRUE MOVE 4 TO POST-NUM-WA
- WHEN 1001 THRU 3000 ALSO TRUE ALSO FALSE MOVE 5 TO POST-NUM-WA
- WHEN 1001 THRU 3000 ALSO FALSE ALSO TRUE MOVE 6 TO POST-NUM-WA
- WHEN 3001 THRU 5000 ALSO TRUE ALSO FALSE MOVE 7 TO POST-NUM-WA
- WHEN 3001 THRU 5000 ALSO FALSE ALSO TRUE MOVE 8 TO POST-NUM-WA
- WHEN 5001 THRU 10000 ALSO TRUE ALSO FALSE MOVE 9 TO POST-NUM-WA
- WHEN 5001 THRU 10000 ALSO FALSE ALSO TRUE MOVE 10 TO POST-NUM-WA
- WHEN 10001 THRU 50000 ALSO TRUE ALSO FALSE MOVE 11 TO POST-NUM-WA
- WHEN 10001 THRU 50000 ALSO FALSE ALSO TRUE MOVE 12 TO POST-NUM-WA
- WHEN 50001 THRU 99999 ALSO ANY ALSO ANY MOVE ZEROS
- TO POST-CHARGE-WA
- WHEN OTHER DISPLAY "EVALUATE WRONG:- WEIGHT = " TOTAL-WEIGHT-WD
- " COUNTRY FLAG = " COUNTRY-FLAGS-WC
- END-EVALUATE.
- IF NOT OVER-WEIGHT
- CALL "POSTAGE-RATE"
- USING BY CONTENT POST-NUM-WA
- BY REFERENCE POST-CHARGE-WA
- END-IF.
-
-
-
-UPDATE-STOCK-RECORD.
- MOVE "Y" TO ON-ORDER-FB.
- REWRITE STOCK-REC-FB
- INVALID KEY DISPLAY "STOCK REWRITE STATUS = " STOCK-STATUS-WB
- END-REWRITE.
-
-
-
-EXTRACT-ADDRESS-ITEMS.
- MOVE 1 TO UNSTRING-POINTER-WC.
- PERFORM UNTIL END-OF-ADDRESS
- MOVE HOLD-STRING-WC TO COUNTY-WC
- UNSTRING MANF-ADDRESS-FC DELIMITED BY ","
- INTO HOLD-STRING-WC
- WITH POINTER UNSTRING-POINTER-WC
- END-PERFORM.
- MOVE HOLD-STRING-WC TO COUNTRY-WC.
-
-*debugging displays
- DISPLAY "COUNTY = " COUNTY-WC.
- DISPLAY "COUNTRY = " COUNTRY-WC.
-
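GET-POSTAGE's EVALUATE is a decision table: six weight bands times two destinations (Republic vs. other EEC) map to rate numbers 1-12, and anything over 50000 is over-weight. The same table as a Python sketch, with the band edges copied from the COBOL above:

```python
BANDS = [500, 1000, 3000, 5000, 10000, 50000]  # upper bound of each band

def post_num(total_weight, republic):
    # Returns the rate number 1-12, or None when over-weight
    # (the COBOL then moves zeros to the postage charge).
    for band, upper in enumerate(BANDS):
        if total_weight <= upper:
            # odd numbers for the Republic, even for other EEC countries
            return 2 * band + (1 if republic else 2)
    return None
```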
diff --git a/tests/cobol/DriverProg.cbl b/tests/cobol/DriverProg.cbl
deleted file mode 100644
index 7af7f1d..0000000
--- a/tests/cobol/DriverProg.cbl
+++ /dev/null
@@ -1,132 +0,0 @@
- $ SET SOURCEFORMAT"FREE"
-IDENTIFICATION DIVISION.
-PROGRAM-ID. DriverProg.
-AUTHOR. Michael Coughlan.
-* This program demonstrates the use of the CALL verb
-* it calls three external sub-programs that help to demonstrate
-* some of the features of the CALL.
-* The "MultiplyNums" sub-program takes five parameters. The first two
-* are the numbers to be multiplied, the second two are strings to
-* demonstrate that strings can be passed as parameters and the
-* last is the returned result of multiplying the two numbers.
-* The "Fickle" sub-program demonstrates a program that exhibits
-* State Memory.
-* The "Steadfast" sub-program demonstrates how a sub-program that
-* uses the IS INITIAL phrase can avoid State Memory.
-
-ENVIRONMENT DIVISION.
-DATA DIVISION.
-
-WORKING-STORAGE SECTION.
-01 UserNumber PIC 99.
-
-01 PrnResult PIC 9(6).
-* A field declared as COMP cannot be DISPLAYed;
-* it is necessary to move it to a DISPLAY field.
-* DISPLAY is the default USAGE for a field and
-* need not be declared.
-
-
-* Parameters must be either 01-level items or elementary
-* data-items.
-01 Parameters.
- 02 Number1 PIC 9(3).
- 02 Number2 PIC 9(3).
- 02 FirstString PIC X(19) VALUE "First parameter = ".
- 02 SecondString PIC X(19) VALUE "Second parameter = ".
- 02 Result PIC 9(6) COMP.
-* I've made this a COMP field to demonstrate that COMP
-* items can be passed as parameters but a COMP field cannot
-* be DISPLAYed and so is moved to a DISPLAY field before DISPLAYing it.
-
-
-
-PROCEDURE DIVISION.
-Begin.
- PERFORM CallMultiplyNums.
- PERFORM CallFickle
- PERFORM CallSteadfast
-
- PERFORM MakeFickleSteadfast.
-
- STOP RUN.
-
-
-CallMultiplyNums.
- DISPLAY "Input 2 numbers (3 digits each) to be multiplied"
- DISPLAY "First number - " WITH NO ADVANCING
- ACCEPT Number1
- DISPLAY "Second number - " WITH NO ADVANCING
- ACCEPT Number2.
- DISPLAY "The first string is " FirstString.
- DISPLAY "The second string is " SecondString.
- DISPLAY ">>>>>>>>> Calling the sub-program now".
-
- CALL "MultiplyNums"
- USING BY CONTENT Number1, Number2, FirstString,
- BY REFERENCE SecondString, Result.
-
-* The USING phrase specifies the parameters to be passed to the
-* sub-program. The order of the parameters is important as the
-* sub-program recognizes them by relative location not by name
-*
-* Parameters should be passed BY CONTENT when you are not expecting
-* them to get a value from the called program. We have not passed
-* SecondString by content and you can see that its value is
-* overwritten by the called program.
-
- DISPLAY "Back in the main program now <<<<<<<<<<<".
- MOVE Result to PrnResult.
- DISPLAY Number1 " multiplied by " Number2 " is = " PrnResult.
-
- DISPLAY "The first string is " FirstString.
- DISPLAY "The second string is " SecondString.
-
-
-CallFickle.
- DISPLAY SPACE
- DISPLAY "------------------- Calling Fickle ---------"
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber.
-* Every time I call Fickle with the same value
-* it produces a different result. This is because
-* it remembers its state from one call to the next.
-* It has "State Memory".
-
-
-CallSteadFast.
- DISPLAY SPACE
- DISPLAY "------------------- Calling Steadfast ---------"
- MOVE 10 TO UserNumber
- CALL "Steadfast" USING BY CONTENT UserNumber
- MOVE 10 TO UserNumber
- CALL "Steadfast" USING BY CONTENT UserNumber
- MOVE 10 TO UserNumber
- CALL "Steadfast" USING BY CONTENT UserNumber.
-* Every time I call Steadfast with the same value
-* it produces the same result. We have eliminated
-* State Memory by using the IS INITIAL phrase in
-* Steadfast
-
-
-MakeFickleSteadfast.
- DISPLAY SPACE
- DISPLAY "----- Making fickle act like Steadfast -------"
- CANCEL "Fickle"
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber
-
- CANCEL "Fickle"
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber
-
- CANCEL "Fickle"
- MOVE 10 TO UserNumber
- CALL "Fickle" USING BY CONTENT UserNumber.
-* We can make Fickle act like Steadfast by using
-* the CANCEL verb to set it into its initial state
-* each time we call it
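The Fickle/Steadfast contrast demonstrates "state memory": WORKING-STORAGE keeps its values between CALLs unless the sub-program IS INITIAL or is CANCELed. A minimal Python analogue of the same behavior:

```python
def make_fickle():
    calls = 0
    def fickle(n):
        nonlocal calls        # survives between calls, like
        calls += 1            # WORKING-STORAGE without IS INITIAL
        print(f"Fickle called with {n}: call #{calls}")
    return fickle

def steadfast(n):
    calls = 1                 # re-initialized on every call, like IS INITIAL
    print(f"Steadfast called with {n}: call #{calls}")

fickle = make_fickle()
for _ in range(3):
    fickle(10)                # call #1, #2, #3: a different result each time
    steadfast(10)             # identical output every time
fickle = make_fickle()        # ~ CANCEL "Fickle": back to the initial state
```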
diff --git a/tests/coffee/campfire.coffee b/tests/coffee/campfire.coffee
deleted file mode 100644
index 6fb5586..0000000
--- a/tests/coffee/campfire.coffee
+++ /dev/null
@@ -1,251 +0,0 @@
-Robot = require '../robot'
-Adapter = require '../adapter'
-
-HTTPS = require 'https'
-EventEmitter = require('events').EventEmitter
-
-class Campfire extends Adapter
-
- send: (user, strings...) ->
- if strings.length > 0
- @bot.Room(user.room).speak strings.shift(), (err, data) =>
- @robot.logger.error "Campfire error: #{err}" if err?
- @send user, strings...
-
- reply: (user, strings...) ->
- @send user, strings.map((str) -> "#{user.name}: #{str}")...
-
- topic: (user, strings...) ->
- @bot.Room(user.room).topic strings.join(" / "), (err, data) =>
- @robot.logger.error "Campfire error: #{err}" if err?
-
- run: ->
- self = @
-
- options =
- token: process.env.HUBOT_CAMPFIRE_TOKEN
- rooms: process.env.HUBOT_CAMPFIRE_ROOMS
- account: process.env.HUBOT_CAMPFIRE_ACCOUNT
-
- bot = new CampfireStreaming(options, @robot)
-
- withAuthor = (callback) -> (id, created, room, user, body) ->
- bot.User user, (err, userData) ->
- if userData.user
- author = self.userForId(userData.user.id, userData.user)
- self.robot.brain.data.users[userData.user.id].name = userData.user.name
- self.robot.brain.data.users[userData.user.id].email_address = userData.user.email_address
- author.room = room
- callback id, created, room, user, body, author
-
- bot.on "TextMessage", withAuthor (id, created, room, user, body, author) ->
- unless bot.info.id == author.id
- self.receive new Robot.TextMessage(author, body)
-
- bot.on "EnterMessage", withAuthor (id, created, room, user, body, author) ->
- unless bot.info.id == author.id
- self.receive new Robot.EnterMessage(author)
-
- bot.on "LeaveMessage", withAuthor (id, created, room, user, body, author) ->
- unless bot.info.id == author.id
- self.receive new Robot.LeaveMessage(author)
-
- bot.Me (err, data) ->
- bot.info = data.user
- bot.name = bot.info.name
-
- for roomId in bot.rooms
- do (roomId) ->
- bot.Room(roomId).join (err, callback) ->
- bot.Room(roomId).listen()
-
- bot.on "reconnect", (roomId) ->
- bot.Room(roomId).join (err, callback) ->
- bot.Room(roomId).listen()
-
- @bot = bot
-
- self.emit "connected"
-
-exports.use = (robot) ->
- new Campfire robot
-
-class CampfireStreaming extends EventEmitter
- constructor: (options, @robot) ->
- unless options.token? and options.rooms? and options.account?
- @robot.logger.error "Not enough parameters provided. I Need a token, rooms and account"
- process.exit(1)
-
- @token = options.token
- @rooms = options.rooms.split(",")
- @account = options.account
- @domain = @account + ".campfirenow.com"
- @authorization = "Basic " + new Buffer("#{@token}:x").toString("base64")
-
- Rooms: (callback) ->
- @get "/rooms", callback
-
- User: (id, callback) ->
- @get "/users/#{id}", callback
-
- Me: (callback) ->
- @get "/users/me", callback
-
- Room: (id) ->
- self = @
- logger = @robot.logger
-
- show: (callback) ->
- self.post "/room/#{id}", "", callback
-
- join: (callback) ->
- self.post "/room/#{id}/join", "", callback
-
- leave: (callback) ->
- self.post "/room/#{id}/leave", "", callback
-
- lock: (callback) ->
- self.post "/room/#{id}/lock", "", callback
-
- unlock: (callback) ->
- self.post "/room/#{id}/unlock", "", callback
-
- # say things to this channel on behalf of the token user
- paste: (text, callback) ->
- @message text, "PasteMessage", callback
-
- topic: (text, callback) ->
- body = {room: {topic: text}}
- self.put "/room/#{id}", body, callback
-
- sound: (text, callback) ->
- @message text, "SoundMessage", callback
-
- speak: (text, callback) ->
- body = { message: { "body":text } }
- self.post "/room/#{id}/speak", body, callback
-
- message: (text, type, callback) ->
- body = { message: { "body":text, "type":type } }
- self.post "/room/#{id}/speak", body, callback
-
- # listen for activity in channels
- listen: ->
- headers =
- "Host" : "streaming.campfirenow.com"
- "Authorization" : self.authorization
-
- options =
- "agent" : false
- "host" : "streaming.campfirenow.com"
- "port" : 443
- "path" : "/room/#{id}/live.json"
- "method" : "GET"
- "headers": headers
-
- request = HTTPS.request options, (response) ->
- response.setEncoding("utf8")
-
- buf = ''
-
- response.on "data", (chunk) ->
- if chunk is ' '
- # campfire api sends a ' ' heartbeat every 3s
-
- else if chunk.match(/^\s*Access Denied/)
- # errors are not json formatted
- logger.error "Campfire error on room #{id}: #{chunk}"
-
- else
- # api uses newline terminated json payloads
- # buffer across tcp packets and parse out lines
- buf += chunk
-
- while (offset = buf.indexOf("\r")) > -1
- part = buf.substr(0, offset)
- buf = buf.substr(offset + 1)
-
- if part
- try
- data = JSON.parse part
- self.emit data.type, data.id, data.created_at, data.room_id, data.user_id, data.body
- catch err
- logger.error "Campfire error: #{err}"
-
- response.on "end", ->
- logger.error "Streaming connection closed for room #{id}. :("
- setTimeout (->
- self.emit "reconnect", id
- ), 5000
-
- response.on "error", (err) ->
- logger.error "Campfire response error: #{err}"
-
- request.on "error", (err) ->
- logger.error "Campfire request error: #{err}"
-
- request.end()
-
-  # Convenience HTTP Methods for posting on behalf of the token'd user
- get: (path, callback) ->
- @request "GET", path, null, callback
-
- post: (path, body, callback) ->
- @request "POST", path, body, callback
-
- put: (path, body, callback) ->
- @request "PUT", path, body, callback
-
- request: (method, path, body, callback) ->
- logger = @robot.logger
-
- headers =
- "Authorization" : @authorization
- "Host" : @domain
- "Content-Type" : "application/json"
-
- options =
- "agent" : false
- "host" : @domain
- "port" : 443
- "path" : path
- "method" : method
- "headers": headers
-
- if method is "POST" || method is "PUT"
- if typeof(body) isnt "string"
- body = JSON.stringify body
-
- body = new Buffer(body)
- options.headers["Content-Length"] = body.length
-
- request = HTTPS.request options, (response) ->
- data = ""
-
- response.on "data", (chunk) ->
- data += chunk
-
- response.on "end", ->
- if response.statusCode >= 400
- switch response.statusCode
- when 401
- throw new Error "Invalid access token provided, campfire refused the authentication"
- else
- logger.error "Campfire error: #{response.statusCode}"
-
- try
- callback null, JSON.parse(data)
- catch err
- callback null, data or { }
-
- response.on "error", (err) ->
- logger.error "Campfire response error: #{err}"
- callback err, { }
-
- if method is "POST" || method is "PUT"
- request.end(body, 'binary')
- else
- request.end()
-
- request.on "error", (err) ->
- logger.error "Campfire request error: #{err}"
diff --git a/tests/cpp/DeadlockDetector.h b/tests/cpp/DeadlockDetector.h
deleted file mode 100644
index 0f9e3ab..0000000
--- a/tests/cpp/DeadlockDetector.h
+++ /dev/null
@@ -1,588 +0,0 @@
-/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: sw=4 ts=4 et :
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is mozilla.org code.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1998
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- * Chris Jones
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-#ifndef mozilla_DeadlockDetector_h
-#define mozilla_DeadlockDetector_h
-
-#include <stdio.h>
-
-#include "plhash.h"
-#include "prlock.h"
-
-#include "nsTArray.h"
-
-#ifdef NS_TRACE_MALLOC
-# include "nsTraceMalloc.h"
-#endif // ifdef NS_TRACE_MALLOC
-
-namespace mozilla {
-
-
-// FIXME bug 456272: split this off into a convenience API on top of
-// nsStackWalk?
-class NS_COM_GLUE CallStack
-{
-private:
-#ifdef NS_TRACE_MALLOC
- typedef nsTMStackTraceID callstack_id;
- // needs to be a macro to avoid disturbing the backtrace
-# define NS_GET_BACKTRACE() NS_TraceMallocGetStackTrace()
-#else
- typedef void* callstack_id;
-# define NS_GET_BACKTRACE() 0
-#endif // ifdef NS_TRACE_MALLOC
-
- callstack_id mCallStack;
-
-public:
- /**
- * CallStack
- * *ALWAYS* *ALWAYS* *ALWAYS* call this with no arguments. This
- * constructor takes an argument *ONLY* so that |NS_GET_BACKTRACE()|
- * can be evaluated in the stack frame of the caller, rather than
- * that of the constructor.
- *
- * *BEWARE*: this means that calling this constructor with no
- * arguments is not the same as a "default, do-nothing"
- * constructor: it *will* construct a backtrace. This can cause
- * unexpected performance issues.
- */
- CallStack(const callstack_id aCallStack = NS_GET_BACKTRACE()) :
- mCallStack(aCallStack)
- {
- }
- CallStack(const CallStack& aFrom) :
- mCallStack(aFrom.mCallStack)
- {
- }
- CallStack& operator=(const CallStack& aFrom)
- {
- mCallStack = aFrom.mCallStack;
- return *this;
- }
- bool operator==(const CallStack& aOther) const
- {
- return mCallStack == aOther.mCallStack;
- }
- bool operator!=(const CallStack& aOther) const
- {
- return mCallStack != aOther.mCallStack;
- }
-
- // FIXME bug 456272: if this is split off,
- // NS_TraceMallocPrintStackTrace should be modified to print into
- // an nsACString
- void Print(FILE* f) const
- {
-#ifdef NS_TRACE_MALLOC
- if (this != &kNone && mCallStack) {
- NS_TraceMallocPrintStackTrace(f, mCallStack);
- return;
- }
-#endif
- fputs(" [stack trace unavailable]\n", f);
- }
-
- /** The "null" callstack. */
- static const CallStack kNone;
-};
-
-
-/**
- * DeadlockDetector
- *
- * The following is an approximate description of how the deadlock detector
- * works.
- *
- * The deadlock detector ensures that all blocking resources are
- * acquired according to a partial order P. One type of blocking
- * resource is a lock. If a lock l1 is acquired (locked) before l2,
- * then we say that |l1 <_P l2|. The detector flags an error if two
- * locks l1 and l2 have an inconsistent ordering in P; that is, if
- * both |l1 <_P l2| and |l2 <_P l1|. This is a potential error
- * because a thread acquiring l1,l2 according to the first order might
- * race with a thread acquiring them according to the second order.
- * If this happens under the right conditions, then the acquisitions
- * will deadlock.
- *
- * This deadlock detector doesn't know at compile-time what P is. So,
- * it tries to discover the order at run time. More precisely, it
- * finds some order P, then tries to find chains of resource
- * acquisitions that violate P. An example acquisition sequence, and
- * the orders they impose, is
- * l1.lock() // current chain: [ l1 ]
- * // order: { }
- *
- * l2.lock() // current chain: [ l1, l2 ]
- * // order: { l1 <_P l2 }
- *
- * l3.lock() // current chain: [ l1, l2, l3 ]
- * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
- * // (note: <_P is transitive, so also |l1 <_P l3|)
- *
- * l2.unlock() // current chain: [ l1, l3 ]
- * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3 }
- * // (note: it's OK, but weird, that l2 was unlocked out
- * // of order. we still have l1 <_P l3).
- *
- * l2.lock() // current chain: [ l1, l3, l2 ]
- * // order: { l1 <_P l2, l2 <_P l3, l1 <_P l3,
- * l3 <_P l2 (!!!) }
- * BEEP BEEP! Here the detector will flag a potential error, since
- * l2 and l3 were used inconsistently (and potentially in ways that
- * would deadlock).
- */
-template <typename T>
-class DeadlockDetector
-{
-public:
- /**
- * ResourceAcquisition
- * Consists simply of a resource and the calling context from
- * which it was acquired. We pack this information together so
- * that it can be returned back to the caller when a potential
- * deadlock has been found.
- */
- struct ResourceAcquisition
- {
- const T* mResource;
- CallStack mCallContext;
-
- ResourceAcquisition(
- const T* aResource,
- const CallStack aCallContext=CallStack::kNone) :
- mResource(aResource),
- mCallContext(aCallContext)
- {
- }
- ResourceAcquisition(const ResourceAcquisition& aFrom) :
- mResource(aFrom.mResource),
- mCallContext(aFrom.mCallContext)
- {
- }
- ResourceAcquisition& operator=(const ResourceAcquisition& aFrom)
- {
- mResource = aFrom.mResource;
- mCallContext = aFrom.mCallContext;
- return *this;
- }
- };
-    typedef nsTArray<ResourceAcquisition> ResourceAcquisitionArray;
-
-private:
-    typedef nsTArray<PLHashEntry*> HashEntryArray;
- typedef typename HashEntryArray::index_type index_type;
- typedef typename HashEntryArray::size_type size_type;
- enum {
- NoIndex = HashEntryArray::NoIndex
- };
-
- /**
- * Value type for the ordering table. Contains the other
- * resources on which an ordering constraint |key < other|
- * exists. The catch is that we also store the calling context at
- * which the other resource was acquired; this improves the
- * quality of error messages when potential deadlock is detected.
- */
- struct OrderingEntry
- {
- OrderingEntry() :
- mFirstSeen(CallStack::kNone),
- mOrderedLT() // FIXME bug 456272: set to empirical
- { // dep size?
- }
- ~OrderingEntry()
- {
- }
-
- CallStack mFirstSeen; // first site from which the resource appeared
- HashEntryArray mOrderedLT; // this <_o Other
- };
-
- static void* TableAlloc(void* /*pool*/, PRSize size)
- {
- return operator new(size);
- }
- static void TableFree(void* /*pool*/, void* item)
- {
- operator delete(item);
- }
- static PLHashEntry* EntryAlloc(void* /*pool*/, const void* key)
- {
- return new PLHashEntry;
- }
- static void EntryFree(void* /*pool*/, PLHashEntry* entry, PRUintn flag)
- {
-        delete static_cast<T*>(const_cast<void*>(entry->key));
-        delete static_cast<OrderingEntry*>(entry->value);
- entry->value = 0;
- if (HT_FREE_ENTRY == flag)
- delete entry;
- }
- static PLHashNumber HashKey(const void* aKey)
- {
- return NS_PTR_TO_INT32(aKey) >> 2;
- }
- static const PLHashAllocOps kAllocOps;
-
- // Hash table "interface" the rest of the code should use
-
- PLHashEntry** GetEntry(const T* aKey)
- {
- return PL_HashTableRawLookup(mOrdering, HashKey(aKey), aKey);
- }
-
- void PutEntry(T* aKey)
- {
- PL_HashTableAdd(mOrdering, aKey, new OrderingEntry());
- }
-
- // XXX need these helper methods because OrderingEntry doesn't have
- // XXX access to underlying PLHashEntry
-
- /**
- * Add the order |aFirst <_o aSecond|.
- *
- * WARNING: this does not check whether it's sane to add this
- * order. In the "best" bad case, when this order already exists,
- * adding it anyway may unnecessarily result in O(n^2) space. In
- * the "worst" bad case, adding it anyway will cause
- * |InTransitiveClosure()| to diverge.
- */
- void AddOrder(PLHashEntry* aLT, PLHashEntry* aGT)
- {
-        static_cast<OrderingEntry*>(aLT->value)->mOrderedLT
- .InsertElementSorted(aGT);
- }
-
- /**
- * Return true iff the order |aFirst < aSecond| has been
- * *explicitly* added.
- *
- * Does not consider transitivity.
- */
- bool IsOrdered(const PLHashEntry* aFirst, const PLHashEntry* aSecond)
- const
- {
- return NoIndex !=
-            static_cast<const OrderingEntry*>(aFirst->value)->mOrderedLT
- .BinaryIndexOf(aSecond);
- }
-
- /**
- * Return a pointer to the array of all elements "that" for
- * which the order |this < that| has been explicitly added.
- *
- * NOTE: this does *not* consider transitive orderings.
- */
- PLHashEntry* const* GetOrders(const PLHashEntry* aEntry) const
- {
-        return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
- .Elements();
- }
-
- /**
- * Return the number of elements "that" for which the order
- * |this < that| has been explicitly added.
- *
- * NOTE: this does *not* consider transitive orderings.
- */
- size_type NumOrders(const PLHashEntry* aEntry) const
- {
-        return static_cast<const OrderingEntry*>(aEntry->value)->mOrderedLT
- .Length();
- }
-
- /** Make a ResourceAcquisition out of |aEntry|. */
- ResourceAcquisition MakeResourceAcquisition(const PLHashEntry* aEntry)
- const
- {
- return ResourceAcquisition(
-            static_cast<const T*>(aEntry->key),
-            static_cast<const OrderingEntry*>(aEntry->value)->mFirstSeen);
- }
-
- // Throwaway RAII lock to make the following code safer.
- struct PRAutoLock
- {
- PRAutoLock(PRLock* aLock) : mLock(aLock) { PR_Lock(mLock); }
- ~PRAutoLock() { PR_Unlock(mLock); }
- PRLock* mLock;
- };
-
-public:
- static const PRUint32 kDefaultNumBuckets;
-
- /**
- * DeadlockDetector
- * Create a new deadlock detector.
- *
- * @param aNumResourcesGuess Guess at approximate number of resources
- * that will be checked.
- */
- DeadlockDetector(PRUint32 aNumResourcesGuess = kDefaultNumBuckets)
- {
- mOrdering = PL_NewHashTable(aNumResourcesGuess,
- HashKey,
- PL_CompareValues, PL_CompareValues,
- &kAllocOps, 0);
- if (!mOrdering)
- NS_RUNTIMEABORT("couldn't initialize resource ordering table");
-
- mLock = PR_NewLock();
- if (!mLock)
- NS_RUNTIMEABORT("couldn't allocate deadlock detector lock");
- }
-
- /**
- * ~DeadlockDetector
- *
- * *NOT* thread safe.
- */
- ~DeadlockDetector()
- {
- PL_HashTableDestroy(mOrdering);
- PR_DestroyLock(mLock);
- }
-
- /**
- * Add
- * Make the deadlock detector aware of |aResource|.
- *
- * WARNING: The deadlock detector owns |aResource|.
- *
- * Thread safe.
- *
- * @param aResource Resource to make deadlock detector aware of.
- */
- void Add(T* aResource)
- {
- PRAutoLock _(mLock);
- PutEntry(aResource);
- }
-
- // Nb: implementing a Remove() method makes the detector "more
- // unsound." By removing a resource from the orderings, deadlocks
- // may be missed that would otherwise have been found. However,
- // removing resources possibly reduces the # of false positives,
- // and additionally saves space. So it's a trade off; we have
- // chosen to err on the side of caution and not implement Remove().
-
- /**
- * CheckAcquisition This method is called after acquiring |aLast|,
- * but before trying to acquire |aProposed| from |aCallContext|.
- * It determines whether actually trying to acquire |aProposed|
- * will create problems. It is OK if |aLast| is NULL; this is
- * interpreted as |aProposed| being the thread's first acquisition
- * of its current chain.
- *
- * Iff acquiring |aProposed| may lead to deadlock for some thread
- * interleaving (including the current one!), the cyclical
- * dependency from which this was deduced is returned. Otherwise,
- * 0 is returned.
- *
- * If a potential deadlock is detected and a resource cycle is
- * returned, it is the *caller's* responsibility to free it.
- *
- * Thread safe.
- *
- * @param aLast Last resource acquired by calling thread (or 0).
- * @param aProposed Resource calling thread proposes to acquire.
- * @param aCallContext Calling context whence acquisiton request came.
- */
- ResourceAcquisitionArray* CheckAcquisition(const T* aLast,
- const T* aProposed,
- const CallStack& aCallContext)
- {
- NS_ASSERTION(aProposed, "null resource");
- PRAutoLock _(mLock);
-
- PLHashEntry* second = *GetEntry(aProposed);
-        OrderingEntry* e = static_cast<OrderingEntry*>(second->value);
- if (CallStack::kNone == e->mFirstSeen)
- e->mFirstSeen = aCallContext;
-
- if (!aLast)
- // don't check if |0 < proposed|; just vamoose
- return 0;
-
- PLHashEntry* first = *GetEntry(aLast);
-
- // this is the crux of the deadlock detector algorithm
-
- if (first == second) {
- // reflexive deadlock. fastpath b/c InTransitiveClosure is
- // not applicable here.
- ResourceAcquisitionArray* cycle = new ResourceAcquisitionArray();
- if (!cycle)
- NS_RUNTIMEABORT("can't allocate dep. cycle array");
- cycle->AppendElement(MakeResourceAcquisition(first));
- cycle->AppendElement(ResourceAcquisition(aProposed,
- aCallContext));
- return cycle;
- }
- if (InTransitiveClosure(first, second)) {
- // we've already established |last < proposed|. all is well.
- return 0;
- }
- if (InTransitiveClosure(second, first)) {
- // the order |proposed < last| has been deduced, perhaps
- // transitively. we're attempting to violate that
- // constraint by acquiring resources in the order
- // |last < proposed|, and thus we may deadlock under the
- // right conditions.
- ResourceAcquisitionArray* cycle = GetDeductionChain(second, first);
- // show how acquiring |proposed| would complete the cycle
- cycle->AppendElement(ResourceAcquisition(aProposed,
- aCallContext));
- return cycle;
- }
- // |last|, |proposed| are unordered according to our
- // poset. this is fine, but we now need to add this
- // ordering constraint.
- AddOrder(first, second);
- return 0;
- }
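-
- // A minimal usage sketch (illustrative; not from the original file).
- // |Mutex|, |gA| and |gB| are assumed names, and CallStack() is assumed
- // to capture the current calling context:
- //
- // DeadlockDetector<Mutex> sDetector;
- // sDetector.Add(gA); // the detector now owns gA
- // sDetector.Add(gB);
- // // ... later, holding gA and about to acquire gB:
- // ResourceAcquisitionArray* cycle =
- // sDetector.CheckAcquisition(gA, gB, CallStack());
- // if (cycle) {
- // // Potential deadlock; after reporting it, the *caller*
- // // must free the returned array.
- // delete cycle;
- // }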
-
- /**
- * Return true iff |aTarget| is in the transitive closure of |aStart|
- * over the ordering relation `<_this'.
- *
- * @precondition |aStart != aTarget|
- */
- bool InTransitiveClosure(const PLHashEntry* aStart,
- const PLHashEntry* aTarget) const
- {
- if (IsOrdered(aStart, aTarget))
- return true;
-
- index_type i = 0;
- size_type len = NumOrders(aStart);
- for (const PLHashEntry* const* it = GetOrders(aStart);
- i < len; ++i, ++it)
- if (InTransitiveClosure(*it, aTarget))
- return true;
- return false;
- }
-
- /**
- * Return an array of all resource acquisitions
- * aStart <_this r1 <_this r2 <_ ... <_ aTarget
- * from which |aStart <_this aTarget| was deduced, including
- * |aStart| and |aTarget|.
- *
- * Nb: there may be multiple deductions of |aStart <_this
- * aTarget|. This function returns the first ordering found by
- * depth-first search.
- *
- * Nb: |InTransitiveClosure| could be replaced by this function.
- * However, this one is more expensive because we record the DFS
- * search stack on the heap whereas the other doesn't.
- *
- * @precondition |aStart != aTarget|
- */
- ResourceAcquisitionArray* GetDeductionChain(
- const PLHashEntry* aStart,
- const PLHashEntry* aTarget)
- {
- ResourceAcquisitionArray* chain = new ResourceAcquisitionArray();
- if (!chain)
- NS_RUNTIMEABORT("can't allocate dep. cycle array");
- chain->AppendElement(MakeResourceAcquisition(aStart));
-
- NS_ASSERTION(GetDeductionChain_Helper(aStart, aTarget, chain),
- "GetDeductionChain called when there's no deadlock");
- return chain;
- }
-
- // precondition: |aStart != aTarget|
- // invariant: |aStart| is the last element in |aChain|
- bool GetDeductionChain_Helper(const PLHashEntry* aStart,
- const PLHashEntry* aTarget,
- ResourceAcquisitionArray* aChain)
- {
- if (IsOrdered(aStart, aTarget)) {
- aChain->AppendElement(MakeResourceAcquisition(aTarget));
- return true;
- }
-
- index_type i = 0;
- size_type len = NumOrders(aStart);
- for (const PLHashEntry* const* it = GetOrders(aStart);
- i < len; ++i, ++it) {
- aChain->AppendElement(MakeResourceAcquisition(*it));
- if (GetDeductionChain_Helper(*it, aTarget, aChain))
- return true;
- aChain->RemoveElementAt(aChain->Length() - 1);
- }
- return false;
- }
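-
- // Worked example (illustrative; not from the original file): after
- // AddOrder calls establishing |a < b|, |a < d| and |b < c|, a call to
- // GetDeductionChain(a, c) seeds the chain with [a]; the helper visits
- // a's successors in insertion order, appends b, finds IsOrdered(b, c),
- // appends c, and returns [a, b, c]. A dead-end branch such as a->d
- // would have been popped off again by RemoveElementAt.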
-
- /**
- * The partial order on resource acquisitions used by the deadlock
- * detector.
- */
- PLHashTable* mOrdering; // T* -> PLHashEntry
-
- /**
- * Protects contentious methods.
- * Nb: can't use mozilla::Mutex since we are used as its deadlock
- * detector.
- */
- PRLock* mLock;
-
- DeadlockDetector(const DeadlockDetector& aDD);
- DeadlockDetector& operator=(const DeadlockDetector& aDD);
-};
-
-
-template<typename T>
-const PLHashAllocOps DeadlockDetector<T>::kAllocOps = {
- DeadlockDetector<T>::TableAlloc, DeadlockDetector<T>::TableFree,
- DeadlockDetector<T>::EntryAlloc, DeadlockDetector<T>::EntryFree
-};
-
-
-template<typename T>
-// FIXME bug 456272: tune based on average workload
-const PRUint32 DeadlockDetector<T>::kDefaultNumBuckets = 64;
-
-
-} // namespace mozilla
-
-#endif // ifndef mozilla_DeadlockDetector_h
diff --git a/tests/cpp/ceded-test.cpp b/tests/cpp/ceded-test.cpp
deleted file mode 100644
index 5a706e8..0000000
--- a/tests/cpp/ceded-test.cpp
+++ /dev/null
@@ -1,597 +0,0 @@
-/* Test file for C++ language.
- * Attempt to include as many aspects of the C++ language as possible.
- * Do not include things tested in test.c since that shares the
- * same language.
- *
- * $Id: test.cpp,v 1.22 2008/05/17 20:12:55 zappo Exp $
- *
- */
-
-/* An include test */
-#include <stdio.h>
-
-#include <cmath>
-
-#include "c++-test.hh"
-
-#include <c++-test.hh>
-
-double var1 = 1.2;
-
-int simple1(int a) {
-
-}
-
-struct foo1 {
- int test;
-};
-
-struct foo2 : public foo1 {
- const int foo21(int a, int b);
- const int foo22(int a, int b) { return 1 }
-};
-
-/* Classes */
-class class1 {
-private:
- int var11;
- struct foo1 var12;
-public:
- int p_var11;
- struct foo p_var12;
-};
-
-class i_class1 : public class1 {
-private:
- int var11;
- struct foo var12;
-public:
- int p_var11;
- struct foo p_var12;
-};
-
-class class2 {
-private:
- int var21;
- struct foo var22;
-public:
- int p_var21;
- struct foo p_var22;
-};
-
-class i_class2 : public class1, public class2 {
-private:
- int var21;
- struct foo var22;
-protected:
- int pt_var21;
-public:
- int p_var21;
- struct foo p_var22;
-};
-
-class class3 {
- /* A class with strange things in it */
-public:
- class3(); /* A constructor */
- enum embedded_foo_enum {
- a, b, c
- } embed1;
- struct embedded_bar_struct {
- int a;
- int b;
- } embed2;
- class embedded_baz_class {
- embedded_baz_class();
- ~embedded_baz_class();
- } embed3;
- ~class3(); /* destructor */
-
- /* Methods */
- int method_for_class3(int a, char b);
-
- int inline_method(int c) { return c; }
-
- /* Operators */
- class3& operator^= (const class3& something);
-
- /* Funny declmods */
- const class3 * const method_const_ptr_ptr(const int * const argconst) const = 0;
-};
-
-class3::class3()
-{
- /* Constructor outside the definition. */
-}
-
-int class3::method_for_class3(int a, char b)
-{
-}
-
-int class3::method1_for_class3( int a, int &b)
-{
- int cvariablename;
- class3 fooy[];
- class3 moose = new class3;
-
- // Completion testing line should find external members.
- a = fooy[1].me ;
- b = cv ;
-
- if (fooy.emb) {
- simple1(c);
- }
-
- cos(10);
- abs(10);
-
- return 1;
-}
-
-char class3::method2_for_class3( int a, int b) throw ( exception1 )
-{
- return 'a';
-}
-
-void *class3::method3_for_class3( int a, int b) throw ( exception1, exception2 )
-{
- int q = a;
- return "Moose";
-}
-
-void *class3::method31_for_class3( int a, int b) throw ( )
-{
- int q = a;
- return "Moose";
-}
-
-void *class3::method4_for_class3( int a, int b) reentrant
-{
- class3 ct;
-
- ct.method5_for_class3(1,a);
-
- pritf();
-}
-
-/*
- * A method on class3.
- */
-void *class3::method5_for_class3( int a, int b) const
-{
-}
-
-/*
- * Namespace parsing tests
- */
-namespace NS {
- class class_in_namespace {
- int equiv(const NS::class_in_namespace *) const;
- };
-}
-
-int NS::class_in_namespace::equiv(const NS::class_in_namespace *cin) const
-{
- return 0;
-}
-
-// Stuff Klaus found.
-// Inheritance w/out a specifying for public.
-class class4 : class1 {
- // Pure virtual methods.
- void virtual print () const = 0;
-
-public:
- // The whacky constructor type
- class4()
- try : class1(args)
- {
- // constructor body
- }
- catch ()
- {
-
- }
-
-
-};
-
-class class5 : public virtual class4 {
- // Virtual inheritance
-};
-
-class class6 : class1 {
- // Mutable
- mutable int i;
-};
-
-/* Namespaces */
-namespace namespace1 {
- void ns_method1() { }
-
- class n_class1 {
- public:
- void method11(int a) { }
- };
-
- /* This shouldn't parse due to missing semicolon. */
- class _n_class2 : public n_class1 {
- void n_c2_method1(int a, int b) { }
- };
-
- // Macros in the namespace
-#define NSMACRO 1
-
- // Template in the namespace
- template<class T> T nsti1(const Foo<T>& foo);
- template<> int nsti1<int>(const Foo<int>& foo);
-
-}
-
-namespace namespace2 {
-
- using namespace1::n_class1;
-
-}
-
-/* Initializers */
-void tinitializers1(): inita1(False),
- inita2(False)
-{
- inita1= 1;
-}
-
-/* How about Extern C type things. */
-int funny_prototype(int ,int b,float c)
-{
-
-}
-
-extern "C"
-int extern_c_1(int a, int b)
-{
-
- funny_prototype(1,2,3.4);
-
- printf("Moose", );
-
- return 1;
-}
-
-extern "C" {
-
- int extern_c_2(int a, int b)
- {
- return 1;
- }
-
-}
-
-// Some operator stuff
-class Action
-{
- // Problems!! operator() and operator[] could not be parsed with semantic
- // 1.4.2, but the latest c.by handles them.
- virtual void operator()(int i, char *p ) = 0;
- virtual String& operator[]() = 0;
- virtual void operator!() = 0;
- virtual void operator->() = 0;
- virtual T& operator+=();
- virtual T& operator*();
- virtual T& operator*=();
-};
-
-// class with namespace qualified parents
-class Multiinherit : public virtual POA::Parent,
- public virtual POA::Parent1,
- Parent
-{
-private:
- int i;
-
-public:
- Multiinherit();
- ~Multiinherit();
-
- // method with a list of qualified exceptions
- void* throwtest()
- throw(Exception0,
- Testnamespace::Exception1,
- Testnamespace::Excpetion2,
- Testnamespace::testnamespace1::Exception3);
-
-};
-
-void*
-Multiinherit::throwtest()
- throw (Exception0,
- Testnamespace::Exception1,
- Testnamespace::Excpetion2,
- Testnamespace::testnamespace1::Exception3)
-{
- return;
-}
-
-// Jens Rock : Nested classes or structs defined
-// outside of the containing class/struct.
-class container
-{
- public:
- struct contained;
- container();
- ~container();
-};
-
-struct container::contained
-{
- public:
- contained();
- ~contained();
-};
-
-/*
- * Ok, how about some template stuff.
- */
-template <class CT, class container = vector<CT> >
-const CT& max (const CT& a, const CT& b)
-{
- return a < b ? b : a;
-}
-
-// Arne Schmitz found this one
-std::vector<int> &a, &b, &c;
-
-class TemplateUsingClass
-{
- typedef TestClassMap::iterator iterator;
- typedef map<long, TestClass> TestClassMap;
-
- // typedefs with const and volatile
- typedef const map<long, TestClass> const_TestClassMap;
- typedef TestClassMap::iterator volatile volatile_iterator;
-
- map<long, TestClass> mapclassvarthingy;
-};
-
-template<class T> T ti1(const Foo<T>& foo);
-template<> int ti1<int>(const Foo<int>& foo);
-
-
-// -----------------------------------
-// Now some namespace and related stuff
-// -----------------------------------
-
-using CORBA::LEX::get_token;
-using Namespace1;
-
-using namespace POA::std;
-using namespace Test;
-
-
-
-namespace Parser
-{
- namespace
- {
- using Lexer::get_test;
- string str = "";
- }
-
- namespace XXX
- {
-
- class Foobar : public virtual POA::Parent,
- public virtual POA::Parent1,
- private POA::list<Parent>,
- private map<string, int>
- {
- int i;
- list<vector<int> >::const_iterator l;
- public:
-
- Foobar();
- ~Foobar();
- };
- }
-
-
- void test_function(int i);
-
-};
-
-// unnamed namespaces - even nested
-namespace
-{
- namespace
- {
- using Lexer::get_test;
- string str = "";
- class FooClass
- {
- FooClass();
- };
- }
-
- // some builtin types
- long long ll = 0;
- long double d = 0.0;
- unsigned test;
- unsigned long int **uli = 0;
- signed si = 0;
- signed short ss = 0;
- short int i = 0;
- long int li = 0;
-
- // expressions with namespace/class-qualifiers
- ORB_var cGlobalOrb = ORB::_nil();
- ORB_var1 cGlobalOrb1 = ORB::_test;
-
- class Testclass
- {
- #define TEST 0
- ini i;
-
- public:
-
- Testclass();
- ~Testclass();
- };
-
- static void test_function(unsigned int i);
-
-};
-
-
-// outside method implementations which should be grouped to type Test
-XXX&
-Test::waiting()
-{
- return;
-}
-
-void
-Test::print()
-{
- return;
-}
-
-// outside method implementations with namespaces which should be grouped to
-// their complete (incl. namespace) types
-void*
-Parser::XXX::Foobar::wait(int i, const char const * const * p)
-{
- return;
-}
-
-void*
-Namespace1::Test::wait1(int i)
-{
- return;
-}
-
-int
-Namespace1::Test::waiting(int i)
-{
- return;
-}
-
-// a class with some outside implementations which should all be grouped to
-// this class declaration
-class ClassWithExternals
-{
-private:
- int i;
-
-public:
- ClassWithExternals();
- ~ClassWithExternals();
- void non_nil();
-};
-
-
-// Foobar is not displayed; it seems that semantic tries to add this to the
-// class Foobar but cannot find/display it, because it is contained in the
-// namespace above.
-void
-Foobar::non_nil()
-{
- return;
-}
-
-// These are correctly grouped to the ClassWithExternals class.
-void
-ClassWithExternals::non_nil()
-{
- String s = "lödfjg dlfgkdlfkgjdl";
- return;
-}
-
-ClassWithExternals::ClassWithExternals()
-{
- return;
-}
-
-void
-ClassWithExternals::~ClassWithExternals()
-{
- return;
-}
-
-
-// -------------------------------
-// Now some macro and define stuff
-// -------------------------------
-
-#define TEST 0
-#define TEST1 "String"
-
-// The first backslash makes this macro unmatched syntax for semantic 1.4.2!
-// When the lexer treats backslash+newline as nothing, everything works fine.
-#define MZK_ENTER(METHOD) \
-{ \
- CzkMethodLog lMethodLog(METHOD,"Framework");\
-}
-
-#define ZK_ASSERTM(METHOD,ASSERTION,MESSAGE) \
- { if(!(ASSERTION))\
- {\
- std::ostringstream lMesgStream; \
- lMesgStream << "Assertion failed: " \
- << MESSAGE; \
- CzkLogManager::doLog(CzkLogManager::FATAL,"",METHOD, \
- "Assert",lMesgStream); \
- assert(ASSERTION);\
- }\
- }
-
-// Test that backslashes not followed by a newline are handled correctly
-string s = "My \"quoted\" string";
-
-// parsed fine as macro
-#define FOO (arg) method(arg, "foo");
-
-// With semantic 1.4.2 this was parsed as macro BAR *and* function method.
-// With the latest c.bnf, at least one-line macros can be parsed correctly.
-#define BAR (arg) CzkMessageLog method(arg, "bar");
-
-// some const and volatile stuff
-char * p1 = "Hello"; // 1. variable Pointer, variable Data
-const char * p2 = "Hello"; // 2. variable pointer, constant data
-char * const p3 = "Hello"; // 3. constant pointer, variable data
-const char * const p4 = "Hello"; // 4. constant pointer, constant data
-
-// Case 2 and 4 can exchange first "const" and "char"
-char const * p21 = "Hello"; // variable pointer, constant data
-char const * const p41 = "Hello"; // constant pointer, constant data
-
-char volatile a = 0; // a volatile char
-void foo(bar const &arg); // a reference to a const bar
-int foobar(bar const * const p); // a const pointer to a const bar
-int foobar(bar const volatile * const p); // a const pointer to a const volatile bar
-int foobar3(char* p); // a plain pointer to char
-
-// Should not be parsed because this is invalid code
-int const & const r3 = i;
-
-boolean i = 0;
-boolean & r1 = i;
-boolean const & r2 = i;
-
-// const * sequences can be very long in C++ ;-)
-char const * const * const * const * ppp;
-
-// complex function declarations with named pointer-arguments
-const char** foobar1(volatile char const * const **p);
-const char** foobar11(volatile Test::Namespace::Char const * const **p);
-
-// complex function declarations with unnamed pointer-arguments
-const char* foobar2(const char***);
-const char* foobar21(const Test::Namespace::Char***);
-
-// string literal parsing even with wchar_t
-char const *p = "string1";
-char const *q = "string1" "str\"ing2" "string3";
-wchar_t testc = L'a';
-
-wchar_t const *wp = L"string with a \" in it";
-wchar_t const *wq = L"string \n\t\"test" L"string2";
-wchar_t const *wr = L"string L";
diff --git a/tests/cpp/issue82.cpp b/tests/cpp/issue82.cpp
deleted file mode 100644
index 8267704..0000000
--- a/tests/cpp/issue82.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-#include
-
-namespace phantom { namespace io_stream { namespace proto_http {
-namespace handler_bts {
-
-} // namespace handler_bts
-
-}}} // namespace phantom::io_stream::proto_http
diff --git a/tests/cpp/jstracer.cpp b/tests/cpp/jstracer.cpp
deleted file mode 100644
index 8ff4923..0000000
--- a/tests/cpp/jstracer.cpp
+++ /dev/null
@@ -1,14392 +0,0 @@
-/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=4 sw=4 et tw=99:
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
- * May 28, 2008.
- *
- * The Initial Developer of the Original Code is
- * Brendan Eich
- *
- * Contributor(s):
- * Andreas Gal
- * Mike Shaver
- * David Anderson
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#include "jsstdint.h"
-#include "jsbit.h" // low-level (NSPR-based) headers next
-#include "jsprf.h"
-#include <math.h> // standard headers next
-
-#if defined(_MSC_VER) || defined(__MINGW32__)
-#include <malloc.h>
-#ifdef _MSC_VER
-#define alloca _alloca
-#endif
-#endif
-#ifdef SOLARIS
-#include <alloca.h>
-#endif
-#include <limits.h>
-
-#include "nanojit/nanojit.h"
-#include "jsapi.h" // higher-level library and API headers
-#include "jsarray.h"
-#include "jsbool.h"
-#include "jscntxt.h"
-#include "jsdate.h"
-#include "jsdbgapi.h"
-#include "jsemit.h"
-#include "jsfun.h"
-#include "jsinterp.h"
-#include "jsiter.h"
-#include "jsmath.h"
-#include "jsobj.h"
-#include "jsopcode.h"
-#include "jsregexp.h"
-#include "jsscope.h"
-#include "jsscript.h"
-#include "jsstaticcheck.h"
-#include "jstracer.h"
-#include "jsxml.h"
-
-#include "jsatominlines.h"
-#include "jsscriptinlines.h"
-
-#include "jsautooplen.h" // generated headers last
-#include "imacros.c.out"
-
-using namespace nanojit;
-
-#if JS_HAS_XML_SUPPORT
-#define ABORT_IF_XML(v) \
- JS_BEGIN_MACRO \
- if (!JSVAL_IS_PRIMITIVE(v) && OBJECT_IS_XML(BOGUS_CX, JSVAL_TO_OBJECT(v)))\
- ABORT_TRACE("xml detected"); \
- JS_END_MACRO
-#else
-#define ABORT_IF_XML(v) ((void) 0)
-#endif
-
-/*
- * Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and
- * the type. What you want to use is JSVAL_IS_SPECIAL(x) and then handle the
- * undefined case properly (bug 457363).
- */
-#undef JSVAL_IS_BOOLEAN
-#define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0)
-
-JS_STATIC_ASSERT(sizeof(JSTraceType) == 1);
-
-/* Map to translate a type tag into a printable representation. */
-static const char typeChar[] = "OIDXSNBF";
-static const char tagChar[] = "OIDISIBI";
-
-/* Blacklist parameters. */
-
-/*
- * Number of iterations of a loop where we start tracing. That is, we don't
- * start tracing until the beginning of the HOTLOOP-th iteration.
- */
-#define HOTLOOP 2
-
-/* Attempt recording this many times before blacklisting permanently. */
-#define BL_ATTEMPTS 2
-
-/* Skip this many hits before attempting recording again, after an aborted attempt. */
-#define BL_BACKOFF 32
-
-/* Number of times we wait to exit on a side exit before we try to extend the tree. */
-#define HOTEXIT 1
-
-/* Number of times we try to extend the tree along a side exit. */
-#define MAXEXIT 3
-
-/* Maximum number of peer trees allowed. */
-#define MAXPEERS 9
-
-/* Max call depths for inlining. */
-#define MAX_CALLDEPTH 10
-
-/* Max native stack size. */
-#define MAX_NATIVE_STACK_SLOTS 1024
-
-/* Max call stack size. */
-#define MAX_CALL_STACK_ENTRIES 64
-
-/* Max global object size. */
-#define MAX_GLOBAL_SLOTS 4096
-
-/* Max memory needed to rebuild the interpreter stack when falling off trace. */
-#define MAX_INTERP_STACK_BYTES \
- (MAX_NATIVE_STACK_SLOTS * sizeof(jsval) + \
- MAX_CALL_STACK_ENTRIES * sizeof(JSInlineFrame) + \
- sizeof(JSInlineFrame)) /* possibly slow native frame at top of stack */
-
-/* Max number of branches per tree. */
-#define MAX_BRANCHES 32
-
-#define CHECK_STATUS(expr) \
- JS_BEGIN_MACRO \
- JSRecordingStatus _status = (expr); \
- if (_status != JSRS_CONTINUE) \
- return _status; \
- JS_END_MACRO
-
-#ifdef JS_JIT_SPEW
-#define ABORT_TRACE_RV(msg, value) \
- JS_BEGIN_MACRO \
- debug_only_printf(LC_TMAbort, "abort: %d: %s\n", __LINE__, (msg)); \
- return (value); \
- JS_END_MACRO
-#else
-#define ABORT_TRACE_RV(msg, value) return (value)
-#endif
-
-#define ABORT_TRACE(msg) ABORT_TRACE_RV(msg, JSRS_STOP)
-#define ABORT_TRACE_ERROR(msg) ABORT_TRACE_RV(msg, JSRS_ERROR)
-
-#ifdef JS_JIT_SPEW
-struct __jitstats {
-#define JITSTAT(x) uint64 x;
-#include "jitstats.tbl"
-#undef JITSTAT
-} jitstats = { 0LL, };
-
-JS_STATIC_ASSERT(sizeof(jitstats) % sizeof(uint64) == 0);
-
-enum jitstat_ids {
-#define JITSTAT(x) STAT ## x ## ID,
-#include "jitstats.tbl"
-#undef JITSTAT
- STAT_IDS_TOTAL
-};
-
-static JSPropertySpec jitstats_props[] = {
-#define JITSTAT(x) { #x, STAT ## x ## ID, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT },
-#include "jitstats.tbl"
-#undef JITSTAT
- { 0 }
-};
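-
-/*
- * The three JITSTAT expansions above are an "X macro" table: jitstats.tbl
- * is just a list of JITSTAT(name) lines, and each #include re-expands the
- * same list under a different JITSTAT definition. A minimal sketch of the
- * technique (stats.tbl and its entries are hypothetical, for illustration):
- *
- * // stats.tbl
- * STAT(loopsTraced)
- * STAT(sideExits)
- *
- * // consumer
- * struct Stats {
- * #define STAT(x) uint64 x;
- * #include "stats.tbl"
- * #undef STAT
- * };
- * static const char* statNames[] = {
- * #define STAT(x) #x,
- * #include "stats.tbl"
- * #undef STAT
- * };
- */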
-
-static JSBool
-jitstats_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
-{
- int index = -1;
-
- if (JSVAL_IS_STRING(id)) {
- JSString* str = JSVAL_TO_STRING(id);
- if (strcmp(JS_GetStringBytes(str), "HOTLOOP") == 0) {
- *vp = INT_TO_JSVAL(HOTLOOP);
- return JS_TRUE;
- }
- }
-
- if (JSVAL_IS_INT(id))
- index = JSVAL_TO_INT(id);
-
- uint64 result = 0;
- switch (index) {
-#define JITSTAT(x) case STAT ## x ## ID: result = jitstats.x; break;
-#include "jitstats.tbl"
-#undef JITSTAT
- default:
- *vp = JSVAL_VOID;
- return JS_TRUE;
- }
-
- if (result < JSVAL_INT_MAX) {
- *vp = INT_TO_JSVAL(result);
- return JS_TRUE;
- }
- char retstr[64];
- JS_snprintf(retstr, sizeof retstr, "%llu", result);
- *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, retstr));
- return JS_TRUE;
-}
-
-JSClass jitstats_class = {
- "jitstats",
- 0,
- JS_PropertyStub, JS_PropertyStub,
- jitstats_getProperty, JS_PropertyStub,
- JS_EnumerateStub, JS_ResolveStub,
- JS_ConvertStub, NULL,
- JSCLASS_NO_OPTIONAL_MEMBERS
-};
-
-void
-js_InitJITStatsClass(JSContext *cx, JSObject *glob)
-{
- JS_InitClass(cx, glob, NULL, &jitstats_class, NULL, 0, jitstats_props, NULL, NULL, NULL);
-}
-
-#define AUDIT(x) (jitstats.x++)
-#else
-#define AUDIT(x) ((void)0)
-#endif /* JS_JIT_SPEW */
-
-/*
- * INS_CONSTPTR can be used to embed arbitrary pointers into the native code. It should not
- * be used directly to embed GC thing pointers. Instead, use the INS_CONSTOBJ/FUN/STR/SPROP
- * variants which ensure that the embedded pointer will be kept alive across GCs.
- */
-
-#define INS_CONST(c) addName(lir->insImm(c), #c)
-#define INS_CONSTPTR(p) addName(lir->insImmPtr(p), #p)
-#define INS_CONSTWORD(v) addName(lir->insImmPtr((void *) (v)), #v)
-#define INS_CONSTVAL(v) addName(insImmVal(v), #v)
-#define INS_CONSTOBJ(obj) addName(insImmObj(obj), #obj)
-#define INS_CONSTFUN(fun) addName(insImmFun(fun), #fun)
-#define INS_CONSTSTR(str) addName(insImmStr(str), #str)
-#define INS_CONSTSPROP(sprop) addName(insImmSprop(sprop), #sprop)
-#define INS_ATOM(atom) INS_CONSTSTR(ATOM_TO_STRING(atom))
-#define INS_NULL() INS_CONSTPTR(NULL)
-#define INS_VOID() INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))
-
-static avmplus::AvmCore s_core = avmplus::AvmCore();
-static avmplus::AvmCore* core = &s_core;
-
-/* Allocator SPI implementation. */
-
-void*
-nanojit::Allocator::allocChunk(size_t nbytes)
-{
- VMAllocator *vma = (VMAllocator*)this;
- JS_ASSERT(!vma->outOfMemory());
- void *p = malloc(nbytes);
- if (!p) {
- JS_ASSERT(nbytes < sizeof(vma->mReserve));
- vma->mOutOfMemory = true;
- p = (void*) &vma->mReserve[0];
- }
- vma->mSize += nbytes;
- return p;
-}
-
-void
-nanojit::Allocator::freeChunk(void *p) {
- VMAllocator *vma = (VMAllocator*)this;
- if (p != &vma->mReserve[0])
- free(p);
-}
-
-void
-nanojit::Allocator::postReset() {
- VMAllocator *vma = (VMAllocator*)this;
- vma->mOutOfMemory = false;
- vma->mSize = 0;
-}
-
-
-#ifdef JS_JIT_SPEW
-static void
-DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
-#endif
-
-/*
- * We really need a better way to configure the JIT. Shaver, where is
- * my fancy JIT object?
- *
- * NB: this is raced on, if jstracer.cpp should ever be running MT.
- * I think it's harmless tho.
- */
-static bool did_we_check_processor_features = false;
-
-/* ------ Debug logging control ------ */
-
-/*
- * All the logging control stuff lives in here. It is shared between
- * all threads, but I think that's OK.
- */
-LogControl js_LogController;
-
-#ifdef JS_JIT_SPEW
-
-/*
- * NB: this is raced on too, if jstracer.cpp should ever be running MT.
- * Also harmless.
- */
-static bool did_we_set_up_debug_logging = false;
-
-static void
-InitJITLogController()
-{
- char *tm, *tmf;
- uint32_t bits;
-
- js_LogController.lcbits = 0;
-
- tm = getenv("TRACEMONKEY");
- if (tm) {
- fflush(NULL);
- printf(
- "The environment variable $TRACEMONKEY has been replaced by $TMFLAGS.\n"
- "Try 'TMFLAGS=help js -j' for a list of options.\n"
- );
- exit(0);
- }
-
- tmf = getenv("TMFLAGS");
- if (!tmf) return;
-
- /* Using strstr() is really a cheap hack as far as flag decoding goes. */
- if (strstr(tmf, "help")) {
- fflush(NULL);
- printf(
- "usage: TMFLAGS=option,option,option,... where options can be:\n"
- "\n"
- " help show this message\n"
- " ------ options for jstracer & jsregexp ------\n"
- " minimal ultra-minimalist output; try this first\n"
- " full everything except 'treevis' and 'nocodeaddrs'\n"
- " tracer tracer lifetime (FIXME:better description)\n"
- " recorder trace recording stuff (FIXME:better description)\n"
- " abort show trace recording aborts\n"
- " stats show trace recording stats\n"
- " regexp show compilation & entry for regexps\n"
- " treevis spew that tracevis/tree.py can parse\n"
- " ------ options for Nanojit ------\n"
- " fragprofile count entries and exits for each fragment\n"
- " activation show activation info\n"
- " liveness show LIR liveness at start of rdr pipeline\n"
- " readlir show LIR as it enters the reader pipeline\n"
- " aftersf show LIR after StackFilter\n"
- " regalloc show regalloc details\n"
- " assembly show final aggregated assembly code\n"
- " nocodeaddrs don't show code addresses in assembly listings\n"
- "\n"
- );
- exit(0);
- /*NOTREACHED*/
- }
-
- bits = 0;
-
- /* flags for jstracer.cpp */
- if (strstr(tmf, "minimal") || strstr(tmf, "full")) bits |= LC_TMMinimal;
- if (strstr(tmf, "tracer") || strstr(tmf, "full")) bits |= LC_TMTracer;
- if (strstr(tmf, "recorder") || strstr(tmf, "full")) bits |= LC_TMRecorder;
- if (strstr(tmf, "abort") || strstr(tmf, "full")) bits |= LC_TMAbort;
- if (strstr(tmf, "stats") || strstr(tmf, "full")) bits |= LC_TMStats;
- if (strstr(tmf, "regexp") || strstr(tmf, "full")) bits |= LC_TMRegexp;
- if (strstr(tmf, "treevis")) bits |= LC_TMTreeVis;
-
- /* flags for nanojit */
- if (strstr(tmf, "fragprofile")) bits |= LC_FragProfile;
- if (strstr(tmf, "liveness") || strstr(tmf, "full")) bits |= LC_Liveness;
- if (strstr(tmf, "activation") || strstr(tmf, "full")) bits |= LC_Activation;
- if (strstr(tmf, "readlir") || strstr(tmf, "full")) bits |= LC_ReadLIR;
- if (strstr(tmf, "aftersf") || strstr(tmf, "full")) bits |= LC_AfterSF;
- if (strstr(tmf, "regalloc") || strstr(tmf, "full")) bits |= LC_RegAlloc;
- if (strstr(tmf, "assembly") || strstr(tmf, "full")) bits |= LC_Assembly;
- if (strstr(tmf, "nocodeaddrs")) bits |= LC_NoCodeAddrs;
-
- js_LogController.lcbits = bits;
- return;
-
-}
-#endif
-
-/* ------------------ Frag-level profiling support ------------------ */
-
-#ifdef JS_JIT_SPEW
-
-/*
- * All the allocations done by this profile data-collection and
- * display machinery, are done in JSTraceMonitor::profAlloc. That is
- * emptied out at the end of js_FinishJIT. It has a lifetime from
- * js_InitJIT to js_FinishJIT, which exactly matches the span
- * js_FragProfiling_init to js_FragProfiling_showResults.
- */
-template<typename T>
-static
-Seq<T>* reverseInPlace(Seq<T>* seq)
-{
- Seq<T>* prev = NULL;
- Seq<T>* curr = seq;
- while (curr) {
- Seq<T>* next = curr->tail;
- curr->tail = prev;
- prev = curr;
- curr = next;
- }
- return prev;
-}
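-
-// Illustrative trace (not from the original file): reverseInPlace walks
-// the ->tail chain once, re-pointing each node at its predecessor, so a
-// list [g3 -> g2 -> g1] becomes [g1 -> g2 -> g3] in O(n) time and O(1)
-// space. It is used below to undo the reversed order in which
-// f->guardsForFrag accumulates GuardRecords.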
-
-// The number of top blocks to show in the profile
-#define N_TOP_BLOCKS 50
-
-// Contains profile info for a single guard
-struct GuardPI {
- uint32_t guardID; // identifying number
- uint32_t count; // count.
-};
-
-struct FragPI {
- uint32_t count; // entry count for this Fragment
- uint32_t nStaticExits; // statically: the number of exits
- size_t nCodeBytes; // statically: the number of insn bytes in the main fragment
- size_t nExitBytes; // statically: the number of insn bytes in the exit paths
- Seq<GuardPI>* guards; // guards, each with its own count
- uint32_t largestGuardID; // that exists in .guards
-};
-
-/* A mapping of Fragment.profFragID to FragPI */
-typedef HashMap<uint32, FragPI> FragStatsMap;
-
-void
-js_FragProfiling_FragFinalizer(Fragment* f, JSTraceMonitor* tm)
-{
- // Recover profiling data from 'f', which is logically at the end
- // of its useful lifetime.
- if (!(js_LogController.lcbits & LC_FragProfile))
- return;
-
- NanoAssert(f);
- // Valid profFragIDs start at 1
- NanoAssert(f->profFragID >= 1);
- // Should be called exactly once per Fragment. This will assert if
- // you issue the same FragID to more than one Fragment.
- NanoAssert(!tm->profTab->containsKey(f->profFragID));
-
- FragPI pi = { f->profCount,
- f->nStaticExits,
- f->nCodeBytes,
- f->nExitBytes,
- NULL, 0 };
-
- // Begin sanity check on the guards
- SeqBuilder<GuardPI> guardsBuilder(*tm->profAlloc);
- GuardRecord* gr;
- uint32_t nGs = 0;
- uint32_t sumOfDynExits = 0;
- for (gr = f->guardsForFrag; gr; gr = gr->nextInFrag) {
- nGs++;
- // Also copy the data into our auxiliary structure.
- // f->guardsForFrag is in reverse order, and so this
- // copy preserves that ordering (->add adds at end).
- // Valid profGuardIDs start at 1.
- NanoAssert(gr->profGuardID > 0);
- sumOfDynExits += gr->profCount;
- GuardPI gpi = { gr->profGuardID, gr->profCount };
- guardsBuilder.add(gpi);
- if (gr->profGuardID > pi.largestGuardID)
- pi.largestGuardID = gr->profGuardID;
- }
- pi.guards = guardsBuilder.get();
- // And put the guard list in forwards order
- pi.guards = reverseInPlace(pi.guards);
-
- // Why is this so? Because nGs is the number of guards
- // at the time the LIR was generated, whereas f->nStaticExits
- // is the number of them observed by the time it makes it
- // through to the assembler. It can be the case that LIR
- // optimisation removes redundant guards; hence we expect
- // nGs to always be the same or higher.
- NanoAssert(nGs >= f->nStaticExits);
-
- // Also we can assert that the sum of the exit counts
- // can't exceed the entry count. It'd be nice to assert that
- // they are exactly equal, but we can't because we don't know
- // how many times we got to the end of the trace.
- NanoAssert(f->profCount >= sumOfDynExits);
-
- // End sanity check on guards
-
- tm->profTab->put(f->profFragID, pi);
-}
-
-static void
-js_FragProfiling_showResults(JSTraceMonitor* tm)
-{
- uint32_t topFragID[N_TOP_BLOCKS];
- FragPI topPI[N_TOP_BLOCKS];
- uint64_t totCount = 0, cumulCount;
- uint32_t totSE = 0;
- size_t totCodeB = 0, totExitB = 0;
- memset(topFragID, 0, sizeof(topFragID));
- memset(topPI, 0, sizeof(topPI));
- FragStatsMap::Iter iter(*tm->profTab);
- while (iter.next()) {
- uint32_t fragID = iter.key();
- FragPI pi = iter.value();
- uint32_t count = pi.count;
- totCount += (uint64_t)count;
- /* Find the rank for this entry, in tops */
- int r = N_TOP_BLOCKS-1;
- while (true) {
- if (r == -1)
- break;
- if (topFragID[r] == 0) {
- r--;
- continue;
- }
- if (count > topPI[r].count) {
- r--;
- continue;
- }
- break;
- }
- r++;
- AvmAssert(r >= 0 && r <= N_TOP_BLOCKS);
- /* This entry should be placed at topPI[r], and entries
- at higher numbered slots moved up one. */
- if (r < N_TOP_BLOCKS) {
- for (int s = N_TOP_BLOCKS-1; s > r; s--) {
- topFragID[s] = topFragID[s-1];
- topPI[s] = topPI[s-1];
- }
- topFragID[r] = fragID;
- topPI[r] = pi;
- }
- }
-
- js_LogController.printf(
- "\n----------------- Per-fragment execution counts ------------------\n");
- js_LogController.printf(
- "\nTotal count = %llu\n\n", (unsigned long long int)totCount);
-
- js_LogController.printf(
- " Entry counts Entry counts ----- Static -----\n");
- js_LogController.printf(
- " ------Self------ ----Cumulative--- Exits Cbytes Xbytes FragID\n");
- js_LogController.printf("\n");
-
- if (totCount == 0)
- totCount = 1; /* avoid division by zero */
- cumulCount = 0;
- int r;
- for (r = 0; r < N_TOP_BLOCKS; r++) {
- if (topFragID[r] == 0)
- break;
- cumulCount += (uint64_t)topPI[r].count;
- js_LogController.printf("%3d: %5.2f%% %9u %6.2f%% %9llu"
- " %3d %5u %5u %06u\n",
- r,
- (double)topPI[r].count * 100.0 / (double)totCount,
- topPI[r].count,
- (double)cumulCount * 100.0 / (double)totCount,
- (unsigned long long int)cumulCount,
- topPI[r].nStaticExits,
- (unsigned int)topPI[r].nCodeBytes,
- (unsigned int)topPI[r].nExitBytes,
- topFragID[r]);
- totSE += (uint32_t)topPI[r].nStaticExits;
- totCodeB += topPI[r].nCodeBytes;
- totExitB += topPI[r].nExitBytes;
- }
- js_LogController.printf("\nTotal displayed code bytes = %u, "
- "exit bytes = %u\n"
- "Total displayed static exits = %d\n\n",
- (unsigned int)totCodeB, (unsigned int)totExitB, totSE);
-
- js_LogController.printf("Analysis by exit counts\n\n");
-
- for (r = 0; r < N_TOP_BLOCKS; r++) {
- if (topFragID[r] == 0)
- break;
- js_LogController.printf("FragID=%06u, total count %u:\n", topFragID[r],
- topPI[r].count);
- uint32_t madeItToEnd = topPI[r].count;
- uint32_t totThisFrag = topPI[r].count;
- if (totThisFrag == 0)
- totThisFrag = 1;
- GuardPI gpi;
- // visit the guards, in forward order
- for (Seq<GuardPI>* guards = topPI[r].guards; guards; guards = guards->tail) {
- gpi = (*guards).head;
- if (gpi.count == 0)
- continue;
- madeItToEnd -= gpi.count;
- js_LogController.printf(" GuardID=%03u %7u (%5.2f%%)\n",
- gpi.guardID, gpi.count,
- 100.0 * (double)gpi.count / (double)totThisFrag);
- }
- js_LogController.printf(" Looped (%03u) %7u (%5.2f%%)\n",
- topPI[r].largestGuardID+1,
- madeItToEnd,
- 100.0 * (double)madeItToEnd / (double)totThisFrag);
- NanoAssert(madeItToEnd <= topPI[r].count); // else unsigned underflow
- js_LogController.printf("\n");
- }
-
- tm->profTab = NULL;
-}
-
-#endif
-
-/* ----------------------------------------------------------------- */
-
-#ifdef DEBUG
-static const char*
-getExitName(ExitType type)
-{
- static const char* exitNames[] =
- {
- #define MAKE_EXIT_STRING(x) #x,
- JS_TM_EXITCODES(MAKE_EXIT_STRING)
- #undef MAKE_EXIT_STRING
- NULL
- };
-
- JS_ASSERT(type < TOTAL_EXIT_TYPES);
-
- return exitNames[type];
-}
-
-static JSBool FASTCALL
-PrintOnTrace(char* format, uint32 argc, double *argv)
-{
- union {
- struct {
- uint32 lo;
- uint32 hi;
- } i;
- double d;
- char *cstr;
- JSObject *o;
- JSString *s;
- } u;
-
-#define GET_ARG() JS_BEGIN_MACRO \
- if (argi >= argc) { \
- fprintf(out, "[too few args for format]"); \
- break; \
- } \
- u.d = argv[argi++]; \
- JS_END_MACRO
-
- FILE *out = stderr;
-
- uint32 argi = 0;
- for (char *p = format; *p; ++p) {
- if (*p != '%') {
- putc(*p, out);
- continue;
- }
- char ch = *++p;
- if (!ch) {
- fprintf(out, "[trailing %%]");
- continue;
- }
-
- switch (ch) {
- case 'a':
- GET_ARG();
- fprintf(out, "[%u:%u 0x%x:0x%x %f]", u.i.lo, u.i.hi, u.i.lo, u.i.hi, u.d);
- break;
- case 'd':
- GET_ARG();
- fprintf(out, "%d", u.i.lo);
- break;
- case 'u':
- GET_ARG();
- fprintf(out, "%u", u.i.lo);
- break;
- case 'x':
- GET_ARG();
- fprintf(out, "%x", u.i.lo);
- break;
- case 'f':
- GET_ARG();
- fprintf(out, "%f", u.d);
- break;
- case 'o':
- GET_ARG();
- js_DumpObject(u.o);
- break;
- case 's':
- GET_ARG();
- {
- size_t length = u.s->length();
- // protect against massive spew if u.s is a bad pointer.
- if (length > 1 << 16)
- length = 1 << 16;
- jschar *chars = u.s->chars();
- for (unsigned i = 0; i < length; ++i) {
- jschar co = chars[i];
- if (co < 128)
- putc(co, out);
- else if (co < 256)
- fprintf(out, "\\u%02x", co);
- else
- fprintf(out, "\\u%04x", co);
- }
- }
- break;
- case 'S':
- GET_ARG();
- fprintf(out, "%s", u.cstr);
- break;
- default:
- fprintf(out, "[invalid %%%c]", *p);
- }
- }
-
-#undef GET_ARG
-
- return JS_TRUE;
-}
-
-JS_DEFINE_CALLINFO_3(extern, BOOL, PrintOnTrace, CHARPTR, UINT32, DOUBLEPTR, 0, 0)
-
-// This version is not intended to be called directly: usually it is easier to
-// use one of the other overloads.
-void
-TraceRecorder::tprint(const char *format, int count, nanojit::LIns *insa[])
-{
- size_t size = strlen(format) + 1;
- char *data = (char*) lir->insSkip(size)->payload();
- memcpy(data, format, size);
-
- double *args = (double*) lir->insSkip(count * sizeof(double))->payload();
- for (int i = 0; i < count; ++i) {
- JS_ASSERT(insa[i]);
- lir->insStorei(insa[i], INS_CONSTPTR(args), sizeof(double) * i);
- }
-
- LIns* args_ins[] = { INS_CONSTPTR(args), INS_CONST(count), INS_CONSTPTR(data) };
- LIns* call_ins = lir->insCall(&PrintOnTrace_ci, args_ins);
- guard(false, lir->ins_eq0(call_ins), MISMATCH_EXIT);
-}
-
-// Generate a 'printf'-type call from trace for debugging.
-void
-TraceRecorder::tprint(const char *format)
-{
- LIns* insa[] = { NULL };
- tprint(format, 0, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins)
-{
- LIns* insa[] = { ins };
- tprint(format, 1, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2)
-{
- LIns* insa[] = { ins1, ins2 };
- tprint(format, 2, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3)
-{
- LIns* insa[] = { ins1, ins2, ins3 };
- tprint(format, 3, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4)
-{
- LIns* insa[] = { ins1, ins2, ins3, ins4 };
- tprint(format, 4, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4,
- LIns *ins5)
-{
- LIns* insa[] = { ins1, ins2, ins3, ins4, ins5 };
- tprint(format, 5, insa);
-}
-
-void
-TraceRecorder::tprint(const char *format, LIns *ins1, LIns *ins2, LIns *ins3, LIns *ins4,
- LIns *ins5, LIns *ins6)
-{
- LIns* insa[] = { ins1, ins2, ins3, ins4, ins5, ins6 };
- tprint(format, 6, insa);
-}
-#endif
-
-/*
- * The entire VM shares one oracle. Collisions and concurrent updates are
- * tolerated and worst case cause performance regressions.
- */
-static Oracle oracle;
-
-/*
- * This confusing and mysterious expression is used for the Tracker. The
- * tracker's responsibility is to map opaque, 4-byte aligned addresses to LIns
- * pointers. To do this efficiently, we observe that the addresses of jsvals
- * living in the interpreter tend to be aggregated close to each other -
- * usually on the same page (where a tracker page doesn't have to be the same
- * size as the OS page size, but it's typically similar).
- *
- * For every address, we split it into two values: upper bits which represent
- * the "base", and lower bits which represent an offset against the base. We
- * create a list of:
- * struct TrackerPage {
- * void* base;
- * LIns* map;
- * };
- * The mapping then becomes:
- * page = page such that Base(address) == page->base,
- * page->map[Index(address)]
- *
- * The size of the map is allocated as N * sizeof(LIns*), where N is
- * (TRACKER_PAGE_SIZE >> 2). Since the lower two bits are 0, they are always
- * discounted.
- *
- * TRACKER_PAGE_MASK is the "reverse" expression, with a |- 1| to get a mask
- * which separates an address into the Base and Index bits. It is necessary to
- * do all this work rather than use TRACKER_PAGE_SIZE - 1, because on 64-bit
- * platforms the pointer width is twice as large, and only half as many
- * indexes can fit into TrackerPage::map. So the "Base" grows by one bit, and
- * the "Index" shrinks by one bit.
- */
-#define TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)
-
-#define TRACKER_PAGE_SIZE 4096
-
-Tracker::Tracker()
-{
- pagelist = 0;
-}
-
-Tracker::~Tracker()
-{
- clear();
-}
-
-jsuword
-Tracker::getTrackerPageBase(const void* v) const
-{
- return jsuword(v) & ~jsuword(TRACKER_PAGE_MASK);
-}
-
-struct Tracker::TrackerPage*
-Tracker::findTrackerPage(const void* v) const
-{
- jsuword base = getTrackerPageBase(v);
- struct Tracker::TrackerPage* p = pagelist;
- while (p) {
- if (p->base == base) {
- return p;
- }
- p = p->next;
- }
- return 0;
-}
-
-struct Tracker::TrackerPage*
-Tracker::addTrackerPage(const void* v) {
- jsuword base = getTrackerPageBase(v);
- struct Tracker::TrackerPage* p = (struct Tracker::TrackerPage*)
- calloc(1, sizeof(*p) - sizeof(p->map) + (TRACKER_PAGE_SIZE >> 2) * sizeof(LIns*));
- p->base = base;
- p->next = pagelist;
- pagelist = p;
- return p;
-}
-
-void
-Tracker::clear()
-{
- while (pagelist) {
- TrackerPage* p = pagelist;
- pagelist = pagelist->next;
- free(p);
- }
-}
-
-bool
-Tracker::has(const void *v) const
-{
- return get(v) != NULL;
-}
-
-LIns*
-Tracker::get(const void* v) const
-{
- struct Tracker::TrackerPage* p = findTrackerPage(v);
- if (!p)
- return NULL;
- return p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2];
-}
-
-void
-Tracker::set(const void* v, LIns* i)
-{
- struct Tracker::TrackerPage* p = findTrackerPage(v);
- if (!p)
- p = addTrackerPage(v);
- p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2] = i;
-}
-
-static inline jsuint
-argSlots(JSStackFrame* fp)
-{
- return JS_MAX(fp->argc, fp->fun->nargs);
-}
-
-static inline bool
-isNumber(jsval v)
-{
- return JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v);
-}
-
-static inline jsdouble
-asNumber(jsval v)
-{
- JS_ASSERT(isNumber(v));
- if (JSVAL_IS_DOUBLE(v))
- return *JSVAL_TO_DOUBLE(v);
- return (jsdouble)JSVAL_TO_INT(v);
-}
-
-static inline bool
-isInt32(jsval v)
-{
- if (!isNumber(v))
- return false;
- jsdouble d = asNumber(v);
- jsint i;
- return JSDOUBLE_IS_INT(d, i);
-}
-
-static inline jsint
-asInt32(jsval v)
-{
- JS_ASSERT(isNumber(v));
- if (JSVAL_IS_INT(v))
- return JSVAL_TO_INT(v);
-#ifdef DEBUG
- jsint i;
- JS_ASSERT(JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i));
-#endif
- return jsint(*JSVAL_TO_DOUBLE(v));
-}
-
-/* Return TT_DOUBLE for all numbers (int and double) and the tag otherwise. */
-static inline JSTraceType
-GetPromotedType(jsval v)
-{
- if (JSVAL_IS_INT(v))
- return TT_DOUBLE;
- if (JSVAL_IS_OBJECT(v)) {
- if (JSVAL_IS_NULL(v))
- return TT_NULL;
- if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
- return TT_FUNCTION;
- return TT_OBJECT;
- }
- uint8_t tag = JSVAL_TAG(v);
- JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
- return JSTraceType(tag);
-}
-
-/* Return TT_INT32 for all whole numbers that fit into signed 32-bit and the tag otherwise. */
-static inline JSTraceType
-getCoercedType(jsval v)
-{
- if (isInt32(v))
- return TT_INT32;
- if (JSVAL_IS_OBJECT(v)) {
- if (JSVAL_IS_NULL(v))
- return TT_NULL;
- if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
- return TT_FUNCTION;
- return TT_OBJECT;
- }
- uint8_t tag = JSVAL_TAG(v);
- JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
- JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
- return JSTraceType(tag);
-}
-
-/* Constant seed and accumulate step borrowed from the DJB hash. */
-
-const uintptr_t ORACLE_MASK = ORACLE_SIZE - 1;
-JS_STATIC_ASSERT((ORACLE_MASK & ORACLE_SIZE) == 0);
-
-const uintptr_t FRAGMENT_TABLE_MASK = FRAGMENT_TABLE_SIZE - 1;
-JS_STATIC_ASSERT((FRAGMENT_TABLE_MASK & FRAGMENT_TABLE_SIZE) == 0);
-
-const uintptr_t HASH_SEED = 5381;
-
-static inline void
-HashAccum(uintptr_t& h, uintptr_t i, uintptr_t mask)
-{
- h = ((h << 5) + h + (mask & i)) & mask;
-}
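-
-/*
- * Worked step (illustrative): (h << 5) + h is h * 33, the classic DJB
- * multiplier. Masking both the input and the running value keeps h inside
- * [0, mask] at every step, so the hash functions below can index their
- * fixed-size bitsets and tables directly, with no final modulo.
- */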
-
-static JS_REQUIRES_STACK inline int
-StackSlotHash(JSContext* cx, unsigned slot)
-{
- uintptr_t h = HASH_SEED;
- HashAccum(h, uintptr_t(cx->fp->script), ORACLE_MASK);
- HashAccum(h, uintptr_t(cx->fp->regs->pc), ORACLE_MASK);
- HashAccum(h, uintptr_t(slot), ORACLE_MASK);
- return int(h);
-}
-
-static JS_REQUIRES_STACK inline int
-GlobalSlotHash(JSContext* cx, unsigned slot)
-{
- uintptr_t h = HASH_SEED;
- JSStackFrame* fp = cx->fp;
-
- while (fp->down)
- fp = fp->down;
-
- HashAccum(h, uintptr_t(fp->script), ORACLE_MASK);
- HashAccum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), ORACLE_MASK);
- HashAccum(h, uintptr_t(slot), ORACLE_MASK);
- return int(h);
-}
-
-static inline int
-PCHash(jsbytecode* pc)
-{
- return int(uintptr_t(pc) & ORACLE_MASK);
-}
-
-Oracle::Oracle()
-{
- /* Grow the oracle bitsets to their (fixed) size here, once. */
- _stackDontDemote.set(ORACLE_SIZE-1);
- _globalDontDemote.set(ORACLE_SIZE-1);
- clear();
-}
-
-/* Tell the oracle that a certain global variable should not be demoted. */
-JS_REQUIRES_STACK void
-Oracle::markGlobalSlotUndemotable(JSContext* cx, unsigned slot)
-{
- _globalDontDemote.set(GlobalSlotHash(cx, slot));
-}
-
-/* Consult with the oracle whether we shouldn't demote a certain global variable. */
-JS_REQUIRES_STACK bool
-Oracle::isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const
-{
- return _globalDontDemote.get(GlobalSlotHash(cx, slot));
-}
-
-/* Tell the oracle that a certain slot at a certain stack slot should not be demoted. */
-JS_REQUIRES_STACK void
-Oracle::markStackSlotUndemotable(JSContext* cx, unsigned slot)
-{
- _stackDontDemote.set(StackSlotHash(cx, slot));
-}
-
-/* Consult with the oracle whether we shouldn't demote a certain slot. */
-JS_REQUIRES_STACK bool
-Oracle::isStackSlotUndemotable(JSContext* cx, unsigned slot) const
-{
- return _stackDontDemote.get(StackSlotHash(cx, slot));
-}
-
-/* Tell the oracle that a certain slot at a certain bytecode location should not be demoted. */
-void
-Oracle::markInstructionUndemotable(jsbytecode* pc)
-{
- _pcDontDemote.set(PCHash(pc));
-}
-
-/* Consult with the oracle whether we shouldn't demote a certain bytecode location. */
-bool
-Oracle::isInstructionUndemotable(jsbytecode* pc) const
-{
- return _pcDontDemote.get(PCHash(pc));
-}
-
-void
-Oracle::clearDemotability()
-{
- _stackDontDemote.reset();
- _globalDontDemote.reset();
- _pcDontDemote.reset();
-}
-
-JS_REQUIRES_STACK static JS_INLINE void
-MarkSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
-{
- if (slot < ti->nStackTypes) {
- oracle.markStackSlotUndemotable(cx, slot);
- return;
- }
-
- uint16* gslots = ti->globalSlots->data();
- oracle.markGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]);
-}
-
-static JS_REQUIRES_STACK inline bool
-IsSlotUndemotable(JSContext* cx, TreeInfo* ti, unsigned slot)
-{
- if (slot < ti->nStackTypes)
- return oracle.isStackSlotUndemotable(cx, slot);
-
- uint16* gslots = ti->globalSlots->data();
- return oracle.isGlobalSlotUndemotable(cx, gslots[slot - ti->nStackTypes]);
-}
-
-struct PCHashEntry : public JSDHashEntryStub {
- size_t count;
-};
-
-#define PC_HASH_COUNT 1024
-
-static void
-Blacklist(jsbytecode* pc)
-{
- AUDIT(blacklisted);
- JS_ASSERT(*pc == JSOP_TRACE || *pc == JSOP_NOP);
- *pc = JSOP_NOP;
-}
-
-static void
-Backoff(JSContext *cx, jsbytecode* pc, Fragment* tree = NULL)
-{
- JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts;
-
- if (table->ops) {
- PCHashEntry *entry = (PCHashEntry *)
- JS_DHashTableOperate(table, pc, JS_DHASH_ADD);
-
- if (entry) {
- if (!entry->key) {
- entry->key = pc;
- JS_ASSERT(entry->count == 0);
- }
- JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr)));
- if (entry->count++ > (BL_ATTEMPTS * MAXPEERS)) {
- entry->count = 0;
- Blacklist(pc);
- return;
- }
- }
- }
-
- if (tree) {
- tree->hits() -= BL_BACKOFF;
-
- /*
- * In case there is no entry or no table (due to OOM) or some
- * serious imbalance in the recording-attempt distribution on a
- * multitree, give each tree another chance to blacklist here as
- * well.
- */
- if (++tree->recordAttempts > BL_ATTEMPTS)
- Blacklist(pc);
- }
-}
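-
-/*
- * Worked numbers (illustrative): with BL_ATTEMPTS = 2 and MAXPEERS = 9,
- * Backoff() blacklists a pc once its attempt count exceeds 2 * 9 = 18;
- * each aborted attempt also docks the tree BL_BACKOFF = 32 hits, so the
- * loop must get hot again before recording is retried. AttemptCompilation()
- * below reverses this: it restores JSOP_TRACE, clears the pc's attempt
- * count, and sets every peer back to HOTLOOP hits.
- */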
-
-static void
-ResetRecordingAttempts(JSContext *cx, jsbytecode* pc)
-{
- JSDHashTable *table = &JS_TRACE_MONITOR(cx).recordAttempts;
- if (table->ops) {
- PCHashEntry *entry = (PCHashEntry *)
- JS_DHashTableOperate(table, pc, JS_DHASH_LOOKUP);
-
- if (JS_DHASH_ENTRY_IS_FREE(&(entry->hdr)))
- return;
- JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(&(entry->hdr)));
- entry->count = 0;
- }
-}
-
-static inline size_t
-FragmentHash(const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
-{
- uintptr_t h = HASH_SEED;
- HashAccum(h, uintptr_t(ip), FRAGMENT_TABLE_MASK);
- HashAccum(h, uintptr_t(globalObj), FRAGMENT_TABLE_MASK);
- HashAccum(h, uintptr_t(globalShape), FRAGMENT_TABLE_MASK);
- HashAccum(h, uintptr_t(argc), FRAGMENT_TABLE_MASK);
- return size_t(h);
-}
-
-/*
- * argc is cx->fp->argc at the trace loop header, i.e., the number of arguments
- * pushed for the innermost JS frame. This is required as part of the fragment
- * key because the fragment will write those arguments back to the interpreter
- * stack when it exits, using its typemap, which implicitly incorporates a
- * given value of argc. Without this feature, a fragment could be called as an
- * inner tree with two different values of argc, and entry type checking or
- * exit frame synthesis could crash.
- */
-struct VMFragment : public Fragment
-{
- VMFragment(const void* _ip, JSObject* _globalObj, uint32 _globalShape, uint32 _argc
- verbose_only(, uint32_t profFragID)) :
- Fragment(_ip verbose_only(, profFragID)),
- first(NULL),
- next(NULL),
- peer(NULL),
- globalObj(_globalObj),
- globalShape(_globalShape),
- argc(_argc)
- { }
-
- inline TreeInfo* getTreeInfo() {
- return (TreeInfo*)vmprivate;
- }
-
- VMFragment* first;
- VMFragment* next;
- VMFragment* peer;
- JSObject* globalObj;
- uint32 globalShape;
- uint32 argc;
-};
-
-static VMFragment*
-getVMFragment(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape,
- uint32 argc)
-{
- size_t h = FragmentHash(ip, globalObj, globalShape, argc);
- VMFragment* vf = tm->vmfragments[h];
- while (vf &&
- ! (vf->globalObj == globalObj &&
- vf->globalShape == globalShape &&
- vf->ip == ip &&
- vf->argc == argc)) {
- vf = vf->next;
- }
- return vf;
-}
-
-static VMFragment*
-getLoop(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
-{
- return getVMFragment(tm, ip, globalObj, globalShape, argc);
-}
-
-static VMFragment*
-getAnchor(JSTraceMonitor* tm, const void *ip, JSObject* globalObj, uint32 globalShape, uint32 argc)
-{
- verbose_only(
- uint32_t profFragID = (js_LogController.lcbits & LC_FragProfile)
- ? (++(tm->lastFragID)) : 0;
- )
- VMFragment *f = new (*tm->dataAlloc) VMFragment(ip, globalObj, globalShape, argc
- verbose_only(, profFragID));
- JS_ASSERT(f);
-
- VMFragment *p = getVMFragment(tm, ip, globalObj, globalShape, argc);
-
- if (p) {
- f->first = p;
- /* append at the end of the peer list */
- VMFragment* next;
- while ((next = p->peer) != NULL)
- p = next;
- p->peer = f;
- } else {
- /* this is the first fragment */
- f->first = f;
- size_t h = FragmentHash(ip, globalObj, globalShape, argc);
- f->next = tm->vmfragments[h];
- tm->vmfragments[h] = f;
- }
- f->root = f;
- return f;
-}
-
-#ifdef DEBUG
-static void
-AssertTreeIsUnique(JSTraceMonitor* tm, VMFragment* f, TreeInfo* ti)
-{
- JS_ASSERT(f->root == f);
-
- /*
- * Check for duplicate entry type maps. This is always wrong and hints at
- * trace explosion since we are trying to stabilize something without
- * properly connecting peer edges.
- */
- TreeInfo* ti_other;
- for (VMFragment* peer = getLoop(tm, f->ip, f->globalObj, f->globalShape, f->argc);
- peer != NULL;
- peer = peer->peer) {
- if (!peer->code() || peer == f)
- continue;
- ti_other = (TreeInfo*)peer->vmprivate;
- JS_ASSERT(ti_other);
- JS_ASSERT(!ti->typeMap.matches(ti_other->typeMap));
- }
-}
-#endif
-
-static void
-AttemptCompilation(JSContext *cx, JSTraceMonitor* tm, JSObject* globalObj, jsbytecode* pc,
- uint32 argc)
-{
- /* If we already permanently blacklisted the location, undo that. */
- JS_ASSERT(*pc == JSOP_NOP || *pc == JSOP_TRACE);
- *pc = JSOP_TRACE;
- ResetRecordingAttempts(cx, pc);
-
- /* Breathe new life into all peer fragments at the designated loop header. */
- VMFragment* f = (VMFragment*)getLoop(tm, pc, globalObj, OBJ_SHAPE(globalObj), argc);
- if (!f) {
- /*
- * If the global object's shape changed, we can't easily find the
- * corresponding loop header via a hash table lookup. In this case
- * we simply bail out here and hope that the fragment has another
- * outstanding compilation attempt. This case is extremely rare.
- */
- return;
- }
- JS_ASSERT(f->root == f);
- f = f->first;
- while (f) {
- JS_ASSERT(f->root == f);
- --f->recordAttempts;
- f->hits() = HOTLOOP;
- f = f->peer;
- }
-}
-
-// Forward declarations.
-JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1)
-JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1)
-
-static bool
-isi2f(LIns* i)
-{
- if (i->isop(LIR_i2f))
- return true;
-
- if (nanojit::AvmCore::config.soft_float &&
- i->isop(LIR_qjoin) &&
- i->oprnd1()->isop(LIR_pcall) &&
- i->oprnd2()->isop(LIR_callh)) {
- if (i->oprnd1()->callInfo() == &i2f_ci)
- return true;
- }
-
- return false;
-}
-
-static bool
-isu2f(LIns* i)
-{
- if (i->isop(LIR_u2f))
- return true;
-
- if (nanojit::AvmCore::config.soft_float &&
- i->isop(LIR_qjoin) &&
- i->oprnd1()->isop(LIR_pcall) &&
- i->oprnd2()->isop(LIR_callh)) {
- if (i->oprnd1()->callInfo() == &u2f_ci)
- return true;
- }
-
- return false;
-}
-
-static LIns*
-iu2fArg(LIns* i)
-{
- if (nanojit::AvmCore::config.soft_float &&
- i->isop(LIR_qjoin)) {
- return i->oprnd1()->arg(0);
- }
-
- return i->oprnd1();
-}
-
-static LIns*
-demote(LirWriter *out, LIns* i)
-{
- if (i->isCall())
- return i->callArgN(0);
- if (isi2f(i) || isu2f(i))
- return iu2fArg(i);
- if (i->isconst())
- return i;
- JS_ASSERT(i->isconstf());
- double cf = i->imm64f();
- int32_t ci = cf > 0x7fffffff ? uint32_t(cf) : int32_t(cf);
- return out->insImm(ci);
-}
-
-static bool
-isPromoteInt(LIns* i)
-{
- if (isi2f(i) || i->isconst())
- return true;
- if (!i->isconstf())
- return false;
- jsdouble d = i->imm64f();
- return d == jsdouble(jsint(d)) && !JSDOUBLE_IS_NEGZERO(d);
-}
-
-static bool
-isPromoteUint(LIns* i)
-{
- if (isu2f(i) || i->isconst())
- return true;
- if (!i->isconstf())
- return false;
- jsdouble d = i->imm64f();
- return d == jsdouble(jsuint(d)) && !JSDOUBLE_IS_NEGZERO(d);
-}
-
-static bool
-isPromote(LIns* i)
-{
- return isPromoteInt(i) || isPromoteUint(i);
-}
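-
-// Worked example (illustrative): for a LIR float constant 5.0,
-// isPromoteInt holds, since 5.0 == jsdouble(jsint(5.0)) and 5.0 is not
-// negative zero, so demote() can rewrite it to insImm(5). For -0.0 the
-// predicate fails: int32 cannot represent the sign of zero, so the value
-// must stay a double on trace.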
-
-static bool
-IsConst(LIns* i, int32_t c)
-{
- return i->isconst() && i->imm32() == c;
-}
-
-/*
- * Determine whether this operand is guaranteed to not overflow the specified
- * integer operation.
- */
-static bool
-IsOverflowSafe(LOpcode op, LIns* i)
-{
- LIns* c;
- switch (op) {
- case LIR_add:
- case LIR_sub:
- return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
- ((c->imm32() & 0xc0000000) == 0)) ||
- (i->isop(LIR_rsh) && ((c = i->oprnd2())->isconst()) &&
- ((c->imm32() > 0)));
- default:
- JS_ASSERT(op == LIR_mul);
- }
- return (i->isop(LIR_and) && ((c = i->oprnd2())->isconst()) &&
- ((c->imm32() & 0xffff0000) == 0)) ||
- (i->isop(LIR_ush) && ((c = i->oprnd2())->isconst()) &&
- ((c->imm32() >= 16)));
-}
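-
-/*
- * Worked bound (illustrative) for the add/sub case: an operand of the form
- * (x & c) with (c & 0xc0000000) == 0 lies in [0, 0x3fffffff], so the sum
- * of two such operands is at most 0x7ffffffe and their difference at least
- * -0x3fffffff -- neither can wrap int32. The mul case analogously confines
- * each operand below 2^16 so the full product is representable in 32 bits.
- */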
-
-/* soft float support */
-
-static jsdouble FASTCALL
-fneg(jsdouble x)
-{
- return -x;
-}
-JS_DEFINE_CALLINFO_1(static, DOUBLE, fneg, DOUBLE, 1, 1)
-
-static jsdouble FASTCALL
-i2f(int32 i)
-{
- return i;
-}
-
-static jsdouble FASTCALL
-u2f(jsuint u)
-{
- return u;
-}
-
-static int32 FASTCALL
-fcmpeq(jsdouble x, jsdouble y)
-{
- return x==y;
-}
-JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
-
-static int32 FASTCALL
-fcmplt(jsdouble x, jsdouble y)
-{
- return x < y;
-}
-JS_DEFINE_CALLINFO_2(static, INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
-
-static int32 FASTCALL
-fcmple(jsdouble x, jsdouble y)
-{
- return x <= y;
-}
-JS_DEFINE_CALLINFO_2(static, INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
-
-static int32 FASTCALL
-fcmpgt(jsdouble x, jsdouble y)
-{
- return x > y;
-}
-JS_DEFINE_CALLINFO_2(static, INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
-
-static int32 FASTCALL
-fcmpge(jsdouble x, jsdouble y)
-{
- return x >= y;
-}
-JS_DEFINE_CALLINFO_2(static, INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
-
-static jsdouble FASTCALL
-fmul(jsdouble x, jsdouble y)
-{
- return x * y;
-}
-JS_DEFINE_CALLINFO_2(static, DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
-
-static jsdouble FASTCALL
-fadd(jsdouble x, jsdouble y)
-{
- return x + y;
-}
-JS_DEFINE_CALLINFO_2(static, DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
-
-static jsdouble FASTCALL
-fdiv(jsdouble x, jsdouble y)
-{
- return x / y;
-}
-JS_DEFINE_CALLINFO_2(static, DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
-
-static jsdouble FASTCALL
-fsub(jsdouble x, jsdouble y)
-{
- return x - y;
-}
-JS_DEFINE_CALLINFO_2(static, DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)
-
-// Replace FPU ops with function calls (soft-float support).
-class SoftFloatFilter: public LirWriter
-{
-public:
- SoftFloatFilter(LirWriter *out) : LirWriter(out)
- {}
-
- LIns *hi(LIns *q) {
- return ins1(LIR_qhi, q);
- }
- LIns *lo(LIns *q) {
- return ins1(LIR_qlo, q);
- }
-
- LIns *split(LIns *a) {
- if (a->isQuad() && !a->isop(LIR_qjoin)) {
- // all quad-sized args must be qjoin's for soft-float
- a = ins2(LIR_qjoin, lo(a), hi(a));
- }
- return a;
- }
-
- LIns *split(const CallInfo *call, LInsp args[]) {
- LIns *lo = out->insCall(call, args);
- LIns *hi = out->ins1(LIR_callh, lo);
- return out->ins2(LIR_qjoin, lo, hi);
- }
-
- LIns *fcall1(const CallInfo *call, LIns *a) {
- LIns *args[] = { split(a) };
- return split(call, args);
- }
-
- LIns *fcall2(const CallInfo *call, LIns *a, LIns *b) {
- LIns *args[] = { split(b), split(a) };
- return split(call, args);
- }
-
- LIns *fcmp(const CallInfo *call, LIns *a, LIns *b) {
- LIns *args[] = { split(b), split(a) };
- return out->ins2(LIR_eq, out->insCall(call, args), out->insImm(1));
- }
-
- LIns *ins1(LOpcode op, LIns *a) {
- switch (op) {
- case LIR_i2f:
- return fcall1(&i2f_ci, a);
- case LIR_u2f:
- return fcall1(&u2f_ci, a);
- case LIR_fneg:
- return fcall1(&fneg_ci, a);
- case LIR_fret:
- return out->ins1(op, split(a));
- default:
- return out->ins1(op, a);
- }
- }
-
- LIns *ins2(LOpcode op, LIns *a, LIns *b) {
- switch (op) {
- case LIR_fadd:
- return fcall2(&fadd_ci, a, b);
- case LIR_fsub:
- return fcall2(&fsub_ci, a, b);
- case LIR_fmul:
- return fcall2(&fmul_ci, a, b);
- case LIR_fdiv:
- return fcall2(&fdiv_ci, a, b);
- case LIR_feq:
- return fcmp(&fcmpeq_ci, a, b);
- case LIR_flt:
- return fcmp(&fcmplt_ci, a, b);
- case LIR_fgt:
- return fcmp(&fcmpgt_ci, a, b);
- case LIR_fle:
- return fcmp(&fcmple_ci, a, b);
- case LIR_fge:
- return fcmp(&fcmpge_ci, a, b);
- default:
- ;
- }
- return out->ins2(op, a, b);
- }
-
- LIns *insCall(const CallInfo *ci, LInsp args[]) {
- uint32_t argt = ci->_argtypes;
-
- for (uint32_t i = 0, argsizes = argt >> ARGSIZE_SHIFT; argsizes != 0; i++, argsizes >>= ARGSIZE_SHIFT)
- args[i] = split(args[i]);
-
- if ((argt & ARGSIZE_MASK_ANY) == ARGSIZE_F) {
- // this function returns a double as two 32bit values, so replace
- // call with qjoin(qhi(call), call)
- return split(ci, args);
- } else {
- return out->insCall(ci, args);
- }
- }
-};
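-/*
- * Illustrative sketch (not from the original source) of the rewriting this
- * filter performs on a soft-float target:
- *     d = fadd a, b
- * becomes, roughly,
- *     lo = call fadd_ci (split(b), split(a))
- *     hi = callh lo
- *     d  = qjoin lo, hi
- * while a float comparison such as flt turns into an integer test of the
- * helper's result:
- *     r = eq (call fcmplt_ci ...), 1
- */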
-
-class FuncFilter: public LirWriter
-{
-public:
- FuncFilter(LirWriter* out):
- LirWriter(out)
- {
- }
-
- LIns* ins2(LOpcode v, LIns* s0, LIns* s1)
- {
- if (s0 == s1 && v == LIR_feq) {
- if (isPromote(s0)) {
-                // double(int) and double(uint) cannot be NaN
- return insImm(1);
- }
- if (s0->isop(LIR_fmul) || s0->isop(LIR_fsub) || s0->isop(LIR_fadd)) {
- LIns* lhs = s0->oprnd1();
- LIns* rhs = s0->oprnd2();
- if (isPromote(lhs) && isPromote(rhs)) {
-                    // add/sub/mul of promoted ints can't be NaN
- return insImm(1);
- }
- }
- } else if (LIR_feq <= v && v <= LIR_fge) {
- if (isPromoteInt(s0) && isPromoteInt(s1)) {
- // demote fcmp to cmp
- v = LOpcode(v + (LIR_eq - LIR_feq));
- return out->ins2(v, demote(out, s0), demote(out, s1));
- } else if (isPromoteUint(s0) && isPromoteUint(s1)) {
- // uint compare
- v = LOpcode(v + (LIR_eq - LIR_feq));
- if (v != LIR_eq)
- v = LOpcode(v + (LIR_ult - LIR_lt)); // cmp -> ucmp
- return out->ins2(v, demote(out, s0), demote(out, s1));
- }
- } else if (v == LIR_or &&
- s0->isop(LIR_lsh) && IsConst(s0->oprnd2(), 16) &&
- s1->isop(LIR_and) && IsConst(s1->oprnd2(), 0xffff)) {
- LIns* msw = s0->oprnd1();
- LIns* lsw = s1->oprnd1();
- LIns* x;
- LIns* y;
- if (lsw->isop(LIR_add) &&
- lsw->oprnd1()->isop(LIR_and) &&
- lsw->oprnd2()->isop(LIR_and) &&
- IsConst(lsw->oprnd1()->oprnd2(), 0xffff) &&
- IsConst(lsw->oprnd2()->oprnd2(), 0xffff) &&
- msw->isop(LIR_add) &&
- msw->oprnd1()->isop(LIR_add) &&
- msw->oprnd2()->isop(LIR_rsh) &&
- msw->oprnd1()->oprnd1()->isop(LIR_rsh) &&
- msw->oprnd1()->oprnd2()->isop(LIR_rsh) &&
- IsConst(msw->oprnd2()->oprnd2(), 16) &&
- IsConst(msw->oprnd1()->oprnd1()->oprnd2(), 16) &&
- IsConst(msw->oprnd1()->oprnd2()->oprnd2(), 16) &&
- (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() &&
- (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() &&
- lsw == msw->oprnd2()->oprnd1()) {
- return out->ins2(LIR_add, x, y);
- }
- }
-
- return out->ins2(v, s0, s1);
- }
-
- LIns* insCall(const CallInfo *ci, LIns* args[])
- {
- if (ci == &js_DoubleToUint32_ci) {
- LIns* s0 = args[0];
- if (s0->isconstf())
- return out->insImm(js_DoubleToECMAUint32(s0->imm64f()));
- if (isi2f(s0) || isu2f(s0))
- return iu2fArg(s0);
- } else if (ci == &js_DoubleToInt32_ci) {
- LIns* s0 = args[0];
- if (s0->isconstf())
- return out->insImm(js_DoubleToECMAInt32(s0->imm64f()));
- if (s0->isop(LIR_fadd) || s0->isop(LIR_fsub)) {
- LIns* lhs = s0->oprnd1();
- LIns* rhs = s0->oprnd2();
- if (isPromote(lhs) && isPromote(rhs)) {
- LOpcode op = LOpcode(s0->opcode() & ~LIR64);
- return out->ins2(op, demote(out, lhs), demote(out, rhs));
- }
- }
- if (isi2f(s0) || isu2f(s0))
- return iu2fArg(s0);
-
- // XXX ARM -- check for qjoin(call(UnboxDouble),call(UnboxDouble))
- if (s0->isCall()) {
- const CallInfo* ci2 = s0->callInfo();
- if (ci2 == &js_UnboxDouble_ci) {
- LIns* args2[] = { s0->callArgN(0) };
- return out->insCall(&js_UnboxInt32_ci, args2);
- } else if (ci2 == &js_StringToNumber_ci) {
- // callArgN's ordering is that as seen by the builtin, not as stored in
- // args here. True story!
- LIns* args2[] = { s0->callArgN(1), s0->callArgN(0) };
- return out->insCall(&js_StringToInt32_ci, args2);
- } else if (ci2 == &js_String_p_charCodeAt0_ci) {
- // Use a fast path builtin for a charCodeAt that converts to an int right away.
- LIns* args2[] = { s0->callArgN(0) };
- return out->insCall(&js_String_p_charCodeAt0_int_ci, args2);
- } else if (ci2 == &js_String_p_charCodeAt_ci) {
- LIns* idx = s0->callArgN(1);
- // If the index is not already an integer, force it to be an integer.
- idx = isPromote(idx)
- ? demote(out, idx)
- : out->insCall(&js_DoubleToInt32_ci, &idx);
- LIns* args2[] = { idx, s0->callArgN(0) };
- return out->insCall(&js_String_p_charCodeAt_int_ci, args2);
- }
- }
- } else if (ci == &js_BoxDouble_ci) {
- LIns* s0 = args[0];
- JS_ASSERT(s0->isQuad());
- if (isPromoteInt(s0)) {
- LIns* args2[] = { demote(out, s0), args[1] };
- return out->insCall(&js_BoxInt32_ci, args2);
- }
- if (s0->isCall() && s0->callInfo() == &js_UnboxDouble_ci)
- return s0->callArgN(0);
- }
- return out->insCall(ci, args);
- }
-};
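-/*
- * Illustrative sketch (not from the original source): FuncFilter turns a
- * double comparison of two promoted integers back into an integer compare,
- *     flt (i2f a), (i2f b)   ==>   lt a, b
- * and shifts the opcode to its unsigned counterpart (lt -> ult) when both
- * sides are unsigned promotions. It also peels redundant double round trips
- * off calls such as js_DoubleToInt32(i2f(x)), which simply yield x.
- */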
-
-/*
- * Visit the values in the given JSStackFrame that the tracer cares about. This
- * visitor function is (implicitly) the primary definition of the native stack
- * area layout. There are a few other independent pieces of code that must be
- * maintained to assume the same layout. They are marked like this:
- *
- * Duplicate native stack layout computation: see VisitFrameSlots header comment.
- */
-template <typename Visitor>
-static JS_REQUIRES_STACK bool
-VisitFrameSlots(Visitor &visitor, unsigned depth, JSStackFrame *fp,
- JSStackFrame *up)
-{
- if (depth > 0 && !VisitFrameSlots(visitor, depth-1, fp->down, fp))
- return false;
-
- if (fp->argv) {
- if (depth == 0) {
- visitor.setStackSlotKind("args");
- if (!visitor.visitStackSlots(&fp->argv[-2], argSlots(fp) + 2, fp))
- return false;
- }
- visitor.setStackSlotKind("arguments");
- if (!visitor.visitStackSlots(&fp->argsobj, 1, fp))
- return false;
- visitor.setStackSlotKind("var");
- if (!visitor.visitStackSlots(fp->slots, fp->script->nfixed, fp))
- return false;
- }
- visitor.setStackSlotKind("stack");
- JS_ASSERT(fp->regs->sp >= StackBase(fp));
- if (!visitor.visitStackSlots(StackBase(fp),
- size_t(fp->regs->sp - StackBase(fp)),
- fp)) {
- return false;
- }
- if (up) {
- int missing = up->fun->nargs - up->argc;
- if (missing > 0) {
- visitor.setStackSlotKind("missing");
- if (!visitor.visitStackSlots(fp->regs->sp, size_t(missing), fp))
- return false;
- }
- }
- return true;
-}
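-/*
- * Illustrative layout summary (not from the original source): for the entry
- * frame the visitor sees, in order,
- *     callee, this, args, argsobj, var0..nfixed-1, operand stack
- * and for each pending frame, the unsupplied (missing) arguments of the next
- * callee are visited right after the caller's operand stack.
- */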
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
-VisitStackSlots(Visitor &visitor, JSContext *cx, unsigned callDepth)
-{
- return VisitFrameSlots(visitor, callDepth, cx->fp, NULL);
-}
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj,
- unsigned ngslots, uint16 *gslots)
-{
- for (unsigned n = 0; n < ngslots; ++n) {
- unsigned slot = gslots[n];
- visitor.visitGlobalSlot(&STOBJ_GET_SLOT(globalObj, slot), n, slot);
- }
-}
-
-class AdjustCallerTypeVisitor;
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots)
-{
- VisitGlobalSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
- gslots.length(), gslots.data());
-}
-
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitSlots(Visitor& visitor, JSContext* cx, JSObject* globalObj,
- unsigned callDepth, unsigned ngslots, uint16* gslots)
-{
- if (VisitStackSlots(visitor, cx, callDepth))
- VisitGlobalSlots(visitor, cx, globalObj, ngslots, gslots);
-}
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth,
- unsigned ngslots, uint16* gslots)
-{
- VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
- callDepth, ngslots, gslots);
-}
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj,
- unsigned callDepth, const SlotList& slots)
-{
- VisitSlots(visitor, cx, globalObj, callDepth, slots.length(),
- slots.data());
-}
-
-template <typename Visitor>
-static JS_REQUIRES_STACK JS_ALWAYS_INLINE void
-VisitSlots(Visitor &visitor, JSContext *cx, unsigned callDepth,
- const SlotList& slots)
-{
- VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain),
- callDepth, slots.length(), slots.data());
-}
-
-
-class SlotVisitorBase {
-#ifdef JS_JIT_SPEW
-protected:
- char const *mStackSlotKind;
-public:
- SlotVisitorBase() : mStackSlotKind(NULL) {}
- JS_ALWAYS_INLINE const char *stackSlotKind() { return mStackSlotKind; }
- JS_ALWAYS_INLINE void setStackSlotKind(char const *k) {
- mStackSlotKind = k;
- }
-#else
-public:
- JS_ALWAYS_INLINE const char *stackSlotKind() { return NULL; }
- JS_ALWAYS_INLINE void setStackSlotKind(char const *k) {}
-#endif
-};
-
-struct CountSlotsVisitor : public SlotVisitorBase
-{
- unsigned mCount;
- bool mDone;
- jsval* mStop;
-public:
- JS_ALWAYS_INLINE CountSlotsVisitor(jsval* stop = NULL) :
- mCount(0),
- mDone(false),
- mStop(stop)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- if (mDone)
- return false;
- if (mStop && size_t(mStop - vp) < count) {
- mCount += size_t(mStop - vp);
- mDone = true;
- return false;
- }
- mCount += count;
- return true;
- }
-
- JS_ALWAYS_INLINE unsigned count() {
- return mCount;
- }
-
- JS_ALWAYS_INLINE bool stopped() {
- return mDone;
- }
-};
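-/*
- * Usage sketch (mirroring the code below): to find how many native slots
- * precede a given jsval, pass its address as the stop point:
- *     CountSlotsVisitor visitor(p);
- *     VisitStackSlots(visitor, cx, callDepth);
- *     size_t offset = visitor.count() * sizeof(double);
- * This is exactly how nativeStackOffset() computes its result.
- */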
-
-/*
- * Calculate the total number of native frame slots we need from this frame all
- * the way back to the entry frame, including the current stack usage.
- */
-JS_REQUIRES_STACK unsigned
-NativeStackSlots(JSContext *cx, unsigned callDepth)
-{
- JSStackFrame* fp = cx->fp;
- unsigned slots = 0;
- unsigned depth = callDepth;
- for (;;) {
- /*
- * Duplicate native stack layout computation: see VisitFrameSlots
- * header comment.
- */
- unsigned operands = fp->regs->sp - StackBase(fp);
- slots += operands;
- if (fp->argv)
- slots += fp->script->nfixed + 1 /*argsobj*/;
- if (depth-- == 0) {
- if (fp->argv)
- slots += 2/*callee,this*/ + argSlots(fp);
-#ifdef DEBUG
- CountSlotsVisitor visitor;
- VisitStackSlots(visitor, cx, callDepth);
- JS_ASSERT(visitor.count() == slots && !visitor.stopped());
-#endif
- return slots;
- }
- JSStackFrame* fp2 = fp;
- fp = fp->down;
- int missing = fp2->fun->nargs - fp2->argc;
- if (missing > 0)
- slots += missing;
- }
- JS_NOT_REACHED("NativeStackSlots");
-}
-
-class CaptureTypesVisitor : public SlotVisitorBase
-{
- JSContext* mCx;
- JSTraceType* mTypeMap;
- JSTraceType* mPtr;
-
-public:
- JS_ALWAYS_INLINE CaptureTypesVisitor(JSContext* cx, JSTraceType* typeMap) :
- mCx(cx),
- mTypeMap(typeMap),
- mPtr(typeMap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- JSTraceType type = getCoercedType(*vp);
- if (type == TT_INT32 &&
- oracle.isGlobalSlotUndemotable(mCx, slot))
- type = TT_DOUBLE;
- JS_ASSERT(type != TT_JSVAL);
- debug_only_printf(LC_TMTracer,
- "capture type global%d: %d=%c\n",
- n, type, typeChar[type]);
- *mPtr++ = type;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, int count, JSStackFrame* fp) {
- for (int i = 0; i < count; ++i) {
- JSTraceType type = getCoercedType(vp[i]);
- if (type == TT_INT32 &&
- oracle.isStackSlotUndemotable(mCx, length()))
- type = TT_DOUBLE;
- JS_ASSERT(type != TT_JSVAL);
- debug_only_printf(LC_TMTracer,
- "capture type %s%d: %d=%c\n",
- stackSlotKind(), i, type, typeChar[type]);
- *mPtr++ = type;
- }
- return true;
- }
-
- JS_ALWAYS_INLINE uintptr_t length() {
- return mPtr - mTypeMap;
- }
-};
-
-/*
- * Capture the type map for the selected slots of the global object and currently pending
- * stack frames.
- */
-JS_REQUIRES_STACK void
-TypeMap::captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth)
-{
- setLength(NativeStackSlots(cx, callDepth) + slots.length());
- CaptureTypesVisitor visitor(cx, data());
- VisitSlots(visitor, cx, globalObj, callDepth, slots);
- JS_ASSERT(visitor.length() == length());
-}
-
-JS_REQUIRES_STACK void
-TypeMap::captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned stackSlots)
-{
- unsigned oldSlots = length() - stackSlots;
- int diff = slots.length() - oldSlots;
- JS_ASSERT(diff >= 0);
- setLength(length() + diff);
- CaptureTypesVisitor visitor(cx, data() + stackSlots + oldSlots);
- VisitGlobalSlots(visitor, cx, globalObj, diff, slots.data() + oldSlots);
-}
-
-/* Compare this type map to another one and see whether they match. */
-bool
-TypeMap::matches(TypeMap& other) const
-{
- if (length() != other.length())
- return false;
- return !memcmp(data(), other.data(), length());
-}
-
-void
-TypeMap::fromRaw(JSTraceType* other, unsigned numSlots)
-{
- unsigned oldLength = length();
- setLength(length() + numSlots);
- for (unsigned i = 0; i < numSlots; i++)
- get(oldLength + i) = other[i];
-}
-
-/*
- * Use the provided storage area to create a new type map that contains the
- * partial type map with the rest of it filled up from the complete type
- * map.
- */
-static void
-MergeTypeMaps(JSTraceType** partial, unsigned* plength, JSTraceType* complete, unsigned clength, JSTraceType* mem)
-{
- unsigned l = *plength;
- JS_ASSERT(l < clength);
- memcpy(mem, *partial, l * sizeof(JSTraceType));
- memcpy(mem + l, complete + l, (clength - l) * sizeof(JSTraceType));
- *partial = mem;
- *plength = clength;
-}
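-/*
- * Worked example (illustrative): given *partial = [I, D] with *plength = 2
- * and complete = [I, D, O, S] with clength = 4, the merged map written into
- * |mem| is [I, D, O, S], and *partial and *plength are updated to describe
- * the merged copy.
- */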
-
-/*
- * Specialize a tree to global slots that were added after the tree was
- * built ("late" globals), propagating the new types into any dependent
- * trees as well.
- */
-static JS_REQUIRES_STACK void
-SpecializeTreesToLateGlobals(JSContext* cx, TreeInfo* root, JSTraceType* globalTypeMap,
- unsigned numGlobalSlots)
-{
- TreeInfo* ti = root;
-
- for (unsigned i = ti->nGlobalTypes(); i < numGlobalSlots; i++)
- ti->typeMap.add(globalTypeMap[i]);
-
- JS_ASSERT(ti->nGlobalTypes() == numGlobalSlots);
-
- for (unsigned i = 0; i < root->dependentTrees.length(); i++) {
- ti = (TreeInfo*)root->dependentTrees[i]->vmprivate;
-
- /* ti can be NULL if we hit the recording tree in emitTreeCall; this is harmless. */
- if (ti && ti->nGlobalTypes() < numGlobalSlots)
- SpecializeTreesToLateGlobals(cx, ti, globalTypeMap, numGlobalSlots);
- }
- for (unsigned i = 0; i < root->linkedTrees.length(); i++) {
- ti = (TreeInfo*)root->linkedTrees[i]->vmprivate;
- if (ti && ti->nGlobalTypes() < numGlobalSlots)
- SpecializeTreesToLateGlobals(cx, ti, globalTypeMap, numGlobalSlots);
- }
-}
-
-/* Specializes a tree to any missing globals, including any dependent trees. */
-static JS_REQUIRES_STACK void
-SpecializeTreesToMissingGlobals(JSContext* cx, JSObject* globalObj, TreeInfo* root)
-{
- TreeInfo* ti = root;
-
- ti->typeMap.captureMissingGlobalTypes(cx, globalObj, *ti->globalSlots, ti->nStackTypes);
- JS_ASSERT(ti->globalSlots->length() == ti->typeMap.length() - ti->nStackTypes);
-
- SpecializeTreesToLateGlobals(cx, ti, ti->globalTypeMap(), ti->nGlobalTypes());
-}
-
-static void
-TrashTree(JSContext* cx, Fragment* f);
-
-JS_REQUIRES_STACK
-TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* _anchor, Fragment* _fragment,
- TreeInfo* ti, unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
- VMSideExit* innermostNestedGuard, jsbytecode* outer, uint32 outerArgc)
- : tempAlloc(*JS_TRACE_MONITOR(cx).tempAlloc),
- whichTreesToTrash(&tempAlloc),
- cfgMerges(&tempAlloc),
- tempTypeMap(cx)
-{
- JS_ASSERT(!_fragment->vmprivate && ti && cx->fp->regs->pc == (jsbytecode*)_fragment->ip);
-    /*
-     * Reset the fragment state we care about in case we got a recycled
-     * fragment. This includes resetting any profiling data we might have
-     * accumulated.
-     */
- _fragment->lastIns = NULL;
- verbose_only( _fragment->profCount = 0; )
- verbose_only( _fragment->nStaticExits = 0; )
- verbose_only( _fragment->nCodeBytes = 0; )
- verbose_only( _fragment->nExitBytes = 0; )
- verbose_only( _fragment->guardNumberer = 1; )
- verbose_only( _fragment->guardsForFrag = NULL; )
- verbose_only( _fragment->loopLabel = NULL; )
- // don't change _fragment->profFragID, though. Once the identity of
- // the Fragment is set up (for profiling purposes), we can't change it.
- this->cx = cx;
- this->traceMonitor = &JS_TRACE_MONITOR(cx);
- this->globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- this->lexicalBlock = cx->fp->blockChain;
- this->anchor = _anchor;
- this->fragment = _fragment;
- this->lirbuf = _fragment->lirbuf;
- this->treeInfo = ti;
- this->callDepth = _anchor ? _anchor->calldepth : 0;
- this->atoms = FrameAtomBase(cx, cx->fp);
- this->trashSelf = false;
- this->global_dslots = this->globalObj->dslots;
- this->loop = true; /* default assumption is we are compiling a loop */
- this->outer = outer;
- this->outerArgc = outerArgc;
- this->pendingSpecializedNative = NULL;
- this->newobj_ins = NULL;
- this->loopLabel = NULL;
-
-#ifdef JS_JIT_SPEW
- debug_only_print0(LC_TMMinimal, "\n");
- debug_only_printf(LC_TMMinimal, "Recording starting from %s:%u@%u (FragID=%06u)\n",
- ti->treeFileName, ti->treeLineNumber, ti->treePCOffset,
- _fragment->profFragID);
-
- debug_only_printf(LC_TMTracer, "globalObj=%p, shape=%d\n",
- (void*)this->globalObj, OBJ_SHAPE(this->globalObj));
- debug_only_printf(LC_TMTreeVis, "TREEVIS RECORD FRAG=%p ANCHOR=%p\n", (void*)fragment,
- (void*)anchor);
-#endif
-
- lir = lir_buf_writer = new LirBufWriter(lirbuf);
-#ifdef DEBUG
- lir = sanity_filter_1 = new SanityFilter(lir);
-#endif
- debug_only_stmt(
- if (js_LogController.lcbits & LC_TMRecorder) {
- lir = verbose_filter
-                = new VerboseWriter(tempAlloc, lir, lirbuf->names,
- &js_LogController);
- }
- )
- // CseFilter must be downstream of SoftFloatFilter (see bug 527754 for why).
- lir = cse_filter = new CseFilter(lir, tempAlloc);
- if (nanojit::AvmCore::config.soft_float)
- lir = float_filter = new SoftFloatFilter(lir);
- lir = expr_filter = new ExprFilter(lir);
- lir = func_filter = new FuncFilter(lir);
-#ifdef DEBUG
- lir = sanity_filter_2 = new SanityFilter(lir);
-#endif
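-    // Illustrative note (not from the original source): each filter wraps the
-    // previous |lir|, so written instructions flow through the pipeline in
-    // reverse construction order:
-    //     recorder -> sanity2 -> FuncFilter -> ExprFilter -> SoftFloatFilter
-    //              -> CseFilter -> VerboseWriter -> sanity1 -> LirBufWriter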
- lir->ins0(LIR_start);
-
- for (int i = 0; i < NumSavedRegs; ++i)
- lir->insParam(i, 1);
-#ifdef DEBUG
- for (int i = 0; i < NumSavedRegs; ++i)
- addName(lirbuf->savedRegs[i], regNames[Assembler::savedRegs[i]]);
-#endif
-
- lirbuf->state = addName(lir->insParam(0, 0), "state");
-
- if (fragment == fragment->root)
- loopLabel = lir->ins0(LIR_label);
-
-    // If profiling, drop a label here so the assembler knows to put a
-    // frag-entry-counter increment at this point. If there's a loopLabel,
-    // use that; otherwise we have to make a dummy label especially for
-    // this purpose.
- verbose_only( if (js_LogController.lcbits & LC_FragProfile) {
- LIns* entryLabel = NULL;
- if (fragment == fragment->root) {
- entryLabel = loopLabel;
- } else {
- entryLabel = lir->ins0(LIR_label);
- }
- NanoAssert(entryLabel);
- NanoAssert(!fragment->loopLabel);
- fragment->loopLabel = entryLabel;
- })
-
- lirbuf->sp = addName(lir->insLoad(LIR_ldp, lirbuf->state, (int)offsetof(InterpState, sp)), "sp");
- lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp");
- cx_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx");
- eos_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos");
- eor_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor");
-
-    /* If we came from a side exit, we might not have enough global types. */
- if (ti->globalSlots->length() > ti->nGlobalTypes())
- SpecializeTreesToMissingGlobals(cx, globalObj, ti);
-
-    /* Read into registers all values on the stack and all globals we know so far. */
- import(treeInfo, lirbuf->sp, stackSlots, ngslots, callDepth, typeMap);
-
- if (fragment == fragment->root) {
- /*
- * We poll the operation callback request flag. It is updated asynchronously whenever
- * the callback is to be invoked.
- */
- LIns* x = lir->insLoad(LIR_ld, cx_ins, offsetof(JSContext, operationCallbackFlag));
- guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT));
- }
-
- /*
- * If we are attached to a tree call guard, make sure the guard the inner
- * tree exited from is what we expect it to be.
- */
- if (_anchor && _anchor->exitType == NESTED_EXIT) {
- LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state,
- offsetof(InterpState, lastTreeExitGuard)),
- "lastTreeExitGuard");
- guard(true, lir->ins2(LIR_peq, nested_ins, INS_CONSTPTR(innermostNestedGuard)), NESTED_EXIT);
- }
-}
-
-TraceRecorder::~TraceRecorder()
-{
- JS_ASSERT(treeInfo && fragment);
-
- if (trashSelf)
- TrashTree(cx, fragment->root);
-
- for (unsigned int i = 0; i < whichTreesToTrash.length(); i++)
- TrashTree(cx, whichTreesToTrash[i]);
-
- /* Purge the tempAlloc used during recording. */
- tempAlloc.reset();
- traceMonitor->lirbuf->clear();
-
-#ifdef DEBUG
- debug_only_stmt( delete verbose_filter; )
- delete sanity_filter_1;
- delete sanity_filter_2;
-#endif
- delete cse_filter;
- delete expr_filter;
- delete func_filter;
- delete float_filter;
- delete lir_buf_writer;
-}
-
-bool
-TraceRecorder::outOfMemory()
-{
- return traceMonitor->dataAlloc->outOfMemory() || tempAlloc.outOfMemory();
-}
-
-/* Add debug information to a LIR instruction as we emit it. */
-inline LIns*
-TraceRecorder::addName(LIns* ins, const char* name)
-{
-#ifdef JS_JIT_SPEW
- /*
- * We'll only ask for verbose Nanojit when .lcbits > 0, so there's no point
- * in adding names otherwise.
- */
- if (js_LogController.lcbits > 0)
- lirbuf->names->addName(ins, name);
-#endif
- return ins;
-}
-
-inline LIns*
-TraceRecorder::insImmVal(jsval val)
-{
- if (JSVAL_IS_TRACEABLE(val))
- treeInfo->gcthings.addUnique(val);
- return lir->insImmWord(val);
-}
-
-inline LIns*
-TraceRecorder::insImmObj(JSObject* obj)
-{
- treeInfo->gcthings.addUnique(OBJECT_TO_JSVAL(obj));
- return lir->insImmPtr((void*)obj);
-}
-
-inline LIns*
-TraceRecorder::insImmFun(JSFunction* fun)
-{
- treeInfo->gcthings.addUnique(OBJECT_TO_JSVAL(FUN_OBJECT(fun)));
- return lir->insImmPtr((void*)fun);
-}
-
-inline LIns*
-TraceRecorder::insImmStr(JSString* str)
-{
- treeInfo->gcthings.addUnique(STRING_TO_JSVAL(str));
- return lir->insImmPtr((void*)str);
-}
-
-inline LIns*
-TraceRecorder::insImmSprop(JSScopeProperty* sprop)
-{
- treeInfo->sprops.addUnique(sprop);
- return lir->insImmPtr((void*)sprop);
-}
-
-inline LIns*
-TraceRecorder::p2i(nanojit::LIns* ins)
-{
-#ifdef NANOJIT_64BIT
- return lir->ins1(LIR_qlo, ins);
-#else
- return ins;
-#endif
-}
-
-/* Determine the current call depth (starting with the entry frame.) */
-unsigned
-TraceRecorder::getCallDepth() const
-{
- return callDepth;
-}
-
-/* Determine the offset in the native global frame for a jsval we track. */
-ptrdiff_t
-TraceRecorder::nativeGlobalOffset(jsval* p) const
-{
- JS_ASSERT(isGlobal(p));
- if (size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS)
- return sizeof(InterpState) + size_t(p - globalObj->fslots) * sizeof(double);
- return sizeof(InterpState) + ((p - globalObj->dslots) + JS_INITIAL_NSLOTS) * sizeof(double);
-}
-
-/* Determine whether a value is a global stack slot. */
-bool
-TraceRecorder::isGlobal(jsval* p) const
-{
- return ((size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) ||
- (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS)));
-}
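-/*
- * Worked example (illustrative): globals are stored unboxed as doubles right
- * after the InterpState header, so fslots[2] of the global object lives at
- * byte offset sizeof(InterpState) + 2 * sizeof(double) in the native global
- * frame, and dslots entries continue at logical index JS_INITIAL_NSLOTS
- * and up.
- */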
-
-/*
- * Return the offset in the native stack for the given jsval. More formally,
- * |p| must be the address of a jsval that is represented in the native stack
- * area. The return value is the offset, from InterpState::stackBase, in bytes,
- * where the native representation of |*p| is stored. To get the offset
- * relative to InterpState::sp, subtract TreeInfo::nativeStackBase.
- */
-JS_REQUIRES_STACK ptrdiff_t
-TraceRecorder::nativeStackOffset(jsval* p) const
-{
- CountSlotsVisitor visitor(p);
- VisitStackSlots(visitor, cx, callDepth);
- size_t offset = visitor.count() * sizeof(double);
-
- /*
- * If it's not in a pending frame, it must be on the stack of the current
- * frame above sp but below fp->slots + script->nslots.
- */
- if (!visitor.stopped()) {
- JS_ASSERT(size_t(p - cx->fp->slots) < cx->fp->script->nslots);
- offset += size_t(p - cx->fp->regs->sp) * sizeof(double);
- }
- return offset;
-}
-
-/* Track the maximum number of native frame slots we need during execution. */
-void
-TraceRecorder::trackNativeStackUse(unsigned slots)
-{
- if (slots > treeInfo->maxNativeStackSlots)
- treeInfo->maxNativeStackSlots = slots;
-}
-
-/*
- * Unbox a jsval into a slot. Slots are wide enough to hold double values
- * directly (instead of storing a pointer to them). We assert instead of
- * type checking. The caller must ensure the types are compatible.
- */
-static void
-ValueToNative(JSContext* cx, jsval v, JSTraceType type, double* slot)
-{
- uint8_t tag = JSVAL_TAG(v);
- switch (type) {
- case TT_OBJECT:
- JS_ASSERT(tag == JSVAL_OBJECT);
- JS_ASSERT(!JSVAL_IS_NULL(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)));
- *(JSObject**)slot = JSVAL_TO_OBJECT(v);
- debug_only_printf(LC_TMTracer,
- "object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
- JSVAL_IS_NULL(v)
- ? "null"
- : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);
- return;
-
- case TT_INT32:
- jsint i;
- if (JSVAL_IS_INT(v))
- *(jsint*)slot = JSVAL_TO_INT(v);
- else if (tag == JSVAL_DOUBLE && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(v), i))
- *(jsint*)slot = i;
- else
- JS_ASSERT(JSVAL_IS_INT(v));
- debug_only_printf(LC_TMTracer, "int<%d> ", *(jsint*)slot);
- return;
-
- case TT_DOUBLE:
- jsdouble d;
- if (JSVAL_IS_INT(v))
- d = JSVAL_TO_INT(v);
- else
- d = *JSVAL_TO_DOUBLE(v);
- JS_ASSERT(JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v));
- *(jsdouble*)slot = d;
- debug_only_printf(LC_TMTracer, "double<%g> ", d);
- return;
-
- case TT_JSVAL:
- JS_NOT_REACHED("found jsval type in an entry type map");
- return;
-
- case TT_STRING:
- JS_ASSERT(tag == JSVAL_STRING);
- *(JSString**)slot = JSVAL_TO_STRING(v);
- debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot));
- return;
-
- case TT_NULL:
- JS_ASSERT(tag == JSVAL_OBJECT);
- *(JSObject**)slot = NULL;
- debug_only_print0(LC_TMTracer, "null ");
- return;
-
- case TT_PSEUDOBOOLEAN:
- /* Watch out for pseudo-booleans. */
- JS_ASSERT(tag == JSVAL_SPECIAL);
- *(JSBool*)slot = JSVAL_TO_SPECIAL(v);
- debug_only_printf(LC_TMTracer, "pseudoboolean<%d> ", *(JSBool*)slot);
- return;
-
- case TT_FUNCTION: {
- JS_ASSERT(tag == JSVAL_OBJECT);
- JSObject* obj = JSVAL_TO_OBJECT(v);
- *(JSObject**)slot = obj;
-#ifdef DEBUG
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj);
- debug_only_printf(LC_TMTracer,
- "function<%p:%s> ", (void*) obj,
- fun->atom
- ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
- : "unnamed");
-#endif
- return;
- }
- }
-
- JS_NOT_REACHED("unexpected type");
-}
-
-/*
- * We maintain an emergency pool of doubles so we can recover safely if a trace
- * runs out of memory (doubles or objects).
- */
-static jsval
-AllocateDoubleFromReservedPool(JSContext* cx)
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- JS_ASSERT(tm->reservedDoublePoolPtr > tm->reservedDoublePool);
- return *--tm->reservedDoublePoolPtr;
-}
-
-static bool
-ReplenishReservedPool(JSContext* cx, JSTraceMonitor* tm)
-{
- /* We should not be called with a full pool. */
- JS_ASSERT((size_t) (tm->reservedDoublePoolPtr - tm->reservedDoublePool) <
- MAX_NATIVE_STACK_SLOTS);
-
- /*
- * When the GC runs in js_NewDoubleInRootedValue, it resets
- * tm->reservedDoublePoolPtr back to tm->reservedDoublePool.
- */
- JSRuntime* rt = cx->runtime;
- uintN gcNumber = rt->gcNumber;
- uintN lastgcNumber = gcNumber;
- jsval* ptr = tm->reservedDoublePoolPtr;
- while (ptr < tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) {
- if (!js_NewDoubleInRootedValue(cx, 0.0, ptr))
- goto oom;
-
- /* Check if the last call to js_NewDoubleInRootedValue GC'd. */
- if (rt->gcNumber != lastgcNumber) {
- lastgcNumber = rt->gcNumber;
- ptr = tm->reservedDoublePool;
-
-            /*
-             * Have we GC'd more than once? If so, we're probably running
-             * really low on memory; bail now.
-             */
- if (uintN(rt->gcNumber - gcNumber) > uintN(1))
- goto oom;
- continue;
- }
- ++ptr;
- }
- tm->reservedDoublePoolPtr = ptr;
- return true;
-
-oom:
- /*
-     * We're already under massive GC pressure, so there's no need to hold
-     * doubles back; we won't run any native code anyway.
- */
- tm->reservedDoublePoolPtr = tm->reservedDoublePool;
- return false;
-}
-
-void
-JSTraceMonitor::flush()
-{
- AUDIT(cacheFlushed);
-
- // recover profiling data from expiring Fragments
- verbose_only(
- for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
- for (VMFragment *f = vmfragments[i]; f; f = f->next) {
- JS_ASSERT(f->root == f);
- for (VMFragment *p = f; p; p = p->peer)
- js_FragProfiling_FragFinalizer(p, this);
- }
- }
- )
-
- verbose_only(
-        for (Seq<Fragment*>* f = branches; f; f = f->tail)
- js_FragProfiling_FragFinalizer(f->head, this);
- )
-
- dataAlloc->reset();
- codeAlloc->reset();
- tempAlloc->reset();
- reTempAlloc->reset();
-
- Allocator& alloc = *dataAlloc;
-
- for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
- globalStates[i].globalShape = -1;
- globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
- }
-
- assembler = new (alloc) Assembler(*codeAlloc, alloc, core, &js_LogController);
- lirbuf = new (alloc) LirBuffer(*tempAlloc);
- reLirBuf = new (alloc) LirBuffer(*reTempAlloc);
- verbose_only( branches = NULL; )
-
-#ifdef DEBUG
- labels = new (alloc) LabelMap(alloc, &js_LogController);
- reLirBuf->names =
- lirbuf->names = new (alloc) LirNameMap(alloc, labels);
-#endif
-
- memset(&vmfragments[0], 0, FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
- reFragments = new (alloc) REHashMap(alloc);
-
- needFlush = JS_FALSE;
-}
-
-static inline void
-MarkTreeInfo(JSTracer* trc, TreeInfo *ti)
-{
- jsval* vp = ti->gcthings.data();
- unsigned len = ti->gcthings.length();
- while (len--) {
- jsval v = *vp++;
- JS_SET_TRACING_NAME(trc, "jitgcthing");
- JS_CallTracer(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v));
- }
- JSScopeProperty** spropp = ti->sprops.data();
- len = ti->sprops.length();
- while (len--) {
- JSScopeProperty* sprop = *spropp++;
- sprop->trace(trc);
- }
-}
-
-void
-JSTraceMonitor::mark(JSTracer* trc)
-{
- if (!trc->context->runtime->gcFlushCodeCaches) {
- for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
- VMFragment* f = vmfragments[i];
- while (f) {
- if (TreeInfo* ti = (TreeInfo*)f->vmprivate)
- MarkTreeInfo(trc, ti);
- VMFragment* peer = (VMFragment*)f->peer;
- while (peer) {
- if (TreeInfo* ti = (TreeInfo*)peer->vmprivate)
- MarkTreeInfo(trc, ti);
- peer = (VMFragment*)peer->peer;
- }
- f = f->next;
- }
- }
- if (recorder)
- MarkTreeInfo(trc, recorder->getTreeInfo());
- }
-}
-
-/*
- * Box a value from the native stack back into the jsval format. Integers that
- * are too large to fit into a jsval are automatically boxed into
- * heap-allocated doubles.
- */
-template <typename E>
-static inline bool
-NativeToValueBase(JSContext* cx, jsval& v, JSTraceType type, double* slot)
-{
- jsint i;
- jsdouble d;
- switch (type) {
- case TT_OBJECT:
- v = OBJECT_TO_JSVAL(*(JSObject**)slot);
- JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
- debug_only_printf(LC_TMTracer,
- "object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
- JSVAL_IS_NULL(v)
- ? "null"
- : STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name);
- break;
-
- case TT_INT32:
- i = *(jsint*)slot;
- debug_only_printf(LC_TMTracer, "int<%d> ", i);
- store_int:
- if (INT_FITS_IN_JSVAL(i)) {
- v = INT_TO_JSVAL(i);
- break;
- }
- d = (jsdouble)i;
- goto store_double;
- case TT_DOUBLE:
- d = *slot;
- debug_only_printf(LC_TMTracer, "double<%g> ", d);
- if (JSDOUBLE_IS_INT(d, i))
- goto store_int;
- store_double: {
- /*
- * It's not safe to trigger the GC here, so use an emergency heap if we
- * are out of double boxes.
- */
- if (cx->doubleFreeList) {
-#ifdef DEBUG
- JSBool ok =
-#endif
- js_NewDoubleInRootedValue(cx, d, &v);
- JS_ASSERT(ok);
- return true;
- }
- return E::handleDoubleOOM(cx, d, v);
- }
-
- case TT_JSVAL:
- v = *(jsval*)slot;
- JS_ASSERT(v != JSVAL_ERROR_COOKIE); /* don't leak JSVAL_ERROR_COOKIE */
- debug_only_printf(LC_TMTracer, "box<%p> ", (void*)v);
- break;
-
- case TT_STRING:
- v = STRING_TO_JSVAL(*(JSString**)slot);
- debug_only_printf(LC_TMTracer, "string<%p> ", (void*)(*(JSString**)slot));
- break;
-
- case TT_NULL:
- JS_ASSERT(*(JSObject**)slot == NULL);
- v = JSVAL_NULL;
- debug_only_printf(LC_TMTracer, "null<%p> ", (void*)(*(JSObject**)slot));
- break;
-
- case TT_PSEUDOBOOLEAN:
- /* Watch out for pseudo-booleans. */
- v = SPECIAL_TO_JSVAL(*(JSBool*)slot);
- debug_only_printf(LC_TMTracer, "boolean<%d> ", *(JSBool*)slot);
- break;
-
- case TT_FUNCTION: {
- JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot));
- v = OBJECT_TO_JSVAL(*(JSObject**)slot);
-#ifdef DEBUG
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v));
- debug_only_printf(LC_TMTracer,
- "function<%p:%s> ", (void*)JSVAL_TO_OBJECT(v),
- fun->atom
- ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
- : "unnamed");
-#endif
- break;
- }
- }
- return true;
-}
-
-struct ReserveDoubleOOMHandler {
- static bool handleDoubleOOM(JSContext *cx, double d, jsval& v) {
- v = AllocateDoubleFromReservedPool(cx);
- JS_ASSERT(JSVAL_IS_DOUBLE(v) && *JSVAL_TO_DOUBLE(v) == 0.0);
- *JSVAL_TO_DOUBLE(v) = d;
- return true;
- }
-};
-
-static void
-NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot)
-{
-#ifdef DEBUG
- bool ok =
-#endif
-        NativeToValueBase<ReserveDoubleOOMHandler>(cx, v, type, slot);
- JS_ASSERT(ok);
-}
-
-struct FailDoubleOOMHandler {
- static bool handleDoubleOOM(JSContext *cx, double d, jsval& v) {
- return false;
- }
-};
-
-bool
-js_NativeToValue(JSContext* cx, jsval& v, JSTraceType type, double* slot)
-{
-    return NativeToValueBase<FailDoubleOOMHandler>(cx, v, type, slot);
-}
-
-class BuildNativeFrameVisitor : public SlotVisitorBase
-{
- JSContext *mCx;
- JSTraceType *mTypeMap;
- double *mGlobal;
- double *mStack;
-public:
- BuildNativeFrameVisitor(JSContext *cx,
- JSTraceType *typemap,
- double *global,
- double *stack) :
- mCx(cx),
- mTypeMap(typemap),
- mGlobal(global),
- mStack(stack)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- debug_only_printf(LC_TMTracer, "global%d: ", n);
- ValueToNative(mCx, *vp, *mTypeMap++, &mGlobal[slot]);
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, int count, JSStackFrame* fp) {
- for (int i = 0; i < count; ++i) {
- debug_only_printf(LC_TMTracer, "%s%d: ", stackSlotKind(), i);
- ValueToNative(mCx, *vp++, *mTypeMap++, mStack++);
- }
- return true;
- }
-};
-
-static JS_REQUIRES_STACK void
-BuildNativeFrame(JSContext *cx, JSObject *globalObj, unsigned callDepth,
- unsigned ngslots, uint16 *gslots,
- JSTraceType *typeMap, double *global, double *stack)
-{
- BuildNativeFrameVisitor visitor(cx, typeMap, global, stack);
- VisitSlots(visitor, cx, globalObj, callDepth, ngslots, gslots);
- debug_only_print0(LC_TMTracer, "\n");
-}
-
-class FlushNativeGlobalFrameVisitor : public SlotVisitorBase
-{
- JSContext *mCx;
- JSTraceType *mTypeMap;
- double *mGlobal;
-public:
- FlushNativeGlobalFrameVisitor(JSContext *cx,
- JSTraceType *typeMap,
- double *global) :
- mCx(cx),
- mTypeMap(typeMap),
- mGlobal(global)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- debug_only_printf(LC_TMTracer, "global%d=", n);
- NativeToValue(mCx, *vp, *mTypeMap++, &mGlobal[slot]);
- }
-};
-
-class FlushNativeStackFrameVisitor : public SlotVisitorBase
-{
- JSContext *mCx;
- JSTraceType *mTypeMap;
- double *mStack;
- jsval *mStop;
-public:
- FlushNativeStackFrameVisitor(JSContext *cx,
- JSTraceType *typeMap,
- double *stack,
- jsval *stop) :
- mCx(cx),
- mTypeMap(typeMap),
- mStack(stack),
- mStop(stop)
- {}
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- if (vp == mStop)
- return false;
- debug_only_printf(LC_TMTracer, "%s%u=", stackSlotKind(), unsigned(i));
- NativeToValue(mCx, *vp++, *mTypeMap++, mStack++);
- }
- return true;
- }
-};
-
-/* Box the values of the given native global frame back into the global object. This is infallible. */
-static JS_REQUIRES_STACK void
-FlushNativeGlobalFrame(JSContext *cx, double *global, unsigned ngslots,
- uint16 *gslots, JSTraceType *typemap)
-{
- FlushNativeGlobalFrameVisitor visitor(cx, typemap, global);
- JSObject *globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- VisitGlobalSlots(visitor, cx, globalObj, ngslots, gslots);
- debug_only_print0(LC_TMTracer, "\n");
-}
-
-/*
- * Returns the number of values on the native stack, excluding the innermost
- * frame. This walks all FrameInfos on the native frame stack and sums the
- * slot usage of each frame.
- */
-static int32
-StackDepthFromCallStack(InterpState* state, uint32 callDepth)
-{
- int32 nativeStackFramePos = 0;
-
- // Duplicate native stack layout computation: see VisitFrameSlots header comment.
- for (FrameInfo** fip = state->callstackBase; fip < state->rp + callDepth; fip++)
- nativeStackFramePos += (*fip)->callerHeight;
- return nativeStackFramePos;
-}
-
-/*
- * Generic function to read upvars on trace from slots of active frames.
- * T Traits type parameter. Must provide static functions:
- * interp_get(fp, slot) Read the value out of an interpreter frame.
- * native_slot(argc, slot) Return the position of the desired value in the on-trace
- * stack frame (with position 0 being callee).
- *
- * upvarLevel Static level of the function containing the upvar definition
- * slot Identifies the value to get. The meaning is defined by the traits type.
- * callDepth Call depth of current point relative to trace entry
- */
-template <typename T>
-inline JSTraceType
-GetUpvarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
-{
- InterpState* state = cx->interpState;
- FrameInfo** fip = state->rp + callDepth;
-
- /*
- * First search the FrameInfo call stack for an entry containing our
- * upvar, namely one with level == upvarLevel. The first FrameInfo is a
- * transition from the entry frame to some callee. However, it is not
- * known (from looking at the FrameInfo) whether the entry frame had a
- * callee. Rather than special-case this or insert more logic into the
- * loop, instead just stop before that FrameInfo (i.e. |> base| instead of
- * |>= base|), and let the code after the loop handle it.
- */
- int32 stackOffset = StackDepthFromCallStack(state, callDepth);
- while (--fip > state->callstackBase) {
- FrameInfo* fi = *fip;
-
- /*
- * The loop starts aligned to the top of the stack, so move down to the first meaningful
- * callee. Then read the callee directly from the frame.
- */
- stackOffset -= fi->callerHeight;
- JSObject* callee = *(JSObject**)(&state->stackBase[stackOffset]);
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, callee);
- uintN calleeLevel = fun->u.i.script->staticLevel;
- if (calleeLevel == upvarLevel) {
- /*
- * Now find the upvar's value in the native stack. stackOffset is
- * the offset of the start of the activation record corresponding
- * to *fip in the native stack.
- */
- uint32 native_slot = T::native_slot(fi->callerArgc, slot);
- *result = state->stackBase[stackOffset + native_slot];
- return fi->get_typemap()[native_slot];
- }
- }
-
- // Next search the trace entry frame, which is not in the FrameInfo stack.
- if (state->outermostTree->script->staticLevel == upvarLevel) {
- uint32 argc = ((VMFragment*) state->outermostTree->fragment)->argc;
- uint32 native_slot = T::native_slot(argc, slot);
- *result = state->stackBase[native_slot];
- return state->callstackBase[0]->get_typemap()[native_slot];
- }
-
- /*
- * If we did not find the upvar in the frames for the active traces,
- * then we simply get the value from the interpreter state.
- */
- JS_ASSERT(upvarLevel < JS_DISPLAY_SIZE);
- JSStackFrame* fp = cx->display[upvarLevel];
- jsval v = T::interp_get(fp, slot);
- JSTraceType type = getCoercedType(v);
- ValueToNative(cx, v, type, result);
- return type;
-}
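-/*
- * Illustrative summary (not from the original source): the lookup above
- * tries, in order, (1) FrameInfo records for frames entered on this trace,
- * (2) the trace entry frame itself, and (3) the interpreter's display. Only
- * case (3) needs ValueToNative(), since the first two already hold unboxed
- * doubles on the native stack.
- */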
-
-// For this traits type, 'slot' is the argument index, which may be -2 for callee.
-struct UpvarArgTraits {
- static jsval interp_get(JSStackFrame* fp, int32 slot) {
- return fp->argv[slot];
- }
-
- static uint32 native_slot(uint32 argc, int32 slot) {
- return 2 /*callee,this*/ + slot;
- }
-};
-
-uint32 JS_FASTCALL
-GetUpvarArgOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
-{
-    return GetUpvarOnTrace<UpvarArgTraits>(cx, upvarLevel, slot, callDepth, result);
-}
-
-// For this traits type, 'slot' is an index into the local slots array.
-struct UpvarVarTraits {
- static jsval interp_get(JSStackFrame* fp, int32 slot) {
- return fp->slots[slot];
- }
-
- static uint32 native_slot(uint32 argc, int32 slot) {
- return 3 /*callee,this,arguments*/ + argc + slot;
- }
-};
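-/*
- * Worked example (illustrative): with the frame laid out as
- *     callee, this, arguments, arg0..argc-1, var0...
- * local slot 1 of a function entered with argc == 2 sits at native position
- * 3 + 2 + 1 == 6, which is what UpvarVarTraits::native_slot(2, 1) returns;
- * UpvarArgTraits::native_slot(2, 0) likewise yields position 2 for arg0.
- */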
-
-uint32 JS_FASTCALL
-GetUpvarVarOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth, double* result)
-{
-    return GetUpvarOnTrace<UpvarVarTraits>(cx, upvarLevel, slot, callDepth, result);
-}
-
-/*
- * For this traits type, 'slot' is an index into the stack area (within slots,
- * after nfixed) of a frame with no function. (On trace, the top-level frame is
- * the only one that can have no function.)
- */
-struct UpvarStackTraits {
- static jsval interp_get(JSStackFrame* fp, int32 slot) {
- return fp->slots[slot + fp->script->nfixed];
- }
-
- static uint32 native_slot(uint32 argc, int32 slot) {
- /*
- * Locals are not imported by the tracer when the frame has no
- * function, so we do not add fp->script->nfixed.
- */
- JS_ASSERT(argc == 0);
- return slot;
- }
-};
-
-uint32 JS_FASTCALL
-GetUpvarStackOnTrace(JSContext* cx, uint32 upvarLevel, int32 slot, uint32 callDepth,
- double* result)
-{
-    return GetUpvarOnTrace<UpvarStackTraits>(cx, upvarLevel, slot, callDepth, result);
-}
-
-// Parameters needed to access a value from a closure on trace.
-struct ClosureVarInfo
-{
- jsid id;
- uint32 slot;
- uint32 callDepth;
- uint32 resolveFlags;
-};
-
-/*
- * Generic function to read upvars from Call objects of active heavyweight functions.
- * call Callee Function object in which the upvar is accessed.
- */
-template <typename T>
-inline uint32
-GetFromClosure(JSContext* cx, JSObject* call, const ClosureVarInfo* cv, double* result)
-{
- JS_ASSERT(OBJ_GET_CLASS(cx, call) == &js_CallClass);
-
- InterpState* state = cx->interpState;
-
-#ifdef DEBUG
- int32 stackOffset = StackDepthFromCallStack(state, cv->callDepth);
- FrameInfo** fip = state->rp + cv->callDepth;
- while (--fip > state->callstackBase) {
- FrameInfo* fi = *fip;
- JSObject* callee = *(JSObject**)(&state->stackBase[stackOffset]);
- if (callee == call) {
- // This is not reachable as long as JSOP_LAMBDA is not traced:
- // - The upvar is found at this point only if the upvar was defined on a frame that was
- // entered on this trace.
- // - The upvar definition must be (dynamically, and thus on trace) before the definition
- // of the function that uses the upvar.
- // - Therefore, if the upvar is found at this point, the function definition JSOP_LAMBDA
- // is on the trace.
- JS_NOT_REACHED("JSOP_NAME variable found in outer trace");
- }
- stackOffset -= fi->callerHeight;
- }
-#endif
-
- /*
- * Here we specifically want to check the call object of the trace entry frame.
- */
- uint32 slot = cv->slot;
- VOUCH_DOES_NOT_REQUIRE_STACK();
- if (cx->fp->callobj == call) {
- slot = T::adj_slot(cx->fp, slot);
- *result = state->stackBase[slot];
- return state->callstackBase[0]->get_typemap()[slot];
- }
-
- JSStackFrame* fp = (JSStackFrame*) call->getPrivate();
- jsval v;
- if (fp) {
- v = T::slots(fp)[slot];
- } else {
- JS_ASSERT(cv->resolveFlags != JSRESOLVE_INFER);
- JSAutoResolveFlags rf(cx, cv->resolveFlags);
-#ifdef DEBUG
- JSBool rv =
-#endif
- js_GetPropertyHelper(cx, call, cv->id, JS_FALSE, &v);
- JS_ASSERT(rv);
- }
- JSTraceType type = getCoercedType(v);
- ValueToNative(cx, v, type, result);
- return type;
-}
-
-struct ArgClosureTraits
-{
- // See also UpvarArgTraits.
- static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return 2 + slot; }
-
- // Generate the adj_slot computation in LIR.
- static inline LIns* adj_slot_lir(LirWriter* lir, LIns* fp_ins, unsigned slot) {
- return lir->insImm(2 + slot);
- }
-
- // See also UpvarArgTraits.
- static inline jsval* slots(JSStackFrame* fp) { return fp->argv; }
-private:
- ArgClosureTraits();
-};
-
-uint32 JS_FASTCALL
-GetClosureArg(JSContext* cx, JSObject* callee, const ClosureVarInfo* cv, double* result)
-{
-    return GetFromClosure<ArgClosureTraits>(cx, callee, cv, result);
-}
-
-struct VarClosureTraits
-{
- // See also UpvarVarTraits.
- static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return 3 + fp->argc + slot; }
-
- // See also UpvarVarTraits.
- static inline LIns* adj_slot_lir(LirWriter* lir, LIns* fp_ins, unsigned slot) {
- LIns *argc_ins = lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc));
- return lir->ins2(LIR_add, lir->insImm(3 + slot), argc_ins);
- }
-
- // See also UpvarVarTraits.
- static inline jsval* slots(JSStackFrame* fp) { return fp->slots; }
-private:
- VarClosureTraits();
-};
-
-uint32 JS_FASTCALL
-GetClosureVar(JSContext* cx, JSObject* callee, const ClosureVarInfo* cv, double* result)
-{
-    return GetFromClosure<VarClosureTraits>(cx, callee, cv, result);
-}
-
-/**
- * Box the given native stack frame into the virtual machine stack. This
- * is infallible.
- *
- * @param callDepth the distance between the entry frame into our trace and
- * cx->fp when we make this call. If this is not called as a
- * result of a nested exit, callDepth is 0.
- * @param mp an array of JSTraceTypes that indicate what the types of the things
- * on the stack are.
- * @param np pointer to the native stack. We want to copy values from here to
- * the JS stack as needed.
- * @param stopFrame if non-null, this frame and everything above it should not
- * be restored.
- * @return the number of things we popped off of np.
- */
-static JS_REQUIRES_STACK int
-FlushNativeStackFrame(JSContext* cx, unsigned callDepth, JSTraceType* mp, double* np,
- JSStackFrame* stopFrame)
-{
- jsval* stopAt = stopFrame ? &stopFrame->argv[-2] : NULL;
-
- /* Root all string and object references first (we don't need to call the GC for this). */
- FlushNativeStackFrameVisitor visitor(cx, mp, np, stopAt);
- VisitStackSlots(visitor, cx, callDepth);
-
- // Restore thisp from the now-restored argv[-1] in each pending frame.
- // Keep in mind that we didn't restore frames at stopFrame and above!
- // Scope to keep |fp| from leaking into the macros we're using.
- {
- unsigned n = callDepth+1; // +1 to make sure we restore the entry frame
- JSStackFrame* fp = cx->fp;
- if (stopFrame) {
- for (; fp != stopFrame; fp = fp->down) {
- JS_ASSERT(n != 0);
- --n;
- }
-
- // Skip over stopFrame itself.
- JS_ASSERT(n != 0);
- --n;
- fp = fp->down;
- }
- for (; n != 0; fp = fp->down) {
- --n;
- if (fp->argv) {
- if (fp->argsobj &&
- js_GetArgsPrivateNative(JSVAL_TO_OBJECT(fp->argsobj))) {
- JSVAL_TO_OBJECT(fp->argsobj)->setPrivate(fp);
- }
-
- /*
- * We might return from trace with a different callee object, but it still
- * has to be the same JSFunction (FIXME: bug 471425, eliminate fp->callee).
- */
- JS_ASSERT(JSVAL_IS_OBJECT(fp->argv[-1]));
- JS_ASSERT(HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(fp->argv[-2])));
- JS_ASSERT(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fp->argv[-2])) ==
- GET_FUNCTION_PRIVATE(cx, fp->callee()));
- JS_ASSERT(GET_FUNCTION_PRIVATE(cx, fp->callee()) == fp->fun);
-
- /*
- * SynthesizeFrame sets scopeChain to NULL, because we can't calculate the
- * correct scope chain until we have the final callee. Calculate the real
- * scope object here.
- */
- if (!fp->scopeChain) {
- fp->scopeChain = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(fp->argv[-2]));
- if (fp->fun->flags & JSFUN_HEAVYWEIGHT) {
- /*
- * Set hookData to null because the failure case for js_GetCallObject
- * involves it calling the debugger hook.
- *
- * Allocating the Call object must not fail, so use an object
- * previously reserved by ExecuteTree if needed.
- */
- void* hookData = ((JSInlineFrame*)fp)->hookData;
- ((JSInlineFrame*)fp)->hookData = NULL;
- JS_ASSERT(!JS_TRACE_MONITOR(cx).useReservedObjects);
- JS_TRACE_MONITOR(cx).useReservedObjects = JS_TRUE;
-#ifdef DEBUG
- JSObject *obj =
-#endif
- js_GetCallObject(cx, fp);
- JS_ASSERT(obj);
- JS_TRACE_MONITOR(cx).useReservedObjects = JS_FALSE;
- ((JSInlineFrame*)fp)->hookData = hookData;
- }
- }
- fp->thisp = JSVAL_TO_OBJECT(fp->argv[-1]);
- if (fp->flags & JSFRAME_CONSTRUCTING) // constructors always compute 'this'
- fp->flags |= JSFRAME_COMPUTED_THIS;
- }
- }
- }
- debug_only_print0(LC_TMTracer, "\n");
- return visitor.getTypeMap() - mp;
-}
-
-/* Emit load instructions onto the trace that read the initial stack state. */
-JS_REQUIRES_STACK void
-TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, JSTraceType t,
- const char *prefix, uintN index, JSStackFrame *fp)
-{
- LIns* ins;
- if (t == TT_INT32) { /* demoted */
- JS_ASSERT(isInt32(*p));
-
- /*
- * Ok, we have a valid demotion attempt pending, so insert an integer
- * read and promote it to double since all arithmetic operations expect
- * to see doubles on entry. The first op to use this slot will emit a
- * f2i cast which will cancel out the i2f we insert here.
- */
- ins = lir->insLoad(LIR_ld, base, offset);
- ins = lir->ins1(LIR_i2f, ins);
- } else {
- JS_ASSERT_IF(t != TT_JSVAL, isNumber(*p) == (t == TT_DOUBLE));
- if (t == TT_DOUBLE) {
- ins = lir->insLoad(LIR_ldq, base, offset);
- } else if (t == TT_PSEUDOBOOLEAN) {
- ins = lir->insLoad(LIR_ld, base, offset);
- } else {
- ins = lir->insLoad(LIR_ldp, base, offset);
- }
- }
- checkForGlobalObjectReallocation();
- tracker.set(p, ins);
-
-#ifdef DEBUG
- char name[64];
- JS_ASSERT(strlen(prefix) < 10);
- void* mark = NULL;
- jsuword* localNames = NULL;
- const char* funName = NULL;
- if (*prefix == 'a' || *prefix == 'v') {
- mark = JS_ARENA_MARK(&cx->tempPool);
- if (fp->fun->hasLocalNames())
- localNames = js_GetLocalNameArray(cx, fp->fun, &cx->tempPool);
- funName = fp->fun->atom ? js_AtomToPrintableString(cx, fp->fun->atom) : "";
- }
- if (!strcmp(prefix, "argv")) {
- if (index < fp->fun->nargs) {
- JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[index]);
- JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom));
- } else {
- JS_snprintf(name, sizeof name, "$%s.", funName, index);
- }
- } else if (!strcmp(prefix, "vars")) {
- JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(localNames[fp->fun->nargs + index]);
- JS_snprintf(name, sizeof name, "$%s.%s", funName, js_AtomToPrintableString(cx, atom));
- } else {
- JS_snprintf(name, sizeof name, "$%s%d", prefix, index);
- }
-
- if (mark)
- JS_ARENA_RELEASE(&cx->tempPool, mark);
- addName(ins, name);
-
- static const char* typestr[] = {
- "object", "int", "double", "jsval", "string", "null", "boolean", "function"
- };
- debug_only_printf(LC_TMTracer, "import vp=%p name=%s type=%s flags=%d\n",
- (void*)p, name, typestr[t & 7], t >> 3);
-#endif
-}
-
-class ImportGlobalSlotVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- LIns *mBase;
- JSTraceType *mTypemap;
-public:
- ImportGlobalSlotVisitor(TraceRecorder &recorder,
- LIns *base,
- JSTraceType *typemap) :
- mRecorder(recorder),
- mBase(base),
- mTypemap(typemap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- JS_ASSERT(*mTypemap != TT_JSVAL);
- mRecorder.import(mBase, mRecorder.nativeGlobalOffset(vp),
- vp, *mTypemap++, "global", n, NULL);
- }
-};
-
-class ImportBoxedStackSlotVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- LIns *mBase;
- ptrdiff_t mStackOffset;
- JSTraceType *mTypemap;
- JSStackFrame *mFp;
-public:
- ImportBoxedStackSlotVisitor(TraceRecorder &recorder,
- LIns *base,
- ptrdiff_t stackOffset,
- JSTraceType *typemap) :
- mRecorder(recorder),
- mBase(base),
- mStackOffset(stackOffset),
- mTypemap(typemap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- if (*mTypemap == TT_JSVAL) {
- mRecorder.import(mBase, mStackOffset, vp, TT_JSVAL,
- "jsval", i, fp);
- LIns *vp_ins = mRecorder.unbox_jsval(*vp, mRecorder.get(vp),
- mRecorder.copy(mRecorder.anchor));
- mRecorder.set(vp, vp_ins);
- }
- vp++;
- mTypemap++;
- mStackOffset += sizeof(double);
- }
- return true;
- }
-};
-
-class ImportUnboxedStackSlotVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- LIns *mBase;
- ptrdiff_t mStackOffset;
- JSTraceType *mTypemap;
- JSStackFrame *mFp;
-public:
- ImportUnboxedStackSlotVisitor(TraceRecorder &recorder,
- LIns *base,
- ptrdiff_t stackOffset,
- JSTraceType *typemap) :
- mRecorder(recorder),
- mBase(base),
- mStackOffset(stackOffset),
- mTypemap(typemap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- if (*mTypemap != TT_JSVAL) {
- mRecorder.import(mBase, mStackOffset, vp++, *mTypemap,
- stackSlotKind(), i, fp);
- }
- mTypemap++;
- mStackOffset += sizeof(double);
- }
- return true;
- }
-};
-
-// Like ImportUnboxedStackSlotVisitor, except that this does not import
-// slots past nfixed. It imports only the slots that belong totally to
-// the given frame.
-class ImportUnboxedFrameSlotVisitor : public ImportUnboxedStackSlotVisitor
-{
-public:
- ImportUnboxedFrameSlotVisitor(TraceRecorder &recorder,
- LIns *base,
- ptrdiff_t stackOffset,
- JSTraceType *typemap) :
- ImportUnboxedStackSlotVisitor(recorder, base, stackOffset, typemap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- if (vp == &fp->slots[fp->script->nfixed])
- return false;
- return ImportUnboxedStackSlotVisitor::visitStackSlots(vp, count, fp);
- }
-};
-
-JS_REQUIRES_STACK void
-TraceRecorder::import(TreeInfo* treeInfo, LIns* sp, unsigned stackSlots, unsigned ngslots,
- unsigned callDepth, JSTraceType* typeMap)
-{
- /*
- * If we get a partial list that doesn't have all the types (i.e. recording
- * from a side exit that was recorded but we added more global slots
- * later), merge the missing types from the entry type map. This is safe
- * because at the loop edge we verify that we have compatible types for all
- * globals (entry type and loop edge type match). While a different trace
- * of the tree might have had a guard with a different type map for these
- * slots we just filled in here (the guard we continue from didn't know
- * about them), since we didn't take that particular guard the only way we
- * could have ended up here is if that other trace had at its end a
- * compatible type distribution with the entry map. Since that's exactly
- * what we used to fill in the types our current side exit didn't provide,
- * this is always safe to do.
- */
-
- JSTraceType* globalTypeMap = typeMap + stackSlots;
- unsigned length = treeInfo->nGlobalTypes();
-
- /*
- * This is potentially the typemap of the side exit and thus shorter than
- * the tree's global type map.
- */
- if (ngslots < length) {
- MergeTypeMaps(&globalTypeMap /* out param */, &ngslots /* out param */,
- treeInfo->globalTypeMap(), length,
- (JSTraceType*)alloca(sizeof(JSTraceType) * length));
- }
- JS_ASSERT(ngslots == treeInfo->nGlobalTypes());
- ptrdiff_t offset = -treeInfo->nativeStackBase;
-
- /*
- * Check whether there are any values on the stack we have to unbox and do
- * that first before we waste any time fetching the state from the stack.
- */
- ImportBoxedStackSlotVisitor boxedStackVisitor(*this, sp, offset, typeMap);
- VisitStackSlots(boxedStackVisitor, cx, callDepth);
-
- ImportGlobalSlotVisitor globalVisitor(*this, lirbuf->state, globalTypeMap);
- VisitGlobalSlots(globalVisitor, cx, globalObj, ngslots,
- treeInfo->globalSlots->data());
-
- ImportUnboxedStackSlotVisitor unboxedStackVisitor(*this, sp, offset,
- typeMap);
- VisitStackSlots(unboxedStackVisitor, cx, callDepth);
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop)
-{
- uint32 setflags = (js_CodeSpec[*cx->fp->regs->pc].format & (JOF_SET | JOF_INCDEC | JOF_FOR));
-
- if (setflags) {
- if (!SPROP_HAS_STUB_SETTER(sprop))
- ABORT_TRACE_RV("non-stub setter", false);
- if (sprop->attrs & JSPROP_READONLY)
- ABORT_TRACE_RV("writing to a read-only property", false);
- }
-
- /* This check applies even when setflags == 0. */
- if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop))
- ABORT_TRACE_RV("non-stub getter", false);
-
- if (!SPROP_HAS_VALID_SLOT(sprop, scope))
- ABORT_TRACE_RV("slotless obj property", false);
-
- return true;
-}
-
-/* Lazily import a global slot if we don't already have it in the tracker. */
-JS_REQUIRES_STACK bool
-TraceRecorder::lazilyImportGlobalSlot(unsigned slot)
-{
- if (slot != uint16(slot)) /* we use a table of 16-bit ints, bail out if that's not enough */
- return false;
-
- /*
- * If the global object grows too large, alloca in ExecuteTree might fail,
- * so abort tracing on global objects with unreasonably many slots.
- */
- if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS)
- return false;
- jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
- if (known(vp))
- return true; /* we already have it */
- unsigned index = treeInfo->globalSlots->length();
-
- /* Add the slot to the list of interned global slots. */
- JS_ASSERT(treeInfo->nGlobalTypes() == treeInfo->globalSlots->length());
- treeInfo->globalSlots->add(slot);
- JSTraceType type = getCoercedType(*vp);
- if (type == TT_INT32 && oracle.isGlobalSlotUndemotable(cx, slot))
- type = TT_DOUBLE;
- treeInfo->typeMap.add(type);
- import(lirbuf->state, sizeof(struct InterpState) + slot*sizeof(double),
- vp, type, "global", index, NULL);
- SpecializeTreesToMissingGlobals(cx, globalObj, treeInfo);
- return true;
-}
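-
-/*
- * Layout note for the import above: globals live as doubles directly
- * after the InterpState in the native activation, indexed by the global
- * object's slot number; e.g. global slot 3 is read from
- *   state + sizeof(struct InterpState) + 3 * sizeof(double).
- */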
-
-/* Write back a value onto the stack or global frames. */
-LIns*
-TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset, bool demote)
-{
- /*
- * Sink all type casts targeting the stack into the side exit by simply storing the original
- * (uncasted) value. Each guard generates the side exit map based on the types of the
- * last stores to every stack location, so it's safe to not perform them on-trace.
- */
- if (demote && isPromoteInt(i))
- i = ::demote(lir, i);
- return lir->insStorei(i, base, offset);
-}
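-
-/*
- * Illustrative LIR for the demotion above: if i is a promotable int,
- * e.g. i = i2f(n), then ::demote(lir, i) recovers n and the raw int32 is
- * stored instead. The guard's side exit map still types the slot by its
- * last store, so an exit re-boxes the value correctly. (Sketch only; see
- * isPromoteInt and ::demote.)
- */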
-
-/* Update the tracker, then issue a write back store. */
-JS_REQUIRES_STACK void
-TraceRecorder::set(jsval* p, LIns* i, bool initializing, bool demote)
-{
- JS_ASSERT(i != NULL);
- JS_ASSERT(initializing || known(p));
- checkForGlobalObjectReallocation();
- tracker.set(p, i);
-
- /*
- * If we are writing to this location for the first time, calculate the
- * offset into the native frame manually. Otherwise just look up the last
- * load or store associated with the same source address (p) and use the
- * same offset/base.
- */
- LIns* x = nativeFrameTracker.get(p);
- if (!x) {
- if (isGlobal(p))
- x = writeBack(i, lirbuf->state, nativeGlobalOffset(p), demote);
- else
- x = writeBack(i, lirbuf->sp, -treeInfo->nativeStackBase + nativeStackOffset(p), demote);
- nativeFrameTracker.set(p, x);
- } else {
- JS_ASSERT(x->isop(LIR_sti) || x->isop(LIR_stqi));
-
- int disp;
- LIns *base = x->oprnd2();
-#ifdef NANOJIT_ARM
- if (base->isop(LIR_piadd)) {
- disp = base->oprnd2()->imm32();
- base = base->oprnd1();
- } else
-#endif
- disp = x->disp();
-
- JS_ASSERT(base == lirbuf->sp || base == lirbuf->state);
- JS_ASSERT(disp == ((base == lirbuf->sp) ?
- -treeInfo->nativeStackBase + nativeStackOffset(p) :
- nativeGlobalOffset(p)));
-
- writeBack(i, base, disp, demote);
- }
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::get(jsval* p)
-{
- JS_ASSERT(known(p));
- checkForGlobalObjectReallocation();
- return tracker.get(p);
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::addr(jsval* p)
-{
- return isGlobal(p)
- ? lir->ins2(LIR_piadd, lirbuf->state, INS_CONSTWORD(nativeGlobalOffset(p)))
- : lir->ins2(LIR_piadd, lirbuf->sp,
- INS_CONSTWORD(-treeInfo->nativeStackBase + nativeStackOffset(p)));
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::known(jsval* p)
-{
- checkForGlobalObjectReallocation();
- return tracker.has(p);
-}
-
-/*
- * The dslots of the global object are sometimes reallocated by the interpreter.
- * This function checks for that condition and re-maps the entries of the tracker
- * accordingly.
- */
-JS_REQUIRES_STACK void
-TraceRecorder::checkForGlobalObjectReallocation()
-{
- if (global_dslots != globalObj->dslots) {
- debug_only_print0(LC_TMTracer,
- "globalObj->dslots relocated, updating tracker\n");
- jsval* src = global_dslots;
- jsval* dst = globalObj->dslots;
- jsuint length = globalObj->dslots[-1] - JS_INITIAL_NSLOTS;
- LIns** map = (LIns**)alloca(sizeof(LIns*) * length);
- for (jsuint n = 0; n < length; ++n) {
- map[n] = tracker.get(src);
- tracker.set(src++, NULL);
- }
- for (jsuint n = 0; n < length; ++n)
- tracker.set(dst++, map[n]);
- global_dslots = globalObj->dslots;
- }
-}
-
-/* Determine whether the current branch is a loop edge (taken or not taken). */
-static JS_REQUIRES_STACK bool
-IsLoopEdge(jsbytecode* pc, jsbytecode* header)
-{
- switch (*pc) {
- case JSOP_IFEQ:
- case JSOP_IFNE:
- return ((pc + GET_JUMP_OFFSET(pc)) == header);
- case JSOP_IFEQX:
- case JSOP_IFNEX:
- return ((pc + GET_JUMPX_OFFSET(pc)) == header);
- default:
- JS_ASSERT((*pc == JSOP_AND) || (*pc == JSOP_ANDX) ||
- (*pc == JSOP_OR) || (*pc == JSOP_ORX));
- }
- return false;
-}
-
-class AdjustCallerGlobalTypesVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSContext *mCx;
- nanojit::LirBuffer *mLirbuf;
- nanojit::LirWriter *mLir;
- JSTraceType *mTypeMap;
-public:
- AdjustCallerGlobalTypesVisitor(TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mCx(mRecorder.cx),
- mLirbuf(mRecorder.lirbuf),
- mLir(mRecorder.lir),
- mTypeMap(typeMap)
- {}
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- LIns *ins = mRecorder.get(vp);
- bool isPromote = isPromoteInt(ins);
- if (isPromote && *mTypeMap == TT_DOUBLE) {
- mLir->insStorei(mRecorder.get(vp), mLirbuf->state,
- mRecorder.nativeGlobalOffset(vp));
-
- /*
- * Aggressively undo speculation so the inner tree will compile
- * if this fails.
- */
- oracle.markGlobalSlotUndemotable(mCx, slot);
- }
- JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
- ++mTypeMap;
- }
-};
-
-class AdjustCallerStackTypesVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSContext *mCx;
- nanojit::LirBuffer *mLirbuf;
- nanojit::LirWriter *mLir;
- unsigned mSlotnum;
- JSTraceType *mTypeMap;
-public:
- AdjustCallerStackTypesVisitor(TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mCx(mRecorder.cx),
- mLirbuf(mRecorder.lirbuf),
- mLir(mRecorder.lir),
- mSlotnum(0),
- mTypeMap(typeMap)
- {}
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- LIns *ins = mRecorder.get(vp);
- bool isPromote = isPromoteInt(ins);
- if (isPromote && *mTypeMap == TT_DOUBLE) {
- mLir->insStorei(mRecorder.get(vp), mLirbuf->sp,
- -mRecorder.treeInfo->nativeStackBase +
- mRecorder.nativeStackOffset(vp));
-
- /*
- * Aggressively undo speculation so the inner tree will compile
- * if this fails.
- */
- oracle.markStackSlotUndemotable(mCx, mSlotnum);
- }
- JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
- ++vp;
- ++mTypeMap;
- ++mSlotnum;
- }
- return true;
- }
-};
-
-/*
- * Promote slots if necessary to match the called tree's type map. This
- * function is infallible and must only be called if we are certain that it is
- * possible to reconcile the types for each slot in the inner and outer trees.
- */
-JS_REQUIRES_STACK void
-TraceRecorder::adjustCallerTypes(Fragment* f)
-{
- TreeInfo* ti = (TreeInfo*)f->vmprivate;
-
- AdjustCallerGlobalTypesVisitor globalVisitor(*this, ti->globalTypeMap());
- VisitGlobalSlots(globalVisitor, cx, *treeInfo->globalSlots);
-
- AdjustCallerStackTypesVisitor stackVisitor(*this, ti->stackTypeMap());
- VisitStackSlots(stackVisitor, cx, 0);
-
- JS_ASSERT(f == f->root);
-}
-
-JS_REQUIRES_STACK JSTraceType
-TraceRecorder::determineSlotType(jsval* vp)
-{
- JSTraceType m;
- LIns* i = get(vp);
- if (isNumber(*vp)) {
- m = isPromoteInt(i) ? TT_INT32 : TT_DOUBLE;
- } else if (JSVAL_IS_OBJECT(*vp)) {
- if (JSVAL_IS_NULL(*vp))
- m = TT_NULL;
- else if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp)))
- m = TT_FUNCTION;
- else
- m = TT_OBJECT;
- } else {
- JS_ASSERT(JSVAL_TAG(*vp) == JSVAL_STRING || JSVAL_IS_SPECIAL(*vp));
- JS_STATIC_ASSERT(static_cast<jsval>(TT_STRING) == JSVAL_STRING);
- JS_STATIC_ASSERT(static_cast<jsval>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
- m = JSTraceType(JSVAL_TAG(*vp));
- }
- JS_ASSERT(m != TT_INT32 || isInt32(*vp));
- return m;
-}
-
-class DetermineTypesVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSTraceType *mTypeMap;
-public:
- DetermineTypesVisitor(TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mTypeMap(typeMap)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- *mTypeMap++ = mRecorder.determineSlotType(vp);
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i)
- *mTypeMap++ = mRecorder.determineSlotType(vp++);
- return true;
- }
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-};
-
-#if defined JS_JIT_SPEW
-JS_REQUIRES_STACK static void
-TreevisLogExit(JSContext* cx, VMSideExit* exit)
-{
- debug_only_printf(LC_TMTreeVis, "TREEVIS ADDEXIT EXIT=%p TYPE=%s FRAG=%p PC=%p FILE=\"%s\""
- " LINE=%d OFFS=%d", (void*)exit, getExitName(exit->exitType),
- (void*)exit->from, (void*)cx->fp->regs->pc, cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp), FramePCOffset(cx->fp));
- debug_only_print0(LC_TMTreeVis, " STACK=\"");
- for (unsigned i = 0; i < exit->numStackSlots; i++)
- debug_only_printf(LC_TMTreeVis, "%c", typeChar[exit->stackTypeMap()[i]]);
- debug_only_print0(LC_TMTreeVis, "\" GLOBALS=\"");
- for (unsigned i = 0; i < exit->numGlobalSlots; i++)
- debug_only_printf(LC_TMTreeVis, "%c", typeChar[exit->globalTypeMap()[i]]);
- debug_only_print0(LC_TMTreeVis, "\"\n");
-}
-#endif
-
-JS_REQUIRES_STACK VMSideExit*
-TraceRecorder::snapshot(ExitType exitType)
-{
- JSStackFrame* fp = cx->fp;
- JSFrameRegs* regs = fp->regs;
- jsbytecode* pc = regs->pc;
-
- /*
- * Check for a return-value opcode that needs to restart at the next
- * instruction.
- */
- const JSCodeSpec& cs = js_CodeSpec[*pc];
-
- /*
- * When calling a _FAIL native, make the snapshot's pc point to the next
- * instruction after the CALL or APPLY. Even on failure, a _FAIL native
- * must not be called again from the interpreter.
- */
- bool resumeAfter = (pendingSpecializedNative &&
- JSTN_ERRTYPE(pendingSpecializedNative) == FAIL_STATUS);
- if (resumeAfter) {
- JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW ||
- *pc == JSOP_SETPROP || *pc == JSOP_SETNAME);
- pc += cs.length;
- regs->pc = pc;
- MUST_FLOW_THROUGH("restore_pc");
- }
-
- /*
- * Generate the entry map for the (possibly advanced) pc and stash it in
- * the trace.
- */
- unsigned stackSlots = NativeStackSlots(cx, callDepth);
-
- /*
- * It's sufficient to track the native stack use here since all stores
- * above the stack watermark defined by guards are killed.
- */
- trackNativeStackUse(stackSlots + 1);
-
- /* Capture the type map into a temporary location. */
- unsigned ngslots = treeInfo->globalSlots->length();
- unsigned typemap_size = (stackSlots + ngslots) * sizeof(JSTraceType);
-
- /* Use the recorder-local temporary type map. */
- JSTraceType* typemap = NULL;
- if (tempTypeMap.resize(typemap_size))
- typemap = tempTypeMap.begin(); /* crash if resize() fails. */
-
- /*
- * Determine the type of a store by looking at the current type of the
- * actual value the interpreter is using. For numbers we have to check what
- * kind of store we used last (integer or double) to figure out what the
- * side exit should reflect in its typemap.
- */
- DetermineTypesVisitor detVisitor(*this, typemap);
- VisitSlots(detVisitor, cx, callDepth, ngslots,
- treeInfo->globalSlots->data());
- JS_ASSERT(unsigned(detVisitor.getTypeMap() - typemap) ==
- ngslots + stackSlots);
-
- /*
- * If this snapshot is for a side exit that leaves a boxed jsval result on
- * the stack, make a note of this in the typemap. Examples include the
- * builtinStatus guard after calling a _FAIL builtin, a JSFastNative, or
- * GetPropertyByName; and the type guard in unbox_jsval after such a call
- * (also at the beginning of a trace branched from such a type guard).
- */
- if (pendingUnboxSlot ||
- (pendingSpecializedNative && (pendingSpecializedNative->flags & JSTN_UNBOX_AFTER))) {
- unsigned pos = stackSlots - 1;
- if (pendingUnboxSlot == cx->fp->regs->sp - 2)
- pos = stackSlots - 2;
- typemap[pos] = TT_JSVAL;
- }
-
- /* Now restore the original pc (after which early returns are ok). */
- if (resumeAfter) {
- MUST_FLOW_LABEL(restore_pc);
- regs->pc = pc - cs.length;
- } else {
- /*
- * If we take a snapshot on a goto, advance to the target address. This
- * avoids inner trees returning on a break goto, which the outer
- * recorder then would confuse with a break in the outer tree.
- */
- if (*pc == JSOP_GOTO)
- pc += GET_JUMP_OFFSET(pc);
- else if (*pc == JSOP_GOTOX)
- pc += GET_JUMPX_OFFSET(pc);
- }
-
- /*
- * Check if we already have a matching side exit; if so we can return that
- * side exit instead of creating a new one.
- */
- VMSideExit** exits = treeInfo->sideExits.data();
- unsigned nexits = treeInfo->sideExits.length();
- if (exitType == LOOP_EXIT) {
- for (unsigned n = 0; n < nexits; ++n) {
- VMSideExit* e = exits[n];
- if (e->pc == pc && e->imacpc == fp->imacpc &&
- ngslots == e->numGlobalSlots &&
- !memcmp(exits[n]->fullTypeMap(), typemap, typemap_size)) {
- AUDIT(mergedLoopExits);
-#if defined JS_JIT_SPEW
- TreevisLogExit(cx, e);
-#endif
- return e;
- }
- }
- }
-
- if (sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(JSTraceType) >
- LirBuffer::MAX_SKIP_PAYLOAD_SZB) {
- /*
- * ::snapshot() is infallible in the sense that callers don't
- * expect errors; but this is a trace-aborting error condition. So
- * mangle the request to consume zero slots, and mark the tree as
- * to-be-trashed. This should be safe as the trace will be aborted
- * before assembly or execution due to the call to
- * trackNativeStackUse above.
- */
- stackSlots = 0;
- ngslots = 0;
- typemap_size = 0;
- trashSelf = true;
- }
-
- /* We couldn't find a matching side exit, so create a new one. */
- VMSideExit* exit = (VMSideExit*)
- traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
- (stackSlots + ngslots) * sizeof(JSTraceType));
-
- /* Setup side exit structure. */
- memset(exit, 0, sizeof(VMSideExit));
- exit->from = fragment;
- exit->calldepth = callDepth;
- exit->numGlobalSlots = ngslots;
- exit->numStackSlots = stackSlots;
- exit->numStackSlotsBelowCurrentFrame = cx->fp->argv ?
- nativeStackOffset(&cx->fp->argv[-2]) / sizeof(double) :
- 0;
- exit->exitType = exitType;
- exit->block = fp->blockChain;
- if (fp->blockChain)
- treeInfo->gcthings.addUnique(OBJECT_TO_JSVAL(fp->blockChain));
- exit->pc = pc;
- exit->imacpc = fp->imacpc;
- exit->sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase;
- exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
- exit->nativeCalleeWord = 0;
- exit->lookupFlags = js_InferFlags(cx, 0);
- memcpy(exit->fullTypeMap(), typemap, typemap_size);
-
-#if defined JS_JIT_SPEW
- TreevisLogExit(cx, exit);
-#endif
- return exit;
-}
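-
-/*
- * Layout note for the snapshot above: a VMSideExit is allocated with its
- * type map inline, roughly
- *   [ VMSideExit | stack types (numStackSlots) | global types (numGlobalSlots) ]
- * which is what fullTypeMap(), stackTypeMap() and globalTypeMap() index
- * into.
- */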
-
-JS_REQUIRES_STACK GuardRecord*
-TraceRecorder::createGuardRecord(VMSideExit* exit)
-{
- GuardRecord* gr = new (*traceMonitor->dataAlloc) GuardRecord();
-
- memset(gr, 0, sizeof(GuardRecord));
- gr->exit = exit;
- exit->addGuard(gr);
-
- // gr->profCount is memset'd to zero
- verbose_only(
- gr->profGuardID = fragment->guardNumberer++;
- gr->nextInFrag = fragment->guardsForFrag;
- fragment->guardsForFrag = gr;
- )
-
- return gr;
-}
-
-/*
- * Emit a guard for condition (cond), expecting to evaluate to boolean result
- * (expected) and using the supplied side exit if the condition doesn't hold.
- */
-JS_REQUIRES_STACK void
-TraceRecorder::guard(bool expected, LIns* cond, VMSideExit* exit)
-{
- debug_only_printf(LC_TMRecorder,
- " About to try emitting guard code for "
- "SideExit=%p exitType=%s\n",
- (void*)exit, getExitName(exit->exitType));
-
- GuardRecord* guardRec = createGuardRecord(exit);
-
- /*
- * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's
- * safe to keep references to the side exits here. If we ever start
- * clearing those lirbufs, we have to make sure we purge the side exits
- * that then no longer will be in valid memory.
- */
- if (exit->exitType == LOOP_EXIT)
- treeInfo->sideExits.add(exit);
-
- if (!cond->isCond()) {
- expected = !expected;
- cond = cond->isQuad() ? lir->ins_peq0(cond) : lir->ins_eq0(cond);
- }
-
- LIns* guardIns =
- lir->insGuard(expected ? LIR_xf : LIR_xt, cond, guardRec);
- if (!guardIns) {
- debug_only_print0(LC_TMRecorder,
- " redundant guard, eliminated, no codegen\n");
- }
-}
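-
-/*
- * Guard polarity, for reference: LIR_xf exits when the condition is false
- * (used when we expect it to hold), LIR_xt exits when it is true. The
- * !cond->isCond() path above flips 'expected' because it rewrites cond
- * into its zero-test (ins_eq0/ins_peq0).
- */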
-
-JS_REQUIRES_STACK VMSideExit*
-TraceRecorder::copy(VMSideExit* copy)
-{
- size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots;
- VMSideExit* exit = (VMSideExit*)
- traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
- typemap_size * sizeof(JSTraceType));
-
- /* Copy side exit structure. */
- memcpy(exit, copy, sizeof(VMSideExit) + typemap_size * sizeof(JSTraceType));
- exit->guards = NULL;
- exit->from = fragment;
- exit->target = NULL;
-
- /*
- * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's
- * safe to keep references to the side exits here. If we ever start
- * clearing those lirbufs, we have to make sure we purge the side exits
- * that then no longer will be in valid memory.
- */
- if (exit->exitType == LOOP_EXIT)
- treeInfo->sideExits.add(exit);
-#if defined JS_JIT_SPEW
- TreevisLogExit(cx, exit);
-#endif
- return exit;
-}
-
-/*
- * Emit a guard for condition (cond), expecting to evaluate to boolean result
- * (expected) and generate a side exit with type exitType to jump to if the
- * condition does not hold.
- */
-JS_REQUIRES_STACK void
-TraceRecorder::guard(bool expected, LIns* cond, ExitType exitType)
-{
- guard(expected, cond, snapshot(exitType));
-}
-
-/*
- * Determine whether any context associated with the same thread as cx is
- * executing native code.
- */
-static inline bool
-ProhibitFlush(JSContext* cx)
-{
- if (cx->interpState) // early out if the given context is in native code
- return true;
-
- JSCList *cl;
-
-#ifdef JS_THREADSAFE
- JSThread* thread = cx->thread;
- for (cl = thread->contextList.next; cl != &thread->contextList; cl = cl->next)
- if (CX_FROM_THREAD_LINKS(cl)->interpState)
- return true;
-#else
- JSRuntime* rt = cx->runtime;
- for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next)
- if (js_ContextFromLinkField(cl)->interpState)
- return true;
-#endif
- return false;
-}
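-
-/*
- * Rationale: a context with a live InterpState is running (or has deeply
- * bailed out of) native trace code whose state points into the code
- * cache, so a flush must be deferred while any such context exists;
- * ResetJITImpl below sets needFlush so the flush happens later instead.
- */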
-
-static void
-ResetJITImpl(JSContext* cx)
-{
- if (!TRACING_ENABLED(cx))
- return;
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- debug_only_print0(LC_TMTracer, "Flushing cache.\n");
- if (tm->recorder) {
- JS_ASSERT_NOT_ON_TRACE(cx);
- js_AbortRecording(cx, "flush cache");
- }
- if (ProhibitFlush(cx)) {
- debug_only_print0(LC_TMTracer, "Deferring JIT flush due to deep bail.\n");
- tm->needFlush = JS_TRUE;
- return;
- }
- tm->flush();
-}
-
-#ifdef MOZ_TRACEVIS
-static JS_INLINE void
-ResetJIT(JSContext* cx, TraceVisFlushReason r)
-{
- js_LogTraceVisEvent(cx, S_RESET, r);
- ResetJITImpl(cx);
-}
-#else
-#define ResetJIT(cx, r) ResetJITImpl(cx)
-#endif
-
-void
-js_FlushJITCache(JSContext *cx)
-{
- ResetJIT(cx, FR_OOM);
-}
-
-/* Compile the current fragment. */
-JS_REQUIRES_STACK bool
-TraceRecorder::compile(JSTraceMonitor* tm)
-{
-#ifdef MOZ_TRACEVIS
- TraceVisStateObj tvso(cx, S_COMPILE);
-#endif
-
- if (tm->needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return false;
- }
- if (treeInfo->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) {
- debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n");
- Blacklist((jsbytecode*) fragment->root->ip);
- return false;
- }
- if (anchor && anchor->exitType != CASE_EXIT)
- ++treeInfo->branchCount;
- if (outOfMemory())
- return false;
-
- Assembler *assm = tm->assembler;
- nanojit::compile(assm, fragment verbose_only(, tempAlloc, tm->labels));
- if (outOfMemory())
- return false;
-
- if (assm->error() != nanojit::None) {
- debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n");
- Blacklist((jsbytecode*) fragment->root->ip);
- return false;
- }
- ResetRecordingAttempts(cx, (jsbytecode*) fragment->ip);
- ResetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip);
- if (anchor) {
-#ifdef NANOJIT_IA32
- if (anchor->exitType == CASE_EXIT)
- assm->patch(anchor, anchor->switchInfo);
- else
-#endif
- assm->patch(anchor);
- }
- JS_ASSERT(fragment->code());
- JS_ASSERT(!fragment->vmprivate);
- if (fragment == fragment->root)
- fragment->vmprivate = treeInfo;
-
- /* :TODO: windows support */
-#if defined DEBUG && !defined WIN32
- const char* filename = cx->fp->script->filename;
- char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16);
- sprintf(label, "%s:%u", filename ? filename : "",
- js_FramePCToLineNumber(cx, cx->fp));
- tm->labels->add(fragment, sizeof(Fragment), 0, label);
- js_free(label);
-#endif
- AUDIT(traceCompleted);
- return true;
-}
-
-static void
-JoinPeers(Assembler* assm, VMSideExit* exit, VMFragment* target)
-{
- exit->target = target;
- assm->patch(exit);
-
- debug_only_printf(LC_TMTreeVis, "TREEVIS JOIN ANCHOR=%p FRAG=%p\n", (void*)exit, (void*)target);
-
- if (exit->root() == target)
- return;
-
- target->getTreeInfo()->dependentTrees.addUnique(exit->root());
- exit->root()->getTreeInfo()->linkedTrees.addUnique(target);
-}
-
-/* Results of trying to connect an arbitrary type A with arbitrary type B */
-enum TypeCheckResult
-{
- TypeCheck_Okay, /* Okay: same type */
- TypeCheck_Promote, /* Okay: Type A needs f2i() */
- TypeCheck_Demote, /* Okay: Type A needs i2f() */
- TypeCheck_Undemote, /* Bad: Slot is undemotable */
- TypeCheck_Bad /* Bad: incompatible types */
-};
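-
-/*
- * Illustrative outcome table for SlotMap::checkType below (current value
- * vs. recorded type):
- *   promotable int vs TT_INT32  -> TypeCheck_Promote  (re-apply f2i)
- *   promotable int vs TT_DOUBLE -> TypeCheck_Demote   (keep the i2f)
- *   true double    vs TT_INT32  -> TypeCheck_Undemote (slot must widen)
- *   non-number     vs TT_INT32  -> TypeCheck_Bad
- */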
-
-class SlotMap : public SlotVisitorBase
-{
- public:
- struct SlotInfo
- {
- SlotInfo()
- : v(0), promoteInt(false), lastCheck(TypeCheck_Bad)
- {}
- SlotInfo(jsval* v, bool promoteInt)
- : v(v), promoteInt(promoteInt), lastCheck(TypeCheck_Bad)
- {}
- jsval *v;
- bool promoteInt;
- TypeCheckResult lastCheck;
- };
-
- SlotMap(TraceRecorder& rec, unsigned slotOffset)
- : mRecorder(rec),
- mCx(rec.cx),
- slots(NULL),
- slotOffset(slotOffset)
- {
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot)
- {
- addSlot(vp);
- }
-
- JS_ALWAYS_INLINE SlotMap::SlotInfo&
- operator [](unsigned i)
- {
- return slots[i];
- }
-
- JS_ALWAYS_INLINE SlotMap::SlotInfo&
- get(unsigned i)
- {
- return slots[i];
- }
-
- JS_ALWAYS_INLINE unsigned
- length()
- {
- return slots.length();
- }
-
- /**
- * Possible return states:
- *
- * TypeConsensus_Okay: All types are compatible. Caller must go through slot list and handle
- * promote/demotes.
- * TypeConsensus_Bad: Types are not compatible. Individual type check results are undefined.
- * TypeConsensus_Undemotes: Types would be compatible if slots were marked as undemotable
- * before recording began. Caller can go through slot list and mark
- * such slots as undemotable.
- */
- JS_REQUIRES_STACK TypeConsensus
- checkTypes(TreeInfo* ti)
- {
- if (ti->typeMap.length() < slotOffset || length() != ti->typeMap.length() - slotOffset)
- return TypeConsensus_Bad;
-
- bool has_undemotes = false;
- for (unsigned i = 0; i < length(); i++) {
- TypeCheckResult result = checkType(i, ti->typeMap[i + slotOffset]);
- if (result == TypeCheck_Bad)
- return TypeConsensus_Bad;
- if (result == TypeCheck_Undemote)
- has_undemotes = true;
- slots[i].lastCheck = result;
- }
- if (has_undemotes)
- return TypeConsensus_Undemotes;
- return TypeConsensus_Okay;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- addSlot(jsval* vp)
- {
- slots.add(SlotInfo(vp, isNumber(*vp) && isPromoteInt(mRecorder.get(vp))));
- }
-
- JS_REQUIRES_STACK void
- markUndemotes()
- {
- for (unsigned i = 0; i < length(); i++) {
- if (get(i).lastCheck == TypeCheck_Undemote)
- MarkSlotUndemotable(mRecorder.cx, mRecorder.treeInfo, slotOffset + i);
- }
- }
-
- JS_REQUIRES_STACK virtual void
- adjustTypes()
- {
- for (unsigned i = 0; i < length(); i++) {
- SlotInfo& info = get(i);
- JS_ASSERT(info.lastCheck != TypeCheck_Undemote && info.lastCheck != TypeCheck_Bad);
- if (info.lastCheck == TypeCheck_Promote) {
- JS_ASSERT(isNumber(*info.v));
- mRecorder.set(info.v, mRecorder.f2i(mRecorder.get(info.v)));
- } else if (info.lastCheck == TypeCheck_Demote) {
- JS_ASSERT(isNumber(*info.v));
- JS_ASSERT(mRecorder.get(info.v)->isQuad());
-
- /* Never demote this final i2f. */
- mRecorder.set(info.v, mRecorder.get(info.v), false, false);
- }
- }
- }
- private:
- TypeCheckResult
- checkType(unsigned i, JSTraceType t)
- {
- debug_only_printf(LC_TMTracer,
- "checkType slot %d: interp=%c typemap=%c isNum=%d promoteInt=%d\n",
- i,
- typeChar[getCoercedType(*slots[i].v)],
- typeChar[t],
- isNumber(*slots[i].v),
- slots[i].promoteInt);
- switch (t) {
- case TT_INT32:
- if (!isNumber(*slots[i].v))
- return TypeCheck_Bad; /* Not a number? Type mismatch. */
- /* This is always a type mismatch, we can't close a double to an int. */
- if (!slots[i].promoteInt)
- return TypeCheck_Undemote;
- /* Looks good, slot is an int32, the last instruction should be promotable. */
- JS_ASSERT(isInt32(*slots[i].v) && slots[i].promoteInt);
- return TypeCheck_Promote;
- case TT_DOUBLE:
- if (!isNumber(*slots[i].v))
- return TypeCheck_Bad; /* Not a number? Type mismatch. */
- if (slots[i].promoteInt)
- return TypeCheck_Demote;
- return TypeCheck_Okay;
- case TT_NULL:
- return JSVAL_IS_NULL(*slots[i].v) ? TypeCheck_Okay : TypeCheck_Bad;
- case TT_FUNCTION:
- return !JSVAL_IS_PRIMITIVE(*slots[i].v) &&
- HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*slots[i].v)) ?
- TypeCheck_Okay : TypeCheck_Bad;
- case TT_OBJECT:
- return !JSVAL_IS_PRIMITIVE(*slots[i].v) &&
- !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*slots[i].v)) ?
- TypeCheck_Okay : TypeCheck_Bad;
- default:
- return getCoercedType(*slots[i].v) == t ? TypeCheck_Okay : TypeCheck_Bad;
- }
- JS_NOT_REACHED("shouldn't fall through type check switch");
- }
- protected:
- TraceRecorder& mRecorder;
- JSContext* mCx;
- Queue<SlotInfo> slots;
- unsigned slotOffset;
-};
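-
-/*
- * Typical SlotMap lifecycle, compressed from closeLoop() and
- * selfTypeStability() below:
- *   DefaultSlotMap slotMap(*this);
- *   VisitSlots(slotMap, cx, 0, *treeInfo->globalSlots);  // collect slots
- *   slotMap.checkTypes(ti);    // -> Okay / Undemotes / Bad
- *   slotMap.markUndemotes();   // on Undemotes: record them in the oracle
- *   slotMap.adjustTypes();     // on Okay: emit pending promotes/demotes
- */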
-
-class DefaultSlotMap : public SlotMap
-{
- public:
- DefaultSlotMap(TraceRecorder& tr) : SlotMap(tr, 0)
- {
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp)
- {
- for (size_t i = 0; i < count; i++)
- addSlot(&vp[i]);
- return true;
- }
-};
-
-JS_REQUIRES_STACK TypeConsensus
-TraceRecorder::selfTypeStability(SlotMap& slotMap)
-{
- debug_only_printf(LC_TMTracer, "Checking type stability against self=%p\n", (void*)fragment);
- TypeConsensus consensus = slotMap.checkTypes(treeInfo);
-
- /* Best case: loop jumps back to its own header */
- if (consensus == TypeConsensus_Okay)
- return TypeConsensus_Okay;
-
- /* If the only thing keeping this loop from being stable is undemotions, then mark relevant
- * slots as undemotable.
- */
- if (consensus == TypeConsensus_Undemotes)
- slotMap.markUndemotes();
-
- return consensus;
-}
-
-JS_REQUIRES_STACK TypeConsensus
-TraceRecorder::peerTypeStability(SlotMap& slotMap, VMFragment** pPeer)
-{
- /* See if there are any peers that would make this stable */
- VMFragment* root = (VMFragment*)fragment->root;
- VMFragment* peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape,
- root->argc);
- JS_ASSERT(peer != NULL);
- bool onlyUndemotes = false;
- for (; peer != NULL; peer = (VMFragment*)peer->peer) {
- if (!peer->vmprivate || peer == fragment)
- continue;
- debug_only_printf(LC_TMTracer, "Checking type stability against peer=%p\n", (void*)peer);
- TypeConsensus consensus = slotMap.checkTypes((TreeInfo*)peer->vmprivate);
- if (consensus == TypeConsensus_Okay) {
- *pPeer = peer;
- /* Return this even though there will be linkage; the trace itself is not stable.
- * Caller should inspect pPeer to check for a compatible peer.
- */
- return TypeConsensus_Okay;
- }
- if (consensus == TypeConsensus_Undemotes)
- onlyUndemotes = true;
- }
-
- return onlyUndemotes ? TypeConsensus_Undemotes : TypeConsensus_Bad;
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::closeLoop(TypeConsensus &consensus)
-{
- DefaultSlotMap slotMap(*this);
- VisitSlots(slotMap, cx, 0, *treeInfo->globalSlots);
- return closeLoop(slotMap, snapshot(UNSTABLE_LOOP_EXIT), consensus);
-}
-
-/* Complete and compile a trace and link it to the existing tree if appropriate.
- * Returns true if something was compiled. Outparam is always set.
- */
-JS_REQUIRES_STACK bool
-TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& consensus)
-{
- /*
- * We should have arrived back at the loop header, and hence we don't want
- * to be in an imacro here and the opcode should be either JSOP_TRACE or, in
- * case this loop was blacklisted in the meantime, JSOP_NOP.
- */
- JS_ASSERT((*cx->fp->regs->pc == JSOP_TRACE || *cx->fp->regs->pc == JSOP_NOP) &&
- !cx->fp->imacpc);
-
- if (callDepth != 0) {
- debug_only_print0(LC_TMTracer,
- "Blacklisted: stack depth mismatch, possible recursion.\n");
- Blacklist((jsbytecode*) fragment->root->ip);
- trashSelf = true;
- consensus = TypeConsensus_Bad;
- return false;
- }
-
- JS_ASSERT(exit->exitType == UNSTABLE_LOOP_EXIT);
- JS_ASSERT(exit->numStackSlots == treeInfo->nStackTypes);
-
- VMFragment* peer = NULL;
- VMFragment* root = (VMFragment*)fragment->root;
-
- consensus = selfTypeStability(slotMap);
- if (consensus != TypeConsensus_Okay) {
- TypeConsensus peerConsensus = peerTypeStability(slotMap, &peer);
- /* If there was a semblance of a stable peer (even if not linkable), keep the result. */
- if (peerConsensus != TypeConsensus_Bad)
- consensus = peerConsensus;
- }
-
-#ifdef DEBUG
- if (consensus != TypeConsensus_Okay || peer)
- AUDIT(unstableLoopVariable);
-#endif
-
- JS_ASSERT(!trashSelf);
-
- /* This exit is indeed linkable to something now. Process any promote/demotes that
- * are pending in the slot map.
- */
- if (consensus == TypeConsensus_Okay)
- slotMap.adjustTypes();
-
- if (consensus != TypeConsensus_Okay || peer) {
- fragment->lastIns = lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
-
- /* If there is a peer, there must have been an "Okay" consensus. */
- JS_ASSERT_IF(peer, consensus == TypeConsensus_Okay);
-
- /* Compile as a type-unstable loop, and hope for a connection later. */
- if (!peer) {
- /*
- * If such a fragment does not exist, let's compile the loop ahead
- * of time anyway. Later, if the loop becomes type stable, we will
- * connect these two fragments together.
- */
- debug_only_print0(LC_TMTracer,
- "Trace has unstable loop variable with no stable peer, "
- "compiling anyway.\n");
- UnstableExit* uexit = new (*traceMonitor->dataAlloc) UnstableExit;
- uexit->fragment = fragment;
- uexit->exit = exit;
- uexit->next = treeInfo->unstableExits;
- treeInfo->unstableExits = uexit;
- } else {
- JS_ASSERT(peer->code());
- exit->target = peer;
- debug_only_printf(LC_TMTracer,
- "Joining type-unstable trace to target fragment %p.\n",
- (void*)peer);
- ((TreeInfo*)peer->vmprivate)->dependentTrees.addUnique(fragment->root);
- treeInfo->linkedTrees.addUnique(peer);
- }
- } else {
- exit->exitType = LOOP_EXIT;
- debug_only_printf(LC_TMTreeVis, "TREEVIS CHANGEEXIT EXIT=%p TYPE=%s\n", (void*)exit,
- getExitName(LOOP_EXIT));
-
- JS_ASSERT((fragment == fragment->root) == !!loopLabel);
- if (loopLabel) {
- lir->insBranch(LIR_j, NULL, loopLabel);
- lir->ins1(LIR_live, lirbuf->state);
- }
-
- exit->target = fragment->root;
- fragment->lastIns = lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
- }
- if (!compile(traceMonitor))
- return false;
-
- debug_only_printf(LC_TMTreeVis, "TREEVIS CLOSELOOP EXIT=%p PEER=%p\n", (void*)exit, (void*)peer);
-
- peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc);
- JS_ASSERT(peer);
- joinEdgesToEntry(peer);
-
- debug_only_stmt(DumpPeerStability(traceMonitor, peer->ip, peer->globalObj,
- peer->globalShape, peer->argc);)
-
- debug_only_print0(LC_TMTracer,
- "updating specializations on dependent and linked trees\n");
- if (fragment->root->vmprivate)
- SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate);
-
- /*
- * If this is a newly formed tree, and the outer tree has not been compiled yet, we
- * should try to compile the outer tree again.
- */
- if (outer)
- AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc);
-#ifdef JS_JIT_SPEW
- debug_only_printf(LC_TMMinimal,
- "Recording completed at %s:%u@%u via closeLoop (FragID=%06u)\n",
- cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp),
- fragment->profFragID);
- debug_only_print0(LC_TMMinimal, "\n");
-#endif
-
- return true;
-}
-
-static void
-FullMapFromExit(TypeMap& typeMap, VMSideExit* exit)
-{
- typeMap.setLength(0);
- typeMap.fromRaw(exit->stackTypeMap(), exit->numStackSlots);
- typeMap.fromRaw(exit->globalTypeMap(), exit->numGlobalSlots);
- /* Include globals that were later specialized at the root of the tree. */
- if (exit->numGlobalSlots < exit->root()->getTreeInfo()->nGlobalTypes()) {
- typeMap.fromRaw(exit->root()->getTreeInfo()->globalTypeMap() + exit->numGlobalSlots,
- exit->root()->getTreeInfo()->nGlobalTypes() - exit->numGlobalSlots);
- }
-}
-
-static JS_REQUIRES_STACK TypeConsensus
-TypeMapLinkability(JSContext* cx, const TypeMap& typeMap, VMFragment* peer)
-{
- const TypeMap& peerMap = peer->getTreeInfo()->typeMap;
- unsigned minSlots = JS_MIN(typeMap.length(), peerMap.length());
- TypeConsensus consensus = TypeConsensus_Okay;
- for (unsigned i = 0; i < minSlots; i++) {
- if (typeMap[i] == peerMap[i])
- continue;
- if (typeMap[i] == TT_INT32 && peerMap[i] == TT_DOUBLE &&
- IsSlotUndemotable(cx, peer->getTreeInfo(), i)) {
- consensus = TypeConsensus_Undemotes;
- } else {
- return TypeConsensus_Bad;
- }
- }
- return consensus;
-}
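-
-/*
- * Example: an exit map [TT_INT32, TT_DOUBLE] against a peer map of
- * [TT_DOUBLE, TT_DOUBLE] yields TypeConsensus_Undemotes if slot 0 is
- * undemotable in the peer, otherwise TypeConsensus_Bad; any mismatch
- * other than int32-vs-double is immediately Bad.
- */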
-
-static JS_REQUIRES_STACK unsigned
-FindUndemotesInTypemaps(JSContext* cx, const TypeMap& typeMap, TreeInfo* treeInfo,
- Queue<unsigned>& undemotes)
-{
- undemotes.setLength(0);
- unsigned minSlots = JS_MIN(typeMap.length(), treeInfo->typeMap.length());
- for (unsigned i = 0; i < minSlots; i++) {
- if (typeMap[i] == TT_INT32 && treeInfo->typeMap[i] == TT_DOUBLE) {
- undemotes.add(i);
- } else if (typeMap[i] != treeInfo->typeMap[i]) {
- return 0;
- }
- }
- for (unsigned i = 0; i < undemotes.length(); i++)
- MarkSlotUndemotable(cx, treeInfo, undemotes[i]);
- return undemotes.length();
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::joinEdgesToEntry(VMFragment* peer_root)
-{
- if (fragment->root != fragment)
- return;
-
- TypeMap typeMap(NULL);
- Queue<unsigned> undemotes(NULL);
-
- for (VMFragment* peer = peer_root; peer; peer = (VMFragment*)peer->peer) {
- TreeInfo* ti = peer->getTreeInfo();
- if (!ti)
- continue;
- UnstableExit* uexit = ti->unstableExits;
- while (uexit != NULL) {
- /* Build the full typemap for this unstable exit */
- FullMapFromExit(typeMap, uexit->exit);
- /* Check its compatibility against this tree */
- TypeConsensus consensus = TypeMapLinkability(cx, typeMap, (VMFragment*)fragment->root);
- JS_ASSERT_IF(consensus == TypeConsensus_Okay, peer != fragment);
- if (consensus == TypeConsensus_Okay) {
- debug_only_printf(LC_TMTracer,
- "Joining type-stable trace to target exit %p->%p.\n",
- (void*)uexit->fragment, (void*)uexit->exit);
-
- /*
- * See bug 531513. Before linking these trees, make sure the
- * peer's dependency graph is up to date.
- */
- TreeInfo* from = (TreeInfo*)uexit->exit->root()->vmprivate;
- if (from->nGlobalTypes() < treeInfo->nGlobalTypes()) {
- SpecializeTreesToLateGlobals(cx, from, treeInfo->globalTypeMap(),
- treeInfo->nGlobalTypes());
- }
-
- /* It's okay! Link together and remove the unstable exit. */
- JoinPeers(traceMonitor->assembler, uexit->exit, (VMFragment*)fragment);
- uexit = ti->removeUnstableExit(uexit->exit);
- } else {
- /* Check for int32->double slots that suggest trashing. */
- if (FindUndemotesInTypemaps(cx, typeMap, treeInfo, undemotes)) {
- JS_ASSERT(peer == uexit->fragment->root);
- if (fragment == peer)
- trashSelf = true;
- else
- whichTreesToTrash.addUnique(uexit->fragment->root);
- return;
- }
- uexit = uexit->next;
- }
- }
- }
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::endLoop()
-{
- endLoop(snapshot(LOOP_EXIT));
-}
-
-/* Emit an always-exit guard and compile the tree (used for break statements). */
-JS_REQUIRES_STACK void
-TraceRecorder::endLoop(VMSideExit* exit)
-{
- if (callDepth != 0) {
- debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n");
- Blacklist((jsbytecode*) fragment->root->ip);
- trashSelf = true;
- return;
- }
-
- fragment->lastIns =
- lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
- if (!compile(traceMonitor))
- return;
-
- debug_only_printf(LC_TMTreeVis, "TREEVIS ENDLOOP EXIT=%p\n", (void*)exit);
-
- VMFragment* root = (VMFragment*)fragment->root;
- joinEdgesToEntry(getLoop(traceMonitor,
- root->ip,
- root->globalObj,
- root->globalShape,
- root->argc));
- debug_only_stmt(DumpPeerStability(traceMonitor, root->ip, root->globalObj,
- root->globalShape, root->argc);)
-
- /*
- * Note: this must always be done, in case we added new globals on trace
- * and haven't yet propagated those to linked and dependent trees.
- */
- debug_only_print0(LC_TMTracer,
- "updating specializations on dependent and linked trees\n");
- if (fragment->root->vmprivate)
- SpecializeTreesToMissingGlobals(cx, globalObj, (TreeInfo*)fragment->root->vmprivate);
-
- /*
- * If this is a newly formed tree, and the outer tree has not been compiled
- * yet, we should try to compile the outer tree again.
- */
- if (outer)
- AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc);
-#ifdef JS_JIT_SPEW
- debug_only_printf(LC_TMMinimal,
- "Recording completed at %s:%u@%u via endLoop (FragID=%06u)\n",
- cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp),
- fragment->profFragID);
- debug_only_print0(LC_TMTracer, "\n");
-#endif
-}
-
-/* Emit code to adjust the stack to match the inner tree's stack expectations. */
-JS_REQUIRES_STACK void
-TraceRecorder::prepareTreeCall(VMFragment* inner)
-{
- TreeInfo* ti = (TreeInfo*)inner->vmprivate;
- inner_sp_ins = lirbuf->sp;
- VMSideExit* exit = snapshot(OOM_EXIT);
-
- /*
- * The inner tree expects to be called from the current frame. If the outer
- * tree (this trace) is currently inside a function inlining code
- * (calldepth > 0), we have to advance the native stack pointer such that
- * we match what the inner trace expects to see. We move it back when we
- * come out of the inner tree call.
- */
- if (callDepth > 0) {
- /*
- * Calculate the amount we have to lift the native stack pointer by to
- * compensate for any outer frames that the inner tree doesn't expect
- * but the outer tree has.
- */
- ptrdiff_t sp_adj = nativeStackOffset(&cx->fp->argv[-2]);
-
- /* Calculate the amount we have to lift the call stack by. */
- ptrdiff_t rp_adj = callDepth * sizeof(FrameInfo*);
-
- /*
- * Guard that we have enough stack space for the tree we are trying to
- * call on top of the new value for sp.
- */
- debug_only_printf(LC_TMTracer,
- "sp_adj=%lld outer=%lld inner=%lld\n",
- (long long int)sp_adj,
- (long long int)treeInfo->nativeStackBase,
- (long long int)ti->nativeStackBase);
- ptrdiff_t sp_offset =
- - treeInfo->nativeStackBase /* rebase sp to beginning of outer tree's stack */
- + sp_adj /* adjust for stack in outer frame inner tree can't see */
- + ti->maxNativeStackSlots * sizeof(double); /* plus the inner tree's stack */
- LIns* sp_top = lir->ins2(LIR_piadd, lirbuf->sp, INS_CONSTWORD(sp_offset));
- guard(true, lir->ins2(LIR_plt, sp_top, eos_ins), exit);
-
- /* Guard that we have enough call stack space. */
- ptrdiff_t rp_offset = rp_adj + ti->maxCallDepth * sizeof(FrameInfo*);
- LIns* rp_top = lir->ins2(LIR_piadd, lirbuf->rp, INS_CONSTWORD(rp_offset));
- guard(true, lir->ins2(LIR_plt, rp_top, eor_ins), exit);
-
- sp_offset =
- - treeInfo->nativeStackBase /* rebase sp to beginning of outer tree's stack */
- + sp_adj /* adjust for stack in outer frame inner tree can't see */
- + ti->nativeStackBase; /* plus the inner tree's stack base */
- /* We have enough space, so adjust sp and rp to their new level. */
- lir->insStorei(inner_sp_ins = lir->ins2(LIR_piadd, lirbuf->sp, INS_CONSTWORD(sp_offset)),
- lirbuf->state, offsetof(InterpState, sp));
- lir->insStorei(lir->ins2(LIR_piadd, lirbuf->rp, INS_CONSTWORD(rp_adj)),
- lirbuf->state, offsetof(InterpState, rp));
- }
-
- /*
- * The inner tree will probably access stack slots. So tell nanojit not to
- * discard or defer stack writes before calling js_CallTree.
- *
- * (The ExitType of this snapshot is nugatory. The exit can't be taken.)
- */
- GuardRecord* guardRec = createGuardRecord(exit);
- lir->insGuard(LIR_xbarrier, NULL, guardRec);
-}
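-
-/*
- * Numeric sketch of the final sp adjustment above (all values
- * illustrative): with an outer nativeStackBase of 24 bytes, sp_adj of 48
- * bytes for outer frames the inner tree can't see, and an inner
- * nativeStackBase of 16 bytes, the stored sp is
- *   sp + (-24 + 48 + 16) = sp + 40 bytes, i.e. 5 doubles further up.
- */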
-
-static unsigned
-BuildGlobalTypeMapFromInnerTree(Queue<JSTraceType>& typeMap, VMSideExit* inner)
-{
-#if defined DEBUG
- unsigned initialSlots = typeMap.length();
-#endif
- /* First, use the innermost exit's global typemap. */
- typeMap.add(inner->globalTypeMap(), inner->numGlobalSlots);
-
- /* Add missing global types from the innermost exit's tree. */
- TreeInfo* innerTree = inner->root()->getTreeInfo();
- unsigned slots = inner->numGlobalSlots;
- if (slots < innerTree->nGlobalTypes()) {
- typeMap.add(innerTree->globalTypeMap() + slots, innerTree->nGlobalTypes() - slots);
- slots = innerTree->nGlobalTypes();
- }
- JS_ASSERT(typeMap.length() - initialSlots == slots);
- return slots;
-}
-
-/* Record a call to an inner tree. */
-JS_REQUIRES_STACK void
-TraceRecorder::emitTreeCall(VMFragment* inner, VMSideExit* exit)
-{
- TreeInfo* ti = (TreeInfo*)inner->vmprivate;
-
- /* Invoke the inner tree. */
- LIns* args[] = { INS_CONSTPTR(inner), lirbuf->state }; /* reverse order */
- LIns* ret = lir->insCall(&js_CallTree_ci, args);
-
- /* Read back all registers, in case the called tree changed any of them. */
-#ifdef DEBUG
- JSTraceType* map;
- size_t i;
- map = exit->globalTypeMap();
- for (i = 0; i < exit->numGlobalSlots; i++)
- JS_ASSERT(map[i] != TT_JSVAL);
- map = exit->stackTypeMap();
- for (i = 0; i < exit->numStackSlots; i++)
- JS_ASSERT(map[i] != TT_JSVAL);
-#endif
- /*
- * Bug 502604 - It is illegal to extend from the outer typemap without
- * first extending from the inner. Make a new typemap here.
- */
- TypeMap fullMap(NULL);
- fullMap.add(exit->stackTypeMap(), exit->numStackSlots);
- BuildGlobalTypeMapFromInnerTree(fullMap, exit);
- import(ti, inner_sp_ins, exit->numStackSlots, fullMap.length() - exit->numStackSlots,
- exit->calldepth, fullMap.data());
-
- /* Restore sp and rp to their original values (we still have them in a register). */
- if (callDepth > 0) {
- lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp));
- lir->insStorei(lirbuf->rp, lirbuf->state, offsetof(InterpState, rp));
- }
-
- // Create snapshot now so that the following block has an updated type map.
- VMSideExit* nested = snapshot(NESTED_EXIT);
-
- // If the outer-trace entry frame is not the same as the inner-trace entry frame,
- // then we must reimport the outer trace entry frame in case the inner trace set
- // upvars defined in that frame.
- if (callDepth > 0) {
- ptrdiff_t offset = -treeInfo->nativeStackBase;
- JSStackFrame *fp = cx->fp;
- for (unsigned i = 0; i < callDepth; ++i)
- fp = fp->down;
- ImportUnboxedFrameSlotVisitor frameVisitor(*this, lirbuf->sp, offset,
- nested->stackTypeMap());
- VisitFrameSlots(frameVisitor, 0, fp, NULL);
- }
-
- /*
- * Guard that we come out of the inner tree along the same side exit we came
- * out of when we called the inner tree at recording time.
- */
- guard(true, lir->ins2(LIR_peq, ret, INS_CONSTPTR(exit)), nested);
- debug_only_printf(LC_TMTreeVis, "TREEVIS TREECALL INNER=%p EXIT=%p GUARD=%p\n", (void*)inner,
- (void*)nested, (void*)exit);
-
- /* Register us as a dependent tree of the inner tree. */
- ((TreeInfo*)inner->vmprivate)->dependentTrees.addUnique(fragment->root);
- treeInfo->linkedTrees.addUnique(inner);
-}
-
-/* Add an if/if-else control-flow merge point to the list of known merge points. */
-JS_REQUIRES_STACK void
-TraceRecorder::trackCfgMerges(jsbytecode* pc)
-{
- /* If we hit the beginning of an if/if-else, then keep track of the merge point after it. */
- JS_ASSERT((*pc == JSOP_IFEQ) || (*pc == JSOP_IFEQX));
- jssrcnote* sn = js_GetSrcNote(cx->fp->script, pc);
- if (sn != NULL) {
- if (SN_TYPE(sn) == SRC_IF) {
- cfgMerges.add((*pc == JSOP_IFEQ)
- ? pc + GET_JUMP_OFFSET(pc)
- : pc + GET_JUMPX_OFFSET(pc));
- } else if (SN_TYPE(sn) == SRC_IF_ELSE)
- cfgMerges.add(pc + js_GetSrcNoteOffset(sn, 0));
- }
-}
-
-/*
- * Invert the direction of the guard if this is a loop edge that is not
- * taken (thin loop).
- */
-JS_REQUIRES_STACK void
-TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x)
-{
- ExitType exitType;
- if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) {
- exitType = LOOP_EXIT;
-
- /*
- * If we are about to walk out of the loop, generate code for the
- * inverse loop condition, pretending we recorded the case that stays
- * on trace.
- */
- if ((*pc == JSOP_IFEQ || *pc == JSOP_IFEQX) == cond) {
- JS_ASSERT(*pc == JSOP_IFNE || *pc == JSOP_IFNEX || *pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
- debug_only_print0(LC_TMTracer,
- "Walking out of the loop, terminating it anyway.\n");
- cond = !cond;
- }
-
- /*
- * Conditional guards do not have to be emitted if the condition is
- * constant. We make a note whether the loop condition is true or false
- * here, so we later know whether to emit a loop edge or a loop end.
- */
- if (x->isconst()) {
- loop = (x->imm32() == cond);
- return;
- }
- } else {
- exitType = BRANCH_EXIT;
- }
- if (!x->isconst())
- guard(cond, x, exitType);
-}
-
-/* Emit code for a fused IFEQ/IFNE. */
-JS_REQUIRES_STACK void
-TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x)
-{
- if (*pc == JSOP_IFEQ || *pc == JSOP_IFNE) {
- emitIf(pc, cond, x);
- if (*pc == JSOP_IFEQ)
- trackCfgMerges(pc);
- }
-}
-
-/* Check whether we have reached the end of the trace. */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::checkTraceEnd(jsbytecode *pc)
-{
- if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) {
- /*
- * If we compile a loop, the trace should have a zero stack balance at
- * the loop edge. Currently we are parked on a comparison op or
- * IFNE/IFEQ, so advance pc to the loop header and adjust the stack
- * pointer and pretend we have reached the loop header.
- */
- if (loop) {
- JS_ASSERT(!cx->fp->imacpc && (pc == cx->fp->regs->pc || pc == cx->fp->regs->pc + 1));
- bool fused = pc != cx->fp->regs->pc;
- JSFrameRegs orig = *cx->fp->regs;
-
- cx->fp->regs->pc = (jsbytecode*)fragment->root->ip;
- cx->fp->regs->sp -= fused ? 2 : 1;
-
- TypeConsensus consensus;
- closeLoop(consensus);
-
- *cx->fp->regs = orig;
- } else {
- endLoop();
- }
- return JSRS_STOP;
- }
- return JSRS_CONTINUE;
-}
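-
-/*
- * The sp adjustment above (fused ? 2 : 1) accounts for the operands the
- * pending branch consumes: a fused compare+IFEQ/IFNE pops two slots, a
- * lone branch pops one, leaving the zero stack balance the loop header
- * expects.
- */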
-
-bool
-TraceRecorder::hasMethod(JSObject* obj, jsid id)
-{
- if (!obj)
- return false;
-
- JSObject* pobj;
- JSProperty* prop;
- int protoIndex = obj->lookupProperty(cx, id, &pobj, &prop);
- if (protoIndex < 0 || !prop)
- return false;
-
- bool found = false;
- if (OBJ_IS_NATIVE(pobj)) {
- JSScope* scope = OBJ_SCOPE(pobj);
- JSScopeProperty* sprop = (JSScopeProperty*) prop;
-
- if (SPROP_HAS_STUB_GETTER(sprop) &&
- SPROP_HAS_VALID_SLOT(sprop, scope)) {
- jsval v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot);
- if (VALUE_IS_FUNCTION(cx, v)) {
- found = true;
- if (!scope->branded()) {
- scope->brandingShapeChange(cx, sprop->slot, v);
- scope->setBranded();
- }
- }
- }
- }
-
- pobj->dropProperty(cx, prop);
- return found;
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::hasIteratorMethod(JSObject* obj)
-{
- JS_ASSERT(cx->fp->regs->sp + 2 <= cx->fp->slots + cx->fp->script->nslots);
-
- return hasMethod(obj, ATOM_TO_JSID(cx->runtime->atomState.iteratorAtom));
-}
-
-void
-nanojit::StackFilter::getTops(LIns* guard, int& spTop, int& rpTop)
-{
- VMSideExit* e = (VMSideExit*)guard->record()->exit;
- spTop = e->sp_adj;
- rpTop = e->rp_adj;
-}
-
-#if defined NJ_VERBOSE
-void
-nanojit::LirNameMap::formatGuard(LIns *i, char *out)
-{
- VMSideExit *x;
-
- x = (VMSideExit *)i->record()->exit;
- sprintf(out,
- "%s: %s %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
- formatRef(i),
- lirNames[i->opcode()],
- i->oprnd1() ? formatRef(i->oprnd1()) : "",
- (void *)x->pc,
- (void *)x->imacpc,
- (long int)x->sp_adj,
- (long int)x->rp_adj,
- i->record()->profGuardID);
-}
-#endif
-
-static JS_REQUIRES_STACK bool
-DeleteRecorder(JSContext* cx)
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-
- /* Aborting and completing a trace end up here. */
- delete tm->recorder;
- tm->recorder = NULL;
-
- /* If we ran out of memory, flush the code cache. */
- if (tm->dataAlloc->outOfMemory() ||
- js_OverfullJITCache(tm)) {
- ResetJIT(cx, FR_OOM);
- return false;
- }
-
- return true;
-}
-
-/* Check whether the shape of the global object has changed. */
-static JS_REQUIRES_STACK bool
-CheckGlobalObjectShape(JSContext* cx, JSTraceMonitor* tm, JSObject* globalObj,
- uint32 *shape = NULL, SlotList** slots = NULL)
-{
- if (tm->needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return false;
- }
-
- if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS)
- return false;
-
- uint32 globalShape = OBJ_SHAPE(globalObj);
-
- if (tm->recorder) {
- VMFragment* root = (VMFragment*)tm->recorder->getFragment()->root;
- TreeInfo* ti = tm->recorder->getTreeInfo();
-
- /* Check the global shape matches the recorder's treeinfo's shape. */
- if (globalObj != root->globalObj || globalShape != root->globalShape) {
- AUDIT(globalShapeMismatchAtEntry);
- debug_only_printf(LC_TMTracer,
- "Global object/shape mismatch (%p/%u vs. %p/%u), flushing cache.\n",
- (void*)globalObj, globalShape, (void*)root->globalObj,
- root->globalShape);
- Backoff(cx, (jsbytecode*) root->ip);
- ResetJIT(cx, FR_GLOBAL_SHAPE_MISMATCH);
- return false;
- }
- if (shape)
- *shape = globalShape;
- if (slots)
- *slots = ti->globalSlots;
- return true;
- }
-
- /* No recorder, search for a tracked global-state (or allocate one). */
- for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
- GlobalState &state = tm->globalStates[i];
-
- if (state.globalShape == uint32(-1)) {
- state.globalObj = globalObj;
- state.globalShape = globalShape;
- JS_ASSERT(state.globalSlots);
- JS_ASSERT(state.globalSlots->length() == 0);
- }
-
- if (state.globalObj == globalObj && state.globalShape == globalShape) {
- if (shape)
- *shape = globalShape;
- if (slots)
- *slots = state.globalSlots;
- return true;
- }
- }
-
- /* No currently-tracked-global found and no room to allocate, abort. */
- AUDIT(globalShapeMismatchAtEntry);
- debug_only_printf(LC_TMTracer,
- "No global slotlist for global shape %u, flushing cache.\n",
- globalShape);
- ResetJIT(cx, FR_GLOBALS_FULL);
- return false;
-}
-
-static JS_REQUIRES_STACK bool
-StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
- unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
- VMSideExit* expectedInnerExit, jsbytecode* outer, uint32 outerArgc)
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- if (JS_TRACE_MONITOR(cx).needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return false;
- }
-
- JS_ASSERT(f->root != f || !cx->fp->imacpc);
-
- /* Start recording if no exception during construction. */
- tm->recorder = new TraceRecorder(cx, anchor, f, ti,
- stackSlots, ngslots, typeMap,
- expectedInnerExit, outer, outerArgc);
-
- if (cx->throwing) {
- js_AbortRecording(cx, "setting up recorder failed");
- return false;
- }
-
- /* Clear any leftover error state. */
- Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
- assm->setError(None);
- return true;
-}
-
-static void
-TrashTree(JSContext* cx, Fragment* f)
-{
- JS_ASSERT((!f->code()) == (!f->vmprivate));
- JS_ASSERT(f == f->root);
- debug_only_printf(LC_TMTreeVis, "TREEVIS TRASH FRAG=%p\n", (void*)f);
-
- if (!f->code())
- return;
- AUDIT(treesTrashed);
- debug_only_print0(LC_TMTracer, "Trashing tree info.\n");
- TreeInfo* ti = (TreeInfo*)f->vmprivate;
- f->vmprivate = NULL;
- f->setCode(NULL);
- Fragment** data = ti->dependentTrees.data();
- unsigned length = ti->dependentTrees.length();
- for (unsigned n = 0; n < length; ++n)
- TrashTree(cx, data[n]);
- data = ti->linkedTrees.data();
- length = ti->linkedTrees.length();
- for (unsigned n = 0; n < length; ++n)
- TrashTree(cx, data[n]);
-}
-
-static int
-SynthesizeFrame(JSContext* cx, const FrameInfo& fi, JSObject* callee)
-{
- VOUCH_DOES_NOT_REQUIRE_STACK();
-
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, callee);
- JS_ASSERT(FUN_INTERPRETED(fun));
-
- /* Assert that we have a correct sp distance from cx->fp->slots in fi. */
- JSStackFrame* fp = cx->fp;
- JS_ASSERT_IF(!fi.imacpc,
- js_ReconstructStackDepth(cx, fp->script, fi.pc) ==
- uintN(fi.spdist - fp->script->nfixed));
-
- uintN nframeslots = JS_HOWMANY(sizeof(JSInlineFrame), sizeof(jsval));
- JSScript* script = fun->u.i.script;
- size_t nbytes = (nframeslots + script->nslots) * sizeof(jsval);
-
- /* Code duplicated from inline_call: case in js_Interpret (FIXME). */
- JSArena* a = cx->stackPool.current;
- void* newmark = (void*) a->avail;
- uintN argc = fi.get_argc();
- jsval* vp = fp->slots + fi.spdist - (2 + argc);
- uintN missing = 0;
- jsval* newsp;
-
- if (fun->nargs > argc) {
- const JSFrameRegs& regs = *fp->regs;
-
- newsp = vp + 2 + fun->nargs;
- JS_ASSERT(newsp > regs.sp);
- if ((jsuword) newsp <= a->limit) {
- if ((jsuword) newsp > a->avail)
- a->avail = (jsuword) newsp;
- jsval* argsp = newsp;
- do {
- *--argsp = JSVAL_VOID;
- } while (argsp != regs.sp);
- missing = 0;
- } else {
- missing = fun->nargs - argc;
- nbytes += (2 + fun->nargs) * sizeof(jsval);
- }
- }
-
- /* Allocate the inline frame with its vars and operands. */
- if (a->avail + nbytes <= a->limit) {
- newsp = (jsval *) a->avail;
- a->avail += nbytes;
- JS_ASSERT(missing == 0);
- } else {
- /*
- * This allocation is infallible: ExecuteTree reserved enough stack.
- * (But see bug 491023.)
- */
- JS_ARENA_ALLOCATE_CAST(newsp, jsval *, &cx->stackPool, nbytes);
- JS_ASSERT(newsp);
-
- /*
- * Move args if the missing ones overflow arena a, then push
- * undefined for the missing args.
- */
- if (missing) {
- memcpy(newsp, vp, (2 + argc) * sizeof(jsval));
- vp = newsp;
- newsp = vp + 2 + argc;
- do {
- *newsp++ = JSVAL_VOID;
- } while (--missing != 0);
- }
- }
-
- /* Claim space for the stack frame and initialize it. */
- JSInlineFrame* newifp = (JSInlineFrame *) newsp;
- newsp += nframeslots;
-
- newifp->frame.callobj = NULL;
- newifp->frame.argsobj = NULL;
- newifp->frame.varobj = NULL;
- newifp->frame.script = script;
- newifp->frame.fun = fun;
-
- bool constructing = fi.is_constructing();
- newifp->frame.argc = argc;
- newifp->callerRegs.pc = fi.pc;
- newifp->callerRegs.sp = fp->slots + fi.spdist;
- fp->imacpc = fi.imacpc;
-
-#ifdef DEBUG
- if (fi.block != fp->blockChain) {
- for (JSObject* obj = fi.block; obj != fp->blockChain; obj = STOBJ_GET_PARENT(obj))
- JS_ASSERT(obj);
- }
-#endif
- fp->blockChain = fi.block;
-
- newifp->frame.argv = newifp->callerRegs.sp - argc;
- JS_ASSERT(newifp->frame.argv);
-#ifdef DEBUG
- // Initialize argv[-1] to a known-bogus value so we'll catch it if
- // someone forgets to initialize it later.
- newifp->frame.argv[-1] = JSVAL_HOLE;
-#endif
- JS_ASSERT(newifp->frame.argv >= StackBase(fp) + 2);
-
- newifp->frame.rval = JSVAL_VOID;
- newifp->frame.down = fp;
- newifp->frame.annotation = NULL;
- newifp->frame.scopeChain = NULL; // will be updated in FlushNativeStackFrame
- newifp->frame.sharpDepth = 0;
- newifp->frame.sharpArray = NULL;
- newifp->frame.flags = constructing ? JSFRAME_CONSTRUCTING : 0;
- newifp->frame.dormantNext = NULL;
- newifp->frame.blockChain = NULL;
- newifp->mark = newmark;
- newifp->frame.thisp = NULL; // will be updated in FlushNativeStackFrame
-
- newifp->frame.regs = fp->regs;
- newifp->frame.regs->pc = script->code;
- newifp->frame.regs->sp = newsp + script->nfixed;
- newifp->frame.imacpc = NULL;
- newifp->frame.slots = newsp;
- if (script->staticLevel < JS_DISPLAY_SIZE) {
- JSStackFrame **disp = &cx->display[script->staticLevel];
- newifp->frame.displaySave = *disp;
- *disp = &newifp->frame;
- }
-
- /*
- * Note that fp->script is still the caller's script; set the callee
- * inline frame's idea of caller version from its version.
- */
- newifp->callerVersion = (JSVersion) fp->script->version;
-
- // After this paragraph, fp and cx->fp point to the newly synthesized frame.
- fp->regs = &newifp->callerRegs;
- fp = cx->fp = &newifp->frame;
-
- /*
- * If there's a call hook, invoke it to compute the hookData used by
- * debuggers that cooperate with the interpreter.
- */
- JSInterpreterHook hook = cx->debugHooks->callHook;
- if (hook) {
- newifp->hookData = hook(cx, fp, JS_TRUE, 0, cx->debugHooks->callHookData);
- } else {
- newifp->hookData = NULL;
- }
-
- /*
- * Duplicate native stack layout computation: see VisitFrameSlots header comment.
- *
- * FIXME - We must count stack slots from caller's operand stack up to (but
- * not including) callee's, including missing arguments. Could we shift
- * everything down to the caller's fp->slots (where vars start) and avoid
- * some of the complexity?
- */
- return (fi.spdist - fp->down->script->nfixed) +
- ((fun->nargs > fp->argc) ? fun->nargs - fp->argc : 0) +
- script->nfixed + 1/*argsobj*/;
-}
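-
- /*
- * The value SynthesizeFrame returns is the number of native stack slots
- * the synthesized frame accounts for: the caller's operand-stack depth
- * beyond its fixed slots (fi.spdist - nfixed), any missing arguments
- * that were filled with undefined, the callee's fixed slots, and one
- * slot for the arguments object.
- */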
-
-static void
-SynthesizeSlowNativeFrame(InterpState& state, JSContext *cx, VMSideExit *exit)
-{
- VOUCH_DOES_NOT_REQUIRE_STACK();
-
- void *mark;
- JSInlineFrame *ifp;
-
- /* This allocation is infallible: ExecuteTree reserved enough stack. */
- mark = JS_ARENA_MARK(&cx->stackPool);
- JS_ARENA_ALLOCATE_CAST(ifp, JSInlineFrame *, &cx->stackPool, sizeof(JSInlineFrame));
- JS_ASSERT(ifp);
-
- JSStackFrame *fp = &ifp->frame;
- fp->regs = NULL;
- fp->imacpc = NULL;
- fp->slots = NULL;
- fp->callobj = NULL;
- fp->argsobj = NULL;
- fp->varobj = cx->fp->varobj;
- fp->script = NULL;
- // fp->thisp is really a jsval, so reinterpret_cast here, not JSVAL_TO_OBJECT.
- fp->thisp = (JSObject *) state.nativeVp[1];
- fp->argc = state.nativeVpLen - 2;
- fp->argv = state.nativeVp + 2;
- fp->fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fp->argv[-2]));
- fp->rval = JSVAL_VOID;
- fp->down = cx->fp;
- fp->annotation = NULL;
- JS_ASSERT(cx->fp->scopeChain);
- fp->scopeChain = cx->fp->scopeChain;
- fp->blockChain = NULL;
- fp->sharpDepth = 0;
- fp->sharpArray = NULL;
- fp->flags = exit->constructing() ? JSFRAME_CONSTRUCTING : 0;
- fp->dormantNext = NULL;
- fp->displaySave = NULL;
-
- ifp->mark = mark;
- cx->fp = fp;
-}
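-
- /*
- * A frame synthesized here has fp->script == NULL; that is how the
- * deep-bail path in LeaveTree recognizes (and pops) a slow-native frame
- * before resuming in the interpreter.
- */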
-
-static JS_REQUIRES_STACK bool
-RecordTree(JSContext* cx, JSTraceMonitor* tm, VMFragment* f, jsbytecode* outer,
- uint32 outerArgc, JSObject* globalObj, uint32 globalShape,
- SlotList* globalSlots, uint32 argc)
-{
- JS_ASSERT(f->root == f);
-
- /* Save a local copy for use after a JIT flush. */
- const void* localRootIP = f->root->ip;
-
- /* Make sure the global type map didn't change on us. */
- if (!CheckGlobalObjectShape(cx, tm, globalObj)) {
- Backoff(cx, (jsbytecode*) localRootIP);
- return false;
- }
-
- AUDIT(recorderStarted);
-
- /* Try to find an unused peer fragment, or allocate a new one. */
- while (f->code() && f->peer)
- f = f->peer;
- if (f->code())
- f = getAnchor(&JS_TRACE_MONITOR(cx), f->root->ip, globalObj, globalShape, argc);
-
- if (!f) {
- ResetJIT(cx, FR_OOM);
- return false;
- }
-
- f->root = f;
- f->lirbuf = tm->lirbuf;
-
- if (tm->dataAlloc->outOfMemory() || js_OverfullJITCache(tm)) {
- Backoff(cx, (jsbytecode*) f->root->ip);
- ResetJIT(cx, FR_OOM);
- debug_only_print0(LC_TMTracer,
- "Out of memory recording new tree, flushing cache.\n");
- return false;
- }
-
- JS_ASSERT(!f->code() && !f->vmprivate);
-
- /* Set up the VM-private treeInfo structure for this fragment. */
- TreeInfo* ti = new (*tm->dataAlloc) TreeInfo(tm->dataAlloc, f, globalSlots);
-
- /* Capture the coerced type of each active slot in the type map. */
- ti->typeMap.captureTypes(cx, globalObj, *globalSlots, 0 /* callDepth */);
- ti->nStackTypes = ti->typeMap.length() - globalSlots->length();
-
-#ifdef DEBUG
- AssertTreeIsUnique(tm, (VMFragment*)f, ti);
- ti->treeFileName = cx->fp->script->filename;
- ti->treeLineNumber = js_FramePCToLineNumber(cx, cx->fp);
- ti->treePCOffset = FramePCOffset(cx->fp);
-#endif
-#ifdef JS_JIT_SPEW
- debug_only_printf(LC_TMTreeVis, "TREEVIS CREATETREE ROOT=%p PC=%p FILE=\"%s\" LINE=%d OFFS=%d",
- (void*)f, f->ip, ti->treeFileName, ti->treeLineNumber,
- FramePCOffset(cx->fp));
- debug_only_print0(LC_TMTreeVis, " STACK=\"");
- for (unsigned i = 0; i < ti->nStackTypes; i++)
- debug_only_printf(LC_TMTreeVis, "%c", typeChar[ti->typeMap[i]]);
- debug_only_print0(LC_TMTreeVis, "\" GLOBALS=\"");
- for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
- debug_only_printf(LC_TMTreeVis, "%c", typeChar[ti->typeMap[ti->nStackTypes + i]]);
- debug_only_print0(LC_TMTreeVis, "\"\n");
-#endif
-
- /* Determine the native frame layout at the entry point. */
- unsigned entryNativeStackSlots = ti->nStackTypes;
- JS_ASSERT(entryNativeStackSlots == NativeStackSlots(cx, 0 /* callDepth */));
- ti->nativeStackBase = (entryNativeStackSlots -
- (cx->fp->regs->sp - StackBase(cx->fp))) * sizeof(double);
- ti->maxNativeStackSlots = entryNativeStackSlots;
- ti->maxCallDepth = 0;
- ti->script = cx->fp->script;
-
- /* Recording primary trace. */
- if (!StartRecorder(cx, NULL, f, ti,
- ti->nStackTypes,
- ti->globalSlots->length(),
- ti->typeMap.data(), NULL, outer, outerArgc)) {
- return false;
- }
-
- return true;
-}
-
-static JS_REQUIRES_STACK TypeConsensus
-FindLoopEdgeTarget(JSContext* cx, VMSideExit* exit, VMFragment** peerp)
-{
- VMFragment* from = exit->root();
- TreeInfo* from_ti = from->getTreeInfo();
-
- JS_ASSERT(from->code());
-
- TypeMap typeMap(NULL);
- FullMapFromExit(typeMap, exit);
- JS_ASSERT(typeMap.length() - exit->numStackSlots == from_ti->nGlobalTypes());
-
- /* Mark all double slots as undemotable */
- for (unsigned i = 0; i < typeMap.length(); i++) {
- if (typeMap[i] == TT_DOUBLE)
- MarkSlotUndemotable(cx, from_ti, i);
- }
-
- VMFragment* firstPeer = ((VMFragment*)from)->first;
- for (VMFragment* peer = firstPeer; peer; peer = peer->peer) {
- TreeInfo* peer_ti = peer->getTreeInfo();
- if (!peer_ti)
- continue;
- JS_ASSERT(peer->argc == from->argc);
- JS_ASSERT(exit->numStackSlots == peer_ti->nStackTypes);
- TypeConsensus consensus = TypeMapLinkability(cx, typeMap, peer);
- if (consensus == TypeConsensus_Okay || consensus == TypeConsensus_Undemotes) {
- *peerp = peer;
- return consensus;
- }
- }
-
- return TypeConsensus_Bad;
-}
-
-UnstableExit*
-TreeInfo::removeUnstableExit(VMSideExit* exit)
-{
- /* Now erase this exit from the unstable exit list. */
- UnstableExit** tail = &this->unstableExits;
- for (UnstableExit* uexit = this->unstableExits; uexit != NULL; uexit = uexit->next) {
- if (uexit->exit == exit) {
- *tail = uexit->next;
- return *tail;
- }
- tail = &uexit->next;
- }
- JS_NOT_REACHED("exit not in unstable exit list");
- return NULL;
-}
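-
- /*
- * Note that removeUnstableExit returns the entry that followed the
- * removed one (possibly NULL), so a caller walking the list can resume
- * iteration from the returned node.
- */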
-
-static JS_REQUIRES_STACK bool
-AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, jsbytecode* outer,
- uint32 outerArgc)
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- if (tm->needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return false;
- }
-
- VMFragment* from = exit->root();
- TreeInfo* from_ti = from->getTreeInfo();
-
- VMFragment* peer = NULL;
- TypeConsensus consensus = FindLoopEdgeTarget(cx, exit, &peer);
- if (consensus == TypeConsensus_Okay) {
- TreeInfo* peer_ti = peer->getTreeInfo();
- JS_ASSERT(from_ti->globalSlots == peer_ti->globalSlots);
- JS_ASSERT(from_ti->nStackTypes == peer_ti->nStackTypes);
- /* Patch this exit to its peer */
- JoinPeers(tm->assembler, exit, peer);
- /*
- * Update peer global types. The |from| fragment should already be updated because it is
- * on the execution path, and somehow connected to the entry trace.
- */
- if (peer_ti->nGlobalTypes() < peer_ti->globalSlots->length())
- SpecializeTreesToMissingGlobals(cx, globalObj, peer_ti);
- JS_ASSERT(from_ti->nGlobalTypes() == from_ti->globalSlots->length());
- /* This exit is no longer unstable, so remove it. */
- from_ti->removeUnstableExit(exit);
- debug_only_stmt(DumpPeerStability(tm, peer->ip, from->globalObj, from->globalShape, from->argc);)
- return false;
- } else if (consensus == TypeConsensus_Undemotes) {
- /* The original tree is unconnectable, so trash it. */
- TrashTree(cx, peer);
- return false;
- }
-
- return RecordTree(cx, tm, from->first, outer, outerArgc, from->globalObj,
- from->globalShape, from_ti->globalSlots, cx->fp->argc);
-}
-
-static JS_REQUIRES_STACK bool
-AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom, jsbytecode* outer
-#ifdef MOZ_TRACEVIS
- , TraceVisStateObj* tvso = NULL
-#endif
- )
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- if (tm->needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
-#ifdef MOZ_TRACEVIS
- if (tvso) tvso->r = R_FAIL_EXTEND_FLUSH;
-#endif
- return false;
- }
-
- Fragment* f = anchor->root();
- JS_ASSERT(f->vmprivate);
- TreeInfo* ti = (TreeInfo*)f->vmprivate;
-
- /*
- * Don't grow trees above a certain size to avoid code explosion due to
- * tail duplication.
- */
- if (ti->branchCount >= MAX_BRANCHES) {
-#ifdef MOZ_TRACEVIS
- if (tvso) tvso->r = R_FAIL_EXTEND_MAX_BRANCHES;
-#endif
- return false;
- }
-
- Fragment* c;
- if (!(c = anchor->target)) {
- JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
- Allocator& alloc = *tm->dataAlloc;
- verbose_only(
- uint32_t profFragID = (js_LogController.lcbits & LC_FragProfile)
- ? (++(tm->lastFragID)) : 0;
- )
- c = new (alloc) Fragment(cx->fp->regs->pc verbose_only(, profFragID));
- c->root = anchor->from->root;
- debug_only_printf(LC_TMTreeVis, "TREEVIS CREATEBRANCH ROOT=%p FRAG=%p PC=%p FILE=\"%s\""
- " LINE=%d ANCHOR=%p OFFS=%d\n",
- (void*)f, (void*)c, (void*)cx->fp->regs->pc, cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp), (void*)anchor,
- FramePCOffset(cx->fp));
- anchor->target = c;
- c->root = f;
- verbose_only( tm->branches = new (alloc) Seq<Fragment*>(c, tm->branches); )
- }
-
- /*
- * If we are recycling a fragment, it might have a different ip so reset it
- * here. This can happen when attaching a branch to a NESTED_EXIT, which
- * might extend along separate paths (i.e. after the loop edge, and after a
- * return statement).
- */
- c->ip = cx->fp->regs->pc;
-
- debug_only_printf(LC_TMTracer,
- "trying to attach another branch to the tree (hits = %d)\n", c->hits());
-
- int32_t& hits = c->hits();
- if (outer || (hits++ >= HOTEXIT && hits <= HOTEXIT+MAXEXIT)) {
- /* start tracing secondary trace from this point */
- c->lirbuf = f->lirbuf;
- unsigned stackSlots;
- unsigned ngslots;
- JSTraceType* typeMap;
- TypeMap fullMap(NULL);
- if (exitedFrom == NULL) {
- /*
- * If we are coming straight from a simple side exit, just use that
- * exit's type map as starting point.
- */
- ngslots = anchor->numGlobalSlots;
- stackSlots = anchor->numStackSlots;
- typeMap = anchor->fullTypeMap();
- } else {
- /*
- * If we side-exited on a loop exit and continue on a nesting
- * guard, the nesting guard (anchor) has the type information for
- * everything below the current scope, and the actual guard we
- * exited from has the types for everything in the current scope
- * (and whatever it inlined). We have to merge those maps here.
- */
- VMSideExit* e1 = anchor;
- VMSideExit* e2 = exitedFrom;
- fullMap.add(e1->stackTypeMap(), e1->numStackSlotsBelowCurrentFrame);
- fullMap.add(e2->stackTypeMap(), e2->numStackSlots);
- stackSlots = fullMap.length();
- ngslots = BuildGlobalTypeMapFromInnerTree(fullMap, e2);
- JS_ASSERT(ngslots >= e1->numGlobalSlots); // inner tree must have all globals
- JS_ASSERT(ngslots == fullMap.length() - stackSlots);
- typeMap = fullMap.data();
- }
- JS_ASSERT(ngslots >= anchor->numGlobalSlots);
- bool rv = StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate, stackSlots,
- ngslots, typeMap, exitedFrom, outer, cx->fp->argc);
-#ifdef MOZ_TRACEVIS
- if (!rv && tvso)
- tvso->r = R_FAIL_EXTEND_START;
-#endif
- return rv;
- }
-#ifdef MOZ_TRACEVIS
- if (tvso) tvso->r = R_FAIL_EXTEND_COLD;
-#endif
- return false;
-}
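-
- /*
- * Branch-extension policy above: unless an outer PC forces recording, a
- * side exit must first become hot (HOTEXIT hits), and we give up after
- * HOTEXIT+MAXEXIT hits, so a persistently failing exit cannot trigger
- * recording attempts forever.
- */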
-
-static JS_REQUIRES_STACK VMSideExit*
-ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
- VMSideExit** innermostNestedGuardp);
-
-static JS_REQUIRES_STACK bool
-RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
-{
-#ifdef JS_THREADSAFE
- if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) {
- js_AbortRecording(cx, "Global object not owned by this context");
- return false; /* we stay away from shared global objects */
- }
-#endif
-
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-
- /* Process needFlush and deep abort requests. */
- if (tm->needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return false;
- }
-
- JS_ASSERT(r->getFragment() && !r->getFragment()->lastIns);
- VMFragment* root = (VMFragment*)r->getFragment()->root;
-
- /* Does this branch go to an inner loop? */
- VMFragment* first = getLoop(&JS_TRACE_MONITOR(cx), cx->fp->regs->pc,
- root->globalObj, root->globalShape, cx->fp->argc);
- if (!first) {
- /* Not an inner loop we can call, abort trace. */
- AUDIT(returnToDifferentLoopHeader);
- JS_ASSERT(!cx->fp->imacpc);
- debug_only_printf(LC_TMTracer,
- "loop edge to %lld, header %lld\n",
- (long long int)(cx->fp->regs->pc - cx->fp->script->code),
- (long long int)((jsbytecode*)r->getFragment()->root->ip - cx->fp->script->code));
- js_AbortRecording(cx, "Loop edge does not return to header");
- return false;
- }
-
- /* Make sure inner tree call will not run into an out-of-memory condition. */
- if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
- !ReplenishReservedPool(cx, tm)) {
- js_AbortRecording(cx, "Couldn't call inner tree (out of memory)");
- return false;
- }
-
- /*
- * Make sure the shape of the global object still matches (this might flush
- * the JIT cache).
- */
- JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- uint32 globalShape = -1;
- SlotList* globalSlots = NULL;
- if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots))
- return false;
-
- debug_only_printf(LC_TMTracer,
- "Looking for type-compatible peer (%s:%d@%d)\n",
- cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp));
-
- // Find a matching inner tree. If none can be found, compile one.
- VMFragment* f = r->findNestedCompatiblePeer(first);
- if (!f || !f->code()) {
- AUDIT(noCompatInnerTrees);
-
- VMFragment* outerFragment = (VMFragment*) tm->recorder->getFragment()->root;
- jsbytecode* outer = (jsbytecode*) outerFragment->ip;
- uint32 outerArgc = outerFragment->argc;
- uint32 argc = cx->fp->argc;
- js_AbortRecording(cx, "No compatible inner tree");
-
- // Find an empty fragment we can recycle, or allocate a new one.
- for (f = first; f != NULL; f = f->peer) {
- if (!f->code())
- break;
- }
- if (!f || f->code()) {
- f = getAnchor(tm, cx->fp->regs->pc, globalObj, globalShape, argc);
- if (!f) {
- ResetJIT(cx, FR_OOM);
- return false;
- }
- }
- return RecordTree(cx, tm, f, outer, outerArgc, globalObj, globalShape, globalSlots, argc);
- }
-
- r->adjustCallerTypes(f);
- r->prepareTreeCall(f);
-
- VMSideExit* innermostNestedGuard = NULL;
- VMSideExit* lr = ExecuteTree(cx, f, inlineCallCount, &innermostNestedGuard);
-
- /* ExecuteTree can reenter the interpreter and kill |this|. */
- if (!TRACE_RECORDER(cx))
- return false;
-
- if (!lr) {
- js_AbortRecording(cx, "Couldn't call inner tree");
- return false;
- }
-
- VMFragment* outerFragment = (VMFragment*) tm->recorder->getFragment()->root;
- jsbytecode* outer = (jsbytecode*) outerFragment->ip;
- switch (lr->exitType) {
- case LOOP_EXIT:
- /* If the inner tree exited on an unknown loop exit, grow the tree around it. */
- if (innermostNestedGuard) {
- js_AbortRecording(cx, "Inner tree took different side exit, abort current "
- "recording and grow nesting tree");
- return AttemptToExtendTree(cx, innermostNestedGuard, lr, outer);
- }
-
- /* Emit a call to the inner tree and continue recording the outer tree trace. */
- r->emitTreeCall(f, lr);
- return true;
-
- case UNSTABLE_LOOP_EXIT:
- /* Abort recording so the inner loop can become type stable. */
- js_AbortRecording(cx, "Inner tree is trying to stabilize, abort outer recording");
- return AttemptToStabilizeTree(cx, globalObj, lr, outer, outerFragment->argc);
-
- case OVERFLOW_EXIT:
- oracle.markInstructionUndemotable(cx->fp->regs->pc);
- /* FALL THROUGH */
- case BRANCH_EXIT:
- case CASE_EXIT:
- /* Abort recording the outer tree, extend the inner tree. */
- js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
- return AttemptToExtendTree(cx, lr, NULL, outer);
-
- default:
- debug_only_printf(LC_TMTracer, "exit_type=%s\n", getExitName(lr->exitType));
- js_AbortRecording(cx, "Inner tree not suitable for calling");
- return false;
- }
-}
-
-static bool
-IsEntryTypeCompatible(jsval* vp, JSTraceType* m)
-{
- unsigned tag = JSVAL_TAG(*vp);
-
- debug_only_printf(LC_TMTracer, "%c/%c ", tagChar[tag], typeChar[*m]);
-
- switch (*m) {
- case TT_OBJECT:
- if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) &&
- !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) {
- return true;
- }
- debug_only_printf(LC_TMTracer, "object != tag%u ", tag);
- return false;
- case TT_INT32:
- jsint i;
- if (JSVAL_IS_INT(*vp))
- return true;
- if (tag == JSVAL_DOUBLE && JSDOUBLE_IS_INT(*JSVAL_TO_DOUBLE(*vp), i))
- return true;
- debug_only_printf(LC_TMTracer, "int != tag%u(value=%lu) ", tag, (unsigned long)*vp);
- return false;
- case TT_DOUBLE:
- if (JSVAL_IS_INT(*vp) || tag == JSVAL_DOUBLE)
- return true;
- debug_only_printf(LC_TMTracer, "double != tag%u ", tag);
- return false;
- case TT_JSVAL:
- JS_NOT_REACHED("shouldn't see jsval type in entry");
- return false;
- case TT_STRING:
- if (tag == JSVAL_STRING)
- return true;
- debug_only_printf(LC_TMTracer, "string != tag%u ", tag);
- return false;
- case TT_NULL:
- if (JSVAL_IS_NULL(*vp))
- return true;
- debug_only_printf(LC_TMTracer, "null != tag%u ", tag);
- return false;
- case TT_PSEUDOBOOLEAN:
- if (tag == JSVAL_SPECIAL)
- return true;
- debug_only_printf(LC_TMTracer, "bool != tag%u ", tag);
- return false;
- default:
- JS_ASSERT(*m == TT_FUNCTION);
- if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) &&
- HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) {
- return true;
- }
- debug_only_printf(LC_TMTracer, "fun != tag%u ", tag);
- return false;
- }
-}
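-
- /*
- * For example (hypothetical values): a slot holding the double 5.0
- * matches a TT_INT32 map entry, since JSDOUBLE_IS_INT(5.0) holds, while
- * 5.5 matches only TT_DOUBLE. TT_DOUBLE itself accepts both int- and
- * double-tagged values.
- */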
-
-class TypeCompatibilityVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSContext *mCx;
- JSTraceType *mTypeMap;
- unsigned mStackSlotNum;
- bool mOk;
-public:
- TypeCompatibilityVisitor (TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mCx(mRecorder.cx),
- mTypeMap(typeMap),
- mStackSlotNum(0),
- mOk(true)
- {}
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- debug_only_printf(LC_TMTracer, "global%d=", n);
- if (!IsEntryTypeCompatible(vp, mTypeMap)) {
- mOk = false;
- } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
- oracle.markGlobalSlotUndemotable(mCx, slot);
- mOk = false;
- } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
- oracle.markGlobalSlotUndemotable(mCx, slot);
- }
- mTypeMap++;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- debug_only_printf(LC_TMTracer, "%s%u=", stackSlotKind(), unsigned(i));
- if (!IsEntryTypeCompatible(vp, mTypeMap)) {
- mOk = false;
- } else if (!isPromoteInt(mRecorder.get(vp)) && *mTypeMap == TT_INT32) {
- oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
- mOk = false;
- } else if (JSVAL_IS_INT(*vp) && *mTypeMap == TT_DOUBLE) {
- oracle.markStackSlotUndemotable(mCx, mStackSlotNum);
- }
- vp++;
- mTypeMap++;
- mStackSlotNum++;
- }
- return true;
- }
-
- bool isOk() {
- return mOk;
- }
-};
-
-JS_REQUIRES_STACK VMFragment*
-TraceRecorder::findNestedCompatiblePeer(VMFragment* f)
-{
- JSTraceMonitor* tm;
-
- tm = &JS_TRACE_MONITOR(cx);
- unsigned int ngslots = treeInfo->globalSlots->length();
-
- TreeInfo* ti;
- for (; f != NULL; f = f->peer) {
- if (!f->code())
- continue;
-
- ti = (TreeInfo*)f->vmprivate;
-
- debug_only_printf(LC_TMTracer, "checking nested types %p: ", (void*)f);
-
- if (ngslots > ti->nGlobalTypes())
- SpecializeTreesToMissingGlobals(cx, globalObj, ti);
-
- /*
- * Determine whether the typemap of the inner tree matches the outer
- * tree's current state. If the inner tree expects an integer, but the
- * outer tree doesn't guarantee an integer for that slot, we mark the
- * slot undemotable and mismatch here. This will force a new tree to be
- * compiled that accepts a double for the slot. If the inner tree
- * expects a double, but the outer tree has an integer, we can proceed,
- * but we mark the location undemotable.
- */
- TypeCompatibilityVisitor visitor(*this, ti->typeMap.data());
- VisitSlots(visitor, cx, 0, *treeInfo->globalSlots);
-
- debug_only_printf(LC_TMTracer, " %s\n", visitor.isOk() ? "match" : "");
- if (visitor.isOk())
- return f;
- }
-
- return NULL;
-}
-
-class CheckEntryTypeVisitor : public SlotVisitorBase
-{
- bool mOk;
- JSTraceType *mTypeMap;
-public:
- CheckEntryTypeVisitor(JSTraceType *typeMap) :
- mOk(true),
- mTypeMap(typeMap)
- {}
-
- JS_ALWAYS_INLINE void checkSlot(jsval *vp, char const *name, int i) {
- debug_only_printf(LC_TMTracer, "%s%d=", name, i);
- JS_ASSERT(*(uint8_t*)mTypeMap != 0xCD);
- mOk = IsEntryTypeCompatible(vp, mTypeMap++);
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- if (mOk)
- checkSlot(vp, "global", n);
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- if (!mOk)
- break;
- checkSlot(vp++, stackSlotKind(), i);
- }
- return mOk;
- }
-
- bool isOk() {
- return mOk;
- }
-};
-
-/**
- * Check if types are usable for trace execution.
- *
- * @param cx Context.
- * @param globalObj Global object.
- * @param ti Tree info of peer we're testing.
- * @return True if compatible (with or without demotions), false otherwise.
- */
-static JS_REQUIRES_STACK bool
-CheckEntryTypes(JSContext* cx, JSObject* globalObj, TreeInfo* ti)
-{
- unsigned int ngslots = ti->globalSlots->length();
-
- JS_ASSERT(ti->nStackTypes == NativeStackSlots(cx, 0));
-
- if (ngslots > ti->nGlobalTypes())
- SpecializeTreesToMissingGlobals(cx, globalObj, ti);
-
- JS_ASSERT(ti->typeMap.length() == NativeStackSlots(cx, 0) + ngslots);
- JS_ASSERT(ti->typeMap.length() == ti->nStackTypes + ngslots);
- JS_ASSERT(ti->nGlobalTypes() == ngslots);
-
- CheckEntryTypeVisitor visitor(ti->typeMap.data());
- VisitSlots(visitor, cx, 0, *ti->globalSlots);
-
- debug_only_print0(LC_TMTracer, "\n");
- return visitor.isOk();
-}
-
-/**
- * Find an acceptable entry tree given a PC.
- *
- * @param cx Context.
- * @param globalObj Global object.
- * @param f First peer fragment.
- * @out count Number of fragments consulted.
- */
-static JS_REQUIRES_STACK VMFragment*
-FindVMCompatiblePeer(JSContext* cx, JSObject* globalObj, VMFragment* f, uintN& count)
-{
- count = 0;
- for (; f != NULL; f = f->peer) {
- if (f->vmprivate == NULL)
- continue;
- debug_only_printf(LC_TMTracer,
- "checking vm types %p (ip: %p): ", (void*)f, f->ip);
- if (CheckEntryTypes(cx, globalObj, (TreeInfo*)f->vmprivate))
- return f;
- ++count;
- }
- return NULL;
-}
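-
- /*
- * Note: count reflects only peers whose entry types were checked and
- * found incompatible; peers without a TreeInfo (vmprivate == NULL) are
- * skipped and not counted. The caller compares count against MAXPEERS to
- * decide whether compiling yet another specialized peer is worthwhile.
- */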
-
-static void
-LeaveTree(InterpState&, VMSideExit* lr);
-
-static JS_REQUIRES_STACK VMSideExit*
-ExecuteTree(JSContext* cx, Fragment* f, uintN& inlineCallCount,
- VMSideExit** innermostNestedGuardp)
-{
-#ifdef MOZ_TRACEVIS
- TraceVisStateObj tvso(cx, S_EXECUTE);
-#endif
-
- JS_ASSERT(f->root == f && f->code() && f->vmprivate);
-
- /*
- * The JIT records and expects to execute with two scope-chain
- * assumptions baked-in:
- *
- * 1. That the bottom of the scope chain is global, in the sense of
- * JSCLASS_IS_GLOBAL.
- *
- * 2. That the scope chain between fp and the global is free of
- * "unusual" native objects such as HTML forms or other funny
- * things.
- *
- * #2 is checked here while following the scope-chain links, via
- * js_IsCacheableNonGlobalScope, which consults a whitelist of known
- * class types; once a global is found, it's checked for #1. Failing
- * either check causes an early return from execution.
- */
- JSObject* parent;
- JSObject* child = cx->fp->scopeChain;
- while ((parent = OBJ_GET_PARENT(cx, child)) != NULL) {
- if (!js_IsCacheableNonGlobalScope(child)) {
- debug_only_print0(LC_TMTracer,"Blacklist: non-cacheable object on scope chain.\n");
- Blacklist((jsbytecode*) f->root->ip);
- return NULL;
- }
- child = parent;
- }
- JSObject* globalObj = child;
- if (!(OBJ_GET_CLASS(cx, globalObj)->flags & JSCLASS_IS_GLOBAL)) {
- debug_only_print0(LC_TMTracer, "Blacklist: non-global at root of scope chain.\n");
- Blacklist((jsbytecode*) f->root->ip);
- return NULL;
- }
-
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- TreeInfo* ti = (TreeInfo*)f->vmprivate;
- unsigned ngslots = ti->globalSlots->length();
- uint16* gslots = ti->globalSlots->data();
- unsigned globalFrameSize = STOBJ_NSLOTS(globalObj);
-
- /* Make sure the global object is sane. */
- JS_ASSERT_IF(ngslots != 0,
- OBJ_SHAPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain)) ==
- ((VMFragment*)f)->globalShape);
-
- /* Make sure our caller replenished the double pool. */
- JS_ASSERT(tm->reservedDoublePoolPtr >= tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS);
-
- /* Reserve objects and stack space now, to make leaving the tree infallible. */
- if (!js_ReserveObjects(cx, MAX_CALL_STACK_ENTRIES))
- return NULL;
-
- /* Set up the interpreter state block, which is followed by the native global frame. */
- InterpState* state = (InterpState*)alloca(sizeof(InterpState) + (globalFrameSize+1)*sizeof(double));
- state->cx = cx;
- state->inlineCallCountp = &inlineCallCount;
- state->innermostNestedGuardp = innermostNestedGuardp;
- state->outermostTree = ti;
- state->lastTreeExitGuard = NULL;
- state->lastTreeCallGuard = NULL;
- state->rpAtLastTreeCall = NULL;
- state->nativeVp = NULL;
- state->builtinStatus = 0;
-
- /* Set up the native global frame. */
- double* global = (double*)(state+1);
-
- /* Set up the native stack frame. */
- double stack_buffer[MAX_NATIVE_STACK_SLOTS];
- state->stackBase = stack_buffer;
- state->sp = stack_buffer + (ti->nativeStackBase/sizeof(double));
- state->eos = stack_buffer + MAX_NATIVE_STACK_SLOTS;
-
- /* Set up the native call stack frame. */
- FrameInfo* callstack_buffer[MAX_CALL_STACK_ENTRIES];
- state->callstackBase = callstack_buffer;
- state->rp = callstack_buffer;
- state->eor = callstack_buffer + MAX_CALL_STACK_ENTRIES;
-
- void *reserve;
- state->stackMark = JS_ARENA_MARK(&cx->stackPool);
- JS_ARENA_ALLOCATE(reserve, &cx->stackPool, MAX_INTERP_STACK_BYTES);
- if (!reserve)
- return NULL;
-
-#ifdef DEBUG
- memset(stack_buffer, 0xCD, sizeof(stack_buffer));
- memset(global, 0xCD, (globalFrameSize+1)*sizeof(double));
- JS_ASSERT(globalFrameSize <= MAX_GLOBAL_SLOTS);
-#endif
-
- debug_only_stmt(*(uint64*)&global[globalFrameSize] = 0xdeadbeefdeadbeefLL;)
- debug_only_printf(LC_TMTracer,
- "entering trace at %s:%u@%u, native stack slots: %u code: %p\n",
- cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp),
- ti->maxNativeStackSlots,
- f->code());
-
- JS_ASSERT(ti->nGlobalTypes() == ngslots);
- BuildNativeFrame(cx, globalObj, 0 /* callDepth */, ngslots, gslots,
- ti->typeMap.data(), global, stack_buffer);
-
- union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u;
- u.code = f->code();
-
-#ifdef EXECUTE_TREE_TIMER
- state->startTime = rdtsc();
-#endif
-
- JS_ASSERT(!tm->tracecx);
- tm->tracecx = cx;
- state->prev = cx->interpState;
- cx->interpState = state;
-
- debug_only_stmt(fflush(NULL));
- GuardRecord* rec;
-
- // Note that the block scoping is crucial here for TraceVis; the
- // TraceVisStateObj constructors and destructors must run at the right times.
- {
-#ifdef MOZ_TRACEVIS
- TraceVisStateObj tvso_n(cx, S_NATIVE);
-#endif
-#if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32)
- SIMULATE_FASTCALL(rec, state, NULL, u.func);
-#else
- rec = u.func(state, NULL);
-#endif
- }
-
- JS_ASSERT(*(uint64*)&global[globalFrameSize] == 0xdeadbeefdeadbeefLL);
- JS_ASSERT(!state->nativeVp);
-
- VMSideExit* lr = (VMSideExit*)rec->exit;
-
- AUDIT(traceTriggered);
-
- cx->interpState = state->prev;
-
- JS_ASSERT(!cx->bailExit);
- JS_ASSERT(lr->exitType != LOOP_EXIT || !lr->calldepth);
- tm->tracecx = NULL;
- LeaveTree(*state, lr);
- return state->innermost;
-}
-
-static JS_FORCES_STACK void
-LeaveTree(InterpState& state, VMSideExit* lr)
-{
- VOUCH_DOES_NOT_REQUIRE_STACK();
-
- JSContext* cx = state.cx;
- FrameInfo** callstack = state.callstackBase;
- double* stack = state.stackBase;
-
- /*
- * Except if we find that this is a nested bailout, the guard the call
- * returned is the one we have to use to adjust pc and sp.
- */
- VMSideExit* innermost = lr;
-
- /*
- * While executing a tree we do not update state.sp and state.rp even if
- * they grow. Instead, guards tell us by how much sp and rp should be
- * incremented in case of a side exit. When calling a nested tree, however,
- * we actively adjust sp and rp. If we have such frames from outer trees on
- * the stack, then rp will have been adjusted. Before we can process the
- * stack of the frames of the tree we directly exited from, we have to
- * first work our way through the outer frames and generate interpreter
- * frames for them. Once the call stack (rp) is empty, we can process the
- * final frames (which again are not directly visible; only the guard we
- * exited on tells us about them).
- */
- FrameInfo** rp = (FrameInfo**)state.rp;
- if (lr->exitType == NESTED_EXIT) {
- VMSideExit* nested = state.lastTreeCallGuard;
- if (!nested) {
- /*
- * If lastTreeCallGuard is not set in state, we only have a single
- * level of nesting in this exit, so lr itself is the innermost and
- * outermost nested guard, and hence we set nested to lr. The
- * calldepth of the innermost guard is not added to state.rp, so we
- * do it here manually. For a nesting depth greater than 1 the
- * CallTree builtin already added the innermost guard's calldepth
- * to state.rpAtLastTreeCall.
- */
- nested = lr;
- rp += lr->calldepth;
- } else {
- /*
- * During unwinding state.rp gets overwritten at every step and we
- * restore it here to its state at the innermost nested guard. The
- * builtin already added the calldepth of that innermost guard to
- * rpAtLastTreeCall.
- */
- rp = (FrameInfo**)state.rpAtLastTreeCall;
- }
- innermost = state.lastTreeExitGuard;
- if (state.innermostNestedGuardp)
- *state.innermostNestedGuardp = nested;
- JS_ASSERT(nested);
- JS_ASSERT(nested->exitType == NESTED_EXIT);
- JS_ASSERT(state.lastTreeExitGuard);
- JS_ASSERT(state.lastTreeExitGuard->exitType != NESTED_EXIT);
- }
-
- int32_t bs = state.builtinStatus;
- bool bailed = innermost->exitType == STATUS_EXIT && (bs & JSBUILTIN_BAILED);
- if (bailed) {
- /*
- * Deep-bail case.
- *
- * A _FAIL native already called LeaveTree. We already reconstructed
- * the interpreter stack, in pre-call state, with pc pointing to the
- * CALL/APPLY op, for correctness. Then we continued in native code.
- *
- * First, if we just returned from a slow native, pop its stack frame.
- */
- if (!cx->fp->script) {
- JSStackFrame *fp = cx->fp;
- JS_ASSERT(FUN_SLOW_NATIVE(fp->fun));
- JS_ASSERT(fp->regs == NULL);
- JS_ASSERT(fp->down->regs != &((JSInlineFrame *) fp)->callerRegs);
- cx->fp = fp->down;
- JS_ARENA_RELEASE(&cx->stackPool, ((JSInlineFrame *) fp)->mark);
- }
- JS_ASSERT(cx->fp->script);
-
- if (!(bs & JSBUILTIN_ERROR)) {
- /*
- * The builtin or native deep-bailed but finished successfully
- * (no exception or error).
- *
- * After it returned, the JIT code stored the results of the
- * builtin or native at the top of the native stack and then
- * immediately flunked the guard on state->builtinStatus.
- *
- * Now LeaveTree has been called again from the tail of
- * ExecuteTree. We are about to return to the interpreter. Adjust
- * the top stack frame to resume on the next op.
- */
- JSFrameRegs* regs = cx->fp->regs;
- JSOp op = (JSOp) *regs->pc;
- JS_ASSERT(op == JSOP_CALL || op == JSOP_APPLY || op == JSOP_NEW ||
- op == JSOP_GETPROP || op == JSOP_GETTHISPROP || op == JSOP_GETARGPROP ||
- op == JSOP_GETLOCALPROP || op == JSOP_LENGTH ||
- op == JSOP_GETELEM || op == JSOP_CALLELEM ||
- op == JSOP_SETPROP || op == JSOP_SETNAME ||
- op == JSOP_SETELEM || op == JSOP_INITELEM ||
- op == JSOP_INSTANCEOF);
-
- /*
- * JSOP_SETELEM can be coalesced with a JSOP_POP in the interpreter.
- * Since this doesn't re-enter the recorder, the post-state snapshot
- * is invalid. Fix it up here.
- */
- if (op == JSOP_SETELEM && JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
- regs->sp -= js_CodeSpec[JSOP_SETELEM].nuses;
- regs->sp += js_CodeSpec[JSOP_SETELEM].ndefs;
- regs->pc += JSOP_SETELEM_LENGTH;
- op = JSOP_POP;
- }
-
- const JSCodeSpec& cs = js_CodeSpec[op];
- regs->sp -= (cs.format & JOF_INVOKE) ? GET_ARGC(regs->pc) + 2 : cs.nuses;
- regs->sp += cs.ndefs;
- regs->pc += cs.length;
- JS_ASSERT_IF(!cx->fp->imacpc,
- cx->fp->slots + cx->fp->script->nfixed +
- js_ReconstructStackDepth(cx, cx->fp->script, regs->pc) ==
- regs->sp);
-
- /*
- * If there's a tree call around the point that we deep exited at,
- * then state.sp and state.rp were restored to their original
- * values before the tree call and sp might be less than deepBailSp,
- * which we sampled when we were told to deep bail.
- */
- JS_ASSERT(state.deepBailSp >= state.stackBase && state.sp <= state.deepBailSp);
-
- /*
- * As explained above, the JIT code stored a result value or values
- * on the native stack. Transfer them to the interpreter stack now.
- * (Some opcodes, like JSOP_CALLELEM, produce two values, hence the
- * loop.)
- */
- JSTraceType* typeMap = innermost->stackTypeMap();
- for (int i = 1; i <= cs.ndefs; i++) {
- NativeToValue(cx,
- regs->sp[-i],
- typeMap[innermost->numStackSlots - i],
- (jsdouble *) state.deepBailSp
- + innermost->sp_adj / sizeof(jsdouble) - i);
- }
- }
- return;
- }
-
- JS_ARENA_RELEASE(&cx->stackPool, state.stackMark);
- while (callstack < rp) {
- FrameInfo* fi = *callstack;
- /* Peek at the callee native slot in the not-yet-synthesized down frame. */
- JSObject* callee = *(JSObject**)&stack[fi->callerHeight];
-
- /*
- * Synthesize a stack frame and write out the values in it using the
- * type map pointer on the native call stack.
- */
- SynthesizeFrame(cx, *fi, callee);
- int slots = FlushNativeStackFrame(cx, 1 /* callDepth */, (JSTraceType*)(fi + 1),
- stack, cx->fp);
-#ifdef DEBUG
- JSStackFrame* fp = cx->fp;
- debug_only_printf(LC_TMTracer,
- "synthesized deep frame for %s:%u@%u, slots=%d\n",
- fp->script->filename,
- js_FramePCToLineNumber(cx, fp),
- FramePCOffset(fp),
- slots);
-#endif
- /*
- * Keep track of the additional frames we put on the interpreter stack
- * and the native stack slots we consumed.
- */
- ++*state.inlineCallCountp;
- ++callstack;
- stack += slots;
- }
-
- /*
- * We already synthesized the frames around the innermost guard. Here we
- * just deal with additional frames inside the tree we are bailing out
- * from.
- */
- JS_ASSERT(rp == callstack);
- unsigned calldepth = innermost->calldepth;
- unsigned calldepth_slots = 0;
- unsigned calleeOffset = 0;
- for (unsigned n = 0; n < calldepth; ++n) {
- /* Peek at the callee native slot in the not-yet-synthesized down frame. */
- calleeOffset += callstack[n]->callerHeight;
- JSObject* callee = *(JSObject**)&stack[calleeOffset];
-
- /* Reconstruct the frame. */
- calldepth_slots += SynthesizeFrame(cx, *callstack[n], callee);
- ++*state.inlineCallCountp;
-#ifdef DEBUG
- JSStackFrame* fp = cx->fp;
- debug_only_printf(LC_TMTracer,
- "synthesized shallow frame for %s:%u@%u\n",
- fp->script->filename, js_FramePCToLineNumber(cx, fp),
- FramePCOffset(fp));
-#endif
- }
-
- /*
- * Adjust sp and pc relative to the tree we exited from (not the tree we
- * entered into). These are our final values for sp and pc since
- * SynthesizeFrame has already taken care of all frames in between. But
- * first we recover fp->blockChain, which comes from the side exit
- * struct.
- */
- JSStackFrame* fp = cx->fp;
-
- fp->blockChain = innermost->block;
-
- /*
- * If we are not exiting from an inlined frame, state->sp is spbase;
- * otherwise spbase is offset by whatever slots the frames around us consume.
- */
- fp->regs->pc = innermost->pc;
- fp->imacpc = innermost->imacpc;
- fp->regs->sp = StackBase(fp) + (innermost->sp_adj / sizeof(double)) - calldepth_slots;
- JS_ASSERT_IF(!fp->imacpc,
- fp->slots + fp->script->nfixed +
- js_ReconstructStackDepth(cx, fp->script, fp->regs->pc) == fp->regs->sp);
-
-#ifdef EXECUTE_TREE_TIMER
- uint64 cycles = rdtsc() - state.startTime;
-#elif defined(JS_JIT_SPEW)
- uint64 cycles = 0;
-#endif
-
- debug_only_printf(LC_TMTracer,
- "leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%s, sp=%lld, "
- "calldepth=%d, cycles=%llu\n",
- fp->script->filename,
- js_FramePCToLineNumber(cx, fp),
- FramePCOffset(fp),
- js_CodeName[fp->imacpc ? *fp->imacpc : *fp->regs->pc],
- (void*)lr,
- getExitName(lr->exitType),
- (long long int)(fp->regs->sp - StackBase(fp)),
- calldepth,
- (unsigned long long int)cycles);
-
- /*
- * If this trace is part of a tree, later branches might have added
- * additional globals for which we don't have any type information
- * available in the side exit. We merge in this information from the entry
- * type-map. See also the comment in the constructor of TraceRecorder
- * regarding why this is always safe to do.
- */
- TreeInfo* outermostTree = state.outermostTree;
- uint16* gslots = outermostTree->globalSlots->data();
- unsigned ngslots = outermostTree->globalSlots->length();
- JS_ASSERT(ngslots == outermostTree->nGlobalTypes());
- JSTraceType* globalTypeMap;
-
- /* Are there enough globals? */
- Queue<JSTraceType> typeMap(0);
- if (innermost->numGlobalSlots == ngslots) {
- /* Yes. This is the ideal fast path. */
- globalTypeMap = innermost->globalTypeMap();
- } else {
- /*
- * No. Merge the typemap of the innermost entry and exit together. This
- * should always work because it is invalid for nested trees or linked
- * trees to have incompatible types. Thus, whenever a new global type
- * is lazily added into a tree, all dependent and linked trees are
- * immediately specialized (see bug 476653).
- */
- JS_ASSERT(innermost->root()->getTreeInfo()->nGlobalTypes() == ngslots);
- JS_ASSERT(innermost->root()->getTreeInfo()->nGlobalTypes() > innermost->numGlobalSlots);
- typeMap.ensure(ngslots);
-#ifdef DEBUG
- unsigned check_ngslots =
-#endif
- BuildGlobalTypeMapFromInnerTree(typeMap, innermost);
- JS_ASSERT(check_ngslots == ngslots);
- globalTypeMap = typeMap.data();
- }
-
- /* Write back the topmost native stack frame. */
-#ifdef DEBUG
- int slots =
-#endif
- FlushNativeStackFrame(cx, innermost->calldepth,
- innermost->stackTypeMap(),
- stack, NULL);
- JS_ASSERT(unsigned(slots) == innermost->numStackSlots);
-
- if (innermost->nativeCalleeWord)
- SynthesizeSlowNativeFrame(state, cx, innermost);
-
- /* Write back interned globals. */
- double* global = (double*)(&state + 1);
- FlushNativeGlobalFrame(cx, global,
- ngslots, gslots, globalTypeMap);
-#ifdef DEBUG
- /* Verify that our state restoration worked. */
- for (JSStackFrame* fp = cx->fp; fp; fp = fp->down) {
- JS_ASSERT_IF(fp->argv, JSVAL_IS_OBJECT(fp->argv[-1]));
- }
-#endif
-#ifdef JS_JIT_SPEW
- if (innermost->exitType != TIMEOUT_EXIT)
- AUDIT(sideExitIntoInterpreter);
- else
- AUDIT(timeoutIntoInterpreter);
-#endif
-
- state.innermost = innermost;
-}
-
-JS_REQUIRES_STACK bool
-js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
-{
-#ifdef MOZ_TRACEVIS
- TraceVisStateObj tvso(cx, S_MONITOR);
-#endif
-
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
-
- /* Is the recorder currently active? */
- if (tm->recorder) {
- jsbytecode* innerLoopHeaderPC = cx->fp->regs->pc;
-
- if (RecordLoopEdge(cx, tm->recorder, inlineCallCount))
- return true;
-
- /*
- * RecordLoopEdge will invoke an inner tree if we have a matching
- * one. If we arrive here, that tree didn't run to completion and
- * instead we mis-matched or the inner tree took a side exit other than
- * the loop exit. We are thus no longer guaranteed to be parked on the
- * same loop header js_MonitorLoopEdge was called for. In fact, this
- * might not even be a loop header at all. Hence if the program counter
- * no longer hovers over the inner loop header, return to the
- * interpreter and do not attempt to trigger or record a new tree at
- * this location.
- */
- if (innerLoopHeaderPC != cx->fp->regs->pc) {
-#ifdef MOZ_TRACEVIS
- tvso.r = R_INNER_SIDE_EXIT;
-#endif
- return false;
- }
- }
- JS_ASSERT(!tm->recorder);
-
- /* Check the pool of reserved doubles (this might trigger a GC). */
- if (tm->reservedDoublePoolPtr < (tm->reservedDoublePool + MAX_NATIVE_STACK_SLOTS) &&
- !ReplenishReservedPool(cx, tm)) {
-#ifdef MOZ_TRACEVIS
- tvso.r = R_DOUBLES;
-#endif
- return false; /* Out of memory, don't try to record now. */
- }
-
- /*
- * Make sure the shape of the global object still matches (this might flush
- * the JIT cache).
- */
- JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain);
- uint32 globalShape = -1;
- SlotList* globalSlots = NULL;
-
- if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) {
- Backoff(cx, cx->fp->regs->pc);
- return false;
- }
-
- /* Do not enter the JIT code with a pending operation callback. */
- if (cx->operationCallbackFlag) {
-#ifdef MOZ_TRACEVIS
- tvso.r = R_CALLBACK_PENDING;
-#endif
- return false;
- }
-
- jsbytecode* pc = cx->fp->regs->pc;
- uint32 argc = cx->fp->argc;
-
- VMFragment* f = getLoop(tm, pc, globalObj, globalShape, argc);
- if (!f)
- f = getAnchor(tm, pc, globalObj, globalShape, argc);
-
- if (!f) {
- ResetJIT(cx, FR_OOM);
-#ifdef MOZ_TRACEVIS
- tvso.r = R_OOM_GETANCHOR;
-#endif
- return false;
- }
-
- /*
- * If we have no code in the anchor and no peers, we definitely won't be
- * able to activate any trees, so start compiling.
- */
- if (!f->code() && !f->peer) {
- record:
- if (++f->hits() < HOTLOOP) {
-#ifdef MOZ_TRACEVIS
- tvso.r = f->hits() < 1 ? R_BACKED_OFF : R_COLD;
-#endif
- return false;
- }
-
- /*
- * We can give RecordTree the root peer. If that peer is already taken,
- * it will walk the peer list and find us a free slot or allocate a new
- * tree if needed.
- */
- bool rv = RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape,
- globalSlots, argc);
-#ifdef MOZ_TRACEVIS
- if (!rv)
- tvso.r = R_FAIL_RECORD_TREE;
-#endif
- return rv;
- }
-
- debug_only_printf(LC_TMTracer,
- "Looking for compat peer %d@%d, from %p (ip: %p)\n",
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp), (void*)f, f->ip);
-
- uintN count;
- Fragment* match = FindVMCompatiblePeer(cx, globalObj, f, count);
- if (!match) {
- if (count < MAXPEERS)
- goto record;
-
- /*
- * If we hit the max peers ceiling, don't try to look up fragments all
- * the time. That's expensive. This must be a rather type-unstable loop.
- */
- debug_only_print0(LC_TMTracer, "Blacklisted: too many peer trees.\n");
- Blacklist((jsbytecode*) f->root->ip);
-#ifdef MOZ_TRACEVIS
- tvso.r = R_MAX_PEERS;
-#endif
- return false;
- }
-
- VMSideExit* lr = NULL;
- VMSideExit* innermostNestedGuard = NULL;
-
- lr = ExecuteTree(cx, match, inlineCallCount, &innermostNestedGuard);
- if (!lr) {
-#ifdef MOZ_TRACEVIS
- tvso.r = R_FAIL_EXECUTE_TREE;
-#endif
- return false;
- }
-
- /*
- * If we exit on a branch, or on a tree call guard, try to grow the inner
- * tree (in case of a branch exit), or the tree nested around the tree we
- * exited from (in case of the tree call guard).
- */
- bool rv;
- switch (lr->exitType) {
- case UNSTABLE_LOOP_EXIT:
- rv = AttemptToStabilizeTree(cx, globalObj, lr, NULL, 0);
-#ifdef MOZ_TRACEVIS
- if (!rv)
- tvso.r = R_FAIL_STABILIZE;
-#endif
- return rv;
-
- case OVERFLOW_EXIT:
- oracle.markInstructionUndemotable(cx->fp->regs->pc);
- /* FALL THROUGH */
- case BRANCH_EXIT:
- case CASE_EXIT:
- return AttemptToExtendTree(cx, lr, NULL, NULL
-#ifdef MOZ_TRACEVIS
- , &tvso
-#endif
- );
-
- case LOOP_EXIT:
- if (innermostNestedGuard)
- return AttemptToExtendTree(cx, innermostNestedGuard, lr, NULL
-#ifdef MOZ_TRACEVIS
- , &tvso
-#endif
- );
-#ifdef MOZ_TRACEVIS
- tvso.r = R_NO_EXTEND_OUTER;
-#endif
- return false;
-
-#ifdef MOZ_TRACEVIS
- case MISMATCH_EXIT: tvso.r = R_MISMATCH_EXIT; return false;
- case OOM_EXIT: tvso.r = R_OOM_EXIT; return false;
- case TIMEOUT_EXIT: tvso.r = R_TIMEOUT_EXIT; return false;
- case DEEP_BAIL_EXIT: tvso.r = R_DEEP_BAIL_EXIT; return false;
- case STATUS_EXIT: tvso.r = R_STATUS_EXIT; return false;
-#endif
-
- default:
- /*
- * No, this was an unusual exit (i.e. out of memory/GC), so just resume
- * interpretation.
- */
-#ifdef MOZ_TRACEVIS
- tvso.r = R_OTHER_EXIT;
-#endif
- return false;
- }
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
-{
- Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
- JSTraceMonitor &localtm = JS_TRACE_MONITOR(cx);
-
- /* Process needFlush requests now. */
- if (localtm.needFlush) {
- ResetJIT(cx, FR_DEEP_BAIL);
- return JSRS_STOP;
- }
- JS_ASSERT(!tr->fragment->lastIns);
-
- /*
- * Clear one-shot state used to communicate between record_JSOP_CALL and post-
- * opcode-case-guts record hook (record_NativeCallComplete).
- */
- tr->pendingSpecializedNative = NULL;
- tr->newobj_ins = NULL;
-
- /* Handle one-shot request from finishGetProp or INSTANCEOF to snapshot post-op state and guard. */
- if (tr->pendingGuardCondition) {
- tr->guard(true, tr->pendingGuardCondition, STATUS_EXIT);
- tr->pendingGuardCondition = NULL;
- }
-
- /* Handle one-shot request to unbox the result of a property get. */
- if (tr->pendingUnboxSlot) {
- LIns* val_ins = tr->get(tr->pendingUnboxSlot);
- val_ins = tr->unbox_jsval(*tr->pendingUnboxSlot, val_ins, tr->snapshot(BRANCH_EXIT));
- tr->set(tr->pendingUnboxSlot, val_ins);
- tr->pendingUnboxSlot = 0;
- }
-
- debug_only_stmt(
- if (js_LogController.lcbits & LC_TMRecorder) {
- js_Disassemble1(cx, cx->fp->script, cx->fp->regs->pc,
- cx->fp->imacpc
- ? 0 : cx->fp->regs->pc - cx->fp->script->code,
- !cx->fp->imacpc, stdout);
- }
- )
-
- /*
- * If op is not a break or a return from a loop, continue recording and
- * follow the trace. We check for imacro-calling bytecodes inside each
- * switch case to resolve the if (JSOP_IS_IMACOP(x)) conditions at compile
- * time.
- */
-
- JSRecordingStatus status;
-#ifdef DEBUG
- bool wasInImacro = (cx->fp->imacpc != NULL);
-#endif
- switch (op) {
- default:
- status = JSRS_ERROR;
- goto stop_recording;
-# define OPDEF(x,val,name,token,length,nuses,ndefs,prec,format) \
- case x: \
- status = tr->record_##x(); \
- if (JSOP_IS_IMACOP(x)) \
- goto imacro; \
- break;
-# include "jsopcode.tbl"
-# undef OPDEF
- }
-
- /* record_JSOP_X can reenter the interpreter and kill |tr|. */
- if (!localtm.recorder)
- return JSRS_STOP;
-
- JS_ASSERT(status != JSRS_IMACRO);
- JS_ASSERT_IF(!wasInImacro, cx->fp->imacpc == NULL);
-
- if (assm->error()) {
- js_AbortRecording(cx, "error during recording");
- return JSRS_STOP;
- }
-
- if (tr->outOfMemory() || js_OverfullJITCache(&localtm)) {
- js_AbortRecording(cx, "no more memory");
- ResetJIT(cx, FR_OOM);
- return JSRS_STOP;
- }
-
- imacro:
- /* record_JSOP_X can reenter the interpreter and kill |tr|. */
- if (!localtm.recorder)
- return JSRS_STOP;
-
- if (!STATUS_ABORTS_RECORDING(status))
- return status;
-
- stop_recording:
- /* If we recorded the end of the trace, destroy the recorder now. */
- if (tr->fragment->lastIns) {
- DeleteRecorder(cx);
- return status;
- }
-
- /* Looks like we encountered an error condition. Abort recording. */
- js_AbortRecording(cx, js_CodeName[op]);
- return status;
-}
-
-JS_REQUIRES_STACK void
-js_AbortRecording(JSContext* cx, const char* reason)
-{
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- JS_ASSERT(tm->recorder != NULL);
-
- /* Abort the trace and blacklist its starting point. */
- Fragment* f = tm->recorder->getFragment();
-
- /*
- * If the recorder already had its fragment disposed, or we actually
- * finished recording and this recorder is merely passing through the deep
- * abort state to the next recorder on the stack, just destroy the
- * recorder. There is nothing to abort.
- */
- if (!f || f->lastIns) {
- DeleteRecorder(cx);
- return;
- }
-
- AUDIT(recorderAborted);
-
- JS_ASSERT(!f->vmprivate);
-#ifdef DEBUG
- TreeInfo* ti = tm->recorder->getTreeInfo();
- debug_only_printf(LC_TMAbort,
- "Abort recording of tree %s:%d@%d at %s:%d@%d: %s.\n",
- ti->treeFileName,
- ti->treeLineNumber,
- ti->treePCOffset,
- cx->fp->script->filename,
- js_FramePCToLineNumber(cx, cx->fp),
- FramePCOffset(cx->fp),
- reason);
-#endif
-
- Backoff(cx, (jsbytecode*) f->root->ip, f->root);
-
- /* If DeleteRecorder flushed the code cache, we can't rely on f any more. */
- if (!DeleteRecorder(cx))
- return;
-
- /*
- * If this is the primary trace and we didn't succeed compiling, trash the
- * TreeInfo object.
- */
- if (!f->code() && (f->root == f))
- TrashTree(cx, f);
-}
-
-#if defined NANOJIT_IA32
-static bool
-CheckForSSE2()
-{
- char *c = getenv("X86_FORCE_SSE2");
- if (c)
- return (!strcmp(c, "true") ||
- !strcmp(c, "1") ||
- !strcmp(c, "yes"));
-
- int features = 0;
-#if defined _MSC_VER
- __asm
- {
- pushad
- mov eax, 1
- cpuid
- mov features, edx
- popad
- }
-#elif defined __GNUC__
- asm("xchg %%esi, %%ebx\n" /* we can't clobber ebx on gcc (PIC register) */
- "mov $0x01, %%eax\n"
- "cpuid\n"
- "mov %%edx, %0\n"
- "xchg %%esi, %%ebx\n"
- : "=m" (features)
- : /* We have no inputs */
- : "%eax", "%esi", "%ecx", "%edx"
- );
-#elif defined __SUNPRO_C || defined __SUNPRO_CC
- asm("push %%ebx\n"
- "mov $0x01, %%eax\n"
- "cpuid\n"
- "pop %%ebx\n"
- : "=d" (features)
- : /* We have no inputs */
- : "%eax", "%ecx"
- );
-#endif
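- /* CPUID leaf 1 reports SSE2 support in bit 26 of EDX, tested below. */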
- return (features & (1<<26)) != 0;
-}
-#endif
-
-#if defined(NANOJIT_ARM)
-
-#if defined(_MSC_VER) && defined(WINCE)
-
- // These come in from jswince.asm.
-extern "C" int js_arm_try_thumb_op();
-extern "C" int js_arm_try_armv6t2_op();
-extern "C" int js_arm_try_armv5_op();
-extern "C" int js_arm_try_armv6_op();
-extern "C" int js_arm_try_armv7_op();
-extern "C" int js_arm_try_vfp_op();
-
-static bool
-js_arm_check_thumb() {
- bool ret = false;
- __try {
- js_arm_try_thumb_op();
- ret = true;
- } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
- ret = false;
- }
- return ret;
-}
-
-static bool
-js_arm_check_thumb2() {
- bool ret = false;
- __try {
- js_arm_try_armv6t2_op();
- ret = true;
- } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
- ret = false;
- }
- return ret;
-}
-
-static unsigned int
-js_arm_check_arch() {
- unsigned int arch = 4;
- __try {
- js_arm_try_armv5_op();
- arch = 5;
- js_arm_try_armv6_op();
- arch = 6;
- js_arm_try_armv7_op();
- arch = 7;
- } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
- }
- return arch;
-}
-
-static bool
-js_arm_check_vfp() {
-#ifdef WINCE_WINDOWS_MOBILE
- return false;
-#else
- bool ret = false;
- __try {
- js_arm_try_vfp_op();
- ret = true;
- } __except(GetExceptionCode() == EXCEPTION_ILLEGAL_INSTRUCTION) {
- ret = false;
- }
- return ret;
-#endif
-}
-
-#define HAVE_ENABLE_DISABLE_DEBUGGER_EXCEPTIONS 1
-
-/* See "Suppressing Exception Notifications while Debugging", at
- * http://msdn.microsoft.com/en-us/library/ms924252.aspx
- */
-static void
-js_disable_debugger_exceptions()
-{
- // 2 == TLSSLOT_KERNEL
- DWORD kctrl = (DWORD) TlsGetValue(2);
- // 0x12 = TLSKERN_NOFAULT | TLSKERN_NOFAULTMSG
- kctrl |= 0x12;
- TlsSetValue(2, (LPVOID) kctrl);
-}
-
-static void
-js_enable_debugger_exceptions()
-{
- // 2 == TLSSLOT_KERNEL
- DWORD kctrl = (DWORD) TlsGetValue(2);
- // 0x12 = TLSKERN_NOFAULT | TLSKERN_NOFAULTMSG
- kctrl &= ~0x12;
- TlsSetValue(2, (LPVOID) kctrl);
-}
-
-#elif defined(__GNUC__) && defined(AVMPLUS_LINUX)
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <elf.h>
-
-// Assume ARMv4 by default.
-static unsigned int arm_arch = 4;
-static bool arm_has_thumb = false;
-static bool arm_has_vfp = false;
-static bool arm_has_neon = false;
-static bool arm_has_iwmmxt = false;
-static bool arm_tests_initialized = false;
-
-static void
-arm_read_auxv() {
- int fd;
- Elf32_auxv_t aux;
-
- fd = open("/proc/self/auxv", O_RDONLY);
- if (fd > 0) {
- while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
- if (aux.a_type == AT_HWCAP) {
- uint32_t hwcap = aux.a_un.a_val;
- if (getenv("ARM_FORCE_HWCAP"))
- hwcap = strtoul(getenv("ARM_FORCE_HWCAP"), NULL, 0);
- else if (getenv("_SBOX_DIR"))
- continue; // Ignore the rest, if we're running in scratchbox
- // hardcode these values to avoid depending on specific versions
- // of the hwcap header, e.g. HWCAP_NEON
- arm_has_thumb = (hwcap & 4) != 0;
- arm_has_vfp = (hwcap & 64) != 0;
- arm_has_iwmmxt = (hwcap & 512) != 0;
- // this flag is only present on kernel 2.6.29
- arm_has_neon = (hwcap & 4096) != 0;
- } else if (aux.a_type == AT_PLATFORM) {
- const char *plat = (const char*) aux.a_un.a_val;
- if (getenv("ARM_FORCE_PLATFORM"))
- plat = getenv("ARM_FORCE_PLATFORM");
- else if (getenv("_SBOX_DIR"))
- continue; // Ignore the rest, if we're running in scratchbox
- // The platform string has the form "v[0-9][lb]". The "l" or "b" indicate little-
- // or big-endian variants and the digit indicates the version of the platform.
- // We can only accept ARMv4 and above, but allow anything up to ARMv9 for future
- // processors. Architectures newer than ARMv7 are assumed to be
- // backwards-compatible with ARMv7.
- if ((plat[0] == 'v') &&
- (plat[1] >= '4') && (plat[1] <= '9') &&
- ((plat[2] == 'l') || (plat[2] == 'b')))
- {
- arm_arch = plat[1] - '0';
- }
- }
- }
- close (fd);
-
- // if we don't have 2.6.29, we have to do this hack; set
- // the env var to trust HWCAP.
- if (!getenv("ARM_TRUST_HWCAP") && (arm_arch >= 7))
- arm_has_neon = true;
- }
-
- arm_tests_initialized = true;
-}
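-
- /*
- * Each auxv record pairs an a_type with a value: AT_HWCAP delivers the
- * kernel's feature bitmask (bit 2 Thumb, bit 6 VFP, bit 9 iWMMXt, bit 12
- * NEON, matching the hardcoded masks above), and AT_PLATFORM points at a
- * platform string such as "v7l" from which the architecture digit is read.
- */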
-
-static bool
-js_arm_check_thumb() {
- if (!arm_tests_initialized)
- arm_read_auxv();
-
- return arm_has_thumb;
-}
-
-static bool
-js_arm_check_thumb2() {
- if (!arm_tests_initialized)
- arm_read_auxv();
-
- // ARMv6T2 also supports Thumb2, but Linux doesn't provide an easy way to test for this as
- // there is no associated bit in auxv. ARMv7 always supports Thumb2, and future architectures
- // are assumed to be backwards-compatible.
- return (arm_arch >= 7);
-}
-
-static unsigned int
-js_arm_check_arch() {
- if (!arm_tests_initialized)
- arm_read_auxv();
-
- return arm_arch;
-}
-
-static bool
-js_arm_check_vfp() {
- if (!arm_tests_initialized)
- arm_read_auxv();
-
- return arm_has_vfp;
-}
-
-#else
-#warning Not sure how to check for architecture variant on your platform. Assuming ARMv4.
-static bool
-js_arm_check_thumb() { return false; }
-static bool
-js_arm_check_thumb2() { return false; }
-static unsigned int
-js_arm_check_arch() { return 4; }
-static bool
-js_arm_check_vfp() { return false; }
-#endif
-
-#ifndef HAVE_ENABLE_DISABLE_DEBUGGER_EXCEPTIONS
-static void
-js_enable_debugger_exceptions() { }
-static void
-js_disable_debugger_exceptions() { }
-#endif
-
-#endif /* NANOJIT_ARM */
-
-#define K *1024
-#define M K K
-#define G K M
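- /*
- * These expand multiplicatively, e.g. a hypothetical "uint32 n = 16 M;"
- * becomes "uint32 n = 16 *1024 *1024;" (16777216 bytes).
- */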
-
-void
-js_SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes)
-{
- JSTraceMonitor* tm = &JS_THREAD_DATA(cx)->traceMonitor;
- JS_ASSERT(tm->codeAlloc && tm->dataAlloc);
- if (bytes > 1 G)
- bytes = 1 G;
- if (bytes < 128 K)
- bytes = 128 K;
- tm->maxCodeCacheBytes = bytes;
-}
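-
-// E.g. js_SetMaxCodeCacheBytes(cx, 16 K) is raised to the 128 K floor, and a
-// 2 G request would be clamped to the 1 G ceiling before being stored.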
-
-void
-js_InitJIT(JSTraceMonitor *tm)
-{
-#if defined JS_JIT_SPEW
- tm->profAlloc = NULL;
- /* Set up debug logging. */
- if (!did_we_set_up_debug_logging) {
- InitJITLogController();
- did_we_set_up_debug_logging = true;
- }
- /* Set up fragprofiling, if required. */
- if (js_LogController.lcbits & LC_FragProfile) {
- tm->profAlloc = new VMAllocator();
- tm->profTab = new (*tm->profAlloc) FragStatsMap(*tm->profAlloc);
- }
- tm->lastFragID = 0;
-#else
- memset(&js_LogController, 0, sizeof(js_LogController));
-#endif
-
- if (!did_we_check_processor_features) {
-#if defined NANOJIT_IA32
- avmplus::AvmCore::config.use_cmov =
- avmplus::AvmCore::config.sse2 = CheckForSSE2();
-#endif
-#if defined NANOJIT_ARM
-
- js_disable_debugger_exceptions();
-
- bool arm_vfp = js_arm_check_vfp();
- bool arm_thumb = js_arm_check_thumb();
- bool arm_thumb2 = js_arm_check_thumb2();
- unsigned int arm_arch = js_arm_check_arch();
-
- js_enable_debugger_exceptions();
-
- avmplus::AvmCore::config.vfp = arm_vfp;
- avmplus::AvmCore::config.soft_float = !arm_vfp;
- avmplus::AvmCore::config.thumb = arm_thumb;
- avmplus::AvmCore::config.thumb2 = arm_thumb2;
- avmplus::AvmCore::config.arch = arm_arch;
-
- // Sanity-check the configuration detection.
- // * We don't understand architectures prior to ARMv4.
- JS_ASSERT(arm_arch >= 4);
- // * All architectures support Thumb with the possible exception of ARMv4.
- JS_ASSERT((arm_thumb) || (arm_arch == 4));
- // * Thumb2 is supported on ARMv6T2 and ARMv7(+), but not on plain ARMv6 or earlier.
- JS_ASSERT((arm_thumb2) || (arm_arch <= 6));
- // * All architectures that support Thumb2 also support Thumb.
- JS_ASSERT((arm_thumb2 && arm_thumb) || (!arm_thumb2));
-#endif
- did_we_check_processor_features = true;
- }
-
- /* Set the default size for the code cache to 16MB. */
- tm->maxCodeCacheBytes = 16 M;
-
- if (!tm->recordAttempts.ops) {
- JS_DHashTableInit(&tm->recordAttempts, JS_DHashGetStubOps(),
- NULL, sizeof(PCHashEntry),
- JS_DHASH_DEFAULT_CAPACITY(PC_HASH_COUNT));
- }
-
- JS_ASSERT(!tm->dataAlloc && !tm->codeAlloc);
- tm->dataAlloc = new VMAllocator();
- tm->tempAlloc = new VMAllocator();
- tm->reTempAlloc = new VMAllocator();
- tm->codeAlloc = new CodeAlloc();
- tm->flush();
- verbose_only( tm->branches = NULL; )
-
- JS_ASSERT(!tm->reservedDoublePool);
- tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
-
-#if !defined XP_WIN
- debug_only(memset(&jitstats, 0, sizeof(jitstats)));
-#endif
-
-#ifdef JS_JIT_SPEW
- /* Architecture properties used by test cases. */
- jitstats.archIsIA32 = 0;
- jitstats.archIs64BIT = 0;
- jitstats.archIsARM = 0;
- jitstats.archIsSPARC = 0;
- jitstats.archIsPPC = 0;
-#if defined NANOJIT_IA32
- jitstats.archIsIA32 = 1;
-#endif
-#if defined NANOJIT_64BIT
- jitstats.archIs64BIT = 1;
-#endif
-#if defined NANOJIT_ARM
- jitstats.archIsARM = 1;
-#endif
-#if defined NANOJIT_SPARC
- jitstats.archIsSPARC = 1;
-#endif
-#if defined NANOJIT_PPC
- jitstats.archIsPPC = 1;
-#endif
-#if defined NANOJIT_X64
- jitstats.archIsAMD64 = 1;
-#endif
-#endif
-}
-
-void
-js_FinishJIT(JSTraceMonitor *tm)
-{
-#ifdef JS_JIT_SPEW
- if (jitstats.recorderStarted) {
- char sep = ':';
- debug_only_print0(LC_TMStats, "recorder");
-#define RECORDER_JITSTAT(_ident, _name) \
- debug_only_printf(LC_TMStats, "%c " _name "(%llu)", sep, \
- (unsigned long long int)jitstats._ident); \
- sep = ',';
-#define JITSTAT(x) /* nothing */
-#include "jitstats.tbl"
-#undef JITSTAT
-#undef RECORDER_JITSTAT
- debug_only_print0(LC_TMStats, "\n");
-
- sep = ':';
- debug_only_print0(LC_TMStats, "monitor");
-#define MONITOR_JITSTAT(_ident, _name) \
- debug_only_printf(LC_TMStats, "%c " _name "(%llu)", sep, \
- (unsigned long long int)jitstats._ident); \
- sep = ',';
-#define JITSTAT(x) /* nothing */
-#include "jitstats.tbl"
-#undef JITSTAT
-#undef MONITOR_JITSTAT
- debug_only_print0(LC_TMStats, "\n");
- }
-#endif
- JS_ASSERT(tm->reservedDoublePool);
-
- if (tm->recordAttempts.ops)
- JS_DHashTableFinish(&tm->recordAttempts);
-
-#ifdef DEBUG
- // Recover profiling data from expiring Fragments, and display
- // final results.
- if (js_LogController.lcbits & LC_FragProfile) {
- for (Seq* f = tm->branches; f; f = f->tail) {
- js_FragProfiling_FragFinalizer(f->head, tm);
- }
- for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
- for (VMFragment *f = tm->vmfragments[i]; f; f = f->next) {
- JS_ASSERT(f->root == f);
- for (VMFragment *p = f; p; p = p->peer)
- js_FragProfiling_FragFinalizer(p, tm);
- }
- }
- REHashMap::Iter iter(*(tm->reFragments));
- while (iter.next()) {
- nanojit::Fragment* frag = iter.value();
- js_FragProfiling_FragFinalizer(frag, tm);
- }
-
- js_FragProfiling_showResults(tm);
- delete tm->profAlloc;
-
- } else {
- NanoAssert(!tm->profTab);
- NanoAssert(!tm->profAlloc);
- }
-#endif
-
- memset(&tm->vmfragments[0], 0, FRAGMENT_TABLE_SIZE * sizeof(VMFragment*));
-
- delete[] tm->reservedDoublePool;
- tm->reservedDoublePool = tm->reservedDoublePoolPtr = NULL;
-
- if (tm->codeAlloc) {
- delete tm->codeAlloc;
- tm->codeAlloc = NULL;
- }
-
- if (tm->dataAlloc) {
- delete tm->dataAlloc;
- tm->dataAlloc = NULL;
- }
-
- if (tm->tempAlloc) {
- delete tm->tempAlloc;
- tm->tempAlloc = NULL;
- }
-
- if (tm->reTempAlloc) {
- delete tm->reTempAlloc;
- tm->reTempAlloc = NULL;
- }
-}
-
-void
-js_PurgeJITOracle()
-{
- oracle.clear();
-}
-
-static JSDHashOperator
-PurgeScriptRecordingAttempts(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number, void *arg)
-{
- PCHashEntry *e = (PCHashEntry *)hdr;
- JSScript *script = (JSScript *)arg;
- jsbytecode *pc = (jsbytecode *)e->key;
-
- if (JS_UPTRDIFF(pc, script->code) < script->length)
- return JS_DHASH_REMOVE;
- return JS_DHASH_NEXT;
-}
-
-
-JS_REQUIRES_STACK void
-js_PurgeScriptFragments(JSContext* cx, JSScript* script)
-{
- if (!TRACING_ENABLED(cx))
- return;
- debug_only_printf(LC_TMTracer,
- "Purging fragments for JSScript %p.\n", (void*)script);
-
- JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
- for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
- VMFragment** fragp = &tm->vmfragments[i];
- while (VMFragment* frag = *fragp) {
- if (JS_UPTRDIFF(frag->ip, script->code) < script->length) {
- /* This fragment is associated with the script. */
- debug_only_printf(LC_TMTracer,
- "Disconnecting VMFragment %p "
- "with ip %p, in range [%p,%p).\n",
- (void*)frag, frag->ip, script->code,
- script->code + script->length);
-
- JS_ASSERT(frag->root == frag);
- *fragp = frag->next;
- do {
- verbose_only( js_FragProfiling_FragFinalizer(frag, tm); )
- TrashTree(cx, frag);
- } while ((frag = frag->peer) != NULL);
- continue;
- }
- fragp = &frag->next;
- }
- }
-
- JS_DHashTableEnumerate(&tm->recordAttempts, PurgeScriptRecordingAttempts, script);
-}
-
-bool
-js_OverfullJITCache(JSTraceMonitor* tm)
-{
- /*
- * You might imagine the outOfMemory flag on the allocator is sufficient
- * to model the notion of "running out of memory", but there are actually
- * two separate issues involved:
- *
- * 1. The process truly running out of memory: malloc() or mmap()
- * failed.
- *
- * 2. The limit we put on the "intended size" of the tracemonkey code
- * cache, in pages, has been exceeded.
- *
- * Condition 1 doesn't happen very often, but we're obliged to try to
- * safely shut down and signal the rest of spidermonkey when it
- * does. Condition 2 happens quite regularly.
- *
- * Presently, the code in this file doesn't check the outOfMemory condition
- * often enough, and frequently misuses the unchecked results of
- * LIR buffer insertions on the assumption that it will notice the
- * outOfMemory flag "soon enough" when it returns to the monitorRecording
- * function. This turns out to be a false assumption if we use outOfMemory
- * to signal condition 2: we regularly provoke "passing our intended
- * size" and regularly fail to notice it in time to prevent writing
- * over the end of an artificially self-limited LIR buffer.
- *
- * To mitigate, though not completely solve, this problem, we're
- * modeling the two forms of memory exhaustion *separately* for the
- * time being: condition 1 is handled by the outOfMemory flag inside
- * nanojit, and condition 2 is being handled independently *here*. So
- * we construct our allocators to use all available memory they like,
- * and only report outOfMemory to us when there is literally no OS memory
- * left. Merely purging our cache when we hit our highwater mark is
- * handled by the (few) callers of this function.
- *
- */
- jsuint maxsz = tm->maxCodeCacheBytes;
- VMAllocator *dataAlloc = tm->dataAlloc;
- CodeAlloc *codeAlloc = tm->codeAlloc;
-
- return (codeAlloc->size() + dataAlloc->size() > maxsz);
-}
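-
-// E.g. with the default 16 M cap set in js_InitJIT, the (few) callers of this
-// function purge the cache once codeAlloc->size() + dataAlloc->size() first
-// exceeds 16 *1024 *1024 bytes.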
-
-JS_FORCES_STACK JS_FRIEND_API(void)
-js_DeepBail(JSContext *cx)
-{
- JS_ASSERT(JS_ON_TRACE(cx));
-
- /*
- * Exactly one context on the current thread is on trace. Find out which
- * one. (Most callers cannot guarantee that it's cx.)
- */
- JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
- JSContext *tracecx = tm->tracecx;
-
- /* It's a bug if a non-FAIL_STATUS builtin gets here. */
- JS_ASSERT(tracecx->bailExit);
-
- tm->tracecx = NULL;
- debug_only_print0(LC_TMTracer, "Deep bail.\n");
- LeaveTree(*tracecx->interpState, tracecx->bailExit);
- tracecx->bailExit = NULL;
-
- InterpState* state = tracecx->interpState;
- state->builtinStatus |= JSBUILTIN_BAILED;
- state->deepBailSp = state->sp;
-}
-
-JS_REQUIRES_STACK jsval&
-TraceRecorder::argval(unsigned n) const
-{
- JS_ASSERT(n < cx->fp->fun->nargs);
- return cx->fp->argv[n];
-}
-
-JS_REQUIRES_STACK jsval&
-TraceRecorder::varval(unsigned n) const
-{
- JS_ASSERT(n < cx->fp->script->nslots);
- return cx->fp->slots[n];
-}
-
-JS_REQUIRES_STACK jsval&
-TraceRecorder::stackval(int n) const
-{
- jsval* sp = cx->fp->regs->sp;
- return sp[n];
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::scopeChain() const
-{
- return lir->insLoad(LIR_ldp,
- lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)),
- offsetof(JSStackFrame, scopeChain));
-}
-
-/*
- * Return the frame of a call object if that frame is part of the current
- * trace. |depthp| is an optional outparam: if it is non-null, it will be
- * filled in with the depth of the call object's frame relevant to cx->fp.
- */
-JS_REQUIRES_STACK JSStackFrame*
-TraceRecorder::frameIfInRange(JSObject* obj, unsigned* depthp) const
-{
- JSStackFrame* ofp = (JSStackFrame*) obj->getPrivate();
- JSStackFrame* fp = cx->fp;
- for (unsigned depth = 0; depth <= callDepth; ++depth) {
- if (fp == ofp) {
- if (depthp)
- *depthp = depth;
- return ofp;
- }
- if (!(fp = fp->down))
- break;
- }
- return NULL;
-}
-
-JS_DEFINE_CALLINFO_4(extern, UINT32, GetClosureVar, CONTEXT, OBJECT, CVIPTR, DOUBLEPTR, 0, 0)
-JS_DEFINE_CALLINFO_4(extern, UINT32, GetClosureArg, CONTEXT, OBJECT, CVIPTR, DOUBLEPTR, 0, 0)
-
-/*
- * Search the scope chain for a property lookup operation at the current PC and
- * generate LIR to access the given property. Return JSRS_CONTINUE on success,
- * otherwise abort and return JSRS_STOP. There are 3 outparams:
- *
- * vp  the address of the current property value
- * ins LIR instruction representing the property value on trace
- * nr  describes how to look up the name; see the comment for NameResult in jstracer.h
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::scopeChainProp(JSObject* obj, jsval*& vp, LIns*& ins, NameResult& nr)
-{
- JS_ASSERT(obj != globalObj);
-
- JSTraceMonitor &localtm = *traceMonitor;
-
- JSAtom* atom = atoms[GET_INDEX(cx->fp->regs->pc)];
- JSObject* obj2;
- JSProperty* prop;
- bool ok = js_FindProperty(cx, ATOM_TO_JSID(atom), &obj, &obj2, &prop);
-
- /* js_FindProperty can reenter the interpreter and kill |this|. */
- if (!localtm.recorder)
- return JSRS_STOP;
-
- if (!ok)
- ABORT_TRACE_ERROR("error in js_FindProperty");
-
- if (!prop)
- ABORT_TRACE("failed to find name in non-global scope chain");
-
- if (obj == globalObj) {
- // Even if the property is on the global object, we must guard against
- // the creation of properties that shadow the property in the middle
- // of the scope chain if we are in a function.
- if (cx->fp->argv) {
- LIns* obj_ins;
- JSObject* parent = STOBJ_GET_PARENT(JSVAL_TO_OBJECT(cx->fp->argv[-2]));
- LIns* parent_ins = stobj_get_parent(get(&cx->fp->argv[-2]));
- CHECK_STATUS(traverseScopeChain(parent, parent_ins, obj, obj_ins));
- }
-
- JSScopeProperty* sprop = (JSScopeProperty*) prop;
-
- if (obj2 != obj) {
- obj2->dropProperty(cx, prop);
- ABORT_TRACE("prototype property");
- }
- if (!isValidSlot(OBJ_SCOPE(obj), sprop)) {
- obj2->dropProperty(cx, prop);
- return JSRS_STOP;
- }
- if (!lazilyImportGlobalSlot(sprop->slot)) {
- obj2->dropProperty(cx, prop);
- ABORT_TRACE("lazy import of global slot failed");
- }
- vp = &STOBJ_GET_SLOT(obj, sprop->slot);
- ins = get(vp);
- obj2->dropProperty(cx, prop);
- nr.tracked = true;
- return JSRS_CONTINUE;
- }
-
- if (obj == obj2 && OBJ_GET_CLASS(cx, obj) == &js_CallClass)
- return callProp(obj, obj2, prop, ATOM_TO_JSID(atom), vp, ins, nr);
-
- obj2->dropProperty(cx, prop);
- ABORT_TRACE("fp->scopeChain is not global or active call object");
-}
-
-/*
- * Generate LIR to access a property of a Call object.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::callProp(JSObject* obj, JSObject* obj2, JSProperty* prop, jsid id, jsval*& vp,
- LIns*& ins, NameResult& nr)
-{
- JSScopeProperty *sprop = (JSScopeProperty*) prop;
-
- uint32 setflags = (js_CodeSpec[*cx->fp->regs->pc].format & (JOF_SET | JOF_INCDEC | JOF_FOR));
- if (setflags && (sprop->attrs & JSPROP_READONLY))
- ABORT_TRACE("writing to a read-only property");
-
- uintN slot = sprop->shortid;
-
- vp = NULL;
- uintN upvar_slot = SPROP_INVALID_SLOT;
- JSStackFrame* cfp = (JSStackFrame*) obj->getPrivate();
- if (cfp) {
- if (sprop->getter == js_GetCallArg) {
- JS_ASSERT(slot < cfp->fun->nargs);
- vp = &cfp->argv[slot];
- upvar_slot = slot;
- nr.v = *vp;
- } else if (sprop->getter == js_GetCallVar) {
- JS_ASSERT(slot < cfp->script->nslots);
- vp = &cfp->slots[slot];
- upvar_slot = cx->fp->fun->nargs + slot;
- nr.v = *vp;
- } else {
- ABORT_TRACE("dynamic property of Call object");
- }
- obj2->dropProperty(cx, prop);
-
- if (frameIfInRange(obj)) {
- // At this point we are guaranteed to be looking at an active call object
- // whose properties are stored in the corresponding JSStackFrame.
- ins = get(vp);
- nr.tracked = true;
- return JSRS_CONTINUE;
- }
- } else {
-#ifdef DEBUG
- JSBool rv =
-#endif
- js_GetPropertyHelper(cx, obj, sprop->id, JS_FALSE, &nr.v);
- JS_ASSERT(rv);
- obj2->dropProperty(cx, prop);
- }
-
- LIns* obj_ins;
- JSObject* parent = STOBJ_GET_PARENT(JSVAL_TO_OBJECT(cx->fp->argv[-2]));
- LIns* parent_ins = stobj_get_parent(get(&cx->fp->argv[-2]));
- CHECK_STATUS(traverseScopeChain(parent, parent_ins, obj, obj_ins));
-
- ClosureVarInfo* cv = new (traceMonitor->dataAlloc) ClosureVarInfo();
- cv->id = id;
- cv->slot = slot;
- cv->callDepth = callDepth;
- cv->resolveFlags = cx->resolveFlags == JSRESOLVE_INFER
- ? js_InferFlags(cx, 0)
- : cx->resolveFlags;
-
- LIns* outp = lir->insAlloc(sizeof(double));
- LIns* args[] = {
- outp,
- INS_CONSTPTR(cv),
- obj_ins,
- cx_ins
- };
- const CallInfo* ci;
- if (sprop->getter == js_GetCallArg)
- ci = &GetClosureArg_ci;
- else
- ci = &GetClosureVar_ci;
-
- LIns* call_ins = lir->insCall(ci, args);
- JSTraceType type = getCoercedType(nr.v);
- guard(true,
- addName(lir->ins2(LIR_eq, call_ins, lir->insImm(type)),
- "guard(type-stable name access)"),
- BRANCH_EXIT);
- ins = stackLoad(outp, type);
- nr.tracked = false;
- nr.obj = obj;
- nr.obj_ins = obj_ins;
- nr.sprop = sprop;
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::arg(unsigned n)
-{
- return get(&argval(n));
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::arg(unsigned n, LIns* i)
-{
- set(&argval(n), i);
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::var(unsigned n)
-{
- return get(&varval(n));
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::var(unsigned n, LIns* i)
-{
- set(&varval(n), i);
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::stack(int n)
-{
- return get(&stackval(n));
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::stack(int n, LIns* i)
-{
- set(&stackval(n), i, n >= 0);
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1)
-{
- /*
- * To even consider this operation for demotion, both operands have to be
- * integers and the oracle must not give us a negative hint for the
- * instruction.
- */
- if (oracle.isInstructionUndemotable(cx->fp->regs->pc) || !isPromoteInt(s0) || !isPromoteInt(s1)) {
- out:
- if (v == LIR_fmod) {
- LIns* args[] = { s1, s0 };
- return lir->insCall(&js_dmod_ci, args);
- }
- LIns* result = lir->ins2(v, s0, s1);
- JS_ASSERT_IF(s0->isconstf() && s1->isconstf(), result->isconstf());
- return result;
- }
-
- jsdouble r;
- switch (v) {
- case LIR_fadd:
- r = v0 + v1;
- break;
- case LIR_fsub:
- r = v0 - v1;
- break;
-#if !defined NANOJIT_ARM
- case LIR_fmul:
- r = v0 * v1;
- if (r == 0.0)
- goto out;
- break;
-#endif
-#if defined NANOJIT_IA32 || defined NANOJIT_X64
- case LIR_fdiv:
- if (v1 == 0)
- goto out;
- r = v0 / v1;
- break;
- case LIR_fmod:
- if (v0 < 0 || v1 == 0 || (s1->isconstf() && v1 < 0))
- goto out;
- r = js_dmod(v0, v1);
- break;
-#endif
- default:
- goto out;
- }
-
- /*
- * The result must be an integer at record time, otherwise there is no
- * point in trying to demote it.
- */
- if (jsint(r) != r || JSDOUBLE_IS_NEGZERO(r))
- goto out;
-
- LIns* d0 = ::demote(lir, s0);
- LIns* d1 = ::demote(lir, s1);
-
- /*
- * Speculatively emit an integer operation, betting that at runtime we
- * will get integer results again.
- */
- VMSideExit* exit;
- LIns* result;
- switch (v) {
-#if defined NANOJIT_IA32 || defined NANOJIT_X64
- case LIR_fdiv:
- if (d0->isconst() && d1->isconst())
- return lir->ins1(LIR_i2f, lir->insImm(jsint(r)));
-
- exit = snapshot(OVERFLOW_EXIT);
-
- /*
- * If the divisor is greater than zero, it's always safe to execute
- * the division. If not, we have to make sure we are not running
- * into -2147483648 / -1, because it can raise an overflow exception.
- */
- if (!d1->isconst()) {
- LIns* gt = lir->insBranch(LIR_jt, lir->ins2i(LIR_gt, d1, 0), NULL);
- guard(false, lir->ins_eq0(d1), exit);
- guard(false, lir->ins2(LIR_and,
- lir->ins2i(LIR_eq, d0, 0x80000000),
- lir->ins2i(LIR_eq, d1, -1)), exit);
- gt->setTarget(lir->ins0(LIR_label));
- } else {
- if (d1->imm32() == -1)
- guard(false, lir->ins2i(LIR_eq, d0, 0x80000000), exit);
- }
- result = lir->ins2(v = LIR_div, d0, d1);
-
- /* As long as the modulus is zero, the result is an integer. */
- guard(true, lir->ins_eq0(lir->ins1(LIR_mod, result)), exit);
-
- /* Don't lose a -0. */
- guard(false, lir->ins_eq0(result), exit);
- break;
-
- case LIR_fmod: {
- if (d0->isconst() && d1->isconst())
- return lir->ins1(LIR_i2f, lir->insImm(jsint(r)));
-
- exit = snapshot(OVERFLOW_EXIT);
-
- /* Make sure we don't trigger division by zero at runtime. */
- if (!d1->isconst())
- guard(false, lir->ins_eq0(d1), exit);
- result = lir->ins1(v = LIR_mod, lir->ins2(LIR_div, d0, d1));
-
- /* If the result is not 0, it is always within the integer domain. */
- LIns* branch = lir->insBranch(LIR_jf, lir->ins_eq0(result), NULL);
-
- /*
- * If the result is zero, we must exit if the lhs is negative since
- * the result is -0 in this case, which is not in the integer domain.
- */
- guard(false, lir->ins2i(LIR_lt, d0, 0), exit);
- branch->setTarget(lir->ins0(LIR_label));
- break;
- }
-#endif
-
- default:
- v = (LOpcode)((int)v & ~LIR64);
- result = lir->ins2(v, d0, d1);
-
- /*
- * If the operands guarantee that the result will be an integer (e.g.
- * z = x * y with 0 <= x, y <= 0xffff guarantees z <= 0xfffe0001), we
- * don't have to guard against an overflow. Otherwise we emit a guard
- * that will inform the oracle and cause a non-demoted trace to be
- * attached that uses floating-point math for this operation.
- */
- if (!result->isconst() && (!IsOverflowSafe(v, d0) || !IsOverflowSafe(v, d1))) {
- exit = snapshot(OVERFLOW_EXIT);
- guard(false, lir->ins1(LIR_ov, result), exit);
- if (v == LIR_mul) // make sure we don't lose a -0
- guard(false, lir->ins_eq0(result), exit);
- }
- break;
- }
- JS_ASSERT_IF(d0->isconst() && d1->isconst(),
- result->isconst() && result->imm32() == jsint(r));
- return lir->ins1(LIR_i2f, result);
-}
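-
-// [Editorial recap, not part of the original source.] The record-time
-// demotion predicate above reduces to:
-//
-//     static bool DemotableResult(jsdouble r) {
-//         return jsint(r) == r &&          // integral and representable as int32
-//                !JSDOUBLE_IS_NEGZERO(r);  // -0.0 has no int32 representation
-//     }
-//
-// so 3.0 and -7.0 demote while 0.5 and -0.0 take the double path. The runtime
-// guards above additionally reject results with a nonzero remainder and the
-// one int32 division that overflows, -2147483648 / -1.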
-
-LIns*
-TraceRecorder::f2i(LIns* f)
-{
- return lir->insCall(&js_DoubleToInt32_ci, &f);
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::makeNumberInt32(LIns* f)
-{
- JS_ASSERT(f->isQuad());
- LIns* x;
- if (!isPromote(f)) {
- x = f2i(f);
- guard(true, lir->ins2(LIR_feq, f, lir->ins1(LIR_i2f, x)), MISMATCH_EXIT);
- } else {
- x = ::demote(lir, f);
- }
- return x;
-}
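-
-// Concretely: for f == 5.0 the guard compares 5.0 == i2f(f2i(5.0)) == 5.0 and
-// the trace proceeds with the integer 5; for f == 5.5 it compares 5.5 == 5.0
-// and side-exits, since 5.5 has no int32 representation.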
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::stringify(jsval& v)
-{
- LIns* v_ins = get(&v);
- if (JSVAL_IS_STRING(v))
- return v_ins;
-
- LIns* args[] = { v_ins, cx_ins };
- const CallInfo* ci;
- if (JSVAL_IS_NUMBER(v)) {
- ci = &js_NumberToString_ci;
- } else if (JSVAL_IS_SPECIAL(v)) {
- ci = &js_BooleanOrUndefinedToString_ci;
- } else {
- /*
- * Callers must deal with non-primitive (non-null object) values by
- * calling an imacro. We don't try to guess about which imacro, with
- * what valueOf hint, here.
- */
- JS_ASSERT(JSVAL_IS_NULL(v));
- return INS_ATOM(cx->runtime->atomState.nullAtom);
- }
-
- v_ins = lir->insCall(ci, args);
- guard(false, lir->ins_peq0(v_ins), OOM_EXIT);
- return v_ins;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::call_imacro(jsbytecode* imacro)
-{
- JSStackFrame* fp = cx->fp;
- JSFrameRegs* regs = fp->regs;
-
- /* We cannot nest imacros, only tail-call. */
- if (fp->imacpc) {
- /* Dereference is safe since imacros are JSOP_STOP-terminated. */
- if (regs->pc[js_CodeSpec[*regs->pc].length] != JSOP_STOP)
- return JSRS_STOP;
- regs->pc = imacro;
- return JSRS_IMACRO;
- }
-
- fp->imacpc = regs->pc;
- regs->pc = imacro;
- atoms = COMMON_ATOMS_START(&cx->runtime->atomState);
- return JSRS_IMACRO;
-}
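-
-// Tail-call example: if we are already inside an imacro and the op following
-// the current one is JSOP_STOP, the imacro is about to return anyway, so the
-// recorder may redirect regs->pc to the new imacro while reusing the saved
-// fp->imacpc return address; any other shape would nest and stops recording.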
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::ifop()
-{
- jsval& v = stackval(-1);
- LIns* v_ins = get(&v);
- bool cond;
- LIns* x;
-
- if (JSVAL_IS_NULL(v)) {
- cond = false;
- x = lir->insImm(0);
- } else if (!JSVAL_IS_PRIMITIVE(v)) {
- cond = true;
- x = lir->insImm(1);
- } else if (JSVAL_IS_SPECIAL(v)) {
- /* Test for boolean is true, negate later if we are testing for false. */
- cond = JSVAL_TO_SPECIAL(v) == JS_TRUE;
- x = lir->ins2i(LIR_eq, v_ins, 1);
- } else if (isNumber(v)) {
- jsdouble d = asNumber(v);
- cond = !JSDOUBLE_IS_NaN(d) && d;
- x = lir->ins2(LIR_and,
- lir->ins2(LIR_feq, v_ins, v_ins),
- lir->ins_eq0(lir->ins2(LIR_feq, v_ins, lir->insImmf(0))));
- } else if (JSVAL_IS_STRING(v)) {
- cond = JSVAL_TO_STRING(v)->length() != 0;
- x = lir->ins2(LIR_piand,
- lir->insLoad(LIR_ldp,
- v_ins,
- (int)offsetof(JSString, mLength)),
- INS_CONSTWORD(JSString::LENGTH_MASK));
- } else {
- JS_NOT_REACHED("ifop");
- return JSRS_STOP;
- }
-
- jsbytecode* pc = cx->fp->regs->pc;
- emitIf(pc, cond, x);
- return checkTraceEnd(pc);
-}
-
-#ifdef NANOJIT_IA32
-/*
- * Record LIR for a tableswitch or tableswitchx op. We record LIR only the
- * "first" time we hit the op. Later, when we start traces after exiting that
- * trace, we just patch.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::tableswitch()
-{
- jsval& v = stackval(-1);
-
- /* No need to guard if the condition can't match any of the cases. */
- if (!isNumber(v))
- return JSRS_CONTINUE;
-
- /* No need to guard if the condition is constant. */
- LIns* v_ins = f2i(get(&v));
- if (v_ins->isconst() || v_ins->isconstq())
- return JSRS_CONTINUE;
-
- jsbytecode* pc = cx->fp->regs->pc;
- /* Starting a new trace after exiting a trace via switch. */
- if (anchor &&
- (anchor->exitType == CASE_EXIT || anchor->exitType == DEFAULT_EXIT) &&
- fragment->ip == pc) {
- return JSRS_CONTINUE;
- }
-
- /* Decode jsop. */
- jsint low, high;
- if (*pc == JSOP_TABLESWITCH) {
- pc += JUMP_OFFSET_LEN;
- low = GET_JUMP_OFFSET(pc);
- pc += JUMP_OFFSET_LEN;
- high = GET_JUMP_OFFSET(pc);
- } else {
- pc += JUMPX_OFFSET_LEN;
- low = GET_JUMPX_OFFSET(pc);
- pc += JUMPX_OFFSET_LEN;
- high = GET_JUMPX_OFFSET(pc);
- }
-
- /*
- * Really large tables won't fit in a page. This is a conservative check.
- * If it matters in practice we need to go off-page.
- */
- if ((high + 1 - low) * sizeof(intptr_t*) + 128 > (unsigned) LARGEST_UNDERRUN_PROT)
- return switchop();
-
- /* Generate switch LIR. */
- SwitchInfo* si = new (*traceMonitor->dataAlloc) SwitchInfo();
- si->count = high + 1 - low;
- si->table = 0;
- si->index = (uint32) -1;
- LIns* diff = lir->ins2(LIR_sub, v_ins, lir->insImm(low));
- LIns* cmp = lir->ins2(LIR_ult, diff, lir->insImm(si->count));
- lir->insGuard(LIR_xf, cmp, createGuardRecord(snapshot(DEFAULT_EXIT)));
- lir->insStorei(diff, lir->insImmPtr(&si->index), 0);
- VMSideExit* exit = snapshot(CASE_EXIT);
- exit->switchInfo = si;
- LIns* guardIns = lir->insGuard(LIR_xtbl, diff, createGuardRecord(exit));
- fragment->lastIns = guardIns;
- compile(&JS_TRACE_MONITOR(cx));
- return JSRS_STOP;
-}
-#endif
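-
-// [Editorial aside, assuming LARGEST_UNDERRUN_PROT is on the order of one
-// 4 KB page; its actual value is platform-defined.] Under that assumption the
-// size check in tableswitch() admits roughly (4096 - 128) / sizeof(intptr_t*)
-// cases, about 990 on a 32-bit build, before falling back to switchop().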
-
-static JS_ALWAYS_INLINE int32_t
-UnboxBooleanOrUndefined(jsval v)
-{
- /* Although this says 'special', we really only expect 3 special values: */
- JS_ASSERT(v == JSVAL_TRUE || v == JSVAL_FALSE || v == JSVAL_VOID);
- return JSVAL_TO_SPECIAL(v);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::switchop()
-{
- jsval& v = stackval(-1);
- LIns* v_ins = get(&v);
-
- /* No need to guard if the condition is constant. */
- if (v_ins->isconst() || v_ins->isconstq())
- return JSRS_CONTINUE;
- if (isNumber(v)) {
- jsdouble d = asNumber(v);
- guard(true,
- addName(lir->ins2(LIR_feq, v_ins, lir->insImmf(d)),
- "guard(switch on numeric)"),
- BRANCH_EXIT);
- } else if (JSVAL_IS_STRING(v)) {
- LIns* args[] = { v_ins, INS_CONSTSTR(JSVAL_TO_STRING(v)) };
- guard(true,
- addName(lir->ins_eq0(lir->ins_eq0(lir->insCall(&js_EqualStrings_ci, args))),
- "guard(switch on string)"),
- BRANCH_EXIT);
- } else if (JSVAL_IS_SPECIAL(v)) {
- guard(true,
- addName(lir->ins2(LIR_eq, v_ins, lir->insImm(UnboxBooleanOrUndefined(v))),
- "guard(switch on boolean)"),
- BRANCH_EXIT);
- } else {
- ABORT_TRACE("switch on object or null");
- }
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::inc(jsval& v, jsint incr, bool pre)
-{
- LIns* v_ins = get(&v);
- CHECK_STATUS(inc(v, v_ins, incr, pre));
- set(&v, v_ins);
- return JSRS_CONTINUE;
-}
-
-/*
- * On exit, v_ins is the incremented unboxed value, and the appropriate value
- * (pre- or post-increment as described by pre) is stacked.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::inc(jsval v, LIns*& v_ins, jsint incr, bool pre)
-{
- LIns* v_after;
- CHECK_STATUS(incHelper(v, v_ins, v_after, incr));
-
- const JSCodeSpec& cs = js_CodeSpec[*cx->fp->regs->pc];
- JS_ASSERT(cs.ndefs == 1);
- stack(-cs.nuses, pre ? v_after : v_ins);
- v_ins = v_after;
- return JSRS_CONTINUE;
-}
-
-/*
- * Do an increment operation without storing anything to the stack.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::incHelper(jsval v, LIns* v_ins, LIns*& v_after, jsint incr)
-{
- if (!isNumber(v))
- ABORT_TRACE("can only inc numbers");
- v_after = alu(LIR_fadd, asNumber(v), incr, v_ins, lir->insImmf(incr));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::incProp(jsint incr, bool pre)
-{
- jsval& l = stackval(-1);
- if (JSVAL_IS_PRIMITIVE(l))
- ABORT_TRACE("incProp on primitive");
-
- JSObject* obj = JSVAL_TO_OBJECT(l);
- LIns* obj_ins = get(&l);
-
- uint32 slot;
- LIns* v_ins;
- CHECK_STATUS(prop(obj, obj_ins, &slot, &v_ins, NULL));
-
- if (slot == SPROP_INVALID_SLOT)
- ABORT_TRACE("incProp on invalid slot");
-
- jsval& v = STOBJ_GET_SLOT(obj, slot);
- CHECK_STATUS(inc(v, v_ins, incr, pre));
-
- LIns* dslots_ins = NULL;
- stobj_set_slot(obj_ins, slot, dslots_ins, box_jsval(v, v_ins));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::incElem(jsint incr, bool pre)
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
- jsval* vp;
- LIns* v_ins;
- LIns* addr_ins;
-
- if (JSVAL_IS_PRIMITIVE(l) || !JSVAL_IS_INT(r) ||
- !guardDenseArray(JSVAL_TO_OBJECT(l), get(&l))) {
- return JSRS_STOP;
- }
-
- CHECK_STATUS(denseArrayElement(l, r, vp, v_ins, addr_ins));
- if (!addr_ins) // if we read a hole, abort
- return JSRS_STOP;
- CHECK_STATUS(inc(*vp, v_ins, incr, pre));
- lir->insStorei(box_jsval(*vp, v_ins), addr_ins, 0);
- return JSRS_CONTINUE;
-}
-
-static bool
-EvalCmp(LOpcode op, double l, double r)
-{
- bool cond;
- switch (op) {
- case LIR_feq:
- cond = (l == r);
- break;
- case LIR_flt:
- cond = l < r;
- break;
- case LIR_fgt:
- cond = l > r;
- break;
- case LIR_fle:
- cond = l <= r;
- break;
- case LIR_fge:
- cond = l >= r;
- break;
- default:
- JS_NOT_REACHED("unexpected comparison op");
- return false;
- }
- return cond;
-}
-
-static bool
-EvalCmp(LOpcode op, JSString* l, JSString* r)
-{
- if (op == LIR_feq)
- return js_EqualStrings(l, r);
- return EvalCmp(op, js_CompareStrings(l, r), 0);
-}
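-
-// E.g. for l = "apple", r = "banana": js_CompareStrings returns a negative
-// value, so EvalCmp(LIR_flt, negative, 0) yields true, matching "apple" < "banana".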
-
-JS_REQUIRES_STACK void
-TraceRecorder::strictEquality(bool equal, bool cmpCase)
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
- LIns* l_ins = get(&l);
- LIns* r_ins = get(&r);
- LIns* x;
- bool cond;
-
- JSTraceType ltag = GetPromotedType(l);
- if (ltag != GetPromotedType(r)) {
- cond = !equal;
- x = lir->insImm(cond);
- } else if (ltag == TT_STRING) {
- LIns* args[] = { r_ins, l_ins };
- x = lir->ins2i(LIR_eq, lir->insCall(&js_EqualStrings_ci, args), equal);
- cond = js_EqualStrings(JSVAL_TO_STRING(l), JSVAL_TO_STRING(r));
- } else {
- LOpcode op;
- if (ltag == TT_DOUBLE)
- op = LIR_feq;
- else if (ltag == TT_NULL || ltag == TT_OBJECT || ltag == TT_FUNCTION)
- op = LIR_peq;
- else
- op = LIR_eq;
- x = lir->ins2(op, l_ins, r_ins);
- if (!equal)
- x = lir->ins_eq0(x);
- cond = (ltag == TT_DOUBLE)
- ? asNumber(l) == asNumber(r)
- : l == r;
- }
- cond = (cond == equal);
-
- if (cmpCase) {
- /* Only guard if the same path may not always be taken. */
- if (!x->isconst())
- guard(cond, x, BRANCH_EXIT);
- return;
- }
-
- set(&l, x);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::equality(bool negate, bool tryBranchAfterCond)
-{
- jsval& rval = stackval(-1);
- jsval& lval = stackval(-2);
- LIns* l_ins = get(&lval);
- LIns* r_ins = get(&rval);
-
- return equalityHelper(lval, rval, l_ins, r_ins, negate, tryBranchAfterCond, lval);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins,
- bool negate, bool tryBranchAfterCond,
- jsval& rval)
-{
- LOpcode op = LIR_eq;
- bool cond;
- LIns* args[] = { NULL, NULL };
-
- /*
- * The if chain below closely mirrors that found in ECMA-262 11.9.3, in general
- * deviating from that ordering of ifs only to account for SpiderMonkey's
- * conflation of booleans and undefined and for the possibility of
- * confusing objects and null. Note carefully the spec-mandated recursion
- * in the final else clause, which terminates because Number == T recurs
- * only if T is Object, but that must recur again to convert Object to
- * primitive, and ToPrimitive throws if the object cannot be converted to
- * a primitive value (which would terminate recursion).
- */
-
- if (GetPromotedType(l) == GetPromotedType(r)) {
- if (JSVAL_TAG(l) == JSVAL_OBJECT || JSVAL_IS_SPECIAL(l)) {
- if (JSVAL_TAG(l) == JSVAL_OBJECT && l) {
- JSClass *clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(l));
- if ((clasp->flags & JSCLASS_IS_EXTENDED) && ((JSExtendedClass*) clasp)->equality)
- ABORT_TRACE("Can't trace extended class equality operator");
- }
- if (JSVAL_TAG(l) == JSVAL_OBJECT)
- op = LIR_peq;
- cond = (l == r);
- } else if (JSVAL_IS_STRING(l)) {
- args[0] = r_ins, args[1] = l_ins;
- l_ins = lir->insCall(&js_EqualStrings_ci, args);
- r_ins = lir->insImm(1);
- cond = js_EqualStrings(JSVAL_TO_STRING(l), JSVAL_TO_STRING(r));
- } else {
- JS_ASSERT(isNumber(l) && isNumber(r));
- cond = (asNumber(l) == asNumber(r));
- op = LIR_feq;
- }
- } else if (JSVAL_IS_NULL(l) && JSVAL_IS_SPECIAL(r)) {
- l_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID));
- cond = (r == JSVAL_VOID);
- } else if (JSVAL_IS_SPECIAL(l) && JSVAL_IS_NULL(r)) {
- r_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID));
- cond = (l == JSVAL_VOID);
- } else if (isNumber(l) && JSVAL_IS_STRING(r)) {
- args[0] = r_ins, args[1] = cx_ins;
- r_ins = lir->insCall(&js_StringToNumber_ci, args);
- cond = (asNumber(l) == js_StringToNumber(cx, JSVAL_TO_STRING(r)));
- op = LIR_feq;
- } else if (JSVAL_IS_STRING(l) && isNumber(r)) {
- args[0] = l_ins, args[1] = cx_ins;
- l_ins = lir->insCall(&js_StringToNumber_ci, args);
- cond = (js_StringToNumber(cx, JSVAL_TO_STRING(l)) == asNumber(r));
- op = LIR_feq;
- } else {
- if (JSVAL_IS_SPECIAL(l)) {
- bool isVoid = JSVAL_IS_VOID(l);
- guard(isVoid,
- lir->ins2(LIR_eq, l_ins, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))),
- BRANCH_EXIT);
- if (!isVoid) {
- args[0] = l_ins, args[1] = cx_ins;
- l_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- l = (l == JSVAL_VOID)
- ? DOUBLE_TO_JSVAL(cx->runtime->jsNaN)
- : INT_TO_JSVAL(l == JSVAL_TRUE);
- return equalityHelper(l, r, l_ins, r_ins, negate,
- tryBranchAfterCond, rval);
- }
- } else if (JSVAL_IS_SPECIAL(r)) {
- bool isVoid = JSVAL_IS_VOID(r);
- guard(isVoid,
- lir->ins2(LIR_eq, r_ins, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))),
- BRANCH_EXIT);
- if (!isVoid) {
- args[0] = r_ins, args[1] = cx_ins;
- r_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- r = (r == JSVAL_VOID)
- ? DOUBLE_TO_JSVAL(cx->runtime->jsNaN)
- : INT_TO_JSVAL(r == JSVAL_TRUE);
- return equalityHelper(l, r, l_ins, r_ins, negate,
- tryBranchAfterCond, rval);
- }
- } else {
- if ((JSVAL_IS_STRING(l) || isNumber(l)) && !JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(equality_imacros.any_obj);
- }
- if (!JSVAL_IS_PRIMITIVE(l) && (JSVAL_IS_STRING(r) || isNumber(r))) {
- ABORT_IF_XML(l);
- return call_imacro(equality_imacros.obj_any);
- }
- }
-
- l_ins = lir->insImm(0);
- r_ins = lir->insImm(1);
- cond = false;
- }
-
- /* Emit the comparison; op is the integer LIR_eq unless the type analysis above selected LIR_feq or LIR_peq. */
- LIns* x = lir->ins2(op, l_ins, r_ins);
- if (negate) {
- x = lir->ins_eq0(x);
- cond = !cond;
- }
-
- jsbytecode* pc = cx->fp->regs->pc;
-
- /*
- * Don't guard if the same path is always taken. If it isn't, we have to
- * fuse comparisons and the following branch, because the interpreter does
- * that.
- */
- if (tryBranchAfterCond)
- fuseIf(pc + 1, cond, x);
-
- /*
- * There is no need to write out the result of this comparison if the trace
- * ends on this operation.
- */
- if (pc[1] == JSOP_IFNE || pc[1] == JSOP_IFEQ)
- CHECK_STATUS(checkTraceEnd(pc + 1));
-
- /*
- * We update the stack after the guard. This is safe since the guard bails
- * out at the comparison and the interpreter will therefore re-execute the
- * comparison. This way the value of the condition doesn't have to be
- * calculated and saved on the stack in most cases.
- */
- set(&rval, x);
-
- return JSRS_CONTINUE;
-}
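-
-// [Editorial example, not part of the original source.] For 1 == "1" the
-// helper takes the number-vs-string arm: it emits a call to js_StringToNumber
-// for the rhs and compares with LIR_feq, while the record-time cond is
-// asNumber(l) == js_StringToNumber(cx, JSVAL_TO_STRING(r)), i.e. 1.0 == 1.0.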
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond)
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
- LIns* x = NULL;
- bool cond;
- LIns* l_ins = get(&l);
- LIns* r_ins = get(&r);
- bool fp = false;
- jsdouble lnum, rnum;
-
- /*
- * Per ECMA-262 11.8.5: if either argument is an object with a function-valued
- * valueOf property, call an imacro; if both arguments are objects with
- * non-function-valued valueOf properties, abort.
- */
- if (!JSVAL_IS_PRIMITIVE(l)) {
- ABORT_IF_XML(l);
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(binary_imacros.obj_obj);
- }
- return call_imacro(binary_imacros.obj_any);
- }
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(binary_imacros.any_obj);
- }
-
- /* 11.8.5 steps 3, 16-21. */
- if (JSVAL_IS_STRING(l) && JSVAL_IS_STRING(r)) {
- LIns* args[] = { r_ins, l_ins };
- l_ins = lir->insCall(&js_CompareStrings_ci, args);
- r_ins = lir->insImm(0);
- cond = EvalCmp(op, JSVAL_TO_STRING(l), JSVAL_TO_STRING(r));
- goto do_comparison;
- }
-
- /* 11.8.5 steps 4-5. */
- if (!JSVAL_IS_NUMBER(l)) {
- LIns* args[] = { l_ins, cx_ins };
- switch (JSVAL_TAG(l)) {
- case JSVAL_SPECIAL:
- l_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- break;
- case JSVAL_STRING:
- l_ins = lir->insCall(&js_StringToNumber_ci, args);
- break;
- case JSVAL_OBJECT:
- if (JSVAL_IS_NULL(l)) {
- l_ins = lir->insImmf(0.0);
- break;
- }
- // FALL THROUGH
- case JSVAL_INT:
- case JSVAL_DOUBLE:
- default:
- JS_NOT_REACHED("JSVAL_IS_NUMBER if int/double, objects should "
- "have been handled at start of method");
- ABORT_TRACE("safety belt");
- }
- }
- if (!JSVAL_IS_NUMBER(r)) {
- LIns* args[] = { r_ins, cx_ins };
- switch (JSVAL_TAG(r)) {
- case JSVAL_SPECIAL:
- r_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- break;
- case JSVAL_STRING:
- r_ins = lir->insCall(&js_StringToNumber_ci, args);
- break;
- case JSVAL_OBJECT:
- if (JSVAL_IS_NULL(r)) {
- r_ins = lir->insImmf(0.0);
- break;
- }
- // FALL THROUGH
- case JSVAL_INT:
- case JSVAL_DOUBLE:
- default:
- JS_NOT_REACHED("JSVAL_IS_NUMBER if int/double, objects should "
- "have been handled at start of method");
- ABORT_TRACE("safety belt");
- }
- }
- {
- jsval tmp = JSVAL_NULL;
- JSAutoTempValueRooter tvr(cx, 1, &tmp);
-
- tmp = l;
- lnum = js_ValueToNumber(cx, &tmp);
- tmp = r;
- rnum = js_ValueToNumber(cx, &tmp);
- }
- cond = EvalCmp(op, lnum, rnum);
- fp = true;
-
- /* 11.8.5 steps 6-15. */
- do_comparison:
- /*
- * If we did not end up with double operands (the string path compares the
- * integer result of js_CompareStrings against 0), we must use an integer
- * compare.
- */
- if (!fp) {
- JS_ASSERT(op >= LIR_feq && op <= LIR_fge);
- op = LOpcode(op + (LIR_eq - LIR_feq));
- }
- x = lir->ins2(op, l_ins, r_ins);
-
- jsbytecode* pc = cx->fp->regs->pc;
-
- /*
- * Don't guard if the same path is always taken. If it isn't, we have to
- * fuse comparisons and the following branch, because the interpreter does
- * that.
- */
- if (tryBranchAfterCond)
- fuseIf(pc + 1, cond, x);
-
- /*
- * There is no need to write out the result of this comparison if the trace
- * ends on this operation.
- */
- if (pc[1] == JSOP_IFNE || pc[1] == JSOP_IFEQ)
- CHECK_STATUS(checkTraceEnd(pc + 1));
-
- /*
- * We update the stack after the guard. This is safe since the guard bails
- * out at the comparison and the interpreter will therefore re-execute the
- * comparison. This way the value of the condition doesn't have to be
- * calculated and saved on the stack in most cases.
- */
- set(&l, x);
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::unary(LOpcode op)
-{
- jsval& v = stackval(-1);
- bool intop = !(op & LIR64);
- if (isNumber(v)) {
- LIns* a = get(&v);
- if (intop)
- a = f2i(a);
- a = lir->ins1(op, a);
- if (intop)
- a = lir->ins1(LIR_i2f, a);
- set(&v, a);
- return JSRS_CONTINUE;
- }
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::binary(LOpcode op)
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
-
- if (!JSVAL_IS_PRIMITIVE(l)) {
- ABORT_IF_XML(l);
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(binary_imacros.obj_obj);
- }
- return call_imacro(binary_imacros.obj_any);
- }
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(binary_imacros.any_obj);
- }
-
- bool intop = !(op & LIR64);
- LIns* a = get(&l);
- LIns* b = get(&r);
-
- bool leftIsNumber = isNumber(l);
- jsdouble lnum = leftIsNumber ? asNumber(l) : 0;
-
- bool rightIsNumber = isNumber(r);
- jsdouble rnum = rightIsNumber ? asNumber(r) : 0;
-
- if ((op >= LIR_sub && op <= LIR_ush) || // sub, mul, (callh), or, xor, (not,) lsh, rsh, ush
- (op >= LIR_fsub && op <= LIR_fmod)) { // fsub, fmul, fdiv, fmod
- LIns* args[2];
- if (JSVAL_IS_STRING(l)) {
- args[0] = a;
- args[1] = cx_ins;
- a = lir->insCall(&js_StringToNumber_ci, args);
- lnum = js_StringToNumber(cx, JSVAL_TO_STRING(l));
- leftIsNumber = true;
- }
- if (JSVAL_IS_STRING(r)) {
- args[0] = b;
- args[1] = cx_ins;
- b = lir->insCall(&js_StringToNumber_ci, args);
- rnum = js_StringToNumber(cx, JSVAL_TO_STRING(r));
- rightIsNumber = true;
- }
- }
- if (JSVAL_IS_SPECIAL(l)) {
- LIns* args[] = { a, cx_ins };
- a = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- lnum = js_BooleanOrUndefinedToNumber(cx, JSVAL_TO_SPECIAL(l));
- leftIsNumber = true;
- }
- if (JSVAL_IS_SPECIAL(r)) {
- LIns* args[] = { b, cx_ins };
- b = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args);
- rnum = js_BooleanOrUndefinedToNumber(cx, JSVAL_TO_SPECIAL(r));
- rightIsNumber = true;
- }
- if (leftIsNumber && rightIsNumber) {
- if (intop) {
- LIns *args[] = { a };
- a = lir->insCall(op == LIR_ush ? &js_DoubleToUint32_ci : &js_DoubleToInt32_ci, args);
- b = f2i(b);
- }
- a = alu(op, lnum, rnum, a, b);
- if (intop)
- a = lir->ins1(op == LIR_ush ? LIR_u2f : LIR_i2f, a);
- set(&l, a);
- return JSRS_CONTINUE;
- }
- return JSRS_STOP;
-}
-
-void
-TraceRecorder::guardShape(LIns* obj_ins, JSObject* obj, uint32 shape, const char* guardName,
- LIns* map_ins, VMSideExit* exit)
-{
- LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape");
- guard(true,
- addName(lir->ins2i(LIR_eq, shape_ins, shape), guardName),
- exit);
-}
-
-JS_STATIC_ASSERT(offsetof(JSObjectOps, objectMap) == 0);
-
-inline LIns*
-TraceRecorder::map(LIns *obj_ins)
-{
- return addName(lir->insLoad(LIR_ldp, obj_ins, (int) offsetof(JSObject, map)), "map");
-}
-
-bool
-TraceRecorder::map_is_native(JSObjectMap* map, LIns* map_ins, LIns*& ops_ins, size_t op_offset)
-{
- JS_ASSERT(op_offset < sizeof(JSObjectOps));
- JS_ASSERT(op_offset % sizeof(void *) == 0);
-
-#define OP(ops) (*(void **) ((uint8 *) (ops) + op_offset))
- void* ptr = OP(map->ops);
- if (ptr != OP(&js_ObjectOps))
- return false;
-#undef OP
-
- ops_ins = addName(lir->insLoad(LIR_ldcp, map_ins, int(offsetof(JSObjectMap, ops))), "ops");
- LIns* n = lir->insLoad(LIR_ldcp, ops_ins, op_offset);
- guard(true,
- addName(lir->ins2(LIR_peq, n, INS_CONSTPTR(ptr)), "guard(native-map)"),
- BRANCH_EXIT);
-
- return true;
-}
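-
-// E.g. with op_offset == offsetof(JSObjectOps, getProperty), OP(map->ops)
-// reads the map's getProperty hook; recording proceeds only if it matches the
-// js_ObjectOps one, and the guard re-checks the loaded hook on trace.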
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::guardNativePropertyOp(JSObject* aobj, LIns* map_ins)
-{
- /*
- * Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops
- * which is required to use native objects (those whose maps are scopes),
- * or even more narrow conditions required because the cache miss case
- * will call a particular object-op (js_GetProperty, js_SetProperty).
- *
- * We parameterize using offsetof and guard on match against the hook at
- * the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP
- * guards the js_SetProperty case.
- */
- uint32 format = js_CodeSpec[*cx->fp->regs->pc].format;
- uint32 mode = JOF_MODE(format);
-
- // No need to guard native-ness of global object.
- JS_ASSERT(OBJ_IS_NATIVE(globalObj));
- if (aobj != globalObj) {
- size_t op_offset = offsetof(JSObjectOps, objectMap);
- if (mode == JOF_PROP || mode == JOF_VARPROP) {
- op_offset = (format & JOF_SET)
- ? offsetof(JSObjectOps, setProperty)
- : offsetof(JSObjectOps, getProperty);
- } else {
- JS_ASSERT(mode == JOF_NAME);
- }
-
- LIns* ops_ins;
- if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset))
- ABORT_TRACE("non-native map");
- }
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2, jsuword& pcval)
-{
- jsbytecode* pc = cx->fp->regs->pc;
- JS_ASSERT(*pc != JSOP_INITPROP && *pc != JSOP_SETNAME && *pc != JSOP_SETPROP);
-
- // Mimic the interpreter's special case for dense arrays by skipping up one
- // hop along the proto chain when accessing a named (not indexed) property,
- // typically to find Array.prototype methods.
- JSObject* aobj = obj;
- if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
- guardDenseArray(obj, obj_ins, BRANCH_EXIT);
- aobj = OBJ_GET_PROTO(cx, obj);
- obj_ins = stobj_get_proto(obj_ins);
- }
-
- if (!OBJ_IS_NATIVE(obj))
- ABORT_TRACE("non-native object");
-
- LIns* map_ins = map(obj_ins);
-
- CHECK_STATUS(guardNativePropertyOp(aobj, map_ins));
-
- JSAtom* atom;
- JSPropCacheEntry* entry;
- PROPERTY_CACHE_TEST(cx, pc, aobj, obj2, entry, atom);
- if (!atom) {
- // Null atom means that obj2 is locked and must now be unlocked.
- JS_UNLOCK_OBJ(cx, obj2);
- } else {
- // Miss: pre-fill the cache for the interpreter, as well as for our needs.
- jsid id = ATOM_TO_JSID(atom);
- JSProperty* prop;
- if (JOF_OPMODE(*pc) == JOF_NAME) {
- JS_ASSERT(aobj == obj);
-
- JSTraceMonitor &localtm = *traceMonitor;
- entry = js_FindPropertyHelper(cx, id, true, &obj, &obj2, &prop);
-
- /* js_FindPropertyHelper can reenter the interpreter and kill |this|. */
- if (!localtm.recorder)
- return JSRS_STOP;
-
- if (!entry)
- ABORT_TRACE_ERROR("error in js_FindPropertyHelper");
- if (entry == JS_NO_PROP_CACHE_FILL)
- ABORT_TRACE("cannot cache name");
- } else {
- JSTraceMonitor &localtm = *traceMonitor;
- JSContext *localcx = cx;
- int protoIndex = js_LookupPropertyWithFlags(cx, aobj, id,
- cx->resolveFlags,
- &obj2, &prop);
-
- /* js_LookupPropertyWithFlags can reenter the interpreter and kill |this|. */
- if (!localtm.recorder) {
- if (prop)
- obj2->dropProperty(localcx, prop);
- return JSRS_STOP;
- }
-
- if (protoIndex < 0)
- ABORT_TRACE_ERROR("error in js_LookupPropertyWithFlags");
-
- if (prop) {
- if (!OBJ_IS_NATIVE(obj2)) {
- obj2->dropProperty(cx, prop);
- ABORT_TRACE("property found on non-native object");
- }
- entry = js_FillPropertyCache(cx, aobj, 0, protoIndex, obj2,
- (JSScopeProperty*) prop, false);
- JS_ASSERT(entry);
- if (entry == JS_NO_PROP_CACHE_FILL)
- entry = NULL;
- }
-
- }
-
- if (!prop) {
- // Propagate obj from js_FindPropertyHelper to record_JSOP_BINDNAME
- // via our obj2 out-parameter. If we are recording JSOP_SETNAME and
- // the global it's assigning does not yet exist, create it.
- obj2 = obj;
-
- // Use PCVAL_NULL to return "no such property" to our caller.
- pcval = PCVAL_NULL;
- return JSRS_CONTINUE;
- }
-
- obj2->dropProperty(cx, prop);
- if (!entry)
- ABORT_TRACE("failed to fill property cache");
- }
-
-#ifdef JS_THREADSAFE
- // There's a potential race in any JS_THREADSAFE embedding that's nuts
- // enough to share mutable objects on the scope or proto chain, but we
- // don't care about such insane embeddings. Anyway, the (scope, proto)
- // entry->vcap coordinates must reach obj2 from aobj at this point.
- JS_ASSERT(cx->requestDepth);
-#endif
-
- return guardPropertyCacheHit(obj_ins, map_ins, aobj, obj2, entry, pcval);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::guardPropertyCacheHit(LIns* obj_ins,
- LIns* map_ins,
- JSObject* aobj,
- JSObject* obj2,
- JSPropCacheEntry* entry,
- jsuword& pcval)
-{
- VMSideExit* exit = snapshot(BRANCH_EXIT);
-
- uint32 vshape = PCVCAP_SHAPE(entry->vcap);
-
- // Check for first-level cache hit and guard on kshape if possible.
- // Otherwise guard on key object exact match.
- if (PCVCAP_TAG(entry->vcap) <= 1) {
- if (aobj != globalObj)
- guardShape(obj_ins, aobj, entry->kshape, "guard_kshape", map_ins, exit);
-
- if (entry->adding()) {
- if (aobj == globalObj)
- ABORT_TRACE("adding a property to the global object");
-
- LIns *vshape_ins = addName(
- lir->insLoad(LIR_ld,
- addName(lir->insLoad(LIR_ldcp, cx_ins, offsetof(JSContext, runtime)),
- "runtime"),
- offsetof(JSRuntime, protoHazardShape)),
- "protoHazardShape");
- guard(true,
- addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard_protoHazardShape"),
- MISMATCH_EXIT);
- }
- } else {
-#ifdef DEBUG
- JSOp op = js_GetOpcode(cx, cx->fp->script, cx->fp->regs->pc);
- JSAtom *pcatom;
- if (op == JSOP_LENGTH) {
- pcatom = cx->runtime->atomState.lengthAtom;
- } else {
- ptrdiff_t pcoff = (JOF_TYPE(js_CodeSpec[op].format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0;
- GET_ATOM_FROM_BYTECODE(cx->fp->script, cx->fp->regs->pc, pcoff, pcatom);
- }
- JS_ASSERT(entry->kpc == (jsbytecode *) pcatom);
- JS_ASSERT(entry->kshape == jsuword(aobj));
-#endif
- if (aobj != globalObj && !obj_ins->isconstp()) {
- guard(true,
- addName(lir->ins2(LIR_peq, obj_ins, INS_CONSTOBJ(aobj)), "guard_kobj"),
- exit);
- }
- }
-
- // For any hit that goes up the scope and/or proto chains, we will need to
- // guard on the shape of the object containing the property.
- if (PCVCAP_TAG(entry->vcap) >= 1) {
- JS_ASSERT(OBJ_SHAPE(obj2) == vshape);
-
- LIns* obj2_ins;
- if (PCVCAP_TAG(entry->vcap) == 1) {
- // Duplicate the special case in PROPERTY_CACHE_TEST.
- obj2_ins = addName(stobj_get_proto(obj_ins), "proto");
- guard(false, lir->ins_peq0(obj2_ins), exit);
- } else {
- obj2_ins = INS_CONSTOBJ(obj2);
- }
- guardShape(obj2_ins, obj2, vshape, "guard_vshape", map(obj2_ins), exit);
- }
-
- pcval = entry->vword;
- return JSRS_CONTINUE;
-}
-
-void
-TraceRecorder::stobj_set_fslot(LIns *obj_ins, unsigned slot, LIns* v_ins)
-{
- lir->insStorei(v_ins, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval));
-}
-
-void
-TraceRecorder::stobj_set_dslot(LIns *obj_ins, unsigned slot, LIns*& dslots_ins, LIns* v_ins)
-{
- if (!dslots_ins)
- dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots));
- lir->insStorei(v_ins, dslots_ins, slot * sizeof(jsval));
-}
-
-void
-TraceRecorder::stobj_set_slot(LIns* obj_ins, unsigned slot, LIns*& dslots_ins, LIns* v_ins)
-{
- if (slot < JS_INITIAL_NSLOTS) {
- stobj_set_fslot(obj_ins, slot, v_ins);
- } else {
- stobj_set_dslot(obj_ins, slot - JS_INITIAL_NSLOTS, dslots_ins, v_ins);
- }
-}
-
-LIns*
-TraceRecorder::stobj_get_fslot(LIns* obj_ins, unsigned slot)
-{
- JS_ASSERT(slot < JS_INITIAL_NSLOTS);
- return lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval));
-}
-
-LIns*
-TraceRecorder::stobj_get_dslot(LIns* obj_ins, unsigned index, LIns*& dslots_ins)
-{
- if (!dslots_ins)
- dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots));
- return lir->insLoad(LIR_ldp, dslots_ins, index * sizeof(jsval));
-}
-
-LIns*
-TraceRecorder::stobj_get_slot(LIns* obj_ins, unsigned slot, LIns*& dslots_ins)
-{
- if (slot < JS_INITIAL_NSLOTS)
- return stobj_get_fslot(obj_ins, slot);
- return stobj_get_dslot(obj_ins, slot - JS_INITIAL_NSLOTS, dslots_ins);
-}
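-
-// Slot addressing recap: slots below JS_INITIAL_NSLOTS live inline in
-// obj->fslots[], the rest in the heap-allocated obj->dslots[]; e.g. if
-// JS_INITIAL_NSLOTS were 5, slot 7 would read dslots[2].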
-
-JSRecordingStatus
-TraceRecorder::native_get(LIns* obj_ins, LIns* pobj_ins, JSScopeProperty* sprop,
- LIns*& dslots_ins, LIns*& v_ins)
-{
- if (!SPROP_HAS_STUB_GETTER(sprop))
- return JSRS_STOP;
-
- if (sprop->slot != SPROP_INVALID_SLOT)
- v_ins = stobj_get_slot(pobj_ins, sprop->slot, dslots_ins);
- else
- v_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::box_jsval(jsval v, LIns* v_ins)
-{
- if (isNumber(v)) {
- LIns* args[] = { v_ins, cx_ins };
- v_ins = lir->insCall(&js_BoxDouble_ci, args);
- guard(false, lir->ins2(LIR_peq, v_ins, INS_CONSTWORD(JSVAL_ERROR_COOKIE)),
- OOM_EXIT);
- return v_ins;
- }
- switch (JSVAL_TAG(v)) {
- case JSVAL_SPECIAL:
- return lir->ins2(LIR_pior, lir->ins2i(LIR_pilsh, lir->ins_u2p(v_ins), JSVAL_TAGBITS),
- INS_CONSTWORD(JSVAL_SPECIAL));
- case JSVAL_OBJECT:
- return v_ins;
- default:
- JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING);
- return lir->ins2(LIR_pior, v_ins, INS_CONSTWORD(JSVAL_STRING));
- }
-}
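-
-// Boxing recap for the cases above: the special (boolean/undefined) payload
-// is shifted left by JSVAL_TAGBITS and tagged, so e.g. true boxes to
-// (1 << JSVAL_TAGBITS) | JSVAL_SPECIAL; string pointers are OR-tagged in
-// place, which relies on their low tag bits being zero.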
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::unbox_jsval(jsval v, LIns* v_ins, VMSideExit* exit)
-{
- if (isNumber(v)) {
- // JSVAL_IS_NUMBER(v)
- guard(false,
- lir->ins_eq0(lir->ins2(LIR_or,
- p2i(lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_INT))),
- lir->ins2(LIR_peq,
- lir->ins2(LIR_piand, v_ins,
- INS_CONSTWORD(JSVAL_TAGMASK)),
- INS_CONSTWORD(JSVAL_DOUBLE)))),
- exit);
- LIns* args[] = { v_ins };
- return lir->insCall(&js_UnboxDouble_ci, args);
- }
- switch (JSVAL_TAG(v)) {
- case JSVAL_SPECIAL:
- guard(true,
- lir->ins2(LIR_peq,
- lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
- INS_CONSTWORD(JSVAL_SPECIAL)),
- exit);
- return p2i(lir->ins2i(LIR_pursh, v_ins, JSVAL_TAGBITS));
-
- case JSVAL_OBJECT:
- if (JSVAL_IS_NULL(v)) {
- // JSVAL_NULL maps to type TT_NULL, so insist that v_ins == 0 here.
- guard(true, lir->ins_peq0(v_ins), exit);
- } else {
- guard(false, lir->ins_peq0(v_ins), exit);
- guard(true,
- lir->ins2(LIR_peq,
- lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
- INS_CONSTWORD(JSVAL_OBJECT)),
- exit);
-
- /*
- * LIR_ldcp is ok to use here even though Array classword can
- * change, because no object's classword can ever change from
- * &js_ArrayClass to &js_FunctionClass.
- */
- guard(HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)),
- lir->ins2(LIR_peq,
- lir->ins2(LIR_piand,
- lir->insLoad(LIR_ldcp, v_ins, offsetof(JSObject, classword)),
- INS_CONSTWORD(~JSSLOT_CLASS_MASK_BITS)),
- INS_CONSTPTR(&js_FunctionClass)),
- exit);
- }
- return v_ins;
-
- default:
- JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING);
- guard(true,
- lir->ins2(LIR_peq,
- lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
- INS_CONSTWORD(JSVAL_STRING)),
- exit);
- return lir->ins2(LIR_piand, v_ins, addName(lir->insImmWord(~JSVAL_TAGMASK),
- "~JSVAL_TAGMASK"));
- }
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getThis(LIns*& this_ins)
-{
- /*
- * js_ComputeThisForFrame updates cx->fp->argv[-1], so sample it into 'original' first.
- */
- jsval original = JSVAL_NULL;
- if (cx->fp->argv) {
- original = cx->fp->argv[-1];
- if (!JSVAL_IS_PRIMITIVE(original) &&
- guardClass(JSVAL_TO_OBJECT(original), get(&cx->fp->argv[-1]), &js_WithClass, snapshot(MISMATCH_EXIT))) {
- ABORT_TRACE("can't trace getThis on With object");
- }
- }
-
- JSObject* thisObj = js_ComputeThisForFrame(cx, cx->fp);
- if (!thisObj)
- ABORT_TRACE_ERROR("js_ComputeThisForName failed");
-
- /* In global code, bake in the global object as 'this' object. */
- if (!cx->fp->callee()) {
- JS_ASSERT(callDepth == 0);
- this_ins = INS_CONSTOBJ(thisObj);
-
- /*
- * We don't have argv[-1] in global code, so we don't update the
- * tracker here.
- */
- return JSRS_CONTINUE;
- }
-
- jsval& thisv = cx->fp->argv[-1];
- JS_ASSERT(JSVAL_IS_OBJECT(thisv));
-
- /*
- * Traces type-specialize between null and objects, so if we currently see
- * a null value in argv[-1], this trace will only match if we see null at
- * runtime as well. Bake in the global object as 'this' object, updating
- * the tracker as well. We can only detect this condition prior to calling
- * js_ComputeThisForFrame, since it updates the interpreter's copy of
- * argv[-1].
- */
- JSClass* clasp = NULL;
- if (JSVAL_IS_NULL(original) ||
- (((clasp = STOBJ_GET_CLASS(JSVAL_TO_OBJECT(original))) == &js_CallClass) ||
- (clasp == &js_BlockClass))) {
- if (clasp)
- guardClass(JSVAL_TO_OBJECT(original), get(&thisv), clasp, snapshot(BRANCH_EXIT));
- JS_ASSERT(!JSVAL_IS_PRIMITIVE(thisv));
- if (thisObj != globalObj)
- ABORT_TRACE("global object was wrapped while recording");
- this_ins = INS_CONSTOBJ(thisObj);
- set(&thisv, this_ins);
- return JSRS_CONTINUE;
- }
-
- this_ins = get(&thisv);
-
- JSObject* wrappedGlobal = globalObj->thisObject(cx);
- if (!wrappedGlobal)
- ABORT_TRACE_ERROR("globalObj->thisObject hook threw in getThis");
-
- /*
- * The only unwrapped object we can get here that still needs wrapping is
- * the global object obtained through the scope chain.
- */
- this_ins = lir->ins_choose(lir->ins_peq0(stobj_get_parent(this_ins)),
- INS_CONSTOBJ(wrappedGlobal),
- this_ins);
- return JSRS_CONTINUE;
-}
-
-
-LIns*
-TraceRecorder::getStringLength(LIns* str_ins)
-{
- LIns* len_ins = lir->insLoad(LIR_ldp, str_ins, (int)offsetof(JSString, mLength));
-
- LIns* masked_len_ins = lir->ins2(LIR_piand,
- len_ins,
- INS_CONSTWORD(JSString::LENGTH_MASK));
-
- LIns* real_len =
- lir->ins_choose(lir->ins_peq0(lir->ins2(LIR_piand,
- len_ins,
- INS_CONSTWORD(JSString::DEPENDENT))),
- masked_len_ins,
- lir->ins_choose(lir->ins_peq0(lir->ins2(LIR_piand,
- len_ins,
- INS_CONSTWORD(JSString::PREFIX))),
- lir->ins2(LIR_piand,
- len_ins,
- INS_CONSTWORD(JSString::DEPENDENT_LENGTH_MASK)),
- masked_len_ins));
- return p2i(real_len);
-}
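-
-// [Editorial sketch, not part of the original source.] The branch-free LIR
-// above computes the same value as this straight-line logic on the raw
-// mLength word:
-//
-//     if (!(len & JSString::DEPENDENT))
-//         return len & JSString::LENGTH_MASK;           // ordinary string
-//     if (!(len & JSString::PREFIX))
-//         return len & JSString::DEPENDENT_LENGTH_MASK; // dependent string
-//     return len & JSString::LENGTH_MASK;               // dependent prefix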
-
-JS_REQUIRES_STACK bool
-TraceRecorder::guardClass(JSObject* obj, LIns* obj_ins, JSClass* clasp, VMSideExit* exit)
-{
- bool cond = STOBJ_GET_CLASS(obj) == clasp;
-
- LIns* class_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, classword));
- class_ins = lir->ins2(LIR_piand, class_ins, INS_CONSTWORD(~JSSLOT_CLASS_MASK_BITS));
-
- char namebuf[32];
- JS_snprintf(namebuf, sizeof namebuf, "guard(class is %s)", clasp->name);
- guard(cond, addName(lir->ins2(LIR_peq, class_ins, INS_CONSTPTR(clasp)), namebuf), exit);
- return cond;
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::guardDenseArray(JSObject* obj, LIns* obj_ins, ExitType exitType)
-{
- return guardClass(obj, obj_ins, &js_ArrayClass, snapshot(exitType));
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::guardDenseArray(JSObject* obj, LIns* obj_ins, VMSideExit* exit)
-{
- return guardClass(obj, obj_ins, &js_ArrayClass, exit);
-}
-
-JS_REQUIRES_STACK bool
-TraceRecorder::guardHasPrototype(JSObject* obj, LIns* obj_ins,
- JSObject** pobj, LIns** pobj_ins,
- VMSideExit* exit)
-{
- *pobj = obj->getProto();
- *pobj_ins = stobj_get_proto(obj_ins);
-
- bool cond = *pobj == NULL;
- guard(cond, addName(lir->ins_peq0(*pobj_ins), "guard(proto-not-null)"), exit);
- return !cond;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::guardPrototypeHasNoIndexedProperties(JSObject* obj, LIns* obj_ins, ExitType exitType)
-{
- /*
- * Guard that no object along the prototype chain has any indexed
- * properties which might become visible through holes in the array.
- */
- VMSideExit* exit = snapshot(exitType);
-
- if (js_PrototypeHasIndexedProperties(cx, obj))
- return JSRS_STOP;
-
- while (guardHasPrototype(obj, obj_ins, &obj, &obj_ins, exit))
- guardShape(obj_ins, obj, OBJ_SHAPE(obj), "guard(shape)", map(obj_ins), exit);
- return JSRS_CONTINUE;
-}
-
-JSRecordingStatus
-TraceRecorder::guardNotGlobalObject(JSObject* obj, LIns* obj_ins)
-{
- if (obj == globalObj)
- ABORT_TRACE("reference aliases global object");
- guard(false, lir->ins2(LIR_peq, obj_ins, INS_CONSTOBJ(globalObj)), MISMATCH_EXIT);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::clearFrameSlotsFromCache()
-{
- /*
- * Clear out all slots of this frame in the nativeFrameTracker. Different
- * locations on the VM stack might map to different locations on the native
- * stack depending on the number of arguments (argc) of the next call, so
- * we have to make sure we map those into the cache with the right
- * offsets.
- */
- JSStackFrame* fp = cx->fp;
- jsval* vp;
- jsval* vpstop;
-
- /*
- * Duplicate native stack layout computation: see VisitFrameSlots header comment.
- * This doesn't do layout arithmetic, but it must clear out all the slots defined as
- * imported by VisitFrameSlots.
- */
- if (fp->argv) {
- vp = &fp->argv[-2];
- vpstop = &fp->argv[argSlots(fp)];
- while (vp < vpstop)
- nativeFrameTracker.set(vp++, (LIns*)0);
- nativeFrameTracker.set(&fp->argsobj, (LIns*)0);
- }
- vp = &fp->slots[0];
- vpstop = &fp->slots[fp->script->nslots];
- while (vp < vpstop)
- nativeFrameTracker.set(vp++, (LIns*)0);
-}
-
-/*
- * If we have created an |arguments| object for the frame, we must copy the
- * argument values into the object as properties in case it is used after
- * this frame returns.
- */
-JS_REQUIRES_STACK void
-TraceRecorder::putArguments()
-{
- if (cx->fp->argsobj && cx->fp->argc) {
- LIns* argsobj_ins = get(&cx->fp->argsobj);
- LIns* args_ins = lir->insAlloc(sizeof(jsval) * cx->fp->argc);
- for (uintN i = 0; i < cx->fp->argc; ++i) {
- LIns* arg_ins = box_jsval(cx->fp->argv[i], get(&cx->fp->argv[i]));
- lir->insStorei(arg_ins, args_ins, i * sizeof(jsval));
- }
- LIns* args[] = { args_ins, argsobj_ins, cx_ins };
- lir->insCall(&js_PutArguments_ci, args);
- }
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_EnterFrame()
-{
- JSStackFrame* fp = cx->fp;
-
- if (++callDepth >= MAX_CALLDEPTH)
- ABORT_TRACE("exceeded maximum call depth");
-
- // FIXME: Allow and attempt to inline a single level of recursion until we compile
- // recursive calls as independent trees (459301).
- if (fp->script == fp->down->script && fp->down->down && fp->down->down->script == fp->script)
- ABORT_TRACE("recursive call");
-
- debug_only_printf(LC_TMTracer, "EnterFrame %s, callDepth=%d\n",
- js_AtomToPrintableString(cx, cx->fp->fun->atom),
- callDepth);
- debug_only_stmt(
- if (js_LogController.lcbits & LC_TMRecorder) {
- js_Disassemble(cx, cx->fp->script, JS_TRUE, stdout);
- debug_only_print0(LC_TMTracer, "----\n");
- }
- )
- LIns* void_ins = INS_VOID();
-
- // Duplicate native stack layout computation: see VisitFrameSlots header comment.
- // This doesn't do layout arithmetic, but it must initialize in the tracker all the
- // slots defined as imported by VisitFrameSlots.
- jsval* vp = &fp->argv[fp->argc];
- jsval* vpstop = vp + ptrdiff_t(fp->fun->nargs) - ptrdiff_t(fp->argc);
- while (vp < vpstop) {
- if (vp >= fp->down->regs->sp)
- nativeFrameTracker.set(vp, (LIns*)0);
- set(vp++, void_ins, true);
- }
-
- vp = &fp->slots[0];
- vpstop = vp + fp->script->nfixed;
- while (vp < vpstop)
- set(vp++, void_ins, true);
- set(&fp->argsobj, INS_NULL(), true);
- return JSRS_CONTINUE;
-}
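-
-/*
- * Worked example (illustrative, hypothetical script): for
- *
- *   function f(a, b, c) { return a; }
- *   f(1);
- *
- * argc == 1 but fun->nargs == 3, so the first loop above seeds argv[1] and
- * argv[2] with void_ins; the second seeds the script's fixed slots. This
- * keeps the tracker consistent with the interpreter's frame layout.
- */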
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_LeaveFrame()
-{
- debug_only_stmt(
- if (cx->fp->fun)
- debug_only_printf(LC_TMTracer,
- "LeaveFrame (back to %s), callDepth=%d\n",
- js_AtomToPrintableString(cx, cx->fp->fun->atom),
- callDepth);
- );
- if (callDepth-- <= 0)
- ABORT_TRACE("returned out of a loop we started tracing");
-
- // LeaveFrame gets called after the interpreter popped the frame and
- // stored rval, so cx->fp not cx->fp->down, and -1 not 0.
- atoms = FrameAtomBase(cx, cx->fp);
- set(&stackval(-1), rval_ins, true);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_PUSH()
-{
- stack(0, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_POPV()
-{
- jsval& rval = stackval(-1);
- LIns *rval_ins = box_jsval(rval, get(&rval));
-
- // Store it in cx->fp->rval. NB: Tricky dependencies. cx->fp is the right
- // frame because POPV appears only in global and eval code and we don't
- // trace JSOP_EVAL or leaving the frame where tracing started.
- LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp));
- lir->insStorei(rval_ins, fp_ins, offsetof(JSStackFrame, rval));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENTERWITH()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LEAVEWITH()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RETURN()
-{
- /* A return from callDepth 0 terminates the current loop. */
- if (callDepth == 0) {
- AUDIT(returnLoopExits);
- endLoop();
- return JSRS_STOP;
- }
-
- putArguments();
-
- /* If we inlined this function call, make the return value available to the caller code. */
- jsval& rval = stackval(-1);
- JSStackFrame *fp = cx->fp;
- if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && JSVAL_IS_PRIMITIVE(rval)) {
- JS_ASSERT(OBJECT_TO_JSVAL(fp->thisp) == fp->argv[-1]);
- rval_ins = get(&fp->argv[-1]);
- } else {
- rval_ins = get(&rval);
- }
- debug_only_printf(LC_TMTracer,
- "returning from %s\n",
- js_AtomToPrintableString(cx, cx->fp->fun->atom));
- clearFrameSlotsFromCache();
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GOTO()
-{
- /*
- * If we hit a break or a continue to an outer loop, end the loop and
- * generate an always-taken loop exit guard. For other downward gotos
- * (like if/else) continue recording.
- */
- jssrcnote* sn = js_GetSrcNote(cx->fp->script, cx->fp->regs->pc);
-
- if (sn && (SN_TYPE(sn) == SRC_BREAK || SN_TYPE(sn) == SRC_CONT2LABEL)) {
- AUDIT(breakLoopExits);
- endLoop();
- return JSRS_STOP;
- }
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IFEQ()
-{
- trackCfgMerges(cx->fp->regs->pc);
- return ifop();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IFNE()
-{
- return ifop();
-}
-
-LIns*
-TraceRecorder::newArguments()
-{
- LIns* global_ins = INS_CONSTOBJ(globalObj);
- LIns* argc_ins = INS_CONST(cx->fp->argc);
- LIns* callee_ins = get(&cx->fp->argv[-2]);
- LIns* argv_ins = cx->fp->argc
- ? lir->ins2(LIR_piadd, lirbuf->sp,
- lir->insImmWord(-treeInfo->nativeStackBase + nativeStackOffset(&cx->fp->argv[0])))
- : INS_CONSTPTR((void *) 2);
- js_ArgsPrivateNative *apn = js_ArgsPrivateNative::create(*traceMonitor->dataAlloc, cx->fp->argc);
- for (uintN i = 0; i < cx->fp->argc; ++i) {
- apn->typemap()[i] = determineSlotType(&cx->fp->argv[i]);
- }
-
- LIns* args[] = { INS_CONSTPTR(apn), argv_ins, callee_ins, argc_ins, global_ins, cx_ins };
- LIns* call_ins = lir->insCall(&js_Arguments_ci, args);
- guard(false, lir->ins_peq0(call_ins), OOM_EXIT);
- return call_ins;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARGUMENTS()
-{
- if (cx->fp->flags & JSFRAME_OVERRIDE_ARGS)
- ABORT_TRACE("Can't trace |arguments| if |arguments| is assigned to");
-
- LIns* a_ins = get(&cx->fp->argsobj);
- LIns* args_ins;
- if (a_ins->opcode() == LIR_int) {
- // |arguments| is set to 0 by EnterFrame on this trace, so call the builtin to create it.
- args_ins = newArguments();
- } else {
- // Generate LIR to create the arguments object only if it has not already been created.
-
- LIns* mem_ins = lir->insAlloc(sizeof(jsval));
-
- LIns* br1 = lir->insBranch(LIR_jt, lir->ins_peq0(a_ins), NULL);
- lir->insStorei(a_ins, mem_ins, 0);
- LIns* br2 = lir->insBranch(LIR_j, NULL, NULL);
-
- LIns* label1 = lir->ins0(LIR_label);
- br1->setTarget(label1);
-
- LIns* call_ins = newArguments();
- lir->insStorei(call_ins, mem_ins, 0);
-
- LIns* label2 = lir->ins0(LIR_label);
- br2->setTarget(label2);
-
- args_ins = lir->insLoad(LIR_ldp, mem_ins, 0);
- }
-
- stack(0, args_ins);
- set(&cx->fp->argsobj, args_ins);
- return JSRS_CONTINUE;
-}
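-
-/*
- * The branch/label pairs above implement this diamond in straight-line LIR
- * (illustrative C-like pseudocode only):
- *
- *   args = (argsobj != NULL) ? argsobj : newArguments();
- *
- * staged through one stack word (mem_ins) so both paths merge on a single
- * load.
- */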
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DUP()
-{
- stack(0, get(&stackval(-1)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DUP2()
-{
- stack(0, get(&stackval(-2)));
- stack(1, get(&stackval(-1)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SWAP()
-{
- jsval& l = stackval(-2);
- jsval& r = stackval(-1);
- LIns* l_ins = get(&l);
- LIns* r_ins = get(&r);
- set(&r, l_ins);
- set(&l, r_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_PICK()
-{
- jsval* sp = cx->fp->regs->sp;
- jsint n = cx->fp->regs->pc[1];
- JS_ASSERT(sp - (n+1) >= StackBase(cx->fp));
- LIns* top = get(sp - (n+1));
- for (jsint i = 0; i < n; ++i)
- set(sp - (n+1) + i, get(sp - n + i));
- set(&sp[-1], top);
- return JSRS_CONTINUE;
-}
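-
-/*
- * Worked example (illustrative): with operand stack [... a, b, c, d] and
- * n == 2, JSOP_PICK rotates the value n+1 slots down to the top:
- *
- *   before: ... a, b, c, d      (b is sp[-(n+1)])
- *   after:  ... a, c, d, b
- *
- * The loop shifts c and d down one slot; 'top' (b) lands at sp[-1].
- */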
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETCONST()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BITOR()
-{
- return binary(LIR_or);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BITXOR()
-{
- return binary(LIR_xor);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BITAND()
-{
- return binary(LIR_and);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_EQ()
-{
- return equality(false, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NE()
-{
- return equality(true, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LT()
-{
- return relational(LIR_flt, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LE()
-{
- return relational(LIR_fle, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GT()
-{
- return relational(LIR_fgt, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GE()
-{
- return relational(LIR_fge, true);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LSH()
-{
- return binary(LIR_lsh);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RSH()
-{
- return binary(LIR_rsh);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_URSH()
-{
- return binary(LIR_ush);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ADD()
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
-
- if (!JSVAL_IS_PRIMITIVE(l)) {
- ABORT_IF_XML(l);
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(add_imacros.obj_obj);
- }
- return call_imacro(add_imacros.obj_any);
- }
- if (!JSVAL_IS_PRIMITIVE(r)) {
- ABORT_IF_XML(r);
- return call_imacro(add_imacros.any_obj);
- }
-
- if (JSVAL_IS_STRING(l) || JSVAL_IS_STRING(r)) {
- LIns* args[] = { stringify(r), stringify(l), cx_ins };
- LIns* concat = lir->insCall(&js_ConcatStrings_ci, args);
- guard(false, lir->ins_peq0(concat), OOM_EXIT);
- set(&l, concat);
- return JSRS_CONTINUE;
- }
-
- return binary(LIR_fadd);
-}
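-
-/*
- * Summary of the JSOP_ADD dispatch above (illustrative):
- *
- *   object + object     -> add_imacros.obj_obj  (valueOf/toString imacro)
- *   object + primitive  -> add_imacros.obj_any
- *   primitive + object  -> add_imacros.any_obj
- *   either is a string  -> js_ConcatStrings on the stringified operands
- *   otherwise           -> numeric LIR_fadd
- */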
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SUB()
-{
- return binary(LIR_fsub);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_MUL()
-{
- return binary(LIR_fmul);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DIV()
-{
- return binary(LIR_fdiv);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_MOD()
-{
- return binary(LIR_fmod);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NOT()
-{
- jsval& v = stackval(-1);
- if (JSVAL_IS_SPECIAL(v)) {
- set(&v, lir->ins_eq0(lir->ins2i(LIR_eq, get(&v), 1)));
- return JSRS_CONTINUE;
- }
- if (isNumber(v)) {
- LIns* v_ins = get(&v);
- set(&v, lir->ins2(LIR_or, lir->ins2(LIR_feq, v_ins, lir->insImmf(0)),
- lir->ins_eq0(lir->ins2(LIR_feq, v_ins, v_ins))));
- return JSRS_CONTINUE;
- }
- if (JSVAL_TAG(v) == JSVAL_OBJECT) {
- set(&v, lir->ins_peq0(get(&v)));
- return JSRS_CONTINUE;
- }
- JS_ASSERT(JSVAL_IS_STRING(v));
- set(&v, lir->ins_peq0(lir->ins2(LIR_piand,
- lir->insLoad(LIR_ldp, get(&v), (int)offsetof(JSString, mLength)),
- INS_CONSTWORD(JSString::LENGTH_MASK))));
- return JSRS_CONTINUE;
-}
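-
-/*
- * Note on the number case above: a double is falsy iff it is zero or NaN,
- * so !v is computed as (v == 0) || (v != v), the second disjunct being the
- * usual NaN self-inequality test (illustrative C equivalent):
- *
- *   bool not_number(double v) { return v == 0 || v != v; }
- *
- * The string case masks out the length bits, so only the empty string is
- * falsy.
- */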
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BITNOT()
-{
- return unary(LIR_not);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NEG()
-{
- jsval& v = stackval(-1);
-
- if (!JSVAL_IS_PRIMITIVE(v)) {
- ABORT_IF_XML(v);
- return call_imacro(unary_imacros.sign);
- }
-
- if (isNumber(v)) {
- LIns* a = get(&v);
-
- /*
- * If we're a promoted integer, we have to watch out for zero, since
- * negating 0 yields -0, which is a double. Only follow this path if the
- * operand is neither the integer 0 nor the double -0.
- */
- if (!oracle.isInstructionUndemotable(cx->fp->regs->pc) &&
- isPromoteInt(a) &&
- (!JSVAL_IS_INT(v) || JSVAL_TO_INT(v) != 0) &&
- (!JSVAL_IS_DOUBLE(v) || !JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) &&
- -asNumber(v) == (int)-asNumber(v)) {
- a = lir->ins1(LIR_neg, ::demote(lir, a));
- if (!a->isconst()) {
- VMSideExit* exit = snapshot(OVERFLOW_EXIT);
- guard(false, lir->ins1(LIR_ov, a), exit);
- guard(false, lir->ins2i(LIR_eq, a, 0), exit);
- }
- a = lir->ins1(LIR_i2f, a);
- } else {
- a = lir->ins1(LIR_fneg, a);
- }
-
- set(&v, a);
- return JSRS_CONTINUE;
- }
-
- if (JSVAL_IS_NULL(v)) {
- set(&v, lir->insImmf(-0.0));
- return JSRS_CONTINUE;
- }
-
- JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING || JSVAL_IS_SPECIAL(v));
-
- LIns* args[] = { get(&v), cx_ins };
- set(&v, lir->ins1(LIR_fneg,
- lir->insCall(JSVAL_IS_STRING(v)
- ? &js_StringToNumber_ci
- : &js_BooleanOrUndefinedToNumber_ci,
- args)));
- return JSRS_CONTINUE;
-}
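-
-/*
- * Why the two guards in the integer path above (illustrative):
- *
- *   negating INT_MIN overflows int32  -> the LIR_ov guard fires
- *   negating 0 yields -0, which has
- *   no int32 representation          -> the result == 0 guard fires
- *
- * Either side exit re-runs the negation on the double path.
- */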
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_POS()
-{
- jsval& v = stackval(-1);
-
- if (!JSVAL_IS_PRIMITIVE(v)) {
- ABORT_IF_XML(v);
- return call_imacro(unary_imacros.sign);
- }
-
- if (isNumber(v))
- return JSRS_CONTINUE;
-
- if (JSVAL_IS_NULL(v)) {
- set(&v, lir->insImmf(0));
- return JSRS_CONTINUE;
- }
-
- JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING || JSVAL_IS_SPECIAL(v));
-
- LIns* args[] = { get(&v), cx_ins };
- set(&v, lir->insCall(JSVAL_IS_STRING(v)
- ? &js_StringToNumber_ci
- : &js_BooleanOrUndefinedToNumber_ci,
- args));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_PRIMTOP()
-{
- // Either this opcode does nothing or we couldn't have traced here, because
- // we'd have thrown an exception; so do nothing if we actually hit this.
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_OBJTOP()
-{
- jsval& v = stackval(-1);
- ABORT_IF_XML(v);
- return JSRS_CONTINUE;
-}
-
-JSRecordingStatus
-TraceRecorder::getClassPrototype(JSObject* ctor, LIns*& proto_ins)
-{
- // ctor must be a function created via js_InitClass.
-#ifdef DEBUG
- JSClass *clasp = FUN_CLASP(GET_FUNCTION_PRIVATE(cx, ctor));
- JS_ASSERT(clasp);
-
- JSTraceMonitor &localtm = JS_TRACE_MONITOR(cx);
-#endif
-
- jsval pval;
- if (!ctor->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom), &pval))
- ABORT_TRACE_ERROR("error getting prototype from constructor");
-
- // ctor.prototype is a permanent data property, so this lookup cannot have
- // deep-aborted.
- JS_ASSERT(localtm.recorder);
-
-#ifdef DEBUG
- JSBool ok, found;
- uintN attrs;
- ok = JS_GetPropertyAttributes(cx, ctor, js_class_prototype_str, &attrs, &found);
- JS_ASSERT(ok);
- JS_ASSERT(found);
- JS_ASSERT((~attrs & (JSPROP_READONLY | JSPROP_PERMANENT)) == 0);
-#endif
-
- // Since ctor was built by js_InitClass, we can assert (rather than check)
- // that pval is usable.
- JS_ASSERT(!JSVAL_IS_PRIMITIVE(pval));
- JSObject *proto = JSVAL_TO_OBJECT(pval);
- JS_ASSERT_IF(clasp != &js_ArrayClass, OBJ_SCOPE(proto)->emptyScope->clasp == clasp);
-
- proto_ins = INS_CONSTOBJ(proto);
- return JSRS_CONTINUE;
-}
-
-JSRecordingStatus
-TraceRecorder::getClassPrototype(JSProtoKey key, LIns*& proto_ins)
-{
-#ifdef DEBUG
- JSTraceMonitor &localtm = JS_TRACE_MONITOR(cx);
-#endif
-
- JSObject* proto;
- if (!js_GetClassPrototype(cx, globalObj, INT_TO_JSID(key), &proto))
- ABORT_TRACE_ERROR("error in js_GetClassPrototype");
-
- // This should not have reentered.
- JS_ASSERT(localtm.recorder);
-
- // If we might end up passing the proto to JSObject::initSharingEmptyScope,
- // we must check here that proto has a matching emptyScope. We skip the
- // check for Array.prototype because new arrays, being non-native, are
- // never initialized using initSharingEmptyScope.
- if (key != JSProto_Array) {
- if (!OBJ_IS_NATIVE(proto)) {
- // non-native class prototype
- return JSRS_STOP;
- }
- JSEmptyScope *emptyScope = OBJ_SCOPE(proto)->emptyScope;
- if (!emptyScope || JSCLASS_CACHED_PROTO_KEY(emptyScope->clasp) != key) {
- // class prototype is not the standard one
- return JSRS_STOP;
- }
- }
-
- proto_ins = INS_CONSTOBJ(proto);
- return JSRS_CONTINUE;
-}
-
-#define IGNORE_NATIVE_CALL_COMPLETE_CALLBACK ((JSSpecializedNative*)1)
-
-JSRecordingStatus
-TraceRecorder::newString(JSObject* ctor, uint32 argc, jsval* argv, jsval* rval)
-{
- JS_ASSERT(argc == 1);
-
- if (!JSVAL_IS_PRIMITIVE(argv[0])) {
- ABORT_IF_XML(argv[0]);
- return call_imacro(new_imacros.String);
- }
-
- LIns* proto_ins;
- CHECK_STATUS(getClassPrototype(ctor, proto_ins));
-
- LIns* args[] = { stringify(argv[0]), proto_ins, cx_ins };
- LIns* obj_ins = lir->insCall(&js_String_tn_ci, args);
- guard(false, lir->ins_peq0(obj_ins), OOM_EXIT);
-
- set(rval, obj_ins);
- pendingSpecializedNative = IGNORE_NATIVE_CALL_COMPLETE_CALLBACK;
- return JSRS_CONTINUE;
-}
-
-JSRecordingStatus
-TraceRecorder::newArray(JSObject* ctor, uint32 argc, jsval* argv, jsval* rval)
-{
- LIns *proto_ins;
- CHECK_STATUS(getClassPrototype(ctor, proto_ins));
-
- LIns *arr_ins;
- if (argc == 0) {
- // arr_ins = js_NewEmptyArray(cx, Array.prototype)
- LIns *args[] = { proto_ins, cx_ins };
- arr_ins = lir->insCall(&js_NewEmptyArray_ci, args);
- guard(false, lir->ins_peq0(arr_ins), OOM_EXIT);
- } else if (argc == 1 && JSVAL_IS_NUMBER(argv[0])) {
- // arr_ins = js_NewEmptyArrayWithLength(cx, Array.prototype, length)
- LIns *args[] = { f2i(get(argv)), proto_ins, cx_ins }; // FIXME: is this 64-bit safe?
- arr_ins = lir->insCall(&js_NewEmptyArrayWithLength_ci, args);
- guard(false, lir->ins_peq0(arr_ins), OOM_EXIT);
- } else {
- // arr_ins = js_NewUninitializedArray(cx, Array.prototype, argc)
- LIns *args[] = { INS_CONST(argc), proto_ins, cx_ins };
- arr_ins = lir->insCall(&js_NewUninitializedArray_ci, args);
- guard(false, lir->ins_peq0(arr_ins), OOM_EXIT);
-
- // arr->dslots[i] = box_jsval(vp[i]); for i in 0..argc
- LIns *dslots_ins = NULL;
- for (uint32 i = 0; i < argc && !outOfMemory(); i++) {
- LIns *elt_ins = box_jsval(argv[i], get(&argv[i]));
- stobj_set_dslot(arr_ins, i, dslots_ins, elt_ins);
- }
-
- if (argc > 0)
- stobj_set_fslot(arr_ins, JSSLOT_ARRAY_COUNT, INS_CONST(argc));
- }
-
- set(rval, arr_ins);
- pendingSpecializedNative = IGNORE_NATIVE_CALL_COMPLETE_CALLBACK;
- return JSRS_CONTINUE;
-}
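-
-/*
- * The three specializations above correspond to (illustrative JS):
- *
- *   new Array()        -> js_NewEmptyArray(cx, proto)
- *   new Array(7)       -> js_NewEmptyArrayWithLength(cx, proto, 7)
- *   new Array(a, b, c) -> js_NewUninitializedArray(cx, proto, 3), then each
- *                         boxed argument is stored into dslots and
- *                         JSSLOT_ARRAY_COUNT is set to argc.
- */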
-
-JS_REQUIRES_STACK void
-TraceRecorder::propagateFailureToBuiltinStatus(LIns* ok_ins, LIns*& status_ins)
-{
- /*
- * Check the boolean return value (ok_ins) of a native JSNative,
- * JSFastNative, or JSPropertyOp hook for failure. On failure, set the
- * JSBUILTIN_ERROR bit of cx->builtinStatus.
- *
- * If the return value (ok_ins) is true, status' == status. Otherwise
- * status' = status | JSBUILTIN_ERROR. We calculate (rval & 1) ^ 1, which is
- * 1 if rval is JS_FALSE (error), and shift it left by 1, the log2 of
- * JSBUILTIN_ERROR.
- */
- JS_STATIC_ASSERT(((JS_TRUE & 1) ^ 1) << 1 == 0);
- JS_STATIC_ASSERT(((JS_FALSE & 1) ^ 1) << 1 == JSBUILTIN_ERROR);
- status_ins = lir->ins2(LIR_or,
- status_ins,
- lir->ins2i(LIR_lsh,
- lir->ins2i(LIR_xor,
- lir->ins2i(LIR_and, ok_ins, 1),
- 1),
- 1));
- lir->insStorei(status_ins, lirbuf->state, (int) offsetof(InterpState, builtinStatus));
-}
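-
-/*
- * Worked example of the bit trick above, with JSBUILTIN_ERROR == 1 << 1:
- *
- *   ok == JS_TRUE  (1): ((1 & 1) ^ 1) << 1 == 0  -> status unchanged
- *   ok == JS_FALSE (0): ((0 & 1) ^ 1) << 1 == 2  -> status |= JSBUILTIN_ERROR
- *
- * exactly the two cases the static asserts pin down.
- */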
-
-JS_REQUIRES_STACK void
-TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns* obj_ins,
- bool setflag, LIns* boxed_ins)
-{
- JS_ASSERT(!(sprop->attrs & (setflag ? JSPROP_SETTER : JSPROP_GETTER)));
- JS_ASSERT(setflag ? !SPROP_HAS_STUB_SETTER(sprop) : !SPROP_HAS_STUB_GETTER(sprop));
-
- enterDeepBailCall();
-
- // It is unsafe to pass the address of an object slot as the out parameter,
- // because the getter or setter could end up resizing the object's dslots.
- // Instead, use a word of stack and root it in nativeVp.
- LIns* vp_ins = lir->insAlloc(sizeof(jsval));
- lir->insStorei(vp_ins, lirbuf->state, offsetof(InterpState, nativeVp));
- lir->insStorei(INS_CONST(1), lirbuf->state, offsetof(InterpState, nativeVpLen));
- if (setflag)
- lir->insStorei(boxed_ins, vp_ins, 0);
-
- CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
- ci->_address = uintptr_t(setflag ? sprop->setter : sprop->getter);
- ci->_argtypes = ARGSIZE_I << (0*ARGSIZE_SHIFT) |
- ARGSIZE_P << (1*ARGSIZE_SHIFT) |
- ARGSIZE_P << (2*ARGSIZE_SHIFT) |
- ARGSIZE_P << (3*ARGSIZE_SHIFT) |
- ARGSIZE_P << (4*ARGSIZE_SHIFT);
- ci->_cse = ci->_fold = 0;
- ci->_abi = ABI_CDECL;
-#ifdef DEBUG
- ci->_name = "JSPropertyOp";
-#endif
- LIns* args[] = { vp_ins, INS_CONSTVAL(SPROP_USERID(sprop)), obj_ins, cx_ins };
- LIns* ok_ins = lir->insCall(ci, args);
-
- // Cleanup. Immediately clear nativeVp before we might deep bail.
- lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp));
- leaveDeepBailCall();
-
- // Guard that the call succeeded and builtinStatus is still 0.
- // If the native op succeeds but we deep-bail here, the result value is
- // lost! Therefore this can only be used for setters of shared properties.
- // In that case we ignore the result value anyway.
- LIns* status_ins = lir->insLoad(LIR_ld,
- lirbuf->state,
- (int) offsetof(InterpState, builtinStatus));
- propagateFailureToBuiltinStatus(ok_ins, status_ins);
- guard(true, lir->ins_eq0(status_ins), STATUS_EXIT);
-
- // Re-load the value. This is currently unused, so the load is commented out.
- //boxed_ins = lir->insLoad(LIR_ldp, vp_ins, 0);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::emitNativeCall(JSSpecializedNative* sn, uintN argc, LIns* args[], bool rooted)
-{
- bool constructing = sn->flags & JSTN_CONSTRUCTOR;
-
- if (JSTN_ERRTYPE(sn) == FAIL_STATUS) {
- // This needs to capture the pre-call state of the stack. So do not set
- // pendingSpecializedNative before taking this snapshot.
- JS_ASSERT(!pendingSpecializedNative);
-
- // Take snapshot for js_DeepBail and store it in cx->bailExit.
- // If we are calling a slow native, add information to the side exit
- // for SynthesizeSlowNativeFrame.
- VMSideExit* exit = enterDeepBailCall();
- JSObject* funobj = JSVAL_TO_OBJECT(stackval(0 - (2 + argc)));
- if (FUN_SLOW_NATIVE(GET_FUNCTION_PRIVATE(cx, funobj))) {
- exit->setNativeCallee(funobj, constructing);
- treeInfo->gcthings.addUnique(OBJECT_TO_JSVAL(funobj));
- }
- }
-
- LIns* res_ins = lir->insCall(sn->builtin, args);
-
- // Immediately unroot the vp as soon we return since we might deep bail next.
- if (rooted)
- lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp));
-
- rval_ins = res_ins;
- switch (JSTN_ERRTYPE(sn)) {
- case FAIL_NULL:
- guard(false, lir->ins_peq0(res_ins), OOM_EXIT);
- break;
- case FAIL_NEG:
- res_ins = lir->ins1(LIR_i2f, res_ins);
- guard(false, lir->ins2(LIR_flt, res_ins, lir->insImmf(0)), OOM_EXIT);
- break;
- case FAIL_VOID:
- guard(false, lir->ins2i(LIR_eq, res_ins, JSVAL_TO_SPECIAL(JSVAL_VOID)), OOM_EXIT);
- break;
- case FAIL_COOKIE:
- guard(false, lir->ins2(LIR_peq, res_ins, INS_CONSTWORD(JSVAL_ERROR_COOKIE)), OOM_EXIT);
- break;
- default:;
- }
-
- set(&stackval(0 - (2 + argc)), res_ins);
-
- /*
- * The return value will be processed by NativeCallComplete since
- * we have to know the actual return value type for calls that return
- * jsval (like Array_p_pop).
- */
- pendingSpecializedNative = sn;
-
- return JSRS_CONTINUE;
-}
-
-/*
- * Check whether we have a specialized implementation for this native
- * invocation.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::callSpecializedNative(JSNativeTraceInfo *trcinfo, uintN argc,
- bool constructing)
-{
- JSStackFrame* fp = cx->fp;
- jsbytecode *pc = fp->regs->pc;
-
- jsval& fval = stackval(0 - (2 + argc));
- jsval& tval = stackval(0 - (1 + argc));
-
- LIns* this_ins = get(&tval);
-
- LIns* args[nanojit::MAXARGS];
- JSSpecializedNative *sn = trcinfo->specializations;
- JS_ASSERT(sn);
- do {
- if (((sn->flags & JSTN_CONSTRUCTOR) != 0) != constructing)
- continue;
-
- uintN knownargc = strlen(sn->argtypes);
- if (argc != knownargc)
- continue;
-
- intN prefixc = strlen(sn->prefix);
- JS_ASSERT(prefixc <= 3);
- LIns** argp = &args[argc + prefixc - 1];
- char argtype;
-
-#if defined DEBUG
- memset(args, 0xCD, sizeof(args));
-#endif
-
- uintN i;
- for (i = prefixc; i--; ) {
- argtype = sn->prefix[i];
- if (argtype == 'C') {
- *argp = cx_ins;
- } else if (argtype == 'T') { /* this, as an object */
- if (JSVAL_IS_PRIMITIVE(tval))
- goto next_specialization;
- *argp = this_ins;
- } else if (argtype == 'S') { /* this, as a string */
- if (!JSVAL_IS_STRING(tval))
- goto next_specialization;
- *argp = this_ins;
- } else if (argtype == 'f') {
- *argp = INS_CONSTOBJ(JSVAL_TO_OBJECT(fval));
- } else if (argtype == 'p') {
- CHECK_STATUS(getClassPrototype(JSVAL_TO_OBJECT(fval), *argp));
- } else if (argtype == 'R') {
- *argp = INS_CONSTPTR(cx->runtime);
- } else if (argtype == 'P') {
- // FIXME: Set pc to imacpc when recording JSOP_CALL inside the
- // JSOP_GETELEM imacro (bug 476559).
- if (*pc == JSOP_CALL && fp->imacpc && *fp->imacpc == JSOP_GETELEM)
- *argp = INS_CONSTPTR(fp->imacpc);
- else
- *argp = INS_CONSTPTR(pc);
- } else if (argtype == 'D') { /* this, as a number */
- if (!isNumber(tval))
- goto next_specialization;
- *argp = this_ins;
- } else {
- JS_NOT_REACHED("unknown prefix arg type");
- }
- argp--;
- }
-
- for (i = knownargc; i--; ) {
- jsval& arg = stackval(0 - (i + 1));
- *argp = get(&arg);
-
- argtype = sn->argtypes[i];
- if (argtype == 'd' || argtype == 'i') {
- if (!isNumber(arg))
- goto next_specialization;
- if (argtype == 'i')
- *argp = f2i(*argp);
- } else if (argtype == 'o') {
- if (JSVAL_IS_PRIMITIVE(arg))
- goto next_specialization;
- } else if (argtype == 's') {
- if (!JSVAL_IS_STRING(arg))
- goto next_specialization;
- } else if (argtype == 'r') {
- if (!VALUE_IS_REGEXP(cx, arg))
- goto next_specialization;
- } else if (argtype == 'f') {
- if (!VALUE_IS_FUNCTION(cx, arg))
- goto next_specialization;
- } else if (argtype == 'v') {
- *argp = box_jsval(arg, *argp);
- } else {
- goto next_specialization;
- }
- argp--;
- }
-#if defined DEBUG
- JS_ASSERT(args[0] != (LIns *)0xcdcdcdcd);
-#endif
- return emitNativeCall(sn, argc, args, false);
-
-next_specialization:;
- } while ((sn++)->flags & JSTN_MORE);
-
- return JSRS_STOP;
-}
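-
-/*
- * Illustrative example of the matching above: a hypothetical specialization
- * with prefix "TC" and argtypes "s" for a call like str.concat(x) fills the
- * args array back to front,
- *
- *   args = { get(&arg0), this_ins, cx_ins }
- *
- * which nanojit reads as the call native(cx, thisobj, arg0). Any type
- * mismatch (e.g. arg0 not a string) falls through to the next
- * specialization.
- */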
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::callNative(uintN argc, JSOp mode)
-{
- LIns* args[5];
-
- JS_ASSERT(mode == JSOP_CALL || mode == JSOP_NEW || mode == JSOP_APPLY);
-
- jsval* vp = &stackval(0 - (2 + argc));
- JSObject* funobj = JSVAL_TO_OBJECT(vp[0]);
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, funobj);
- JSFastNative native = (JSFastNative)fun->u.n.native;
-
- switch (argc) {
- case 1:
- if (isNumber(vp[2]) &&
- (native == js_math_ceil || native == js_math_floor || native == js_math_round)) {
- LIns* a = get(&vp[2]);
- if (isPromote(a)) {
- set(&vp[0], a);
- pendingSpecializedNative = IGNORE_NATIVE_CALL_COMPLETE_CALLBACK;
- return JSRS_CONTINUE;
- }
- }
- break;
- case 2:
- if (isNumber(vp[2]) && isNumber(vp[3]) &&
- (native == js_math_min || native == js_math_max)) {
- LIns* a = get(&vp[2]);
- LIns* b = get(&vp[3]);
- if (isPromote(a) && isPromote(b)) {
- a = ::demote(lir, a);
- b = ::demote(lir, b);
- set(&vp[0],
- lir->ins1(LIR_i2f,
- lir->ins_choose(lir->ins2((native == js_math_min)
- ? LIR_lt
- : LIR_gt, a, b),
- a, b)));
- pendingSpecializedNative = IGNORE_NATIVE_CALL_COMPLETE_CALLBACK;
- return JSRS_CONTINUE;
- }
- }
- break;
- }
-
- if (fun->flags & JSFUN_TRCINFO) {
- JSNativeTraceInfo *trcinfo = FUN_TRCINFO(fun);
- JS_ASSERT(trcinfo && (JSFastNative)fun->u.n.native == trcinfo->native);
-
- /* Try to call a type specialized version of the native. */
- if (trcinfo->specializations) {
- JSRecordingStatus status = callSpecializedNative(trcinfo, argc, mode == JSOP_NEW);
- if (status != JSRS_STOP)
- return status;
- }
- }
-
- if (native == js_fun_apply || native == js_fun_call)
- ABORT_TRACE("trying to call native apply or call");
-
- // Allocate the vp vector and emit code to root it.
- uintN vplen = 2 + JS_MAX(argc, FUN_MINARGS(fun)) + fun->u.n.extra;
- if (!(fun->flags & JSFUN_FAST_NATIVE))
- vplen++; // slow native return value slot
- LIns* invokevp_ins = lir->insAlloc(vplen * sizeof(jsval));
-
- // vp[0] is the callee.
- lir->insStorei(INS_CONSTVAL(OBJECT_TO_JSVAL(funobj)), invokevp_ins, 0);
-
- // Calculate |this|.
- LIns* this_ins;
- if (mode == JSOP_NEW) {
- JSClass* clasp = fun->u.n.clasp;
- JS_ASSERT(clasp != &js_SlowArrayClass);
- if (!clasp)
- clasp = &js_ObjectClass;
- JS_ASSERT(((jsuword) clasp & 3) == 0);
-
- // Abort on |new Function|. js_NewInstance would allocate a regular-
- // sized JSObject, not a Function-sized one. (The Function ctor would
- // deep-bail anyway but let's not go there.)
- if (clasp == &js_FunctionClass)
- ABORT_TRACE("new Function");
-
- if (clasp->getObjectOps)
- ABORT_TRACE("new with non-native ops");
-
- args[0] = INS_CONSTOBJ(funobj);
- args[1] = INS_CONSTPTR(clasp);
- args[2] = cx_ins;
- newobj_ins = lir->insCall(&js_NewInstance_ci, args);
- guard(false, lir->ins_peq0(newobj_ins), OOM_EXIT);
- this_ins = newobj_ins; /* boxing an object is a no-op */
- } else if (JSFUN_BOUND_METHOD_TEST(fun->flags)) {
- /* |funobj| was rooted above already. */
- this_ins = INS_CONSTWORD(OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, funobj)));
- } else {
- this_ins = get(&vp[1]);
-
- /*
- * For fast natives, 'null' or primitives are fine as 'this' value.
- * For slow natives we have to ensure the object is substituted for the
- * appropriate global object or boxed object value. JSOP_NEW allocates its
- * own object so it's guaranteed to have a valid 'this' value.
- */
- if (!(fun->flags & JSFUN_FAST_NATIVE)) {
- if (JSVAL_IS_NULL(vp[1])) {
- JSObject* thisObj = js_ComputeThis(cx, JS_FALSE, vp + 2);
- if (!thisObj)
- ABORT_TRACE_ERROR("error in js_ComputeGlobalThis");
- this_ins = INS_CONSTOBJ(thisObj);
- } else if (!JSVAL_IS_OBJECT(vp[1])) {
- ABORT_TRACE("slow native(primitive, args)");
- } else {
- if (guardClass(JSVAL_TO_OBJECT(vp[1]), this_ins, &js_WithClass, snapshot(MISMATCH_EXIT)))
- ABORT_TRACE("can't trace slow native invocation on With object");
-
- this_ins = lir->ins_choose(lir->ins_peq0(stobj_get_parent(this_ins)),
- INS_CONSTOBJ(globalObj),
- this_ins);
- }
- }
- this_ins = box_jsval(vp[1], this_ins);
- }
- set(&vp[1], this_ins);
- lir->insStorei(this_ins, invokevp_ins, 1 * sizeof(jsval));
-
- // Populate argv.
- for (uintN n = 2; n < 2 + argc; n++) {
- LIns* i = box_jsval(vp[n], get(&vp[n]));
- lir->insStorei(i, invokevp_ins, n * sizeof(jsval));
-
- // For a very long argument list we might run out of LIR space, so
- // check inside the loop.
- if (outOfMemory())
- ABORT_TRACE("out of memory in argument list");
- }
-
- // Populate extra slots, including the return value slot for a slow native.
- if (2 + argc < vplen) {
- LIns* undef_ins = INS_CONSTWORD(JSVAL_VOID);
- for (uintN n = 2 + argc; n < vplen; n++) {
- lir->insStorei(undef_ins, invokevp_ins, n * sizeof(jsval));
-
- if (outOfMemory())
- ABORT_TRACE("out of memory in extra slots");
- }
- }
-
- // Set up arguments for the JSNative or JSFastNative.
- uint32 types;
- if (fun->flags & JSFUN_FAST_NATIVE) {
- if (mode == JSOP_NEW)
- ABORT_TRACE("untraceable fast native constructor");
- native_rval_ins = invokevp_ins;
- args[0] = invokevp_ins;
- args[1] = lir->insImm(argc);
- args[2] = cx_ins;
- types = ARGSIZE_I << (0*ARGSIZE_SHIFT) |
- ARGSIZE_P << (1*ARGSIZE_SHIFT) |
- ARGSIZE_I << (2*ARGSIZE_SHIFT) |
- ARGSIZE_P << (3*ARGSIZE_SHIFT);
- } else {
- int32_t offset = (vplen - 1) * sizeof(jsval);
- native_rval_ins = lir->ins2(LIR_piadd, invokevp_ins, INS_CONSTWORD(offset));
- args[0] = native_rval_ins;
- args[1] = lir->ins2(LIR_piadd, invokevp_ins, INS_CONSTWORD(2 * sizeof(jsval)));
- args[2] = lir->insImm(argc);
- args[3] = this_ins;
- args[4] = cx_ins;
- types = ARGSIZE_I << (0*ARGSIZE_SHIFT) |
- ARGSIZE_P << (1*ARGSIZE_SHIFT) |
- ARGSIZE_P << (2*ARGSIZE_SHIFT) |
- ARGSIZE_I << (3*ARGSIZE_SHIFT) |
- ARGSIZE_P << (4*ARGSIZE_SHIFT) |
- ARGSIZE_P << (5*ARGSIZE_SHIFT);
- }
-
- // Generate CallInfo and a JSSpecializedNative structure on the fly.
- // Do not use JSTN_UNBOX_AFTER for mode JSOP_NEW because
- // record_NativeCallComplete unboxes the result specially.
-
- CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
- ci->_address = uintptr_t(fun->u.n.native);
- ci->_cse = ci->_fold = 0;
- ci->_abi = ABI_CDECL;
- ci->_argtypes = types;
-#ifdef DEBUG
- ci->_name = JS_GetFunctionName(fun);
-#endif
-
- // Generate a JSSpecializedNative structure on the fly.
- generatedSpecializedNative.builtin = ci;
- generatedSpecializedNative.flags = FAIL_STATUS | ((mode == JSOP_NEW)
- ? JSTN_CONSTRUCTOR
- : JSTN_UNBOX_AFTER);
- generatedSpecializedNative.prefix = NULL;
- generatedSpecializedNative.argtypes = NULL;
-
- // We only have to ensure that the values we wrote into the stack buffer
- // are rooted if we actually make it to the call, so only set nativeVp and
- // nativeVpLen immediately before emitting the call code. This way we avoid
- // leaving trace with a bogus nativeVp if we fall off trace while unboxing
- // values into the stack buffer.
- lir->insStorei(INS_CONST(vplen), lirbuf->state, offsetof(InterpState, nativeVpLen));
- lir->insStorei(invokevp_ins, lirbuf->state, offsetof(InterpState, nativeVp));
-
- // argc is the original argc here. It is used to calculate where to place
- // the return value.
- return emitNativeCall(&generatedSpecializedNative, argc, args, true);
-}
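-
-/*
- * Layout of the invokevp buffer allocated above (illustrative, for a slow
- * native with fun->u.n.extra == 0):
- *
- *   invokevp[0]              callee (funobj)
- *   invokevp[1]              boxed 'this'
- *   invokevp[2 .. 2+argc-1]  boxed arguments
- *   invokevp[2+argc ..]      JSVAL_VOID padding up to FUN_MINARGS
- *   invokevp[vplen-1]        return-value slot (slow natives only)
- *
- * nativeVp/nativeVpLen root this buffer only once the call is actually
- * emitted, as the comment above explains.
- */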
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::functionCall(uintN argc, JSOp mode)
-{
- jsval& fval = stackval(0 - (2 + argc));
- JS_ASSERT(&fval >= StackBase(cx->fp));
-
- if (!VALUE_IS_FUNCTION(cx, fval))
- ABORT_TRACE("callee is not a function");
-
- jsval& tval = stackval(0 - (1 + argc));
-
- /*
- * If callee is not constant, it's a shapeless call and we have to guard
- * explicitly that we will get this callee again at runtime.
- */
- if (!get(&fval)->isconstp())
- CHECK_STATUS(guardCallee(fval));
-
- /*
- * Require that the callee be a function object, to avoid guarding on its
- * class here. We know if the callee and this were pushed by JSOP_CALLNAME
- * or JSOP_CALLPROP that callee is a *particular* function, since these hit
- * the property cache and guard on the object (this) in which the callee
- * was found. So it's sufficient to test here that the particular function
- * is interpreted, not guard on that condition.
- *
- * Bytecode sequences that push shapeless callees must guard on the callee
- * class being Function and the function being interpreted.
- */
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(fval));
-
- if (FUN_INTERPRETED(fun)) {
- if (mode == JSOP_NEW) {
- LIns* args[] = { get(&fval), INS_CONSTPTR(&js_ObjectClass), cx_ins };
- LIns* tv_ins = lir->insCall(&js_NewInstance_ci, args);
- guard(false, lir->ins_peq0(tv_ins), OOM_EXIT);
- set(&tval, tv_ins);
- }
- return interpretedFunctionCall(fval, fun, argc, mode == JSOP_NEW);
- }
-
- if (FUN_SLOW_NATIVE(fun)) {
- JSNative native = fun->u.n.native;
- jsval* argv = &tval + 1;
- if (native == js_Array)
- return newArray(JSVAL_TO_OBJECT(fval), argc, argv, &fval);
- if (native == js_String && argc == 1) {
- if (mode == JSOP_NEW)
- return newString(JSVAL_TO_OBJECT(fval), 1, argv, &fval);
- if (!JSVAL_IS_PRIMITIVE(argv[0])) {
- ABORT_IF_XML(argv[0]);
- return call_imacro(call_imacros.String);
- }
- set(&fval, stringify(argv[0]));
- pendingSpecializedNative = IGNORE_NATIVE_CALL_COMPLETE_CALLBACK;
- return JSRS_CONTINUE;
- }
- }
-
- return callNative(argc, mode);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NEW()
-{
- uintN argc = GET_ARGC(cx->fp->regs->pc);
- cx->fp->assertValidStackDepth(argc + 2);
- return functionCall(argc, JSOP_NEW);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DELNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DELPROP()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DELELEM()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TYPEOF()
-{
- jsval& r = stackval(-1);
- LIns* type;
- if (JSVAL_IS_STRING(r)) {
- type = INS_ATOM(cx->runtime->atomState.typeAtoms[JSTYPE_STRING]);
- } else if (isNumber(r)) {
- type = INS_ATOM(cx->runtime->atomState.typeAtoms[JSTYPE_NUMBER]);
- } else if (VALUE_IS_FUNCTION(cx, r)) {
- type = INS_ATOM(cx->runtime->atomState.typeAtoms[JSTYPE_FUNCTION]);
- } else {
- LIns* args[] = { get(&r), cx_ins };
- if (JSVAL_IS_SPECIAL(r)) {
- // We specialize identically for boolean and undefined. We must not have a hole here.
- // Pass the unboxed type here, since TypeOfBoolean knows how to handle it.
- JS_ASSERT(r == JSVAL_TRUE || r == JSVAL_FALSE || r == JSVAL_VOID);
- type = lir->insCall(&js_TypeOfBoolean_ci, args);
- } else {
- JS_ASSERT(JSVAL_TAG(r) == JSVAL_OBJECT);
- type = lir->insCall(&js_TypeOfObject_ci, args);
- }
- }
- set(&r, type);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_VOID()
-{
- stack(-1, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCNAME()
-{
- return incName(1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCPROP()
-{
- return incProp(1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCELEM()
-{
- return incElem(1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECNAME()
-{
- return incName(-1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECPROP()
-{
- return incProp(-1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECELEM()
-{
- return incElem(-1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::incName(jsint incr, bool pre)
-{
- jsval* vp;
- LIns* v_ins;
- LIns* v_after;
- NameResult nr;
-
- CHECK_STATUS(name(vp, v_ins, nr));
- jsval v = nr.tracked ? *vp : nr.v;
- CHECK_STATUS(incHelper(v, v_ins, v_after, incr));
- LIns* v_result = pre ? v_after : v_ins;
- if (nr.tracked) {
- set(vp, v_after);
- stack(0, v_result);
- return JSRS_CONTINUE;
- }
-
- if (OBJ_GET_CLASS(cx, nr.obj) != &js_CallClass)
- ABORT_TRACE("incName on unsupported object class");
-
- CHECK_STATUS(setCallProp(nr.obj, nr.obj_ins, nr.sprop, v_after, v));
- stack(0, v_result);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NAMEINC()
-{
- return incName(1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_PROPINC()
-{
- return incProp(1, false);
-}
-
-// XXX consolidate with record_JSOP_GETELEM code...
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ELEMINC()
-{
- return incElem(1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NAMEDEC()
-{
- return incName(-1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_PROPDEC()
-{
- return incProp(-1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ELEMDEC()
-{
- return incElem(-1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETPROP()
-{
- return getProp(stackval(-1));
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETPROP()
-{
- jsval& l = stackval(-2);
- if (JSVAL_IS_PRIMITIVE(l))
- ABORT_TRACE("primitive this for SETPROP");
-
- JSObject* obj = JSVAL_TO_OBJECT(l);
- if (obj->map->ops->setProperty != js_SetProperty)
- ABORT_TRACE("non-native JSObjectOps::setProperty");
- return JSRS_CONTINUE;
-}
-
-/* Emit a specialized, inlined copy of js_NativeSet. */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::nativeSet(JSObject* obj, LIns* obj_ins, JSScopeProperty* sprop,
- jsval v, LIns* v_ins)
-{
- JSScope* scope = OBJ_SCOPE(obj);
- uint32 slot = sprop->slot;
-
- /*
- * We do not trace assignment to properties that have both a nonstub setter
- * and a slot, for several reasons.
- *
- * First, that would require sampling rt->propertyRemovals before and after
- * (see js_NativeSet), and even more code to handle the case where the two
- * samples differ. A mere guard is not enough, because you can't just bail
- * off trace in the middle of a property assignment without storing the
- * value and making the stack right.
- *
- * If obj is the global object, there are two additional problems. We would
- * have to emit still more code to store the result in the object (not the
- * native global frame) if the setter returned successfully after
- * deep-bailing. And we would have to cope if the run-time type of the
- * setter's return value differed from the record-time type of v, in which
- * case unboxing would fail and, having called a native setter, we could
- * not just retry the instruction in the interpreter.
- */
- JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop) || slot == SPROP_INVALID_SLOT);
-
- // Box the value to be stored, if necessary.
- LIns* boxed_ins = NULL;
- if (!SPROP_HAS_STUB_SETTER(sprop) || (slot != SPROP_INVALID_SLOT && obj != globalObj))
- boxed_ins = box_jsval(v, v_ins);
-
- // Call the setter, if any.
- if (!SPROP_HAS_STUB_SETTER(sprop))
- emitNativePropertyOp(scope, sprop, obj_ins, true, boxed_ins);
-
- // Store the value, if this property has a slot.
- if (slot != SPROP_INVALID_SLOT) {
- JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope));
- JS_ASSERT(!(sprop->attrs & JSPROP_SHARED));
- if (obj == globalObj) {
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
- set(&STOBJ_GET_SLOT(obj, slot), v_ins);
- } else {
- LIns* dslots_ins = NULL;
- stobj_set_slot(obj_ins, slot, dslots_ins, boxed_ins);
- }
- }
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop,
- jsval &v, LIns*& v_ins)
-{
- if (entry == JS_NO_PROP_CACHE_FILL)
- ABORT_TRACE("can't trace uncacheable property set");
- JS_ASSERT_IF(PCVCAP_TAG(entry->vcap) >= 1, sprop->attrs & JSPROP_SHARED);
- if (!SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT)
- ABORT_TRACE("can't trace set of property with setter and slot");
- if (sprop->attrs & JSPROP_SETTER)
- ABORT_TRACE("can't trace JavaScript function setter");
-
- // These two cases are errors and can't be traced.
- if (sprop->attrs & JSPROP_GETTER)
- ABORT_TRACE("can't assign to property with script getter but no setter");
- if (sprop->attrs & JSPROP_READONLY)
- ABORT_TRACE("can't assign to readonly property");
-
- JS_ASSERT(!JSVAL_IS_PRIMITIVE(l));
- JSObject* obj = JSVAL_TO_OBJECT(l);
- LIns* obj_ins = get(&l);
- JSScope* scope = OBJ_SCOPE(obj);
-
- JS_ASSERT_IF(entry->vcap == PCVCAP_MAKE(entry->kshape, 0, 0), scope->has(sprop));
-
- // Fast path for CallClass. This is about 20% faster than the general case.
- if (OBJ_GET_CLASS(cx, obj) == &js_CallClass) {
- v_ins = get(&v);
- return setCallProp(obj, obj_ins, sprop, v_ins, v);
- }
-
- /*
- * Setting a function-valued property might need to rebrand the object; we
- * don't trace that case. There's no need to guard on that, though, because
- * separating functions into the trace-time type TT_FUNCTION will save the
- * day!
- */
- if (scope->branded() && VALUE_IS_FUNCTION(cx, v))
- ABORT_TRACE("can't trace function-valued property set in branded scope");
-
- // Find obj2. If entry->adding(), the TAG bits are all 0.
- JSObject* obj2 = obj;
- for (jsuword i = PCVCAP_TAG(entry->vcap) >> PCVCAP_PROTOBITS; i; i--)
- obj2 = OBJ_GET_PARENT(cx, obj2);
- for (jsuword j = PCVCAP_TAG(entry->vcap) & PCVCAP_PROTOMASK; j; j--)
- obj2 = OBJ_GET_PROTO(cx, obj2);
- scope = OBJ_SCOPE(obj2);
- JS_ASSERT_IF(entry->adding(), obj2 == obj);
-
- // Guard before anything else.
- LIns* map_ins = map(obj_ins);
- CHECK_STATUS(guardNativePropertyOp(obj, map_ins));
- jsuword pcval;
- CHECK_STATUS(guardPropertyCacheHit(obj_ins, map_ins, obj, obj2, entry, pcval));
- JS_ASSERT(scope->object == obj2);
- JS_ASSERT(scope->has(sprop));
- JS_ASSERT_IF(obj2 != obj, sprop->attrs & JSPROP_SHARED);
-
- // Add a property to the object if necessary.
- if (entry->adding()) {
- JS_ASSERT(!(sprop->attrs & JSPROP_SHARED));
- if (obj == globalObj)
- ABORT_TRACE("adding a property to the global object");
-
- LIns* args[] = { INS_CONSTSPROP(sprop), obj_ins, cx_ins };
- LIns* ok_ins = lir->insCall(&js_AddProperty_ci, args);
- guard(false, lir->ins_eq0(ok_ins), OOM_EXIT);
- }
-
- v_ins = get(&v);
- return nativeSet(obj, obj_ins, sprop, v, v_ins);
-}
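-
-/*
- * Illustrative note on the vcap decoding above: with
- * PCVCAP_TAG(entry->vcap) == (s << PCVCAP_PROTOBITS) | p, obj2 is reached
- * by s hops along the scope (parent) chain and then p hops along the
- * prototype chain; s == p == 0 with entry->adding() means the property is
- * being added directly on obj itself.
- */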
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::setUpwardTrackedVar(jsval* stackVp, jsval v, LIns* v_ins)
-{
- JSTraceType stackT = determineSlotType(stackVp);
- JSTraceType otherT = getCoercedType(v);
-
- bool promote = true;
-
- if (stackT != otherT) {
- if (stackT == TT_DOUBLE && otherT == TT_INT32 && isPromoteInt(v_ins))
- promote = false;
- else
- ABORT_TRACE("can't trace this upvar mutation");
- }
-
- set(stackVp, v_ins, promote);
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty *sprop,
- LIns *v_ins, jsval v)
-{
- // Set variables in on-trace-stack call objects by updating the tracker.
- JSStackFrame *fp = frameIfInRange(callobj);
- if (fp) {
- jsint slot = JSVAL_TO_INT(SPROP_USERID(sprop));
- if (sprop->setter == SetCallArg) {
- jsval *vp2 = &fp->argv[slot];
- CHECK_STATUS(setUpwardTrackedVar(vp2, v, v_ins));
- return JSRS_CONTINUE;
- }
- if (sprop->setter == SetCallVar) {
- jsval *vp2 = &fp->slots[slot];
- CHECK_STATUS(setUpwardTrackedVar(vp2, v, v_ins));
- return JSRS_CONTINUE;
- }
- ABORT_TRACE("can't trace special CallClass setter");
- }
-
- // Set variables in off-trace-stack call objects by calling standard builtins.
- const CallInfo* ci = NULL;
- if (sprop->setter == SetCallArg)
- ci = &js_SetCallArg_ci;
- else if (sprop->setter == SetCallVar)
- ci = &js_SetCallVar_ci;
- else
- ABORT_TRACE("can't trace special CallClass setter");
-
- // Even though the frame is out of range, later we might be called as an
- // inner trace such that the target variable is defined in the outer trace
- // entry frame. In that case, we must store to the native stack area for
- // that frame.
-
- LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp));
- LIns *fpcallobj_ins = lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, callobj));
- LIns *br1 = lir->insBranch(LIR_jf, lir->ins2(LIR_peq, fpcallobj_ins, callobj_ins), NULL);
-
- // Case 1: storing to native stack area.
-
- // Compute native stack slot and address offset we are storing to.
- unsigned slot = uint16(sprop->shortid);
- LIns *slot_ins;
- if (sprop->setter == SetCallArg)
- slot_ins = ArgClosureTraits::adj_slot_lir(lir, fp_ins, slot);
- else
- slot_ins = VarClosureTraits::adj_slot_lir(lir, fp_ins, slot);
- LIns *offset_ins = lir->ins2(LIR_mul, slot_ins, INS_CONST(sizeof(double)));
-
- // Guard that we are not changing the type of the slot we are storing to.
- LIns *callstackBase_ins = lir->insLoad(LIR_ldp, lirbuf->state,
- offsetof(InterpState, callstackBase));
- LIns *frameInfo_ins = lir->insLoad(LIR_ldp, callstackBase_ins, 0);
- LIns *typemap_ins = lir->ins2(LIR_addp, frameInfo_ins, INS_CONSTWORD(sizeof(FrameInfo)));
- LIns *type_ins = lir->insLoad(LIR_ldcb,
- lir->ins2(LIR_addp, typemap_ins, lir->ins_u2p(slot_ins)), 0);
- JSTraceType type = getCoercedType(v);
- if (type == TT_INT32 && !isPromoteInt(v_ins))
- type = TT_DOUBLE;
- guard(true,
- addName(lir->ins2(LIR_eq, type_ins, lir->insImm(type)),
- "guard(type-stable set upvar)"),
- BRANCH_EXIT);
-
- // Store to the native stack slot.
- LIns *stackBase_ins = lir->insLoad(LIR_ldp, lirbuf->state,
- offsetof(InterpState, stackBase));
- LIns *storeValue_ins = isPromoteInt(v_ins) ? demote(lir, v_ins) : v_ins;
- lir->insStorei(storeValue_ins,
- lir->ins2(LIR_addp, stackBase_ins, lir->ins_u2p(offset_ins)), 0);
- LIns *br2 = lir->insBranch(LIR_j, NULL, NULL);
-
- // Case 2: calling builtin.
- LIns *label1 = lir->ins0(LIR_label);
- br1->setTarget(label1);
- LIns* args[] = {
- box_jsval(v, v_ins),
- INS_CONST(SPROP_USERID(sprop)),
- callobj_ins,
- cx_ins
- };
- LIns* call_ins = lir->insCall(ci, args);
- guard(false, addName(lir->ins_eq0(call_ins), "guard(set upvar)"), STATUS_EXIT);
-
- LIns *label2 = lir->ins0(LIR_label);
- br2->setTarget(label2);
-
- return JSRS_CONTINUE;
-}
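-
-/*
- * Summary of the two runtime cases above (illustrative): if
- * cx->fp->callobj == callobj, the variable lives in the outer trace entry
- * frame, so we type-guard and store directly into the native stack area;
- * otherwise we call the js_SetCallArg/js_SetCallVar builtin on the boxed
- * value. The br1/br2 label pairs stitch the two paths back together.
- */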
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop)
-{
- jsval& r = stackval(-1);
- jsval& l = stackval(-2);
- LIns* v_ins;
- CHECK_STATUS(setProp(l, entry, sprop, r, v_ins));
-
- jsbytecode* pc = cx->fp->regs->pc;
- if (*pc != JSOP_INITPROP && pc[JSOP_SETPROP_LENGTH] != JSOP_POP)
- set(&l, v_ins);
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK VMSideExit*
-TraceRecorder::enterDeepBailCall()
-{
- // Take snapshot for js_DeepBail and store it in cx->bailExit.
- VMSideExit* exit = snapshot(DEEP_BAIL_EXIT);
- lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit));
-
- // Tell nanojit not to discard or defer stack writes before this call.
- GuardRecord* guardRec = createGuardRecord(exit);
- lir->insGuard(LIR_xbarrier, NULL, guardRec);
- return exit;
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::leaveDeepBailCall()
-{
- // Keep cx->bailExit null when it's invalid.
- lir->insStorei(INS_NULL(), cx_ins, offsetof(JSContext, bailExit));
-}
-
-JS_REQUIRES_STACK void
-TraceRecorder::finishGetProp(LIns* obj_ins, LIns* vp_ins, LIns* ok_ins, jsval* outp)
-{
- // Store the boxed result (and this-object, if JOF_CALLOP) before the
- // guard. The deep-bail case requires this. If the property get fails,
- // these slots will be ignored anyway.
- LIns* result_ins = lir->insLoad(LIR_ldp, vp_ins, 0);
- set(outp, result_ins, true);
- if (js_CodeSpec[*cx->fp->regs->pc].format & JOF_CALLOP)
- set(outp + 1, obj_ins, true);
-
- // We need to guard on ok_ins, but this requires a snapshot of the state
- // after this op. monitorRecording will do it for us.
- pendingGuardCondition = ok_ins;
-
- // Note there is a boxed result sitting on the stack. The caller must leave
- // it there for the time being, since the return type is not yet
- // known. monitorRecording will emit the code to unbox it.
- pendingUnboxSlot = outp;
-}
-
-static inline bool
-RootedStringToId(JSContext* cx, JSString** namep, jsid* idp)
-{
- JSString* name = *namep;
- if (name->isAtomized()) {
- *idp = ATOM_TO_JSID((JSAtom*) STRING_TO_JSVAL(name));
- return true;
- }
-
- JSAtom* atom = js_AtomizeString(cx, name, 0);
- if (!atom)
- return false;
- *namep = ATOM_TO_STRING(atom); /* write back to GC root */
- *idp = ATOM_TO_JSID(atom);
- return true;
-}
-
-static JSBool FASTCALL
-GetPropertyByName(JSContext* cx, JSObject* obj, JSString** namep, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- jsid id;
- if (!RootedStringToId(cx, namep, &id) || !obj->getProperty(cx, id, vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyByName, CONTEXT, OBJECT, STRINGPTR, JSVALPTR,
- 0, 0)
-
-// Convert the value in a slot to a string and store the resulting string back
-// in the slot (typically in order to root it).
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::primitiveToStringInPlace(jsval* vp)
-{
- jsval v = *vp;
- JS_ASSERT(JSVAL_IS_PRIMITIVE(v));
-
- if (!JSVAL_IS_STRING(v)) {
- // v is not a string. Turn it into one. js_ValueToString is safe
- // because v is not an object.
- JSString *str = js_ValueToString(cx, v);
- if (!str)
- ABORT_TRACE_ERROR("failed to stringify element id");
- v = STRING_TO_JSVAL(str);
- set(vp, stringify(*vp));
-
- // Write the string back to the stack to save the interpreter some work
- // and to ensure snapshots get the correct type for this slot.
- *vp = v;
- }
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getPropertyByName(LIns* obj_ins, jsval* idvalp, jsval* outp)
-{
- CHECK_STATUS(primitiveToStringInPlace(idvalp));
- enterDeepBailCall();
-
- // Call GetPropertyByName. The vp parameter points to stack because this is
- // what the interpreter currently does. obj and id are rooted on the
- // interpreter stack, but the slot at vp is not a root.
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- LIns* idvalp_ins = addName(addr(idvalp), "idvalp");
- LIns* args[] = {vp_ins, idvalp_ins, obj_ins, cx_ins};
- LIns* ok_ins = lir->insCall(&GetPropertyByName_ci, args);
-
- // GetPropertyByName can assign to *idvalp, so the tracker has an incorrect
- // entry for that address. Correct it. (If the value in the address is
- // never used again, the usual case, Nanojit will kill this load.)
- tracker.set(idvalp, lir->insLoad(LIR_ldp, idvalp_ins, 0));
-
- finishGetProp(obj_ins, vp_ins, ok_ins, outp);
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
-
-static JSBool FASTCALL
-GetPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- JSAutoTempIdRooter idr(cx);
- if (!js_Int32ToId(cx, index, idr.addr()) || !obj->getProperty(cx, idr.id(), vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyByIndex, CONTEXT, OBJECT, INT32, JSVALPTR, 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getPropertyByIndex(LIns* obj_ins, LIns* index_ins, jsval* outp)
-{
- index_ins = makeNumberInt32(index_ins);
-
- // See note in getPropertyByName about vp.
- enterDeepBailCall();
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- LIns* args[] = {vp_ins, index_ins, obj_ins, cx_ins};
- LIns* ok_ins = lir->insCall(&GetPropertyByIndex_ci, args);
- finishGetProp(obj_ins, vp_ins, ok_ins, outp);
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
-
-static JSBool FASTCALL
-GetPropertyById(JSContext* cx, JSObject* obj, jsid id, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
- if (!obj->getProperty(cx, id, vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyById,
- CONTEXT, OBJECT, JSVAL, JSVALPTR, 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getPropertyById(LIns* obj_ins, jsval* outp)
-{
- // Find the atom.
- JSAtom* atom;
- jsbytecode* pc = cx->fp->regs->pc;
- const JSCodeSpec& cs = js_CodeSpec[*pc];
- if (*pc == JSOP_LENGTH) {
- atom = cx->runtime->atomState.lengthAtom;
- } else if (JOF_TYPE(cs.format) == JOF_ATOM) {
- atom = atoms[GET_INDEX(pc)];
- } else {
- JS_ASSERT(JOF_TYPE(cs.format) == JOF_SLOTATOM);
- atom = atoms[GET_INDEX(pc + SLOTNO_LEN)];
- }
-
- // Call GetPropertyById. See note in getPropertyByName about vp.
- enterDeepBailCall();
- jsid id = ATOM_TO_JSID(atom);
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- LIns* args[] = {vp_ins, INS_CONSTWORD(id), obj_ins, cx_ins};
- LIns* ok_ins = lir->insCall(&GetPropertyById_ci, args);
- finishGetProp(obj_ins, vp_ins, ok_ins, outp);
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
-
-/* Manually inlined, specialized copy of js_NativeGet. */
-static JSBool FASTCALL
-GetPropertyWithNativeGetter(JSContext* cx, JSObject* obj, JSScopeProperty* sprop, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
-#ifdef DEBUG
- JSProperty* prop;
- JSObject* pobj;
- JS_ASSERT(obj->lookupProperty(cx, sprop->id, &pobj, &prop));
- JS_ASSERT(prop == (JSProperty*) sprop);
- pobj->dropProperty(cx, prop);
-#endif
-
- // js_GetSprop contains a special case for With objects. We can elide it
- // here because With objects are, we claim, never on the operand stack.
- JS_ASSERT(STOBJ_GET_CLASS(obj) != &js_WithClass);
-
- *vp = JSVAL_VOID;
- if (!sprop->getter(cx, obj, SPROP_USERID(sprop), vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyWithNativeGetter,
- CONTEXT, OBJECT, SCOPEPROP, JSVALPTR, 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getPropertyWithNativeGetter(LIns* obj_ins, JSScopeProperty* sprop, jsval* outp)
-{
- JS_ASSERT(!(sprop->attrs & JSPROP_GETTER));
- JS_ASSERT(sprop->slot == SPROP_INVALID_SLOT);
- JS_ASSERT(!SPROP_HAS_STUB_GETTER(sprop));
-
- // Call GetPropertyWithNativeGetter. See note in getPropertyByName about vp.
- // FIXME - We should call the getter directly. Using a builtin function for
- // now because it buys some extra asserts. See bug 508310.
- enterDeepBailCall();
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- LIns* args[] = {vp_ins, INS_CONSTPTR(sprop), obj_ins, cx_ins};
- LIns* ok_ins = lir->insCall(&GetPropertyWithNativeGetter_ci, args);
- finishGetProp(obj_ins, vp_ins, ok_ins, outp);
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETELEM()
-{
- bool call = *cx->fp->regs->pc == JSOP_CALLELEM;
-
- jsval& idx = stackval(-1);
- jsval& lval = stackval(-2);
-
- LIns* obj_ins = get(&lval);
- LIns* idx_ins = get(&idx);
-
- // Special case for array-like access of strings.
- if (JSVAL_IS_STRING(lval) && isInt32(idx)) {
- if (call)
- ABORT_TRACE("JSOP_CALLELEM on a string");
- int i = asInt32(idx);
- if (size_t(i) >= JSVAL_TO_STRING(lval)->length())
- ABORT_TRACE("Invalid string index in JSOP_GETELEM");
- idx_ins = makeNumberInt32(idx_ins);
- LIns* args[] = { idx_ins, obj_ins, cx_ins };
- LIns* unitstr_ins = lir->insCall(&js_String_getelem_ci, args);
- guard(false, lir->ins_peq0(unitstr_ins), MISMATCH_EXIT);
- set(&lval, unitstr_ins);
- return JSRS_CONTINUE;
- }
-
- if (JSVAL_IS_PRIMITIVE(lval))
- ABORT_TRACE("JSOP_GETLEM on a primitive");
- ABORT_IF_XML(lval);
-
- JSObject* obj = JSVAL_TO_OBJECT(lval);
- if (obj == globalObj)
- ABORT_TRACE("JSOP_GETELEM on global");
- LIns* v_ins;
-
- /* Property access using a string name or something we have to stringify. */
- if (!JSVAL_IS_INT(idx)) {
- if (!JSVAL_IS_PRIMITIVE(idx))
- ABORT_TRACE("object used as index");
-
- return getPropertyByName(obj_ins, &idx, &lval);
- }
-
- if (STOBJ_GET_CLASS(obj) == &js_ArgumentsClass) {
- unsigned depth;
- JSStackFrame *afp = guardArguments(obj, obj_ins, &depth);
- if (afp) {
- uintN int_idx = JSVAL_TO_INT(idx);
- jsval* vp = &afp->argv[int_idx];
- if (idx_ins->isconstf()) {
- if (int_idx >= 0 && int_idx < afp->argc)
- v_ins = get(vp);
- else
- v_ins = INS_VOID();
- } else {
- // If the index is not a constant expression, we generate LIR to load the value from
- // the native stack area. The guard on js_ArgumentsClass above ensures the up-to-date
- // value has been written back to the native stack area.
- idx_ins = makeNumberInt32(idx_ins);
-
- if (int_idx < 0 || int_idx >= afp->argc)
- ABORT_TRACE("cannot trace arguments with out of range index");
-
- guard(true,
- addName(lir->ins2(LIR_ge, idx_ins, INS_CONST(0)),
- "guard(index >= 0)"),
- MISMATCH_EXIT);
- guard(true,
- addName(lir->ins2(LIR_lt, idx_ins, INS_CONST(afp->argc)),
- "guard(index < argc)"),
- MISMATCH_EXIT);
-
- JSTraceType type = getCoercedType(*vp);
-
- // Guard that the argument has the same type on trace as during recording.
- LIns* typemap_ins;
- if (depth == 0) {
- // In this case, we are in the same frame where the arguments object was created.
- // The entry type map is not necessarily up-to-date, so we capture a new type map
- // for this point in the code.
- unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
- if (stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
- ABORT_TRACE("|arguments| requires saving too much stack");
- JSTraceType* typemap = new (*traceMonitor->dataAlloc) JSTraceType[stackSlots];
- DetermineTypesVisitor detVisitor(*this, typemap);
- VisitStackSlots(detVisitor, cx, 0);
- typemap_ins = INS_CONSTPTR(typemap + 2 /* callee, this */);
- } else {
- // In this case, we are in a deeper frame from where the arguments object was
- // created. The type map at the point of the call out from the creation frame
- // is accurate.
- // Note: this relies on the assumption that we abort on setting an element of
- // an arguments object in any deeper frame.
- LIns* fip_ins = lir->insLoad(LIR_ldp, lirbuf->rp, (callDepth-depth)*sizeof(FrameInfo*));
- typemap_ins = lir->ins2(LIR_add, fip_ins, INS_CONST(sizeof(FrameInfo) + 2/*callee,this*/ * sizeof(JSTraceType)));
- }
-
- LIns* typep_ins = lir->ins2(LIR_piadd, typemap_ins,
- lir->ins_u2p(lir->ins2(LIR_mul,
- idx_ins,
- INS_CONST(sizeof(JSTraceType)))));
- LIns* type_ins = lir->insLoad(LIR_ldcb, typep_ins, 0);
- guard(true,
- addName(lir->ins2(LIR_eq, type_ins, lir->insImm(type)),
- "guard(type-stable upvar)"),
- BRANCH_EXIT);
-
- // Read the value out of the native stack area.
- guard(true, lir->ins2(LIR_ult, idx_ins, INS_CONST(afp->argc)),
- snapshot(BRANCH_EXIT));
- size_t stackOffset = -treeInfo->nativeStackBase + nativeStackOffset(&afp->argv[0]);
- LIns* args_addr_ins = lir->ins2(LIR_piadd, lirbuf->sp, INS_CONSTWORD(stackOffset));
- LIns* argi_addr_ins = lir->ins2(LIR_piadd,
- args_addr_ins,
- lir->ins_u2p(lir->ins2(LIR_mul,
- idx_ins,
- INS_CONST(sizeof(double)))));
- v_ins = stackLoad(argi_addr_ins, type);
- }
- JS_ASSERT(v_ins);
- set(&lval, v_ins);
- if (call)
- set(&idx, obj_ins);
- return JSRS_CONTINUE;
- }
- ABORT_TRACE("can't reach arguments object's frame");
- }
- if (js_IsDenseArray(obj)) {
- // Fast path for dense arrays accessed with an integer index.
- jsval* vp;
- LIns* addr_ins;
-
- guardDenseArray(obj, obj_ins, BRANCH_EXIT);
- CHECK_STATUS(denseArrayElement(lval, idx, vp, v_ins, addr_ins));
- set(&lval, v_ins);
- if (call)
- set(&idx, obj_ins);
- return JSRS_CONTINUE;
- }
-
- return getPropertyByIndex(obj_ins, idx_ins, &lval);
-}
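-
-/*
- * GETELEM dispatch above, summarized with illustrative JS on the left:
- *
- *     "abc"[i]      -> js_String_getelem fast path
- *     obj[someObj]  -> abort (object used as index)
- *     obj["p"]      -> getPropertyByName (index stringified in place)
- *     arguments[i]  -> value read back from the native stack area
- *     dense[i]      -> denseArrayElement fast path
- *     other[i]      -> getPropertyByIndex builtin
- */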
-
-/* Functions used by JSOP_SETELEM */
-
-static JSBool FASTCALL
-SetPropertyByName(JSContext* cx, JSObject* obj, JSString** namep, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- jsid id;
- if (!RootedStringToId(cx, namep, &id) || !obj->setProperty(cx, id, vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, SetPropertyByName, CONTEXT, OBJECT, STRINGPTR, JSVALPTR,
- 0, 0)
-
-static JSBool FASTCALL
-InitPropertyByName(JSContext* cx, JSObject* obj, JSString** namep, jsval val)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- jsid id;
- if (!RootedStringToId(cx, namep, &id) ||
- !obj->defineProperty(cx, id, val, NULL, NULL, JSPROP_ENUMERATE)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, InitPropertyByName, CONTEXT, OBJECT, STRINGPTR, JSVAL,
- 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::initOrSetPropertyByName(LIns* obj_ins, jsval* idvalp, jsval* rvalp, bool init)
-{
- CHECK_STATUS(primitiveToStringInPlace(idvalp));
-
- LIns* rval_ins = box_jsval(*rvalp, get(rvalp));
-
- enterDeepBailCall();
-
- LIns* ok_ins;
- LIns* idvalp_ins = addName(addr(idvalp), "idvalp");
- if (init) {
- LIns* args[] = {rval_ins, idvalp_ins, obj_ins, cx_ins};
- ok_ins = lir->insCall(&InitPropertyByName_ci, args);
- } else {
- // See note in getPropertyByName about vp.
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- lir->insStorei(rval_ins, vp_ins, 0);
- LIns* args[] = {vp_ins, idvalp_ins, obj_ins, cx_ins};
- ok_ins = lir->insCall(&SetPropertyByName_ci, args);
- }
- pendingGuardCondition = ok_ins;
-
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
-
-static JSBool FASTCALL
-SetPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval* vp)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- JSAutoTempIdRooter idr(cx);
- if (!js_Int32ToId(cx, index, idr.addr()) || !obj->setProperty(cx, idr.id(), vp)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, SetPropertyByIndex, CONTEXT, OBJECT, INT32, JSVALPTR, 0, 0)
-
-static JSBool FASTCALL
-InitPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval val)
-{
- js_LeaveTraceIfGlobalObject(cx, obj);
-
- JSAutoTempIdRooter idr(cx);
- if (!js_Int32ToId(cx, index, idr.addr()) ||
- !obj->defineProperty(cx, idr.id(), val, NULL, NULL, JSPROP_ENUMERATE)) {
- js_SetBuiltinError(cx);
- return JS_FALSE;
- }
- return cx->interpState->builtinStatus == 0;
-}
-JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, InitPropertyByIndex, CONTEXT, OBJECT, INT32, JSVAL, 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::initOrSetPropertyByIndex(LIns* obj_ins, LIns* index_ins, jsval* rvalp, bool init)
-{
- index_ins = makeNumberInt32(index_ins);
-
- LIns* rval_ins = box_jsval(*rvalp, get(rvalp));
-
- enterDeepBailCall();
-
- LIns* ok_ins;
- if (init) {
- LIns* args[] = {rval_ins, index_ins, obj_ins, cx_ins};
- ok_ins = lir->insCall(&InitPropertyByIndex_ci, args);
- } else {
- // See note in getPropertyByName about vp.
- LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp");
- lir->insStorei(rval_ins, vp_ins, 0);
- LIns* args[] = {vp_ins, index_ins, obj_ins, cx_ins};
- ok_ins = lir->insCall(&SetPropertyByIndex_ci, args);
- }
- pendingGuardCondition = ok_ins;
-
- leaveDeepBailCall();
- return JSRS_CONTINUE;
-}
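-
-/*
- * Note on init vs. set: JSOP_INITELEM/INITPROP define a brand-new enumerable
- * property on a freshly allocated literal (the defineProperty calls above),
- * while the SETELEM/SETPROP path goes through setProperty and can therefore
- * run setters. Roughly, as these typically compile:
- *
- *     var a = [1, 2];   // elements initialized via INITELEM
- *     a[0] = 3;         // assignment via SETELEM
- */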
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETELEM()
-{
- jsval& v = stackval(-1);
- jsval& idx = stackval(-2);
- jsval& lval = stackval(-3);
-
- if (JSVAL_IS_PRIMITIVE(lval))
- ABORT_TRACE("left JSOP_SETELEM operand is not an object");
- ABORT_IF_XML(lval);
-
- JSObject* obj = JSVAL_TO_OBJECT(lval);
- LIns* obj_ins = get(&lval);
- LIns* idx_ins = get(&idx);
- LIns* v_ins = get(&v);
-
- if (JS_InstanceOf(cx, obj, &js_ArgumentsClass, NULL))
- ABORT_TRACE("can't trace setting elements of the |arguments| object");
-
- if (obj == globalObj)
- ABORT_TRACE("can't trace setting elements on the global object");
-
- if (!JSVAL_IS_INT(idx)) {
- if (!JSVAL_IS_PRIMITIVE(idx))
- ABORT_TRACE("non-primitive index");
- CHECK_STATUS(initOrSetPropertyByName(obj_ins, &idx, &v,
- *cx->fp->regs->pc == JSOP_INITELEM));
- } else if (JSVAL_TO_INT(idx) < 0 || !OBJ_IS_DENSE_ARRAY(cx, obj)) {
- CHECK_STATUS(initOrSetPropertyByIndex(obj_ins, idx_ins, &v,
- *cx->fp->regs->pc == JSOP_INITELEM));
- } else {
- // Fast path: assigning to element of dense array.
-
- // Make sure the array is actually dense.
- if (!guardDenseArray(obj, obj_ins, BRANCH_EXIT))
- return JSRS_STOP;
-
- // The index was on the stack and is therefore a LIR float. Force it to
- // be an integer.
- idx_ins = makeNumberInt32(idx_ins);
-
- // Box the value so we can use one builtin instead of having to add one
- // builtin for every storage type. Special case for integers though,
- // since they are so common.
- LIns* res_ins;
- LIns* args[] = { NULL, idx_ins, obj_ins, cx_ins };
- if (isNumber(v)) {
- if (isPromoteInt(v_ins)) {
- args[0] = ::demote(lir, v_ins);
- res_ins = lir->insCall(&js_Array_dense_setelem_int_ci, args);
- } else {
- args[0] = v_ins;
- res_ins = lir->insCall(&js_Array_dense_setelem_double_ci, args);
- }
- } else {
- LIns* args[] = { box_jsval(v, v_ins), idx_ins, obj_ins, cx_ins };
- res_ins = lir->insCall(&js_Array_dense_setelem_ci, args);
- }
- guard(false, lir->ins_eq0(res_ins), MISMATCH_EXIT);
- }
-
- jsbytecode* pc = cx->fp->regs->pc;
- if (*pc == JSOP_SETELEM && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
- set(&lval, v_ins);
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLNAME()
-{
- JSObject* obj = cx->fp->scopeChain;
- if (obj != globalObj) {
- jsval* vp;
- LIns* ins;
- NameResult nr;
- CHECK_STATUS(scopeChainProp(obj, vp, ins, nr));
- stack(0, ins);
- stack(1, INS_CONSTOBJ(globalObj));
- return JSRS_CONTINUE;
- }
-
- LIns* obj_ins = scopeChain();
- JSObject* obj2;
- jsuword pcval;
-
- CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval));
-
- if (PCVAL_IS_NULL(pcval) || !PCVAL_IS_OBJECT(pcval))
- ABORT_TRACE("callee is not an object");
-
- JS_ASSERT(HAS_FUNCTION_CLASS(PCVAL_TO_OBJECT(pcval)));
-
- stack(0, INS_CONSTOBJ(PCVAL_TO_OBJECT(pcval)));
- stack(1, obj_ins);
- return JSRS_CONTINUE;
-}
-
-JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarArgOnTrace, CONTEXT, UINT32, INT32, UINT32,
- DOUBLEPTR, 0, 0)
-JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarVarOnTrace, CONTEXT, UINT32, INT32, UINT32,
- DOUBLEPTR, 0, 0)
-JS_DEFINE_CALLINFO_5(extern, UINT32, GetUpvarStackOnTrace, CONTEXT, UINT32, INT32, UINT32,
- DOUBLEPTR, 0, 0)
-
-/*
- * Record LIR to get the given upvar. Return the LIR instruction for the upvar
- * value. NULL is returned only on a can't-happen condition with an invalid
- * typemap. The value of the upvar is returned as v.
- */
-JS_REQUIRES_STACK LIns*
-TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v)
-{
- /*
- * Try to find the upvar in the current trace's tracker. For &vr to be
- * the address of the jsval found in js_GetUpvar, we must initialize
- * vr directly with the result, so it is a reference to the same location.
- * It does not work to assign the result to v, because v is an already
- * existing reference that points to something else.
- */
- uint32 cookie = uva->vector[index];
- jsval& vr = js_GetUpvar(cx, script->staticLevel, cookie);
- v = vr;
-
- if (known(&vr))
- return get(&vr);
-
- /*
- * The upvar is not in the current trace, so get the upvar value exactly as
- * the interpreter does and unbox.
- */
- uint32 level = script->staticLevel - UPVAR_FRAME_SKIP(cookie);
- uint32 cookieSlot = UPVAR_FRAME_SLOT(cookie);
- JSStackFrame* fp = cx->display[level];
- const CallInfo* ci;
- int32 slot;
- if (!fp->fun) {
- ci = &GetUpvarStackOnTrace_ci;
- slot = cookieSlot;
- } else if (cookieSlot < fp->fun->nargs) {
- ci = &GetUpvarArgOnTrace_ci;
- slot = cookieSlot;
- } else if (cookieSlot == CALLEE_UPVAR_SLOT) {
- ci = &GetUpvarArgOnTrace_ci;
- slot = -2;
- } else {
- ci = &GetUpvarVarOnTrace_ci;
- slot = cookieSlot - fp->fun->nargs;
- }
-
- LIns* outp = lir->insAlloc(sizeof(double));
- LIns* args[] = {
- outp,
- INS_CONST(callDepth),
- INS_CONST(slot),
- INS_CONST(level),
- cx_ins
- };
- LIns* call_ins = lir->insCall(ci, args);
- JSTraceType type = getCoercedType(v);
- guard(true,
- addName(lir->ins2(LIR_eq, call_ins, lir->insImm(type)),
- "guard(type-stable upvar)"),
- BRANCH_EXIT);
- return stackLoad(outp, type);
-}
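-
-/*
- * An upvar cookie packs two fields: how many frames to skip up from the
- * script's static level (UPVAR_FRAME_SKIP) and a slot within that frame
- * (UPVAR_FRAME_SLOT). A sketch of the common case:
- *
- *     function outer() {
- *         var x = 1;                              // a var slot in outer
- *         return function inner() { return x; };  // upvar: skip 1, slot of x
- *     }
- *
- * The slot is then classified above as an arg, a var, the callee, or a stack
- * value, which selects the GetUpvar*OnTrace builtin that reads it at run time.
- */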
-
-/*
- * Generate LIR to load a value from the native stack. This method ensures that
- * the correct LIR load operator is used.
- */
-LIns* TraceRecorder::stackLoad(LIns* base, uint8 type)
-{
- LOpcode loadOp;
- switch (type) {
- case TT_DOUBLE:
- loadOp = LIR_ldq;
- break;
- case TT_OBJECT:
- case TT_STRING:
- case TT_FUNCTION:
- case TT_NULL:
- loadOp = LIR_ldp;
- break;
- case TT_INT32:
- case TT_PSEUDOBOOLEAN:
- loadOp = LIR_ld;
- break;
- case TT_JSVAL:
- default:
- JS_NOT_REACHED("found jsval type in an upvar type map entry");
- return NULL;
- }
-
- LIns* result = lir->insLoad(loadOp, base, 0);
- if (type == TT_INT32)
- result = lir->ins1(LIR_i2f, result);
- return result;
-}
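-
-/*
- * TT_INT32 results are widened with LIR_i2f above because the recorder's
- * canonical representation for a JS number on the operand stack is a double;
- * the narrower type map entry only describes how the value is stored in the
- * native stack area.
- */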
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETUPVAR()
-{
- uintN index = GET_UINT16(cx->fp->regs->pc);
- JSScript *script = cx->fp->script;
- JSUpvarArray* uva = script->upvars();
- JS_ASSERT(index < uva->length);
-
- jsval v;
- LIns* upvar_ins = upvar(script, uva, index, v);
- if (!upvar_ins)
- return JSRS_STOP;
- stack(0, upvar_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLUPVAR()
-{
- CHECK_STATUS(record_JSOP_GETUPVAR());
- stack(1, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETDSLOT()
-{
- JSObject* callee = JSVAL_TO_OBJECT(cx->fp->argv[-2]);
- LIns* callee_ins = get(&cx->fp->argv[-2]);
-
- unsigned index = GET_UINT16(cx->fp->regs->pc);
- LIns* dslots_ins = NULL;
- LIns* v_ins = stobj_get_dslot(callee_ins, index, dslots_ins);
-
- stack(0, unbox_jsval(callee->dslots[index], v_ins, snapshot(BRANCH_EXIT)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLDSLOT()
-{
- CHECK_STATUS(record_JSOP_GETDSLOT());
- stack(1, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::guardCallee(jsval& callee)
-{
- JS_ASSERT(VALUE_IS_FUNCTION(cx, callee));
-
- VMSideExit* branchExit = snapshot(BRANCH_EXIT);
- JSObject* callee_obj = JSVAL_TO_OBJECT(callee);
- LIns* callee_ins = get(&callee);
-
- treeInfo->gcthings.addUnique(callee);
- guard(true,
- lir->ins2(LIR_peq,
- stobj_get_private(callee_ins),
- INS_CONSTPTR(callee_obj->getPrivate())),
- branchExit);
- guard(true,
- lir->ins2(LIR_peq,
- stobj_get_parent(callee_ins),
- INS_CONSTOBJ(OBJ_GET_PARENT(cx, callee_obj))),
- branchExit);
- return JSRS_CONTINUE;
-}
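-
-/*
- * The two guards pin down the callee: the private slot holds the JSFunction,
- * i.e. the code the trace is specialized to, and the parent is its scope, so
- * a clone of the same function from a different scope takes a side exit
- * rather than running with the wrong closure's bindings.
- */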
-
-/*
- * Prepare the given |arguments| object to be accessed on trace. If the return
- * value is non-NULL, then the given |arguments| object refers to a frame on
- * the current trace and is guaranteed to refer to the same frame on trace for
- * all later executions.
- */
-JS_REQUIRES_STACK JSStackFrame *
-TraceRecorder::guardArguments(JSObject *obj, LIns* obj_ins, unsigned *depthp)
-{
- JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass);
-
- JSStackFrame *afp = frameIfInRange(obj, depthp);
- if (!afp)
- return NULL;
-
- VMSideExit *exit = snapshot(MISMATCH_EXIT);
- guardClass(obj, obj_ins, &js_ArgumentsClass, exit);
-
- LIns* args_ins = get(&afp->argsobj);
- LIns* cmp = lir->ins2(LIR_peq, args_ins, obj_ins);
- lir->insGuard(LIR_xf, cmp, createGuardRecord(exit));
- return afp;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, bool constructing)
-{
- if (JS_GetGlobalForObject(cx, JSVAL_TO_OBJECT(fval)) != globalObj)
- ABORT_TRACE("JSOP_CALL or JSOP_NEW crosses global scopes");
-
- JSStackFrame* fp = cx->fp;
-
- // TODO: track the copying via the tracker...
- if (argc < fun->nargs &&
- jsuword(fp->regs->sp + (fun->nargs - argc)) > cx->stackPool.current->limit) {
- ABORT_TRACE("can't trace calls with too few args requiring argv move");
- }
-
- // Generate a type map for the outgoing frame and stash it in the LIR
- unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
- if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
- ABORT_TRACE("interpreted function call requires saving too much stack");
- FrameInfo* fi = (FrameInfo*)
- traceMonitor->dataAlloc->alloc(sizeof(FrameInfo) +
- stackSlots * sizeof(JSTraceType));
- JSTraceType* typemap = reinterpret_cast<JSTraceType*>(fi + 1);
-
- DetermineTypesVisitor detVisitor(*this, typemap);
- VisitStackSlots(detVisitor, cx, 0);
-
- JS_ASSERT(argc < FrameInfo::CONSTRUCTING_FLAG);
-
- treeInfo->gcthings.addUnique(fval);
- fi->block = fp->blockChain;
- if (fp->blockChain)
- treeInfo->gcthings.addUnique(OBJECT_TO_JSVAL(fp->blockChain));
- fi->pc = fp->regs->pc;
- fi->imacpc = fp->imacpc;
- fi->spdist = fp->regs->sp - fp->slots;
- fi->set_argc(argc, constructing);
- fi->callerHeight = NativeStackSlots(cx, 0) - (2 + argc);
- fi->callerArgc = fp->argc;
-
- unsigned callDepth = getCallDepth();
- if (callDepth >= treeInfo->maxCallDepth)
- treeInfo->maxCallDepth = callDepth + 1;
-
- lir->insStorei(INS_CONSTPTR(fi), lirbuf->rp, callDepth * sizeof(FrameInfo*));
-
- atoms = fun->u.i.script->atomMap.vector;
- return JSRS_CONTINUE;
-}
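-
-/*
- * The FrameInfo and its trailing type map describe the caller's state so that
- * a side exit taken inside the callee can synthesize the missing stack frame.
- * The allocation above has this layout:
- *
- *     [ FrameInfo ][ JSTraceType x stackSlots ]
- *                   ^-- typemap = reinterpret_cast<JSTraceType*>(fi + 1)
- */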
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALL()
-{
- uintN argc = GET_ARGC(cx->fp->regs->pc);
- cx->fp->assertValidStackDepth(argc + 2);
- return functionCall(argc,
- (cx->fp->imacpc && *cx->fp->imacpc == JSOP_APPLY)
- ? JSOP_APPLY
- : JSOP_CALL);
-}
-
-static jsbytecode* apply_imacro_table[] = {
- apply_imacros.apply0,
- apply_imacros.apply1,
- apply_imacros.apply2,
- apply_imacros.apply3,
- apply_imacros.apply4,
- apply_imacros.apply5,
- apply_imacros.apply6,
- apply_imacros.apply7,
- apply_imacros.apply8
-};
-
-static jsbytecode* call_imacro_table[] = {
- apply_imacros.call0,
- apply_imacros.call1,
- apply_imacros.call2,
- apply_imacros.call3,
- apply_imacros.call4,
- apply_imacros.call5,
- apply_imacros.call6,
- apply_imacros.call7,
- apply_imacros.call8
-};
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_APPLY()
-{
- JSStackFrame* fp = cx->fp;
- jsbytecode *pc = fp->regs->pc;
- uintN argc = GET_ARGC(pc);
- cx->fp->assertValidStackDepth(argc + 2);
-
- jsval* vp = fp->regs->sp - (argc + 2);
- jsuint length = 0;
- JSObject* aobj = NULL;
- LIns* aobj_ins = NULL;
-
- JS_ASSERT(!fp->imacpc);
-
- if (!VALUE_IS_FUNCTION(cx, vp[0]))
- return record_JSOP_CALL();
- ABORT_IF_XML(vp[0]);
-
- JSObject* obj = JSVAL_TO_OBJECT(vp[0]);
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj);
- if (FUN_INTERPRETED(fun))
- return record_JSOP_CALL();
-
- bool apply = (JSFastNative)fun->u.n.native == js_fun_apply;
- if (!apply && (JSFastNative)fun->u.n.native != js_fun_call)
- return record_JSOP_CALL();
-
- /*
- * We don't trace apply and call with a primitive 'this', which is the
- * first positional parameter.
- */
- if (argc > 0 && !JSVAL_IS_OBJECT(vp[2]))
- return record_JSOP_CALL();
-
- /*
- * Guard on the identity of this, which is the function we are applying.
- */
- if (!VALUE_IS_FUNCTION(cx, vp[1]))
- ABORT_TRACE("callee is not a function");
- CHECK_STATUS(guardCallee(vp[1]));
-
- if (apply && argc >= 2) {
- if (argc != 2)
- ABORT_TRACE("apply with excess arguments");
- if (JSVAL_IS_PRIMITIVE(vp[3]))
- ABORT_TRACE("arguments parameter of apply is primitive");
- aobj = JSVAL_TO_OBJECT(vp[3]);
- aobj_ins = get(&vp[3]);
-
- /*
- * We trace dense arrays and arguments objects. The code we generate
- * for apply uses imacros to handle a specific number of arguments.
- */
- if (OBJ_IS_DENSE_ARRAY(cx, aobj)) {
- guardDenseArray(aobj, aobj_ins);
- length = jsuint(aobj->fslots[JSSLOT_ARRAY_LENGTH]);
- guard(true,
- lir->ins2i(LIR_eq,
- p2i(stobj_get_fslot(aobj_ins, JSSLOT_ARRAY_LENGTH)),
- length),
- BRANCH_EXIT);
- } else if (OBJ_GET_CLASS(cx, aobj) == &js_ArgumentsClass) {
- unsigned depth;
- JSStackFrame *afp = guardArguments(aobj, aobj_ins, &depth);
- if (!afp)
- ABORT_TRACE("can't reach arguments object's frame");
- length = afp->argc;
- } else {
- ABORT_TRACE("arguments parameter of apply is not a dense array or argments object");
- }
-
- if (length >= JS_ARRAY_LENGTH(apply_imacro_table))
- ABORT_TRACE("too many arguments to apply");
-
- return call_imacro(apply_imacro_table[length]);
- }
-
- if (argc >= JS_ARRAY_LENGTH(call_imacro_table))
- ABORT_TRACE("too many arguments to call");
-
- return call_imacro(call_imacro_table[argc]);
-}
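-
-/*
- * Roughly, assuming the obvious reading of the imacro tables: call is indexed
- * by the actual argument count of the .call invocation (whose first argument
- * becomes |this|), and apply by the length of the unpacked array:
- *
- *     f.call(obj, a, b)        -> call_imacro_table[3]
- *     f.apply(obj, [a, b, c])  -> apply_imacro_table[3]
- */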
-
-static JSBool FASTCALL
-CatchStopIteration_tn(JSContext* cx, JSBool ok, jsval* vp)
-{
- if (!ok && cx->throwing && js_ValueIsStopIteration(cx->exception)) {
- cx->throwing = JS_FALSE;
- cx->exception = JSVAL_VOID;
- *vp = JSVAL_HOLE;
- return JS_TRUE;
- }
- return ok;
-}
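-
-/*
- * This emulates on trace what the interpreter does for custom iterators,
- * which in JS terms is approximately:
- *
- *     try {
- *         v = iter.next();
- *     } catch (e) {
- *         if (!(e instanceof StopIteration))
- *             throw e;
- *         v = <internal hole value>;
- *     }
- */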
-
-JS_DEFINE_TRCINFO_1(CatchStopIteration_tn,
- (3, (static, BOOL, CatchStopIteration_tn, CONTEXT, BOOL, JSVALPTR, 0, 0)))
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_NativeCallComplete()
-{
- if (pendingSpecializedNative == IGNORE_NATIVE_CALL_COMPLETE_CALLBACK)
- return JSRS_CONTINUE;
-
- jsbytecode* pc = cx->fp->regs->pc;
-
- JS_ASSERT(pendingSpecializedNative);
- JS_ASSERT(*pc == JSOP_CALL || *pc == JSOP_APPLY || *pc == JSOP_NEW || *pc == JSOP_SETPROP);
-
- jsval& v = stackval(-1);
- LIns* v_ins = get(&v);
-
- /*
- * At this point the generated code has already called the native function
- * and we can no longer fail back to the original pc location (JSOP_CALL)
- * because that would cause the interpreter to re-execute the native
- * function, which might have side effects.
- *
- * Instead, the snapshot() call below sees that we are currently parked on
- * a traceable native's JSOP_CALL instruction, and it will advance the pc
- * to restore by the length of the current opcode. If the native's return
- * type is jsval, snapshot() will also indicate in the type map that the
- * element on top of the stack is a boxed value which doesn't need to be
- * boxed if the type guard generated by unbox_jsval() fails.
- */
-
- if (JSTN_ERRTYPE(pendingSpecializedNative) == FAIL_STATUS) {
- /* Keep cx->bailExit null when it's invalid. */
- lir->insStorei(INS_NULL(), cx_ins, (int) offsetof(JSContext, bailExit));
-
- LIns* status = lir->insLoad(LIR_ld, lirbuf->state, (int) offsetof(InterpState, builtinStatus));
- if (pendingSpecializedNative == &generatedSpecializedNative) {
- LIns* ok_ins = v_ins;
-
- /*
- * Custom implementations of Iterator.next() throw a StopIteration exception.
- * Catch and clear it and set the return value to JSVAL_HOLE in this case.
- */
- if (uintptr_t(pc - nextiter_imacros.custom_iter_next) <
- sizeof(nextiter_imacros.custom_iter_next)) {
- LIns* args[] = { native_rval_ins, ok_ins, cx_ins }; /* reverse order */
- ok_ins = lir->insCall(&CatchStopIteration_tn_ci, args);
- }
-
- /*
- * If we run a generic traceable native, the return value is in the argument
- * vector for native function calls. The actual return value of the native is a JSBool
- * indicating the error status.
- */
- v_ins = lir->insLoad(LIR_ldp, native_rval_ins, 0);
- if (*pc == JSOP_NEW) {
- LIns* x = lir->ins_peq0(lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK)));
- x = lir->ins_choose(x, v_ins, INS_CONSTWORD(0));
- v_ins = lir->ins_choose(lir->ins_peq0(x), newobj_ins, x);
- }
- set(&v, v_ins);
-
- propagateFailureToBuiltinStatus(ok_ins, status);
- }
- guard(true, lir->ins_eq0(status), STATUS_EXIT);
- }
-
- JSRecordingStatus ok = JSRS_CONTINUE;
- if (pendingSpecializedNative->flags & JSTN_UNBOX_AFTER) {
- /*
- * If we side exit on the unboxing code due to a type change, make sure that the boxed
- * value is actually currently associated with that location, and that we are talking
- * about the top of the stack here, which is where we expected boxed values.
- */
- JS_ASSERT(&v == &cx->fp->regs->sp[-1] && get(&v) == v_ins);
- set(&v, unbox_jsval(v, v_ins, snapshot(BRANCH_EXIT)));
- } else if (JSTN_ERRTYPE(pendingSpecializedNative) == FAIL_NEG) {
- /* Already added i2f in functionCall. */
- JS_ASSERT(JSVAL_IS_NUMBER(v));
- } else {
- /* Convert the result to double if the builtin returns int32. */
- if (JSVAL_IS_NUMBER(v) &&
- (pendingSpecializedNative->builtin->_argtypes & ARGSIZE_MASK_ANY) == ARGSIZE_I) {
- set(&v, lir->ins1(LIR_i2f, v_ins));
- }
- }
-
- // We'll null pendingSpecializedNative in monitorRecording, on the next op
- // cycle. There must be a next op since the stack is non-empty.
- return ok;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::name(jsval*& vp, LIns*& ins, NameResult& nr)
-{
- JSObject* obj = cx->fp->scopeChain;
- if (obj != globalObj)
- return scopeChainProp(obj, vp, ins, nr);
-
- /* Can't use prop here, because we don't want unboxing from global slots. */
- LIns* obj_ins = scopeChain();
- uint32 slot;
-
- JSObject* obj2;
- jsuword pcval;
-
- /*
- * Property cache ensures that we are dealing with an existing property,
- * and guards the shape for us.
- */
- CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval));
-
- /* Abort if the property doesn't exist (the interpreter will report an error). */
- if (PCVAL_IS_NULL(pcval))
- ABORT_TRACE("named property not found");
-
- /* Insist on obj being the directly addressed object. */
- if (obj2 != obj)
- ABORT_TRACE("name() hit prototype chain");
-
- /* Don't trace getter or setter calls, our caller wants a direct slot. */
- if (PCVAL_IS_SPROP(pcval)) {
- JSScopeProperty* sprop = PCVAL_TO_SPROP(pcval);
- if (!isValidSlot(OBJ_SCOPE(obj), sprop))
- ABORT_TRACE("name() not accessing a valid slot");
- slot = sprop->slot;
- } else {
- if (!PCVAL_IS_SLOT(pcval))
- ABORT_TRACE("PCE is not a slot");
- slot = PCVAL_TO_SLOT(pcval);
- }
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- vp = &STOBJ_GET_SLOT(obj, slot);
- ins = get(vp);
- nr.tracked = true;
- return JSRS_CONTINUE;
-}
-
-/*
- * Get a property. The current opcode has JOF_ATOM.
- *
- * There are two modes. The caller must pass nonnull pointers for either outp
- * or both slotp and v_insp. In the latter case, we require a plain old
- * property with a slot; if the property turns out to be anything else, abort
- * tracing (rather than emit a call to a native getter or GetAnyProperty).
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32 *slotp, LIns** v_insp, jsval *outp)
-{
- JS_ASSERT((slotp && v_insp && !outp) || (!slotp && !v_insp && outp));
-
- /*
- * Can't specialize to assert obj != global, must guard to avoid aliasing
- * stale homes of stacked global variables.
- */
- CHECK_STATUS(guardNotGlobalObject(obj, obj_ins));
-
- /*
- * Property cache ensures that we are dealing with an existing property,
- * and guards the shape for us.
- */
- JSObject* obj2;
- jsuword pcval;
- CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval));
-
- /* Check for non-existent property reference, which results in undefined. */
- const JSCodeSpec& cs = js_CodeSpec[*cx->fp->regs->pc];
- if (PCVAL_IS_NULL(pcval)) {
- if (slotp)
- ABORT_TRACE("property not found");
-
- /*
- * We could specialize to guard on just JSClass.getProperty, but a mere
- * class guard is simpler and slightly faster.
- */
- if (OBJ_GET_CLASS(cx, obj)->getProperty != JS_PropertyStub) {
- ABORT_TRACE("can't trace through access to undefined property if "
- "JSClass.getProperty hook isn't stubbed");
- }
- guardClass(obj, obj_ins, OBJ_GET_CLASS(cx, obj), snapshot(MISMATCH_EXIT));
-
- /*
- * This trace will be valid as long as neither the object nor any object
- * on its prototype chain changes shape.
- *
- * FIXME: This loop can become a single shape guard once bug 497789 has
- * been fixed.
- */
- VMSideExit* exit = snapshot(BRANCH_EXIT);
- do {
- LIns* map_ins = map(obj_ins);
- LIns* ops_ins;
- if (map_is_native(obj->map, map_ins, ops_ins)) {
- guardShape(obj_ins, obj, OBJ_SHAPE(obj), "guard(shape)", map_ins, exit);
- } else if (!guardDenseArray(obj, obj_ins, exit)) {
- ABORT_TRACE("non-native object involved in undefined property access");
- }
- } while (guardHasPrototype(obj, obj_ins, &obj, &obj_ins, exit));
-
- set(outp, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)), true);
- return JSRS_CONTINUE;
- }
-
- uint32 setflags = (cs.format & (JOF_INCDEC | JOF_FOR));
- JS_ASSERT(!(cs.format & JOF_SET));
-
- uint32 slot;
- if (PCVAL_IS_SPROP(pcval)) {
- JSScopeProperty* sprop = PCVAL_TO_SPROP(pcval);
-
- if (setflags && !SPROP_HAS_STUB_SETTER(sprop))
- ABORT_TRACE("non-stub setter");
- if (setflags && (sprop->attrs & JSPROP_READONLY))
- ABORT_TRACE("writing to a readonly property");
- if (!SPROP_HAS_STUB_GETTER(sprop)) {
- if (slotp)
- ABORT_TRACE("can't trace non-stub getter for this opcode");
- if (sprop->attrs & JSPROP_GETTER)
- ABORT_TRACE("script getter");
- if (sprop->slot == SPROP_INVALID_SLOT)
- return getPropertyWithNativeGetter(obj_ins, sprop, outp);
- return getPropertyById(obj_ins, outp);
- }
- if (!SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj2)))
- ABORT_TRACE("no valid slot");
- slot = sprop->slot;
- } else {
- if (!PCVAL_IS_SLOT(pcval))
- ABORT_TRACE("PCE is not a slot");
- slot = PCVAL_TO_SLOT(pcval);
- }
-
- /* We have a slot. */
- if (obj2 != obj) {
- if (setflags)
- ABORT_TRACE("JOF_INCDEC|JOF_FOR opcode hit prototype chain");
-
- /*
- * We're getting a proto-property. Walk up the prototype chain emitting
- * proto slot loads, updating obj as we go, leaving obj set to obj2 with
- * obj_ins the last proto-load.
- */
- while (obj != obj2) {
- obj_ins = stobj_get_proto(obj_ins);
- obj = STOBJ_GET_PROTO(obj);
- }
- }
-
- LIns* dslots_ins = NULL;
- LIns* v_ins = unbox_jsval(STOBJ_GET_SLOT(obj, slot),
- stobj_get_slot(obj_ins, slot, dslots_ins),
- snapshot(BRANCH_EXIT));
-
- if (slotp) {
- *slotp = slot;
- *v_insp = v_ins;
- }
- if (outp)
- set(outp, v_ins, true);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ins,
- LIns*& addr_ins)
-{
- JS_ASSERT(JSVAL_IS_OBJECT(oval) && JSVAL_IS_INT(ival));
-
- JSObject* obj = JSVAL_TO_OBJECT(oval);
- LIns* obj_ins = get(&oval);
- jsint idx = JSVAL_TO_INT(ival);
- LIns* idx_ins = makeNumberInt32(get(&ival));
- LIns* pidx_ins = lir->ins_u2p(idx_ins);
-
- VMSideExit* exit = snapshot(BRANCH_EXIT);
-
- /* check that the index is within bounds */
- LIns* dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots));
- jsuint capacity = js_DenseArrayCapacity(obj);
- bool within = (jsuint(idx) < jsuint(obj->fslots[JSSLOT_ARRAY_LENGTH]) && jsuint(idx) < capacity);
- if (!within) {
- /* If idx < 0, stay on trace (and read value as undefined, since this is a dense array). */
- LIns* br1 = NULL;
- if (MAX_DSLOTS_LENGTH > MAX_DSLOTS_LENGTH32 && !idx_ins->isconst()) {
- /* Only 64-bit machines support large enough arrays for this. */
- JS_ASSERT(sizeof(jsval) == 8);
- br1 = lir->insBranch(LIR_jt,
- lir->ins2i(LIR_lt, idx_ins, 0),
- NULL);
- }
-
- /* If not idx < length, stay on trace (and read value as undefined). */
- LIns* br2 = lir->insBranch(LIR_jf,
- lir->ins2(LIR_pult,
- pidx_ins,
- stobj_get_fslot(obj_ins, JSSLOT_ARRAY_LENGTH)),
- NULL);
-
- /* If dslots is NULL, stay on trace (and read value as undefined). */
- LIns* br3 = lir->insBranch(LIR_jt, lir->ins_peq0(dslots_ins), NULL);
-
- /* If not idx < capacity, stay on trace (and read value as undefined). */
- LIns* br4 = lir->insBranch(LIR_jf,
- lir->ins2(LIR_pult,
- pidx_ins,
- lir->insLoad(LIR_ldp,
- dslots_ins,
- -(int)sizeof(jsval))),
- NULL);
- lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
- LIns* label = lir->ins0(LIR_label);
- if (br1)
- br1->setTarget(label);
- br2->setTarget(label);
- br3->setTarget(label);
- br4->setTarget(label);
-
- CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
-
- // Return undefined and indicate that we didn't actually read this (addr_ins).
- v_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID));
- addr_ins = NULL;
- return JSRS_CONTINUE;
- }
-
- /* Guard against negative index */
- if (MAX_DSLOTS_LENGTH > MAX_DSLOTS_LENGTH32 && !idx_ins->isconst()) {
- /* Only 64-bit machines support large enough arrays for this. */
- JS_ASSERT(sizeof(jsval) == 8);
- guard(false,
- lir->ins2i(LIR_lt, idx_ins, 0),
- exit);
- }
-
- /* Guard array length */
- guard(true,
- lir->ins2(LIR_pult, pidx_ins, stobj_get_fslot(obj_ins, JSSLOT_ARRAY_LENGTH)),
- exit);
-
- /* dslots must not be NULL */
- guard(false,
- lir->ins_peq0(dslots_ins),
- exit);
-
- /* Guard array capacity */
- guard(true,
- lir->ins2(LIR_pult,
- pidx_ins,
- lir->insLoad(LIR_ldp, dslots_ins, 0 - (int)sizeof(jsval))),
- exit);
-
- /* Load the value and guard on its type to unbox it. */
- vp = &obj->dslots[jsuint(idx)];
- addr_ins = lir->ins2(LIR_piadd, dslots_ins,
- lir->ins2i(LIR_pilsh, pidx_ins, (sizeof(jsval) == 4) ? 2 : 3));
- v_ins = unbox_jsval(*vp, lir->insLoad(LIR_ldp, addr_ins, 0), exit);
-
- if (JSVAL_IS_SPECIAL(*vp)) {
- /*
- * If we read a hole from the array, convert it to undefined and guard
- * that there are no indexed properties along the prototype chain.
- */
- LIns* br = lir->insBranch(LIR_jf,
- lir->ins2i(LIR_eq, v_ins, JSVAL_TO_SPECIAL(JSVAL_HOLE)),
- NULL);
- CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
- br->setTarget(lir->ins0(LIR_label));
-
- /* Don't let the hole value escape. Turn it into an undefined. */
- v_ins = lir->ins2i(LIR_and, v_ins, ~(JSVAL_HOLE_FLAG >> JSVAL_TAGBITS));
- }
- return JSRS_CONTINUE;
-}
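-
-/*
- * The hole is an engine-internal special value; masking off the hole flag
- * above turns it into undefined, so reading a missing element behaves as in
- * the interpreter (given the guard that no prototype has indexed properties):
- *
- *     var a = [, 1];   // a[0] is a hole
- *     a[0];            // yields undefined
- */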
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getProp(JSObject* obj, LIns* obj_ins)
-{
- const JSCodeSpec& cs = js_CodeSpec[*cx->fp->regs->pc];
- JS_ASSERT(cs.ndefs == 1);
- return prop(obj, obj_ins, NULL, NULL, &stackval(-cs.nuses));
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::getProp(jsval& v)
-{
- if (JSVAL_IS_PRIMITIVE(v))
- ABORT_TRACE("primitive lhs");
-
- return getProp(JSVAL_TO_OBJECT(v), get(&v));
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NAME()
-{
- jsval* vp;
- LIns* v_ins;
- NameResult nr;
- CHECK_STATUS(name(vp, v_ins, nr));
- stack(0, v_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DOUBLE()
-{
- jsval v = jsval(atoms[GET_INDEX(cx->fp->regs->pc)]);
- stack(0, lir->insImmf(*JSVAL_TO_DOUBLE(v)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STRING()
-{
- JSAtom* atom = atoms[GET_INDEX(cx->fp->regs->pc)];
- JS_ASSERT(ATOM_IS_STRING(atom));
- stack(0, INS_ATOM(atom));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ZERO()
-{
- stack(0, lir->insImmf(0));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ONE()
-{
- stack(0, lir->insImmf(1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NULL()
-{
- stack(0, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_THIS()
-{
- LIns* this_ins;
- CHECK_STATUS(getThis(this_ins));
- stack(0, this_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FALSE()
-{
- stack(0, lir->insImm(0));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TRUE()
-{
- stack(0, lir->insImm(1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_OR()
-{
- return ifop();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_AND()
-{
- return ifop();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TABLESWITCH()
-{
-#ifdef NANOJIT_IA32
- /* Handle tableswitches specially -- prepare a jump table if needed. */
- return tableswitch();
-#else
- return switchop();
-#endif
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LOOKUPSWITCH()
-{
- return switchop();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STRICTEQ()
-{
- strictEquality(true, false);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STRICTNE()
-{
- strictEquality(false, false);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_OBJECT()
-{
- JSStackFrame* fp = cx->fp;
- JSScript* script = fp->script;
- unsigned index = atoms - script->atomMap.vector + GET_INDEX(fp->regs->pc);
-
- JSObject* obj;
- obj = script->getObject(index);
- stack(0, INS_CONSTOBJ(obj));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_POP()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TRAP()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETARG()
-{
- stack(0, arg(GET_ARGNO(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETARG()
-{
- arg(GET_ARGNO(cx->fp->regs->pc), stack(-1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETLOCAL()
-{
- stack(0, var(GET_SLOTNO(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETLOCAL()
-{
- var(GET_SLOTNO(cx->fp->regs->pc), stack(-1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_UINT16()
-{
- stack(0, lir->insImmf(GET_UINT16(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NEWINIT()
-{
- JSProtoKey key = JSProtoKey(GET_INT8(cx->fp->regs->pc));
- LIns *proto_ins;
- CHECK_STATUS(getClassPrototype(key, proto_ins));
-
- LIns* args[] = { proto_ins, cx_ins };
- const CallInfo *ci = (key == JSProto_Array) ? &js_NewEmptyArray_ci : &js_Object_tn_ci;
- LIns* v_ins = lir->insCall(ci, args);
- guard(false, lir->ins_peq0(v_ins), OOM_EXIT);
- stack(0, v_ins);
- return JSRS_CONTINUE;
-}
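-
-/*
- * NEWINIT covers both literal forms; the prototype key picks the builtin:
- *
- *     [] or [1, 2]   -> JSProto_Array  -> js_NewEmptyArray
- *     {} or {a: 1}   -> JSProto_Object -> js_Object_tn
- */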
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENDINIT()
-{
-#ifdef DEBUG
- jsval& v = stackval(-1);
- JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
-#endif
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INITPROP()
-{
- // All the action is in record_SetPropHit.
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INITELEM()
-{
- return record_JSOP_SETELEM();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFSHARP()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_USESHARP()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCARG()
-{
- return inc(argval(GET_ARGNO(cx->fp->regs->pc)), 1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCLOCAL()
-{
- return inc(varval(GET_SLOTNO(cx->fp->regs->pc)), 1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECARG()
-{
- return inc(argval(GET_ARGNO(cx->fp->regs->pc)), -1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECLOCAL()
-{
- return inc(varval(GET_SLOTNO(cx->fp->regs->pc)), -1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARGINC()
-{
- return inc(argval(GET_ARGNO(cx->fp->regs->pc)), 1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LOCALINC()
-{
- return inc(varval(GET_SLOTNO(cx->fp->regs->pc)), 1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARGDEC()
-{
- return inc(argval(GET_ARGNO(cx->fp->regs->pc)), -1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LOCALDEC()
-{
- return inc(varval(GET_SLOTNO(cx->fp->regs->pc)), -1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IMACOP()
-{
- JS_ASSERT(cx->fp->imacpc);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ITER()
-{
- jsval& v = stackval(-1);
- if (JSVAL_IS_PRIMITIVE(v))
- ABORT_TRACE("for-in on a primitive value");
- ABORT_IF_XML(v);
-
- jsuint flags = cx->fp->regs->pc[1];
-
- if (hasIteratorMethod(JSVAL_TO_OBJECT(v))) {
- if (flags == JSITER_ENUMERATE)
- return call_imacro(iter_imacros.for_in);
- if (flags == (JSITER_ENUMERATE | JSITER_FOREACH))
- return call_imacro(iter_imacros.for_each);
- } else {
- if (flags == JSITER_ENUMERATE)
- return call_imacro(iter_imacros.for_in_native);
- if (flags == (JSITER_ENUMERATE | JSITER_FOREACH))
- return call_imacro(iter_imacros.for_each_native);
- }
- ABORT_TRACE("unimplemented JSITER_* flags");
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NEXTITER()
-{
- jsval& iterobj_val = stackval(-2);
- if (JSVAL_IS_PRIMITIVE(iterobj_val))
- ABORT_TRACE("for-in on a primitive value");
- ABORT_IF_XML(iterobj_val);
- JSObject* iterobj = JSVAL_TO_OBJECT(iterobj_val);
- JSClass* clasp = STOBJ_GET_CLASS(iterobj);
- LIns* iterobj_ins = get(&iterobj_val);
- guardClass(iterobj, iterobj_ins, clasp, snapshot(BRANCH_EXIT));
- if (clasp == &js_IteratorClass || clasp == &js_GeneratorClass)
- return call_imacro(nextiter_imacros.native_iter_next);
- return call_imacro(nextiter_imacros.custom_iter_next);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENDITER()
-{
- LIns* args[] = { stack(-2), cx_ins };
- LIns* ok_ins = lir->insCall(&js_CloseIterator_ci, args);
- guard(false, lir->ins_eq0(ok_ins), MISMATCH_EXIT);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FORNAME()
-{
- jsval* vp;
- LIns* x_ins;
- NameResult nr;
- CHECK_STATUS(name(vp, x_ins, nr));
- if (!nr.tracked)
- ABORT_TRACE("forname on non-tracked value not supported");
- set(vp, stack(-1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FORPROP()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FORELEM()
-{
- return record_JSOP_DUP();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FORARG()
-{
- return record_JSOP_SETARG();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FORLOCAL()
-{
- return record_JSOP_SETLOCAL();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_POPN()
-{
- return JSRS_CONTINUE;
-}
-
-/*
- * Generate LIR to reach |targetObj| from |obj| by traversing the scope chain. The
- * generated code also ensures that any call objects found have not changed shape.
- *
- * obj starting object
- * obj_ins LIR instruction representing obj
- * targetObj end object for traversal
- * targetIns [out] LIR instruction representing targetObj
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::traverseScopeChain(JSObject *obj, LIns *obj_ins, JSObject *targetObj,
- LIns *&targetIns)
-{
- VMSideExit* exit = NULL;
-
- /*
- * Scope chains are often left "incomplete", and reified lazily when
- * necessary, since doing so is expensive. When creating null and flat
- * closures on trace (the only kinds supported), the global object is
- * hardcoded as the parent, since reifying the scope chain on trace
- * would be extremely difficult. This is because block objects need frame
- * pointers, which do not exist on trace, and thus would require magic
- * similar to arguments objects or reification of stack frames. Luckily,
- * for null and flat closures, these blocks are unnecessary.
- *
- * The problem, as exposed by bug 523793, is that this means creating a
- * fixed traversal on trace can be inconsistent with the shorter scope
- * chain used when executing a trace. To address this, perform an initial
- * sweep of the scope chain to make sure that if there is a heavyweight
- * function with a call object, and there is also a block object, the
- * trace is safely aborted.
- *
- * If there is no call object, we must have arrived at the global object,
- * and can bypass the scope chain traversal completely.
- */
- bool foundCallObj = false;
- bool foundBlockObj = false;
- JSObject* searchObj = obj;
-
- for (;;) {
- if (searchObj != globalObj) {
- JSClass* cls = STOBJ_GET_CLASS(searchObj);
- if (cls == &js_BlockClass) {
- foundBlockObj = true;
- } else if (cls == &js_CallClass) {
- // If the function that owns this call object is not heavyweight, then
- // we can't be sure it will always be there, which means the scope chain
- // does not have a definite length, so abort.
- if (JSFUN_HEAVYWEIGHT_TEST(js_GetCallObjectFunction(searchObj)->flags))
- foundCallObj = true;
- else
- ABORT_TRACE("found call object for non-heavyweight function on scope chain");
- }
- }
-
- if (searchObj == targetObj)
- break;
-
- searchObj = STOBJ_GET_PARENT(searchObj);
- if (!searchObj)
- ABORT_TRACE("cannot traverse this scope chain on trace");
- }
-
- if (!foundCallObj) {
- JS_ASSERT(targetObj == globalObj);
- targetIns = INS_CONSTPTR(globalObj);
- return JSRS_CONTINUE;
- }
-
- if (foundBlockObj)
- ABORT_TRACE("cannot traverse this scope chain on trace");
-
- /* There was a call object, or should be a call object now. */
- for (;;) {
- if (obj != globalObj) {
- if (!js_IsCacheableNonGlobalScope(obj))
- ABORT_TRACE("scope chain lookup crosses non-cacheable object");
-
- // We must guard on the shape of all call objects for heavyweight functions
- // that we traverse on the scope chain: if the shape changes, a variable with
- // the same name may have been inserted in the scope chain.
- if (STOBJ_GET_CLASS(obj) == &js_CallClass &&
- JSFUN_HEAVYWEIGHT_TEST(js_GetCallObjectFunction(obj)->flags)) {
- LIns* map_ins = map(obj_ins);
- LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)),
- "obj_shape");
- if (!exit)
- exit = snapshot(BRANCH_EXIT);
- guard(true,
- addName(lir->ins2i(LIR_eq, shape_ins, OBJ_SHAPE(obj)), "guard_shape"),
- exit);
- }
- }
-
- JS_ASSERT(STOBJ_GET_CLASS(obj) != &js_BlockClass);
-
- if (obj == targetObj)
- break;
-
- obj = STOBJ_GET_PARENT(obj);
- obj_ins = stobj_get_parent(obj_ins);
- }
-
- targetIns = obj_ins;
- return JSRS_CONTINUE;
-}
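-
-/*
- * Example: a name resolved through an enclosing heavyweight function's Call
- * object gets a shape guard here, so that if eval later inserts a shadowing
- * binding into that Call object, the trace side-exits instead of reading a
- * stale slot.
- */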
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BINDNAME()
-{
- JSStackFrame *fp = cx->fp;
- JSObject *obj;
-
- if (!fp->fun) {
- obj = fp->scopeChain;
-
- // In global code, fp->scopeChain can only contain blocks whose values
- // are still on the stack. We never use BINDNAME to refer to these.
- while (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
- // The block's values are still on the stack.
- JS_ASSERT(obj->getPrivate() == fp);
- obj = OBJ_GET_PARENT(cx, obj);
- // Blocks always have parents.
- JS_ASSERT(obj);
- }
-
- if (obj != globalObj)
- ABORT_TRACE("BINDNAME in global code resolved to non-global object");
-
- /*
- * The trace is specialized to this global object. Furthermore, we know it
- * is the sole 'global' object on the scope chain: we set globalObj to the
- * scope chain element with no parent, and we reached it starting from the
- * function closure or the current scopeChain, so there is nothing inner to
- * it. Therefore this must be the right base object.
- */
- stack(0, INS_CONSTOBJ(obj));
- return JSRS_CONTINUE;
- }
-
- // We can't trace BINDNAME in functions that contain direct calls to eval,
- // as they might add bindings which previously-traced references would have
- // to see.
- if (JSFUN_HEAVYWEIGHT_TEST(fp->fun->flags))
- ABORT_TRACE("BINDNAME in heavyweight function.");
-
- // We don't have the scope chain on trace, so instead we get a start object
- // that is on the scope chain and doesn't skip the target object (the one
- // that contains the property).
- jsval *callee = &cx->fp->argv[-2];
- obj = STOBJ_GET_PARENT(JSVAL_TO_OBJECT(*callee));
- if (obj == globalObj) {
- stack(0, INS_CONSTOBJ(obj));
- return JSRS_CONTINUE;
- }
- LIns *obj_ins = stobj_get_parent(get(callee));
-
- // Find the target object.
- JSAtom *atom = atoms[GET_INDEX(cx->fp->regs->pc)];
- jsid id = ATOM_TO_JSID(atom);
- JSContext *localCx = cx;
- JSObject *obj2 = js_FindIdentifierBase(cx, fp->scopeChain, id);
- if (!obj2)
- ABORT_TRACE_ERROR("js_FindIdentifierBase failed");
- if (!TRACE_RECORDER(localCx))
- return JSRS_STOP;
- if (obj2 != globalObj && STOBJ_GET_CLASS(obj2) != &js_CallClass)
- ABORT_TRACE("BINDNAME on non-global, non-call object");
-
- // Generate LIR to get to the target object from the start object.
- LIns *obj2_ins;
- CHECK_STATUS(traverseScopeChain(obj, obj_ins, obj2, obj2_ins));
-
- // If |obj2| is the global object, we can refer to it directly instead of walking up
- // the scope chain. There may still be guards on intervening call objects.
- stack(0, obj2 == globalObj ? INS_CONSTOBJ(obj2) : obj2_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETNAME()
-{
- jsval& l = stackval(-2);
- JS_ASSERT(!JSVAL_IS_PRIMITIVE(l));
-
- /*
- * Trace only cases that are global code, in lightweight functions
- * scoped by the global object only, or in call objects.
- */
- JSObject* obj = JSVAL_TO_OBJECT(l);
- if (OBJ_GET_CLASS(cx, obj) == &js_CallClass)
- return JSRS_CONTINUE;
- if (obj != cx->fp->scopeChain || obj != globalObj)
- ABORT_TRACE("JSOP_SETNAME left operand is not the global object");
-
- // The rest of the work is in record_SetPropHit.
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_THROW()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IN()
-{
- jsval& rval = stackval(-1);
- jsval& lval = stackval(-2);
-
- if (JSVAL_IS_PRIMITIVE(rval))
- ABORT_TRACE("JSOP_IN on non-object right operand");
- JSObject* obj = JSVAL_TO_OBJECT(rval);
- LIns* obj_ins = get(&rval);
-
- jsid id;
- LIns* x;
- if (JSVAL_IS_INT(lval)) {
- id = INT_JSVAL_TO_JSID(lval);
- LIns* args[] = { makeNumberInt32(get(&lval)), obj_ins, cx_ins };
- x = lir->insCall(&js_HasNamedPropertyInt32_ci, args);
- } else if (JSVAL_IS_STRING(lval)) {
- if (!js_ValueToStringId(cx, lval, &id))
- ABORT_TRACE_ERROR("left operand of JSOP_IN didn't convert to a string-id");
- LIns* args[] = { get(&lval), obj_ins, cx_ins };
- x = lir->insCall(&js_HasNamedProperty_ci, args);
- } else {
- ABORT_TRACE("string or integer expected");
- }
-
- guard(false, lir->ins2i(LIR_eq, x, JSVAL_TO_SPECIAL(JSVAL_VOID)), OOM_EXIT);
- x = lir->ins2i(LIR_eq, x, 1);
-
- JSTraceMonitor &localtm = *traceMonitor;
- JSContext *localcx = cx;
-
- JSObject* obj2;
- JSProperty* prop;
- bool ok = obj->lookupProperty(cx, id, &obj2, &prop);
-
- /* lookupProperty can reenter the interpreter and kill |this|. */
- if (!localtm.recorder) {
- if (prop)
- obj2->dropProperty(localcx, prop);
- return JSRS_STOP;
- }
-
- if (!ok)
- ABORT_TRACE_ERROR("obj->lookupProperty failed in JSOP_IN");
- bool cond = prop != NULL;
- if (prop)
- obj2->dropProperty(cx, prop);
-
- /*
- * The interpreter fuses comparisons and the following branch, so we have
- * to do that here as well.
- */
- fuseIf(cx->fp->regs->pc + 1, cond, x);
-
- /*
- * We update the stack after the guard. This is safe since the guard bails
- * out at the comparison and the interpreter will therefore re-execute the
- * comparison. This way the value of the condition doesn't have to be
- * calculated and saved on the stack in most cases.
- */
- set(&lval, x);
- return JSRS_CONTINUE;
-}
-
-static JSBool FASTCALL
-HasInstance(JSContext* cx, JSObject* ctor, jsval val)
-{
- JSBool result = JS_FALSE;
- if (!ctor->map->ops->hasInstance(cx, ctor, val, &result))
- js_SetBuiltinError(cx);
- return result;
-}
-JS_DEFINE_CALLINFO_3(static, BOOL_FAIL, HasInstance, CONTEXT, OBJECT, JSVAL, 0, 0)
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INSTANCEOF()
-{
- // If the rhs isn't an object, we are headed for a TypeError.
- jsval& ctor = stackval(-1);
- if (JSVAL_IS_PRIMITIVE(ctor))
- ABORT_TRACE("non-object on rhs of instanceof");
-
- jsval& val = stackval(-2);
- LIns* val_ins = box_jsval(val, get(&val));
-
- enterDeepBailCall();
- LIns* args[] = {val_ins, get(&ctor), cx_ins};
- stack(-2, lir->insCall(&HasInstance_ci, args));
- LIns* status_ins = lir->insLoad(LIR_ld,
- lirbuf->state,
- (int) offsetof(InterpState, builtinStatus));
- pendingGuardCondition = lir->ins_eq0(status_ins);
- leaveDeepBailCall();
-
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEBUGGER()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GOSUB()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RETSUB()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_EXCEPTION()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LINENO()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CONDSWITCH()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CASE()
-{
- strictEquality(true, true);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFAULT()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_EVAL()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENUMELEM()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETTER()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETTER()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFFUN()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFFUN_FC()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFCONST()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFVAR()
-{
- return JSRS_STOP;
-}
-
-jsatomid
-TraceRecorder::getFullIndex(ptrdiff_t pcoff)
-{
- jsatomid index = GET_INDEX(cx->fp->regs->pc + pcoff);
- index += atoms - cx->fp->script->atomMap.vector;
- return index;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LAMBDA()
-{
- JSFunction* fun;
- fun = cx->fp->script->getFunction(getFullIndex());
-
- if (FUN_NULL_CLOSURE(fun) && OBJ_GET_PARENT(cx, FUN_OBJECT(fun)) == globalObj) {
- LIns *proto_ins;
- CHECK_STATUS(getClassPrototype(JSProto_Function, proto_ins));
-
- LIns* args[] = { INS_CONSTOBJ(globalObj), proto_ins, INS_CONSTFUN(fun), cx_ins };
- LIns* x = lir->insCall(&js_NewNullClosure_ci, args);
- stack(0, x);
- return JSRS_CONTINUE;
- }
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LAMBDA_FC()
-{
- JSFunction* fun;
- fun = cx->fp->script->getFunction(getFullIndex());
-
- LIns* scopeChain_ins = get(&cx->fp->argv[-2]);
- JS_ASSERT(scopeChain_ins);
-
- LIns* args[] = {
- scopeChain_ins,
- INS_CONSTFUN(fun),
- cx_ins
- };
- LIns* call_ins = lir->insCall(&js_AllocFlatClosure_ci, args);
- guard(false,
- addName(lir->ins2(LIR_peq, call_ins, INS_NULL()),
- "guard(js_AllocFlatClosure)"),
- OOM_EXIT);
-
- if (fun->u.i.nupvars) {
- JSUpvarArray *uva = fun->u.i.script->upvars();
- for (uint32 i = 0, n = uva->length; i < n; i++) {
- jsval v;
- LIns* upvar_ins = upvar(fun->u.i.script, uva, i, v);
- if (!upvar_ins)
- return JSRS_STOP;
- LIns* dslots_ins = NULL;
- stobj_set_dslot(call_ins, i, dslots_ins, box_jsval(v, upvar_ins));
- }
- }
-
- stack(0, call_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLEE()
-{
- stack(0, get(&cx->fp->argv[-2]));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETLOCALPOP()
-{
- var(GET_SLOTNO(cx->fp->regs->pc), stack(-1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IFPRIMTOP()
-{
- // Traces are type-specialized, including null vs. object, so we need do
- // nothing here. The upstream unbox_jsval called after valueOf or toString
- // from an imacro (e.g.) will fork the trace for us, allowing us to just
- // follow along mindlessly :-).
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETCALL()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TRY()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FINALLY()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NOP()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARGSUB()
-{
- JSStackFrame* fp = cx->fp;
- if (!(fp->fun->flags & JSFUN_HEAVYWEIGHT)) {
- uintN slot = GET_ARGNO(fp->regs->pc);
- if (slot >= fp->argc)
- ABORT_TRACE("can't trace out-of-range arguments");
- stack(0, get(&cx->fp->argv[slot]));
- return JSRS_CONTINUE;
- }
- ABORT_TRACE("can't trace JSOP_ARGSUB hard case");
-}
-
-JS_REQUIRES_STACK LIns*
-TraceRecorder::guardArgsLengthNotAssigned(LIns* argsobj_ins)
-{
- // The following implements js_IsOverriddenArgsLength on trace.
- // The '2' bit is set if length was overridden.
- LIns *len_ins = stobj_get_fslot(argsobj_ins, JSSLOT_ARGS_LENGTH);
- LIns *ovr_ins = lir->ins2(LIR_piand, len_ins, INS_CONSTWORD(2));
- guard(true, lir->ins_peq0(ovr_ins), snapshot(BRANCH_EXIT));
- return len_ins;
-}
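-
-/*
- * A minimal sketch of the slot encoding these guards imply (and that
- * record_JSOP_LENGTH below decodes): the length sits above two low bits,
- * and bit 1 (mask 2) records an assignment to arguments.length. Helper
- * names here are hypothetical; the authoritative decoding is GetArgsLength
- * in jsfun.cpp.
- */
-static inline unsigned
-ExampleArgsLength(unsigned slotWord)
-{
-    return slotWord >> 2;          // mirrors the LIR_rsh by 2 below
-}
-
-static inline bool
-ExampleArgsLengthOverridden(unsigned slotWord)
-{
-    return (slotWord & 2) != 0;    // mirrors the LIR_piand with 2 above
-}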
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARGCNT()
-{
- if (cx->fp->fun->flags & JSFUN_HEAVYWEIGHT)
- ABORT_TRACE("can't trace heavyweight JSOP_ARGCNT");
-
- // argc is fixed on trace, so ideally we would simply generate LIR for
- // constant argc. But the user can mutate arguments.length in the
- // interpreter, so we have to check for that in the trace entry frame.
- // We also have to check that arguments.length has not been mutated
- // at record time, because if so we will generate incorrect constant
- // LIR, which will assert in alu().
- if (cx->fp->argsobj && js_IsOverriddenArgsLength(JSVAL_TO_OBJECT(cx->fp->argsobj)))
- ABORT_TRACE("can't trace JSOP_ARGCNT if arguments.length has been modified");
- LIns *a_ins = get(&cx->fp->argsobj);
- if (callDepth == 0) {
- LIns *br = lir->insBranch(LIR_jt, lir->ins_peq0(a_ins), NULL);
- guardArgsLengthNotAssigned(a_ins);
- LIns *label = lir->ins0(LIR_label);
- br->setTarget(label);
- }
- stack(0, lir->insImmf(cx->fp->argc));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_DefLocalFunSetSlot(uint32 slot, JSObject* obj)
-{
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, obj);
-
- if (FUN_NULL_CLOSURE(fun) && OBJ_GET_PARENT(cx, FUN_OBJECT(fun)) == globalObj) {
- LIns *proto_ins;
- CHECK_STATUS(getClassPrototype(JSProto_Function, proto_ins));
-
- LIns* args[] = { INS_CONSTOBJ(globalObj), proto_ins, INS_CONSTFUN(fun), cx_ins };
- LIns* x = lir->insCall(&js_NewNullClosure_ci, args);
- var(slot, x);
- return JSRS_CONTINUE;
- }
-
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFLOCALFUN()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFLOCALFUN_FC()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GOTOX()
-{
- return record_JSOP_GOTO();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IFEQX()
-{
- return record_JSOP_IFEQ();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_IFNEX()
-{
- return record_JSOP_IFNE();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ORX()
-{
- return record_JSOP_OR();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ANDX()
-{
- return record_JSOP_AND();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GOSUBX()
-{
- return record_JSOP_GOSUB();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CASEX()
-{
- strictEquality(true, true);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFAULTX()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TABLESWITCHX()
-{
- return record_JSOP_TABLESWITCH();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LOOKUPSWITCHX()
-{
- return switchop();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BACKPATCH()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BACKPATCH_POP()
-{
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_THROWING()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETRVAL()
-{
- // If we implement this, we need to update JSOP_STOP.
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RETRVAL()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETGVAR()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
- return JSRS_CONTINUE; // We will see JSOP_NAME from the interpreter's jump, so no-op here.
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- stack(0, get(&STOBJ_GET_SLOT(globalObj, slot)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETGVAR()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
-        return JSRS_CONTINUE; // We will see the name-based variant of this op from the interpreter's jump, so no-op here.
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- set(&STOBJ_GET_SLOT(globalObj, slot), stack(-1));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INCGVAR()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
- // We will see JSOP_INCNAME from the interpreter's jump, so no-op here.
- return JSRS_CONTINUE;
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- return inc(STOBJ_GET_SLOT(globalObj, slot), 1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DECGVAR()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
-        // We will see the name-based variant of this op from the interpreter's jump, so no-op here.
- return JSRS_CONTINUE;
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- return inc(STOBJ_GET_SLOT(globalObj, slot), -1);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GVARINC()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
-        // We will see the name-based variant of this op from the interpreter's jump, so no-op here.
- return JSRS_CONTINUE;
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- return inc(STOBJ_GET_SLOT(globalObj, slot), 1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GVARDEC()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
-        // We will see the name-based variant of this op from the interpreter's jump, so no-op here.
- return JSRS_CONTINUE;
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- return inc(STOBJ_GET_SLOT(globalObj, slot), -1, false);
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_REGEXP()
-{
- return JSRS_STOP;
-}
-
-// begin JS_HAS_XML_SUPPORT
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DEFXMLNS()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ANYNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_QNAMEPART()
-{
- return record_JSOP_STRING();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_QNAMECONST()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_QNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TOATTRNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TOATTRVAL()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ADDATTRNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ADDATTRVAL()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_BINDXMLNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_SETXMLNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DESCENDANTS()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_FILTER()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENDFILTER()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TOXML()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TOXMLLIST()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLTAGEXPR()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLELTEXPR()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLOBJECT()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLCDATA()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLCOMMENT()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_XMLPI()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETFUNNS()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STARTXML()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STARTXMLEXPR()
-{
- return JSRS_STOP;
-}
-
-// end JS_HAS_XML_SUPPORT
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLPROP()
-{
- jsval& l = stackval(-1);
- JSObject* obj;
- LIns* obj_ins;
- LIns* this_ins;
- if (!JSVAL_IS_PRIMITIVE(l)) {
- obj = JSVAL_TO_OBJECT(l);
- obj_ins = get(&l);
- this_ins = obj_ins; // |this| for subsequent call
- } else {
- jsint i;
- debug_only_stmt(const char* protoname = NULL;)
- if (JSVAL_IS_STRING(l)) {
- i = JSProto_String;
- debug_only_stmt(protoname = "String.prototype";)
- } else if (JSVAL_IS_NUMBER(l)) {
- i = JSProto_Number;
- debug_only_stmt(protoname = "Number.prototype";)
- } else if (JSVAL_IS_SPECIAL(l)) {
- if (l == JSVAL_VOID)
- ABORT_TRACE("callprop on void");
- guard(false, lir->ins2i(LIR_eq, get(&l), JSVAL_TO_SPECIAL(JSVAL_VOID)), MISMATCH_EXIT);
- i = JSProto_Boolean;
- debug_only_stmt(protoname = "Boolean.prototype";)
- } else {
- JS_ASSERT(JSVAL_IS_NULL(l) || JSVAL_IS_VOID(l));
- ABORT_TRACE("callprop on null or void");
- }
-
- if (!js_GetClassPrototype(cx, NULL, INT_TO_JSID(i), &obj))
- ABORT_TRACE_ERROR("GetClassPrototype failed!");
-
- obj_ins = INS_CONSTOBJ(obj);
- debug_only_stmt(obj_ins = addName(obj_ins, protoname);)
- this_ins = get(&l); // use primitive as |this|
- }
-
- JSObject* obj2;
- jsuword pcval;
- CHECK_STATUS(test_property_cache(obj, obj_ins, obj2, pcval));
-
- if (PCVAL_IS_NULL(pcval) || !PCVAL_IS_OBJECT(pcval))
- ABORT_TRACE("callee is not an object");
- JS_ASSERT(HAS_FUNCTION_CLASS(PCVAL_TO_OBJECT(pcval)));
-
- if (JSVAL_IS_PRIMITIVE(l)) {
- JSFunction* fun = GET_FUNCTION_PRIVATE(cx, PCVAL_TO_OBJECT(pcval));
- if (!PRIMITIVE_THIS_TEST(fun, l))
- ABORT_TRACE("callee does not accept primitive |this|");
- }
-
- stack(0, this_ins);
- stack(-1, INS_CONSTOBJ(PCVAL_TO_OBJECT(pcval)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_DELDESC()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_UINT24()
-{
- stack(0, lir->insImmf(GET_UINT24(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INDEXBASE()
-{
- atoms += GET_INDEXBASE(cx->fp->regs->pc);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RESETBASE()
-{
- atoms = cx->fp->script->atomMap.vector;
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_RESETBASE0()
-{
- atoms = cx->fp->script->atomMap.vector;
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLELEM()
-{
- return record_JSOP_GETELEM();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_STOP()
-{
- JSStackFrame *fp = cx->fp;
-
- if (fp->imacpc) {
- /*
- * End of imacro, so return true to the interpreter immediately. The
- * interpreter's JSOP_STOP case will return from the imacro, back to
- * the pc after the calling op, still in the same JSStackFrame.
- */
- atoms = fp->script->atomMap.vector;
- return JSRS_CONTINUE;
- }
-
- putArguments();
-
- /*
- * We know falling off the end of a constructor returns the new object that
- * was passed in via fp->argv[-1], while falling off the end of a function
- * returns undefined.
- *
- * NB: we do not support script rval (eval, API users who want the result
- * of the last expression-statement, debugger API calls).
- */
- if (fp->flags & JSFRAME_CONSTRUCTING) {
- JS_ASSERT(OBJECT_TO_JSVAL(fp->thisp) == fp->argv[-1]);
- rval_ins = get(&fp->argv[-1]);
- } else {
- rval_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
- }
- clearFrameSlotsFromCache();
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETXPROP()
-{
- jsval& l = stackval(-1);
- if (JSVAL_IS_PRIMITIVE(l))
- ABORT_TRACE("primitive-this for GETXPROP?");
-
- jsval* vp;
- LIns* v_ins;
- NameResult nr;
- CHECK_STATUS(name(vp, v_ins, nr));
- stack(-1, v_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLXMLNAME()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_TYPEOFEXPR()
-{
- return record_JSOP_TYPEOF();
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENTERBLOCK()
-{
- JSObject* obj;
- obj = cx->fp->script->getObject(getFullIndex(0));
-
- LIns* void_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
- for (int i = 0, n = OBJ_BLOCK_COUNT(cx, obj); i < n; i++)
- stack(i, void_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LEAVEBLOCK()
-{
- /* We mustn't exit the lexical block we began recording in. */
- if (cx->fp->blockChain != lexicalBlock)
- return JSRS_CONTINUE;
- else
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GENERATOR()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_YIELD()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ARRAYPUSH()
-{
- uint32_t slot = GET_UINT16(cx->fp->regs->pc);
- JS_ASSERT(cx->fp->script->nfixed <= slot);
- JS_ASSERT(cx->fp->slots + slot < cx->fp->regs->sp - 1);
- jsval &arrayval = cx->fp->slots[slot];
- JS_ASSERT(JSVAL_IS_OBJECT(arrayval));
- JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, JSVAL_TO_OBJECT(arrayval)));
- LIns *array_ins = get(&arrayval);
- jsval &elt = stackval(-1);
- LIns *elt_ins = box_jsval(elt, get(&elt));
-
- LIns *args[] = { elt_ins, array_ins, cx_ins };
- LIns *ok_ins = lir->insCall(&js_ArrayCompPush_ci, args);
- guard(false, lir->ins_eq0(ok_ins), OOM_EXIT);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_ENUMCONSTELEM()
-{
- return JSRS_STOP;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LEAVEBLOCKEXPR()
-{
- LIns* v_ins = stack(-1);
- int n = -1 - GET_UINT16(cx->fp->regs->pc);
- stack(n, v_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETTHISPROP()
-{
- LIns* this_ins;
-
- CHECK_STATUS(getThis(this_ins));
-
- /*
- * It's safe to just use cx->fp->thisp here because getThis() returns
- * JSRS_STOP if thisp is not available.
- */
- CHECK_STATUS(getProp(cx->fp->thisp, this_ins));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETARGPROP()
-{
- return getProp(argval(GET_ARGNO(cx->fp->regs->pc)));
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_GETLOCALPROP()
-{
- return getProp(varval(GET_SLOTNO(cx->fp->regs->pc)));
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INDEXBASE1()
-{
- atoms += 1 << 16;
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INDEXBASE2()
-{
- atoms += 2 << 16;
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INDEXBASE3()
-{
- atoms += 3 << 16;
- return JSRS_CONTINUE;
-}
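-
-/*
- * Together with getFullIndex above, the JSOP_INDEXBASE* cases implement a
- * windowing scheme for 16-bit atom immediates: a base op selects a
- * 64K-aligned window, so the effective atom index is, in effect,
- *
- *     fullIndex = (indexBase << 16) + GET_INDEX(pc)
- *
- * which is why each case simply advances the |atoms| cursor by n << 16.
- */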
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLGVAR()
-{
- jsval slotval = cx->fp->slots[GET_SLOTNO(cx->fp->regs->pc)];
- if (JSVAL_IS_NULL(slotval))
- // We will see JSOP_CALLNAME from the interpreter's jump, so no-op here.
- return JSRS_CONTINUE;
-
- uint32 slot = JSVAL_TO_INT(slotval);
-
- if (!lazilyImportGlobalSlot(slot))
- ABORT_TRACE("lazy import of global slot failed");
-
- jsval& v = STOBJ_GET_SLOT(globalObj, slot);
- stack(0, get(&v));
- stack(1, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLLOCAL()
-{
- uintN slot = GET_SLOTNO(cx->fp->regs->pc);
- stack(0, var(slot));
- stack(1, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLARG()
-{
- uintN slot = GET_ARGNO(cx->fp->regs->pc);
- stack(0, arg(slot));
- stack(1, INS_NULL());
- return JSRS_CONTINUE;
-}
-
-/* Functions for use with JSOP_CALLBUILTIN. */
-
-static JSBool
-ObjectToIterator(JSContext *cx, uintN argc, jsval *vp)
-{
- jsval *argv = JS_ARGV(cx, vp);
- JS_ASSERT(JSVAL_IS_INT(argv[0]));
- JS_SET_RVAL(cx, vp, JS_THIS(cx, vp));
- return js_ValueToIterator(cx, JSVAL_TO_INT(argv[0]), &JS_RVAL(cx, vp));
-}
-
-static JSObject* FASTCALL
-ObjectToIterator_tn(JSContext* cx, jsbytecode* pc, JSObject *obj, int32 flags)
-{
- jsval v = OBJECT_TO_JSVAL(obj);
- JSBool ok = js_ValueToIterator(cx, flags, &v);
-
- if (!ok) {
- js_SetBuiltinError(cx);
- return NULL;
- }
- return JSVAL_TO_OBJECT(v);
-}
-
-static JSBool
-CallIteratorNext(JSContext *cx, uintN argc, jsval *vp)
-{
- return js_CallIteratorNext(cx, JS_THIS_OBJECT(cx, vp), &JS_RVAL(cx, vp));
-}
-
-static jsval FASTCALL
-CallIteratorNext_tn(JSContext* cx, jsbytecode* pc, JSObject* iterobj)
-{
- JSAutoTempValueRooter tvr(cx);
- JSBool ok = js_CallIteratorNext(cx, iterobj, tvr.addr());
-
- if (!ok) {
- js_SetBuiltinError(cx);
- return JSVAL_ERROR_COOKIE;
- }
- return tvr.value();
-}
-
-JS_DEFINE_TRCINFO_1(ObjectToIterator,
- (4, (static, OBJECT_FAIL, ObjectToIterator_tn, CONTEXT, PC, THIS, INT32, 0, 0)))
-JS_DEFINE_TRCINFO_1(CallIteratorNext,
- (3, (static, JSVAL_FAIL, CallIteratorNext_tn, CONTEXT, PC, THIS, 0, 0)))
-
-static const struct BuiltinFunctionInfo {
- JSNativeTraceInfo *ti;
- int nargs;
-} builtinFunctionInfo[JSBUILTIN_LIMIT] = {
- {&ObjectToIterator_trcinfo, 1},
- {&CallIteratorNext_trcinfo, 0},
-};
-
-JSObject *
-js_GetBuiltinFunction(JSContext *cx, uintN index)
-{
- JSRuntime *rt = cx->runtime;
- JSObject *funobj = rt->builtinFunctions[index];
-
- if (!funobj) {
- /* Use NULL parent and atom. Builtin functions never escape to scripts. */
- JS_ASSERT(index < JS_ARRAY_LENGTH(builtinFunctionInfo));
- const BuiltinFunctionInfo *bfi = &builtinFunctionInfo[index];
- JSFunction *fun = js_NewFunction(cx,
- NULL,
- JS_DATA_TO_FUNC_PTR(JSNative, bfi->ti),
- bfi->nargs,
- JSFUN_FAST_NATIVE | JSFUN_TRCINFO,
- NULL,
- NULL);
- if (fun) {
- funobj = FUN_OBJECT(fun);
- STOBJ_CLEAR_PROTO(funobj);
- STOBJ_CLEAR_PARENT(funobj);
-
- JS_LOCK_GC(rt);
- if (!rt->builtinFunctions[index]) /* retest now that the lock is held */
- rt->builtinFunctions[index] = funobj;
- else
- funobj = rt->builtinFunctions[index];
- JS_UNLOCK_GC(rt);
- }
- }
- return funobj;
-}
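-
-/*
- * The retest under JS_LOCK_GC above is ordinary double-checked lazy
- * initialization: build the function optimistically outside the lock, then
- * publish it only if no other thread won the race. A generic sketch of the
- * same shape (hypothetical names; std::mutex stands in for the GC lock):
- */
-#include <mutex>
-
-template <typename T>
-static T *
-ExampleLazyPublish(T *&cell, std::mutex &lock, T *fresh)
-{
-    std::lock_guard<std::mutex> hold(lock);
-    if (!cell)           /* retest now that the lock is held */
-        cell = fresh;    /* we won: publish our object */
-    return cell;         /* either |fresh| or the earlier winner */
-}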
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CALLBUILTIN()
-{
- JSObject *obj = js_GetBuiltinFunction(cx, GET_INDEX(cx->fp->regs->pc));
- if (!obj)
- ABORT_TRACE_ERROR("error in js_GetBuiltinFunction");
-
- stack(0, get(&stackval(-1)));
- stack(-1, INS_CONSTOBJ(obj));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INT8()
-{
- stack(0, lir->insImmf(GET_INT8(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_INT32()
-{
- stack(0, lir->insImmf(GET_INT32(cx->fp->regs->pc)));
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_LENGTH()
-{
- jsval& l = stackval(-1);
- if (JSVAL_IS_PRIMITIVE(l)) {
- if (!JSVAL_IS_STRING(l))
- ABORT_TRACE("non-string primitive JSOP_LENGTH unsupported");
- set(&l, lir->ins1(LIR_i2f, getStringLength(get(&l))));
- return JSRS_CONTINUE;
- }
-
- JSObject* obj = JSVAL_TO_OBJECT(l);
- LIns* obj_ins = get(&l);
-
- if (STOBJ_GET_CLASS(obj) == &js_ArgumentsClass) {
- unsigned depth;
- JSStackFrame *afp = guardArguments(obj, obj_ins, &depth);
- if (!afp)
- ABORT_TRACE("can't reach arguments object's frame");
-
- // We must both check at record time and guard at run time that
- // arguments.length has not been reassigned, redefined or deleted.
- if (js_IsOverriddenArgsLength(obj))
- ABORT_TRACE("can't trace JSOP_ARGCNT if arguments.length has been modified");
- LIns* slot_ins = guardArgsLengthNotAssigned(obj_ins);
-
- // slot_ins is the value from the slot; right-shift by 2 bits to get
- // the length (see GetArgsLength in jsfun.cpp).
- LIns* v_ins = lir->ins1(LIR_i2f, lir->ins2i(LIR_rsh, slot_ins, 2));
- set(&l, v_ins);
- return JSRS_CONTINUE;
- }
-
- LIns* v_ins;
- if (OBJ_IS_ARRAY(cx, obj)) {
- if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
- if (!guardDenseArray(obj, obj_ins, BRANCH_EXIT)) {
- JS_NOT_REACHED("OBJ_IS_DENSE_ARRAY but not?!?");
- return JSRS_STOP;
- }
- } else {
- if (!guardClass(obj, obj_ins, &js_SlowArrayClass, snapshot(BRANCH_EXIT)))
- ABORT_TRACE("can't trace length property access on non-array");
- }
- v_ins = lir->ins1(LIR_i2f, p2i(stobj_get_fslot(obj_ins, JSSLOT_ARRAY_LENGTH)));
- } else {
- if (!OBJ_IS_NATIVE(obj))
- ABORT_TRACE("can't trace length property access on non-array, non-native object");
- return getProp(obj, obj_ins);
- }
- set(&l, v_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_NEWARRAY()
-{
- LIns *proto_ins;
- CHECK_STATUS(getClassPrototype(JSProto_Array, proto_ins));
-
- uint32 len = GET_UINT16(cx->fp->regs->pc);
- cx->fp->assertValidStackDepth(len);
-
- LIns* args[] = { lir->insImm(len), proto_ins, cx_ins };
- LIns* v_ins = lir->insCall(&js_NewUninitializedArray_ci, args);
- guard(false, lir->ins_peq0(v_ins), OOM_EXIT);
-
- LIns* dslots_ins = NULL;
- uint32 count = 0;
- for (uint32 i = 0; i < len; i++) {
- jsval& v = stackval(int(i) - int(len));
- if (v != JSVAL_HOLE)
- count++;
- LIns* elt_ins = box_jsval(v, get(&v));
- stobj_set_dslot(v_ins, i, dslots_ins, elt_ins);
- }
-
- if (count > 0)
- stobj_set_fslot(v_ins, JSSLOT_ARRAY_COUNT, INS_CONST(count));
-
- stack(-int(len), v_ins);
- return JSRS_CONTINUE;
-}
-
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_HOLE()
-{
- stack(0, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_HOLE)));
- return JSRS_CONTINUE;
-}
-
-JSRecordingStatus
-TraceRecorder::record_JSOP_TRACE()
-{
- return JSRS_CONTINUE;
-}
-
-static const uint32 sMaxConcatNSize = 32;
-
-/*
- * Copy the result of defvalue.string back into concatn's arguments, clean the
- * stack, and return a pointer to the argument that was just overwritten.
- */
-JS_REQUIRES_STACK jsval *
-js_ConcatPostImacroStackCleanup(uint32 argc, JSFrameRegs &regs,
- TraceRecorder *recorder)
-{
- JS_ASSERT(*regs.pc == JSOP_IMACOP);
-
- /* Pop the argument offset and imacro return value. */
- jsint offset = JSVAL_TO_INT(*--regs.sp);
- jsval *imacroResult = --regs.sp;
-
- /* Replace non-primitive argument with new primitive argument. */
- jsval *vp = regs.sp - offset;
- JS_ASSERT(regs.sp - argc <= vp && vp < regs.sp);
- if (recorder)
- recorder->set(vp, recorder->get(imacroResult));
- *vp = *imacroResult;
-
- return vp;
-}
-
-/*
- * Initially, concatn takes N arguments on the stack, where N is immediate
- * operand. To convert these arguments to primitives, we must repeatedly call
- * the defvalue.string imacro. To achieve this iteration, defvalue.string ends
- * with imacop. Hence, this function is called multiple times, each time with
- * one less non-primitive. To keep track of where we are in the loop, we must
- * push an additional index value on the stack. Hence, on all subsequent
- * entries, the stack is organized as follows (bottom to top):
- *
- * prim[1]
- * ...
- * prim[i-1]
- * nonprim[i] argument to imacro
- * arg[i+1]
- * ...
- * arg[N]
- * primarg[i] nonprim[i] converted to primitive
- * i
- *
- * Hence, the stack setup on entry to this function (and JSOP_CONCATN in the
- * interpreter, on trace abort) is dependent on whether an imacro is in
- * progress. When all of concatn's arguments are primitive, it emits a builtin
- * call and allows the actual JSOP_CONCATN to be executed by the interpreter.
- */
-JS_REQUIRES_STACK JSRecordingStatus
-TraceRecorder::record_JSOP_CONCATN()
-{
- JSStackFrame *fp = cx->fp;
-    JSFrameRegs &regs = *fp->regs;
-
- /*
- * If we are in an imacro, we must have just finished a call to
- * defvalue.string. Continue where we left off last time.
- */
- uint32 argc;
- jsval *loopStart;
- if (fp->imacpc) {
- JS_ASSERT(*fp->imacpc == JSOP_CONCATN);
- argc = GET_ARGC(fp->imacpc);
- loopStart = js_ConcatPostImacroStackCleanup(argc, regs, this) + 1;
- } else {
- argc = GET_ARGC(regs.pc);
- JS_ASSERT(argc > 0);
- loopStart = regs.sp - argc;
-
- /* Prevent code/alloca explosion. */
- if (argc > sMaxConcatNSize)
- return JSRS_STOP;
- }
-
- /* Convert non-primitives to primitives using defvalue.string. */
- for (jsval *vp = loopStart; vp != regs.sp; ++vp) {
- if (!JSVAL_IS_PRIMITIVE(*vp)) {
- /*
- * In addition to the jsval we want the imacro to convert to
- * primitive, pass through the offset of the argument on the stack.
- */
- jsint offset = regs.sp - vp;
-
- /* Push the non-primitive to convert. */
- set(regs.sp, get(vp), true);
- *regs.sp++ = *vp;
-
- /* Push the argument index. */
- set(regs.sp, lir->insImm(offset), true);
- *regs.sp++ = INT_TO_JSVAL(offset);
-
- /* Nested imacro call OK because this is a tail call. */
- return call_imacro(defvalue_imacros.string);
- }
- }
-
- /* Build an array of the stringified primitives. */
- int32_t bufSize = argc * sizeof(JSString *);
- LIns *buf_ins = lir->insAlloc(bufSize);
- int32_t d = 0;
- for (jsval *vp = regs.sp - argc; vp != regs.sp; ++vp, d += sizeof(void *))
- lir->insStorei(stringify(*vp), buf_ins, d);
-
- /* Perform concatenation using a builtin. */
- LIns *args[] = { lir->insImm(argc), buf_ins, cx_ins };
- LIns *concat = lir->insCall(&js_ConcatN_ci, args);
- guard(false, lir->ins_peq0(concat), OOM_EXIT);
-
- /* Update tracker with result. */
- jsval *afterPop = regs.sp - (argc - 1);
- set(afterPop - 1, concat);
-
- return JSRS_CONTINUE;
-}
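-
-/*
- * A worked example of the layout described above, for argc == 3 where only
- * arg[2] is non-primitive (values hypothetical):
- *
- *   entry:                  "a"  obj  "c"
- *   push nonprim + offset:  "a"  obj  "c"  obj  2    (offset = sp - vp)
- *   defvalue.string yields: "a"  obj  "c"  "b"  2
- *   cleanup pops both and
- *   stores at sp - offset:  "a"  "b"  "c"
- *
- * after which the scan resumes at the slot following the one just fixed.
- */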
-
-#define DBG_STUB(OP) \
- JS_REQUIRES_STACK JSRecordingStatus \
- TraceRecorder::record_##OP() \
- { \
- ABORT_TRACE("can't trace " #OP); \
- }
-
-DBG_STUB(JSOP_GETUPVAR_DBG)
-DBG_STUB(JSOP_CALLUPVAR_DBG)
-DBG_STUB(JSOP_DEFFUN_DBGFC)
-DBG_STUB(JSOP_DEFLOCALFUN_DBGFC)
-DBG_STUB(JSOP_LAMBDA_DBGFC)
-
-#ifdef JS_JIT_SPEW
-/*
- * Print information about entry typemaps and unstable exits for all peers
- * at a PC.
- */
-void
-DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape,
- uint32 argc)
-{
- VMFragment* f;
- TreeInfo* ti;
- bool looped = false;
- unsigned length = 0;
-
- for (f = getLoop(tm, ip, globalObj, globalShape, argc); f != NULL; f = f->peer) {
- if (!f->vmprivate)
- continue;
- debug_only_printf(LC_TMRecorder, "Stability of fragment %p:\nENTRY STACK=", (void*)f);
- ti = (TreeInfo*)f->vmprivate;
- if (looped)
- JS_ASSERT(ti->nStackTypes == length);
- for (unsigned i = 0; i < ti->nStackTypes; i++)
- debug_only_printf(LC_TMRecorder, "%c", typeChar[ti->stackTypeMap()[i]]);
- debug_only_print0(LC_TMRecorder, " GLOBALS=");
- for (unsigned i = 0; i < ti->nGlobalTypes(); i++)
- debug_only_printf(LC_TMRecorder, "%c", typeChar[ti->globalTypeMap()[i]]);
- debug_only_print0(LC_TMRecorder, "\n");
- UnstableExit* uexit = ti->unstableExits;
- while (uexit != NULL) {
- debug_only_print0(LC_TMRecorder, "EXIT ");
- JSTraceType* m = uexit->exit->fullTypeMap();
- debug_only_print0(LC_TMRecorder, "STACK=");
- for (unsigned i = 0; i < uexit->exit->numStackSlots; i++)
- debug_only_printf(LC_TMRecorder, "%c", typeChar[m[i]]);
- debug_only_print0(LC_TMRecorder, " GLOBALS=");
- for (unsigned i = 0; i < uexit->exit->numGlobalSlots; i++) {
- debug_only_printf(LC_TMRecorder, "%c",
- typeChar[m[uexit->exit->numStackSlots + i]]);
- }
- debug_only_print0(LC_TMRecorder, "\n");
- uexit = uexit->next;
- }
- length = ti->nStackTypes;
- looped = true;
- }
-}
-#endif
-
-#ifdef MOZ_TRACEVIS
-
-FILE* traceVisLogFile = NULL;
-JSHashTable *traceVisScriptTable = NULL;
-
-JS_FRIEND_API(bool)
-JS_StartTraceVis(const char* filename = "tracevis.dat")
-{
- if (traceVisLogFile) {
- // If we're currently recording, first we must stop.
- JS_StopTraceVis();
- }
-
- traceVisLogFile = fopen(filename, "wb");
- if (!traceVisLogFile)
- return false;
-
- return true;
-}
-
-JS_FRIEND_API(JSBool)
-js_StartTraceVis(JSContext *cx, JSObject *obj,
- uintN argc, jsval *argv, jsval *rval)
-{
- JSBool ok;
-
- if (argc > 0 && JSVAL_IS_STRING(argv[0])) {
- JSString *str = JSVAL_TO_STRING(argv[0]);
- char *filename = js_DeflateString(cx, str->chars(), str->length());
- if (!filename)
- goto error;
- ok = JS_StartTraceVis(filename);
- cx->free(filename);
- } else {
- ok = JS_StartTraceVis();
- }
-
- if (ok) {
- fprintf(stderr, "started TraceVis recording\n");
- return JS_TRUE;
- }
-
- error:
- JS_ReportError(cx, "failed to start TraceVis recording");
- return JS_FALSE;
-}
-
-JS_FRIEND_API(bool)
-JS_StopTraceVis()
-{
- if (!traceVisLogFile)
- return false;
-
- fclose(traceVisLogFile); // not worth checking the result
- traceVisLogFile = NULL;
-
- return true;
-}
-
-JS_FRIEND_API(JSBool)
-js_StopTraceVis(JSContext *cx, JSObject *obj,
- uintN argc, jsval *argv, jsval *rval)
-{
- JSBool ok = JS_StopTraceVis();
-
- if (ok)
- fprintf(stderr, "stopped TraceVis recording\n");
- else
- JS_ReportError(cx, "TraceVis isn't running");
-
- return ok;
-}
-
-#endif /* MOZ_TRACEVIS */
-
-#define UNUSED(n) \
- JS_REQUIRES_STACK bool \
- TraceRecorder::record_JSOP_UNUSED##n() { \
- JS_NOT_REACHED("JSOP_UNUSED" # n); \
- return false; \
- }
diff --git a/tests/cpp/jstracer_part.cpp b/tests/cpp/jstracer_part.cpp
deleted file mode 100644
index 30b829b..0000000
--- a/tests/cpp/jstracer_part.cpp
+++ /dev/null
@@ -1,195 +0,0 @@
-/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=4 sw=4 et tw=99:
- *
- * ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
- * May 28, 2008.
- *
- * The Initial Developer of the Original Code is
- * Brendan Eich
- *
- * Contributor(s):
- * Andreas Gal
- * Mike Shaver
- * David Anderson
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-
-#include "nanojit/nanojit.h"
-
-using namespace nanojit;
-
-
-void*
-nanojit::Allocator::allocChunk(size_t nbytes)
-{
- VMAllocator *vma = (VMAllocator*)this;
- JS_ASSERT(!vma->outOfMemory());
- void *p = malloc(nbytes);
- if (!p) {
- JS_ASSERT(nbytes < sizeof(vma->mReserve));
- vma->mOutOfMemory = true;
- p = (void*) &vma->mReserve[0];
- }
- vma->mSize += nbytes;
- return p;
-}
-
-void
-nanojit::Allocator::freeChunk(void *p) {
- VMAllocator *vma = (VMAllocator*)this;
- if (p != &vma->mReserve[0])
- free(p);
-}
-
-void
-nanojit::Allocator::postReset() {
- VMAllocator *vma = (VMAllocator*)this;
- vma->mOutOfMemory = false;
- vma->mSize = 0;
-}
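-
-/*
- * allocChunk/freeChunk above implement an OOM-reserve pattern: when malloc
- * fails, hand out a single statically reserved chunk and raise a flag so
- * the owner can abandon the work in progress and reset. A self-contained
- * sketch of the idea (hypothetical names; the real reserve is
- * VMAllocator::mReserve):
- */
-#include <cstdlib>
-
-struct ExampleReserveAlloc {
-    bool oom;
-    char reserve[4096];
-
-    void *alloc(std::size_t nbytes) {
-        void *p = std::malloc(nbytes);
-        if (!p && nbytes < sizeof(reserve)) {
-            oom = true;        // caller must notice and unwind promptly
-            p = reserve;       // emergency chunk keeps us limping along
-        }
-        return p;
-    }
-    void release(void *p) {
-        if (p != reserve)      // the reserve chunk is never freed
-            std::free(p);
-    }
-};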
-
-
-void
-nanojit::StackFilter::getTops(LIns* guard, int& spTop, int& rpTop)
-{
- VMSideExit* e = (VMSideExit*)guard->record()->exit;
- spTop = e->sp_adj;
- rpTop = e->rp_adj;
-}
-
-class AdjustCallerGlobalTypesVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSContext *mCx;
- nanojit::LirBuffer *mLirbuf;
- nanojit::LirWriter *mLir;
- JSTraceType *mTypeMap;
-public:
- AdjustCallerGlobalTypesVisitor(TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mCx(mRecorder.cx),
- mLirbuf(mRecorder.lirbuf),
- mLir(mRecorder.lir),
- mTypeMap(typeMap)
- {}
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE void
- visitGlobalSlot(jsval *vp, unsigned n, unsigned slot) {
- LIns *ins = mRecorder.get(vp);
- bool isPromote = isPromoteInt(ins);
- if (isPromote && *mTypeMap == TT_DOUBLE) {
- mLir->insStorei(mRecorder.get(vp), mLirbuf->state,
- mRecorder.nativeGlobalOffset(vp));
-
- /*
- * Aggressively undo speculation so the inner tree will compile
- * if this fails.
- */
- oracle.markGlobalSlotUndemotable(mCx, slot);
- }
- JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
- ++mTypeMap;
- }
-};
-
-class AdjustCallerStackTypesVisitor : public SlotVisitorBase
-{
- TraceRecorder &mRecorder;
- JSContext *mCx;
- nanojit::LirBuffer *mLirbuf;
- nanojit::LirWriter *mLir;
- unsigned mSlotnum;
- JSTraceType *mTypeMap;
-public:
- AdjustCallerStackTypesVisitor(TraceRecorder &recorder,
- JSTraceType *typeMap) :
- mRecorder(recorder),
- mCx(mRecorder.cx),
- mLirbuf(mRecorder.lirbuf),
- mLir(mRecorder.lir),
- mSlotnum(0),
- mTypeMap(typeMap)
- {}
-
- JSTraceType* getTypeMap()
- {
- return mTypeMap;
- }
-
- JS_REQUIRES_STACK JS_ALWAYS_INLINE bool
- visitStackSlots(jsval *vp, size_t count, JSStackFrame* fp) {
- for (size_t i = 0; i < count; ++i) {
- LIns *ins = mRecorder.get(vp);
- bool isPromote = isPromoteInt(ins);
- if (isPromote && *mTypeMap == TT_DOUBLE) {
- mLir->insStorei(mRecorder.get(vp), mLirbuf->sp,
- -mRecorder.treeInfo->nativeStackBase +
- mRecorder.nativeStackOffset(vp));
-
- /*
- * Aggressively undo speculation so the inner tree will compile
- * if this fails.
- */
- oracle.markStackSlotUndemotable(mCx, mSlotnum);
- }
- JS_ASSERT(!(!isPromote && *mTypeMap == TT_INT32));
- ++vp;
- ++mTypeMap;
- ++mSlotnum;
- }
- return true;
- }
-};
-
-#if defined NJ_VERBOSE
-void
-nanojit::LirNameMap::formatGuard(LIns *i, char *out)
-{
- VMSideExit *x;
-
- x = (VMSideExit *)i->record()->exit;
- sprintf(out,
- "%s: %s %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
- formatRef(i),
- lirNames[i->opcode()],
- i->oprnd1() ? formatRef(i->oprnd1()) : "",
- (void *)x->pc,
- (void *)x->imacpc,
- (long int)x->sp_adj,
- (long int)x->rp_adj,
- i->record()->profGuardID);
-}
-#endif
-
diff --git a/tests/cpp/mlprototype.cpp b/tests/cpp/mlprototype.cpp
deleted file mode 100644
index 628e630..0000000
--- a/tests/cpp/mlprototype.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-
- NS_IMETHODIMP
- nsThreadClassInfo::GetClassDescription(
- char **result,
- int foo,
- bool blah
- )
- {
- *result = nsnull;
- return NS_OK;
- }
-
-int foo;
diff --git a/tests/cpp/multiline.cpp b/tests/cpp/multiline.cpp
deleted file mode 100644
index 4160fd5..0000000
--- a/tests/cpp/multiline.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-void*
-Foo::bar(int i,
- const char const * const * p)
-{
- return;
-}
diff --git a/tests/cpp/nsCycleCollector.cpp b/tests/cpp/nsCycleCollector.cpp
deleted file mode 100644
index f263041..0000000
--- a/tests/cpp/nsCycleCollector.cpp
+++ /dev/null
@@ -1,3728 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
-/* vim: set cindent tabstop=4 expandtab shiftwidth=4: */
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is mozilla.org code.
- *
- * The Initial Developer of the Original Code is
- * The Mozilla Foundation.
- * Portions created by the Initial Developer are Copyright (C) 2006
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- * L. David Baron, Mozilla Corporation
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-//
-// This file implements a garbage-cycle collector based on the paper
-//
-// Concurrent Cycle Collection in Reference Counted Systems
-// Bacon & Rajan (2001), ECOOP 2001 / Springer LNCS vol 2072
-//
-// We are not using the concurrent or acyclic cases of that paper; so
-// the green, red and orange colors are not used.
-//
-// The collector is based on tracking pointers of four colors:
-//
-// Black nodes are definitely live. If we ever determine a node is
-// black, it's ok to forget about, drop from our records.
-//
-// White nodes are definitely garbage cycles. Once we finish with our
-// scanning, we unlink all the white nodes and expect that by
-// unlinking them they will self-destruct (since a garbage cycle is
-// only keeping itself alive with internal links, by definition).
-//
-// Grey nodes are being scanned. Nodes that turn grey will turn
-// either black if we determine that they're live, or white if we
-// determine that they're a garbage cycle. After the main collection
-// algorithm there should be no grey nodes.
-//
-// Purple nodes are *candidates* for being scanned. They are nodes we
-// haven't begun scanning yet because they're not old enough, or we're
-// still partway through the algorithm.
-//
-// XPCOM objects participating in garbage-cycle collection are obliged
-// to inform us when they ought to turn purple; that is, when their
-// refcount transitions from N+1 -> N, for nonzero N. Furthermore we
-// require that *after* an XPCOM object has informed us of turning
-// purple, they will tell us when they either transition back to being
-// black (incremented refcount) or are ultimately deleted.
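-
-// A compact, self-contained sketch of the coloring described above, over a
-// toy adjacency-list graph (hypothetical types; the real collector works on
-// GCGraph below). The grey walk counts internal references; the scan then
-// blackens anything with references left over from outside the subgraph,
-// and whitens the rest as garbage cycles.
-#include <vector>
-
-enum ToyColor { ToyBlack, ToyWhite, ToyGrey };
-
-struct ToyNode {
-    int refCount;                 // total refcount, internal + external
-    int internalRefs;             // references found during the grey walk
-    ToyColor color;
-    std::vector<ToyNode*> kids;
-};
-
-static void
-ToyMarkGrey(ToyNode *n)
-{
-    if (n->color == ToyGrey)
-        return;
-    n->color = ToyGrey;
-    for (ToyNode *k : n->kids) {
-        k->internalRefs++;        // one more ref accounted for internally
-        ToyMarkGrey(k);
-    }
-}
-
-static void
-ToyScanBlack(ToyNode *n)          // live: re-blacken everything reachable
-{
-    n->color = ToyBlack;
-    for (ToyNode *k : n->kids)
-        if (k->color != ToyBlack)
-            ToyScanBlack(k);
-}
-
-static void
-ToyScan(ToyNode *n)
-{
-    if (n->color != ToyGrey)
-        return;
-    if (n->refCount > n->internalRefs) {
-        ToyScanBlack(n);          // an external reference keeps n alive
-        return;
-    }
-    n->color = ToyWhite;          // garbage cycle candidate
-    for (ToyNode *k : n->kids)
-        ToyScan(k);
-}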
-
-
-// Safety:
-//
-// An XPCOM object is either scan-safe or scan-unsafe, purple-safe or
-// purple-unsafe.
-//
-// An object is scan-safe if:
-//
-// - It can be QI'ed to |nsXPCOMCycleCollectionParticipant|, though this
-// operation loses ISupports identity (like nsIClassInfo).
-// - The operation |traverse| on the resulting
-// nsXPCOMCycleCollectionParticipant does not cause *any* refcount
-// adjustment to occur (no AddRef / Release calls).
-//
-// An object is purple-safe if it satisfies the following properties:
-//
-// - The object is scan-safe.
-// - If the object calls |nsCycleCollector::suspect(this)|,
-// it will eventually call |nsCycleCollector::forget(this)|,
-// exactly once per call to |suspect|, before being destroyed.
-//
-// When we receive a pointer |ptr| via
-// |nsCycleCollector::suspect(ptr)|, we assume it is purple-safe. We
-// can check the scan-safety, but have no way to ensure the
-// purple-safety; objects must obey, or else the entire system falls
-// apart. Don't involve an object in this scheme if you can't
-// guarantee its purple-safety.
-//
-// When we have a scannable set of purple nodes ready, we begin
-// our walks. During the walks, the nodes we |traverse| should only
-// feed us more scan-safe nodes, and should not adjust the refcounts
-// of those nodes.
-//
-// We do not |AddRef| or |Release| any objects during scanning. We
-// rely on purple-safety of the roots that call |suspect| and
-// |forget| to hold, such that we will forget about a purple pointer
-// before it is destroyed. The pointers that are merely scan-safe,
-// we hold only for the duration of scanning, and there should be no
-// objects released from the scan-safe set during the scan (there
-// should be no threads involved).
-//
-// We *do* call |AddRef| and |Release| on every white object, on
-// either side of the calls to |Unlink|. This keeps the set of white
-// objects alive during the unlinking.
-//
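-// In practice the obligations above are discharged by the cycle-collecting
-// refcount macros; shape-wise (hypothetical class, heavily simplified):
-//
-//   nsrefcnt MyObject::Release()
-//   {
-//       nsrefcnt count = --mRefCnt;
-//       if (count == 0)
-//           delete this;                       // destruction forgets us
-//       else
-//           nsCycleCollector_suspect(this);    // N+1 -> N, N nonzero
-//       return count;
-//   }
-//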
-
-#if !defined(__MINGW32__)
-#ifdef WIN32
-#include <crtdbg.h>
-#include <errno.h>
-#endif
-#endif
-
-#include "base/process_util.h"
-
-/* This must occur *after* base/process_util.h to avoid typedef conflicts. */
-#include "mozilla/Util.h"
-
-#include "nsCycleCollectionParticipant.h"
-#include "nsCycleCollectorUtils.h"
-#include "nsIProgrammingLanguage.h"
-#include "nsBaseHashtable.h"
-#include "nsHashKeys.h"
-#include "nsDeque.h"
-#include "nsCycleCollector.h"
-#include "nsThreadUtils.h"
-#include "prenv.h"
-#include "prprf.h"
-#include "plstr.h"
-#include "prtime.h"
-#include "nsPrintfCString.h"
-#include "nsTArray.h"
-#include "mozilla/FunctionTimer.h"
-#include "nsIObserverService.h"
-#include "nsIConsoleService.h"
-#include "nsServiceManagerUtils.h"
-#include "nsThreadUtils.h"
-#include "nsTArray.h"
-#include "mozilla/Services.h"
-#include "nsICycleCollectorListener.h"
-#include "nsIXPConnect.h"
-#include "nsIJSRuntimeService.h"
-#include "nsIMemoryReporter.h"
-#include "xpcpublic.h"
-#include <stdio.h>
-#include <string.h>
-#ifdef WIN32
-#include <io.h>
-#include <process.h>
-#endif
-
-#ifdef XP_WIN
-#include <windows.h>
-#endif
-
-#include "mozilla/Mutex.h"
-#include "mozilla/CondVar.h"
-#include "mozilla/Telemetry.h"
-
-using namespace mozilla;
-
-//#define COLLECT_TIME_DEBUG
-
-#ifdef DEBUG_CC
-#define IF_DEBUG_CC_PARAM(_p) , _p
-#define IF_DEBUG_CC_ONLY_PARAM(_p) _p
-#else
-#define IF_DEBUG_CC_PARAM(_p)
-#define IF_DEBUG_CC_ONLY_PARAM(_p)
-#endif
-
-#define DEFAULT_SHUTDOWN_COLLECTIONS 5
-#ifdef DEBUG_CC
-#define SHUTDOWN_COLLECTIONS(params) params.mShutdownCollections
-#else
-#define SHUTDOWN_COLLECTIONS(params) DEFAULT_SHUTDOWN_COLLECTIONS
-#endif
-
-#if defined(XP_WIN)
-// Defined in nsThreadManager.cpp.
-extern DWORD gTLSThreadIDIndex;
-#elif defined(NS_TLS)
-// Defined in nsThreadManager.cpp.
-extern NS_TLS mozilla::threads::ID gTLSThreadID;
-#else
-PRThread* gCycleCollectorThread = nsnull;
-#endif
-
-// If true, always log cycle collector graphs.
-const bool gAlwaysLogCCGraphs = false;
-
-// Various parameters of this collector can be tuned using environment
-// variables.
-
-struct nsCycleCollectorParams
-{
- bool mDoNothing;
- bool mLogGraphs;
-#ifdef DEBUG_CC
- bool mReportStats;
- bool mHookMalloc;
- bool mFaultIsFatal;
- bool mLogPointers;
- PRUint32 mShutdownCollections;
-#endif
-
- nsCycleCollectorParams() :
-#ifdef DEBUG_CC
- mDoNothing (PR_GetEnv("XPCOM_CC_DO_NOTHING") != NULL),
- mLogGraphs (gAlwaysLogCCGraphs ||
- PR_GetEnv("XPCOM_CC_DRAW_GRAPHS") != NULL),
- mReportStats (PR_GetEnv("XPCOM_CC_REPORT_STATS") != NULL),
- mHookMalloc (PR_GetEnv("XPCOM_CC_HOOK_MALLOC") != NULL),
- mFaultIsFatal (PR_GetEnv("XPCOM_CC_FAULT_IS_FATAL") != NULL),
- mLogPointers (PR_GetEnv("XPCOM_CC_LOG_POINTERS") != NULL),
-
- mShutdownCollections(DEFAULT_SHUTDOWN_COLLECTIONS)
-#else
- mDoNothing (false),
- mLogGraphs (gAlwaysLogCCGraphs)
-#endif
- {
-#ifdef DEBUG_CC
- char *s = PR_GetEnv("XPCOM_CC_SHUTDOWN_COLLECTIONS");
- if (s)
- PR_sscanf(s, "%d", &mShutdownCollections);
-#endif
- }
-};
-
-#ifdef DEBUG_CC
-// Various operations involving the collector are recorded in a
-// statistics table. These are for diagnostics.
-
-struct nsCycleCollectorStats
-{
- PRUint32 mFailedQI;
- PRUint32 mSuccessfulQI;
-
- PRUint32 mVisitedNode;
- PRUint32 mWalkedGraph;
- PRUint32 mCollectedBytes;
- PRUint32 mFreeCalls;
- PRUint32 mFreedBytes;
-
- PRUint32 mSetColorGrey;
- PRUint32 mSetColorBlack;
- PRUint32 mSetColorWhite;
-
- PRUint32 mFailedUnlink;
- PRUint32 mCollectedNode;
-
- PRUint32 mSuspectNode;
- PRUint32 mForgetNode;
- PRUint32 mFreedWhilePurple;
-
- PRUint32 mCollection;
-
- nsCycleCollectorStats()
- {
- memset(this, 0, sizeof(nsCycleCollectorStats));
- }
-
- void Dump()
- {
- fprintf(stderr, "\f\n");
-#define DUMP(entry) fprintf(stderr, "%30.30s: %-20.20d\n", #entry, entry)
- DUMP(mFailedQI);
- DUMP(mSuccessfulQI);
-
- DUMP(mVisitedNode);
- DUMP(mWalkedGraph);
- DUMP(mCollectedBytes);
- DUMP(mFreeCalls);
- DUMP(mFreedBytes);
-
- DUMP(mSetColorGrey);
- DUMP(mSetColorBlack);
- DUMP(mSetColorWhite);
-
- DUMP(mFailedUnlink);
- DUMP(mCollectedNode);
-
- DUMP(mSuspectNode);
- DUMP(mForgetNode);
- DUMP(mFreedWhilePurple);
-
- DUMP(mCollection);
-#undef DUMP
- }
-};
-#endif
-
-#ifdef DEBUG_CC
-static bool nsCycleCollector_shouldSuppress(nsISupports *s);
-static void InitMemHook(void);
-#endif
-
-////////////////////////////////////////////////////////////////////////
-// Base types
-////////////////////////////////////////////////////////////////////////
-
-struct PtrInfo;
-
-class EdgePool
-{
-public:
- // EdgePool allocates arrays of void*, primarily to hold PtrInfo*.
- // However, at the end of a block, the last two pointers are a null
- // and then a void** pointing to the next block. This allows
- // EdgePool::Iterators to be a single word but still capable of crossing
- // block boundaries.
-
- EdgePool()
- {
- mSentinelAndBlocks[0].block = nsnull;
- mSentinelAndBlocks[1].block = nsnull;
- mNumBlocks = 0;
- }
-
- ~EdgePool()
- {
- NS_ASSERTION(!mSentinelAndBlocks[0].block &&
- !mSentinelAndBlocks[1].block,
- "Didn't call Clear()?");
- }
-
- void Clear()
- {
- Block *b = Blocks();
- while (b) {
- Block *next = b->Next();
- delete b;
- NS_ASSERTION(mNumBlocks > 0,
- "Expected EdgePool mNumBlocks to be positive.");
- mNumBlocks--;
- b = next;
- }
-
- mSentinelAndBlocks[0].block = nsnull;
- mSentinelAndBlocks[1].block = nsnull;
- }
-
-private:
- struct Block;
- union PtrInfoOrBlock {
- // Use a union to avoid reinterpret_cast and the ensuing
- // potential aliasing bugs.
- PtrInfo *ptrInfo;
- Block *block;
- };
- struct Block {
- enum { BlockSize = 16 * 1024 };
-
- PtrInfoOrBlock mPointers[BlockSize];
- Block() {
- mPointers[BlockSize - 2].block = nsnull; // sentinel
- mPointers[BlockSize - 1].block = nsnull; // next block pointer
- }
- Block*& Next()
- { return mPointers[BlockSize - 1].block; }
- PtrInfoOrBlock* Start()
- { return &mPointers[0]; }
- PtrInfoOrBlock* End()
- { return &mPointers[BlockSize - 2]; }
- };
-
- // Store the null sentinel so that we can have valid iterators
- // before adding any edges and without adding any blocks.
- PtrInfoOrBlock mSentinelAndBlocks[2];
- PRUint32 mNumBlocks;
-
- Block*& Blocks() { return mSentinelAndBlocks[1].block; }
-
-public:
- class Iterator
- {
- public:
- Iterator() : mPointer(nsnull) {}
- Iterator(PtrInfoOrBlock *aPointer) : mPointer(aPointer) {}
- Iterator(const Iterator& aOther) : mPointer(aOther.mPointer) {}
-
- Iterator& operator++()
- {
- if (mPointer->ptrInfo == nsnull) {
- // Null pointer is a sentinel for link to the next block.
- mPointer = (mPointer + 1)->block->mPointers;
- }
- ++mPointer;
- return *this;
- }
-
- PtrInfo* operator*() const
- {
- if (mPointer->ptrInfo == nsnull) {
- // Null pointer is a sentinel for link to the next block.
- return (mPointer + 1)->block->mPointers->ptrInfo;
- }
- return mPointer->ptrInfo;
- }
- bool operator==(const Iterator& aOther) const
- { return mPointer == aOther.mPointer; }
- bool operator!=(const Iterator& aOther) const
- { return mPointer != aOther.mPointer; }
-
- private:
- PtrInfoOrBlock *mPointer;
- };
-
- class Builder;
- friend class Builder;
- class Builder {
- public:
- Builder(EdgePool &aPool)
- : mCurrent(&aPool.mSentinelAndBlocks[0]),
- mBlockEnd(&aPool.mSentinelAndBlocks[0]),
- mNextBlockPtr(&aPool.Blocks()),
- mNumBlocks(aPool.mNumBlocks)
- {
- }
-
- Iterator Mark() { return Iterator(mCurrent); }
-
- void Add(PtrInfo* aEdge) {
- if (mCurrent == mBlockEnd) {
- Block *b = new Block();
- if (!b) {
- // This means we just won't collect (some) cycles.
- NS_NOTREACHED("out of memory, ignoring edges");
- return;
- }
- *mNextBlockPtr = b;
- mCurrent = b->Start();
- mBlockEnd = b->End();
- mNextBlockPtr = &b->Next();
- mNumBlocks++;
- }
- (mCurrent++)->ptrInfo = aEdge;
- }
- private:
- // mBlockEnd points to space for null sentinel
- PtrInfoOrBlock *mCurrent, *mBlockEnd;
- Block **mNextBlockPtr;
- PRUint32 &mNumBlocks;
- };
-
- size_t BlocksSize() const {
- return sizeof(Block) * mNumBlocks;
- }
-
-};
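-
-// The sentinel scheme documented at the top of EdgePool, reduced to a
-// self-contained sketch (hypothetical names): a cursor is a single word,
-// a null item marks a block's end, and the cell after the sentinel points
-// at the next block's first cell.
-union ToyEdgeCell {
-    void *item;               // null => this cell is a block-end sentinel
-    ToyEdgeCell *next;        // meaningful only in the cell after a sentinel
-};
-
-static ToyEdgeCell *
-ToyEdgeNext(ToyEdgeCell *p)
-{
-    if (p->item == NULL)      // sitting on a sentinel:
-        p = (p + 1)->next;    // hop to the next block's first cell
-    return p + 1;             // then advance one cell, as operator++ does
-}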
-
-#ifdef DEBUG_CC
-
-struct ReversedEdge {
- PtrInfo *mTarget;
- nsCString *mEdgeName;
- ReversedEdge *mNext;
-};
-
-#endif
-
-
-enum NodeColor { black, white, grey };
-
-// This structure should be kept as small as possible; we may expect
-// hundreds of thousands of them to be allocated and touched
-// repeatedly during each cycle collection.
-
-struct PtrInfo
-{
- void *mPointer;
- nsCycleCollectionParticipant *mParticipant;
- PRUint32 mColor : 2;
- PRUint32 mInternalRefs : 30;
- PRUint32 mRefCount;
-private:
- EdgePool::Iterator mFirstChild;
-
-public:
-#ifdef DEBUG_CC
- size_t mBytes;
- char *mName;
- PRUint32 mLangID;
-
- // For finding roots in ExplainLiveExpectedGarbage (when there are
- // missing calls to suspect or failures to unlink).
- PRUint32 mSCCIndex; // strongly connected component
-
- // For finding roots in ExplainLiveExpectedGarbage (when nodes
- // expected to be garbage are black).
- ReversedEdge* mReversedEdges; // linked list
- PtrInfo* mShortestPathToExpectedGarbage;
- nsCString* mShortestPathToExpectedGarbageEdgeName;
-
-    nsTArray<nsCString> mEdgeNames;
-#endif
-
- PtrInfo(void *aPointer, nsCycleCollectionParticipant *aParticipant
- IF_DEBUG_CC_PARAM(PRUint32 aLangID)
- )
- : mPointer(aPointer),
- mParticipant(aParticipant),
- mColor(grey),
- mInternalRefs(0),
- mRefCount(0),
- mFirstChild()
-#ifdef DEBUG_CC
- , mBytes(0),
- mName(nsnull),
- mLangID(aLangID),
- mSCCIndex(0),
- mReversedEdges(nsnull),
- mShortestPathToExpectedGarbage(nsnull),
- mShortestPathToExpectedGarbageEdgeName(nsnull)
-#endif
- {
- }
-
-#ifdef DEBUG_CC
- void Destroy() {
- PL_strfree(mName);
- mEdgeNames.~nsTArray();
- }
-#endif
-
- // Allow NodePool::Block's constructor to compile.
- PtrInfo() {
- NS_NOTREACHED("should never be called");
- }
-
- EdgePool::Iterator FirstChild()
- {
- return mFirstChild;
- }
-
- // this PtrInfo must be part of a NodePool
- EdgePool::Iterator LastChild()
- {
- return (this + 1)->mFirstChild;
- }
-
- void SetFirstChild(EdgePool::Iterator aFirstChild)
- {
- mFirstChild = aFirstChild;
- }
-
- // this PtrInfo must be part of a NodePool
- void SetLastChild(EdgePool::Iterator aLastChild)
- {
- (this + 1)->mFirstChild = aLastChild;
- }
-};
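-
-/*
- * Note on FirstChild/LastChild above: PtrInfos are laid out contiguously
- * within NodePool blocks, so node i's edges form the half-open range
- *
- *     [ node[i].mFirstChild, node[i+1].mFirstChild )
- *
- * and no per-node edge count is stored; (this + 1) is simply the next
- * PtrInfo in the pool.
- */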
-
-/**
- * A structure designed to be used like a linked list of PtrInfo, except
- * that it allocates the PtrInfo 32K-at-a-time.
- */
-class NodePool
-{
-private:
- enum { BlockSize = 8 * 1024 }; // could be int template parameter
-
- struct Block {
- // We create and destroy Block using NS_Alloc/NS_Free rather
- // than new and delete to avoid calling its constructor and
- // destructor.
- Block() { NS_NOTREACHED("should never be called"); }
- ~Block() { NS_NOTREACHED("should never be called"); }
-
- Block* mNext;
- PtrInfo mEntries[BlockSize + 1]; // +1 to store last child of last node
- };
-
-public:
- NodePool()
- : mBlocks(nsnull),
- mLast(nsnull),
- mNumBlocks(0)
- {
- }
-
- ~NodePool()
- {
- NS_ASSERTION(!mBlocks, "Didn't call Clear()?");
- }
-
- void Clear()
- {
-#ifdef DEBUG_CC
- {
- Enumerator queue(*this);
- while (!queue.IsDone()) {
- queue.GetNext()->Destroy();
- }
- }
-#endif
- Block *b = mBlocks;
- while (b) {
- Block *n = b->mNext;
- NS_Free(b);
- NS_ASSERTION(mNumBlocks > 0,
- "Expected NodePool mNumBlocks to be positive.");
- mNumBlocks--;
- b = n;
- }
-
- mBlocks = nsnull;
- mLast = nsnull;
- }
-
- class Builder;
- friend class Builder;
- class Builder {
- public:
- Builder(NodePool& aPool)
- : mNextBlock(&aPool.mBlocks),
- mNext(aPool.mLast),
- mBlockEnd(nsnull),
- mNumBlocks(aPool.mNumBlocks)
- {
- NS_ASSERTION(aPool.mBlocks == nsnull && aPool.mLast == nsnull,
- "pool not empty");
- }
- PtrInfo *Add(void *aPointer, nsCycleCollectionParticipant *aParticipant
- IF_DEBUG_CC_PARAM(PRUint32 aLangID)
- )
- {
- if (mNext == mBlockEnd) {
- Block *block;
- if (!(*mNextBlock = block =
- static_cast<Block*>(NS_Alloc(sizeof(Block)))))
- return nsnull;
- mNext = block->mEntries;
- mBlockEnd = block->mEntries + BlockSize;
- block->mNext = nsnull;
- mNextBlock = &block->mNext;
- mNumBlocks++;
- }
- return new (mNext++) PtrInfo(aPointer, aParticipant
- IF_DEBUG_CC_PARAM(aLangID)
- );
- }
- private:
- Block **mNextBlock;
- PtrInfo *&mNext;
- PtrInfo *mBlockEnd;
- PRUint32 &mNumBlocks;
- };
-
- class Enumerator;
- friend class Enumerator;
- class Enumerator {
- public:
- Enumerator(NodePool& aPool)
- : mFirstBlock(aPool.mBlocks),
- mCurBlock(nsnull),
- mNext(nsnull),
- mBlockEnd(nsnull),
- mLast(aPool.mLast)
- {
- }
-
- bool IsDone() const
- {
- return mNext == mLast;
- }
-
- bool AtBlockEnd() const
- {
- return mNext == mBlockEnd;
- }
-
- PtrInfo* GetNext()
- {
- NS_ASSERTION(!IsDone(), "calling GetNext when done");
- if (mNext == mBlockEnd) {
- Block *nextBlock = mCurBlock ? mCurBlock->mNext : mFirstBlock;
- mNext = nextBlock->mEntries;
- mBlockEnd = mNext + BlockSize;
- mCurBlock = nextBlock;
- }
- return mNext++;
- }
- private:
- Block *mFirstBlock, *mCurBlock;
- // mNext is the next value we want to return, unless mNext == mBlockEnd
- // NB: mLast is a reference to allow enumerating while building!
- PtrInfo *mNext, *mBlockEnd, *&mLast;
- };
-
- size_t BlocksSize() const {
- return sizeof(Block) * mNumBlocks;
- }
-
-private:
- Block *mBlocks;
- PtrInfo *mLast;
- PRUint32 mNumBlocks;
-};
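-
-// A minimal NodePool usage sketch (illustrative; "somePtr", "someParticipant"
-// and "visit" are placeholders, not from this file, and the non-DEBUG_CC
-// Add() signature is assumed). A Builder can fill blocks while an Enumerator
-// is already walking them, because Enumerator::mLast is a reference into
-// the pool:
-//
-//   NodePool pool;
-//   {
-//       NodePool::Builder builder(pool);
-//       builder.Add(somePtr, someParticipant);
-//       NodePool::Enumerator etor(pool);
-//       while (!etor.IsDone())
-//           visit(etor.GetNext());
-//   }
-//   pool.Clear();   // required before the pool is destroyed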
-
-
-struct WeakMapping
-{
- // map and key will be null if the corresponding objects are GC marked
- PtrInfo *mMap;
- PtrInfo *mKey;
- PtrInfo *mVal;
-};
-
-class GCGraphBuilder;
-
-struct GCGraph
-{
- NodePool mNodes;
- EdgePool mEdges;
- nsTArray<WeakMapping> mWeakMaps;
- PRUint32 mRootCount;
-#ifdef DEBUG_CC
- ReversedEdge *mReversedEdges;
-#endif
-
- GCGraph() : mRootCount(0) {
- }
- ~GCGraph() {
- }
-
- size_t BlocksSize() const {
- return mNodes.BlocksSize() + mEdges.BlocksSize();
- }
-
-};
-
-// XXX Would be nice to have an nsHashSet API that has
-// Add/Remove/Has rather than PutEntry/RemoveEntry/GetEntry.
-typedef nsTHashtable<nsVoidPtrHashKey> PointerSet;
-
-static inline void
-ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp);
-
-struct nsPurpleBuffer
-{
-private:
- struct Block {
- Block *mNext;
- nsPurpleBufferEntry mEntries[255];
-
- Block() : mNext(nsnull) {}
- };
-public:
- // This class wraps a linked list of the elements in the purple
- // buffer.
-
- nsCycleCollectorParams &mParams;
- PRUint32 mNumBlocksAlloced;
- PRUint32 mCount;
- Block mFirstBlock;
- nsPurpleBufferEntry *mFreeList;
-
- // For objects compiled against Gecko 1.9 and 1.9.1.
- PointerSet mCompatObjects;
-#ifdef DEBUG_CC
- PointerSet mNormalObjects; // duplicates our blocks
- nsCycleCollectorStats &mStats;
-#endif
-
-#ifdef DEBUG_CC
- nsPurpleBuffer(nsCycleCollectorParams &params,
- nsCycleCollectorStats &stats)
- : mParams(params),
- mStats(stats)
- {
- InitBlocks();
- mNormalObjects.Init();
- mCompatObjects.Init();
- }
-#else
- nsPurpleBuffer(nsCycleCollectorParams &params)
- : mParams(params)
- {
- InitBlocks();
- mCompatObjects.Init();
- }
-#endif
-
- ~nsPurpleBuffer()
- {
- FreeBlocks();
- }
-
- void InitBlocks()
- {
- mNumBlocksAlloced = 0;
- mCount = 0;
- mFreeList = nsnull;
- StartBlock(&mFirstBlock);
- }
-
- void StartBlock(Block *aBlock)
- {
- NS_ABORT_IF_FALSE(!mFreeList, "should not have free list");
-
- // Put all the entries in the block on the free list.
- nsPurpleBufferEntry *entries = aBlock->mEntries;
- mFreeList = entries;
- for (PRUint32 i = 1; i < ArrayLength(aBlock->mEntries); ++i) {
- entries[i - 1].mNextInFreeList =
- (nsPurpleBufferEntry*)(PRUword(entries + i) | 1);
- }
- entries[ArrayLength(aBlock->mEntries) - 1].mNextInFreeList =
- (nsPurpleBufferEntry*)1;
- }
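-
- // Free-list membership is encoded by tagging the low bit of the pointer
- // stored in the entry (entries are at least word-aligned, so that bit is
- // otherwise unused). A minimal sketch of the test used throughout this
- // file, assuming mObject and mNextInFreeList overlap in the entry:
- //
- //   bool IsOnFreeList(const nsPurpleBufferEntry *e)
- //   {
- //       return (PRUword(e->mObject) & PRUword(1)) != 0;
- //   }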
-
- void FreeBlocks()
- {
- if (mCount > 0)
- UnmarkRemainingPurple(&mFirstBlock);
- Block *b = mFirstBlock.mNext;
- while (b) {
- if (mCount > 0)
- UnmarkRemainingPurple(b);
- Block *next = b->mNext;
- delete b;
- b = next;
- NS_ASSERTION(mNumBlocksAlloced > 0,
- "Expected positive mNumBlocksAlloced.");
- mNumBlocksAlloced--;
- }
- mFirstBlock.mNext = nsnull;
- }
-
- void UnmarkRemainingPurple(Block *b)
- {
- for (nsPurpleBufferEntry *e = b->mEntries,
- *eEnd = ArrayEnd(b->mEntries);
- e != eEnd; ++e) {
- if (!(PRUword(e->mObject) & PRUword(1))) {
- // This is a real entry (rather than something on the
- // free list).
- if (e->mObject) {
- nsXPCOMCycleCollectionParticipant *cp;
- ToParticipant(e->mObject, &cp);
-
- cp->UnmarkPurple(e->mObject);
- }
-
- if (--mCount == 0)
- break;
- }
- }
- }
-
- void SelectPointers(GCGraphBuilder &builder);
-
-#ifdef DEBUG_CC
- void NoteAll(GCGraphBuilder &builder);
-
- bool Exists(void *p) const
- {
- return mNormalObjects.GetEntry(p) || mCompatObjects.GetEntry(p);
- }
-#endif
-
- nsPurpleBufferEntry* NewEntry()
- {
- if (!mFreeList) {
- Block *b = new Block;
- if (!b) {
- return nsnull;
- }
- mNumBlocksAlloced++;
- StartBlock(b);
-
- // Add the new block as the second block in the list.
- b->mNext = mFirstBlock.mNext;
- mFirstBlock.mNext = b;
- }
-
- nsPurpleBufferEntry *e = mFreeList;
- mFreeList = (nsPurpleBufferEntry*)
- (PRUword(mFreeList->mNextInFreeList) & ~PRUword(1));
- return e;
- }
-
- nsPurpleBufferEntry* Put(nsISupports *p)
- {
- nsPurpleBufferEntry *e = NewEntry();
- if (!e) {
- return nsnull;
- }
-
- ++mCount;
-
- e->mObject = p;
-
-#ifdef DEBUG_CC
- mNormalObjects.PutEntry(p);
-#endif
-
- // Caller is responsible for filling in result's mRefCnt.
- return e;
- }
-
- void Remove(nsPurpleBufferEntry *e)
- {
- NS_ASSERTION(mCount != 0, "must have entries");
-
-#ifdef DEBUG_CC
- mNormalObjects.RemoveEntry(e->mObject);
-#endif
-
- e->mNextInFreeList =
- (nsPurpleBufferEntry*)(PRUword(mFreeList) | PRUword(1));
- mFreeList = e;
-
- --mCount;
- }
-
- bool PutCompatObject(nsISupports *p)
- {
- ++mCount;
- return !!mCompatObjects.PutEntry(p);
- }
-
- void RemoveCompatObject(nsISupports *p)
- {
- --mCount;
- mCompatObjects.RemoveEntry(p);
- }
-
- PRUint32 Count() const
- {
- return mCount;
- }
-
- size_t BlocksSize() const
- {
- return sizeof(Block) * mNumBlocksAlloced;
- }
-
-};
-
-struct CallbackClosure
-{
- CallbackClosure(nsPurpleBuffer *aPurpleBuffer, GCGraphBuilder &aBuilder)
- : mPurpleBuffer(aPurpleBuffer),
- mBuilder(aBuilder)
- {
- }
- nsPurpleBuffer *mPurpleBuffer;
- GCGraphBuilder &mBuilder;
-};
-
-static bool
-AddPurpleRoot(GCGraphBuilder &builder, nsISupports *root);
-
-static PLDHashOperator
-selectionCallback(nsVoidPtrHashKey* key, void* userArg)
-{
- CallbackClosure *closure = static_cast<CallbackClosure*>(userArg);
- if (AddPurpleRoot(closure->mBuilder,
- static_cast<nsISupports*>(
- const_cast<void*>(key->GetKey()))))
- return PL_DHASH_REMOVE;
-
- return PL_DHASH_NEXT;
-}
-
-void
-nsPurpleBuffer::SelectPointers(GCGraphBuilder &aBuilder)
-{
-#ifdef DEBUG_CC
- NS_ABORT_IF_FALSE(mCompatObjects.Count() + mNormalObjects.Count() ==
- mCount,
- "count out of sync");
-#endif
-
- if (mCompatObjects.Count()) {
- mCount -= mCompatObjects.Count();
- CallbackClosure closure(this, aBuilder);
- mCompatObjects.EnumerateEntries(selectionCallback, &closure);
- mCount += mCompatObjects.Count(); // in case of allocation failure
- }
-
- // Walk through all the blocks.
- for (Block *b = &mFirstBlock; b; b = b->mNext) {
- for (nsPurpleBufferEntry *e = b->mEntries,
- *eEnd = ArrayEnd(b->mEntries);
- e != eEnd; ++e) {
- if (!(PRUword(e->mObject) & PRUword(1))) {
- // This is a real entry (rather than something on the
- // free list).
- if (!e->mObject || AddPurpleRoot(aBuilder, e->mObject)) {
-#ifdef DEBUG_CC
- mNormalObjects.RemoveEntry(e->mObject);
-#endif
- --mCount;
- // Put this entry on the free list in case some
- // call to AddPurpleRoot fails and we don't rebuild
- // the free list below.
- e->mNextInFreeList = (nsPurpleBufferEntry*)
- (PRUword(mFreeList) | PRUword(1));
- mFreeList = e;
- }
- }
- }
- }
-
- NS_WARN_IF_FALSE(mCount == 0, "AddPurpleRoot failed");
- if (mCount == 0) {
- FreeBlocks();
- InitBlocks();
- }
-}
-
-
-
-////////////////////////////////////////////////////////////////////////
-// Implement the LanguageRuntime interface for C++/XPCOM
-////////////////////////////////////////////////////////////////////////
-
-
-struct nsCycleCollectionXPCOMRuntime :
- public nsCycleCollectionLanguageRuntime
-{
- nsresult BeginCycleCollection(nsCycleCollectionTraversalCallback &cb,
- bool explainLiveExpectedGarbage)
- {
- return NS_OK;
- }
-
- nsresult FinishTraverse()
- {
- return NS_OK;
- }
-
- nsresult FinishCycleCollection()
- {
- return NS_OK;
- }
-
- inline nsCycleCollectionParticipant *ToParticipant(void *p);
-
-#ifdef DEBUG_CC
- virtual void PrintAllReferencesTo(void *p) {}
-#endif
-};
-
-struct nsCycleCollector
-{
- bool mCollectionInProgress;
- bool mScanInProgress;
- bool mFollowupCollection;
- PRUint32 mCollectedObjects;
- TimeStamp mCollectionStart;
-
- nsCycleCollectionLanguageRuntime *mRuntimes[nsIProgrammingLanguage::MAX+1];
- nsCycleCollectionXPCOMRuntime mXPCOMRuntime;
-
- GCGraph mGraph;
-
- nsCycleCollectorParams mParams;
-
- nsTArray<PtrInfo*> *mWhiteNodes;
- PRUint32 mWhiteNodeCount;
-
- // mVisitedRefCounted and mVisitedGCed are only used for telemetry
- PRUint32 mVisitedRefCounted;
- PRUint32 mVisitedGCed;
-
- nsPurpleBuffer mPurpleBuf;
-
- void RegisterRuntime(PRUint32 langID,
- nsCycleCollectionLanguageRuntime *rt);
- nsCycleCollectionLanguageRuntime * GetRuntime(PRUint32 langID);
- void ForgetRuntime(PRUint32 langID);
-
- void SelectPurple(GCGraphBuilder &builder);
- void MarkRoots(GCGraphBuilder &builder);
- void ScanRoots();
- void ScanWeakMaps();
-
- // returns whether anything was collected
- bool CollectWhite(nsICycleCollectorListener *aListener);
-
- nsCycleCollector();
- ~nsCycleCollector();
-
- // The first pair of Suspect and Forget functions are only used by
- // old XPCOM binary components.
- bool Suspect(nsISupports *n);
- bool Forget(nsISupports *n);
- nsPurpleBufferEntry* Suspect2(nsISupports *n);
- bool Forget2(nsPurpleBufferEntry *e);
-
- PRUint32 Collect(PRUint32 aTryCollections,
- nsICycleCollectorListener *aListener);
-
- // Prepare for and cleanup after one or more collection(s).
- bool PrepareForCollection(nsTArray<PtrInfo*> *aWhiteNodes);
- void GCIfNeeded(bool aForceGC);
- void CleanupAfterCollection();
-
- // Start and finish an individual collection.
- bool BeginCollection(nsICycleCollectorListener *aListener);
- bool FinishCollection(nsICycleCollectorListener *aListener);
-
- PRUint32 SuspectedCount();
- void Shutdown();
-
- void ClearGraph()
- {
- mGraph.mNodes.Clear();
- mGraph.mEdges.Clear();
- mGraph.mWeakMaps.Clear();
- mGraph.mRootCount = 0;
- }
-
-#ifdef DEBUG_CC
- nsCycleCollectorStats mStats;
-
- FILE *mPtrLog;
-
- void Allocated(void *n, size_t sz);
- void Freed(void *n);
-
- void ExplainLiveExpectedGarbage();
- bool CreateReversedEdges();
- void DestroyReversedEdges();
- void ShouldBeFreed(nsISupports *n);
- void WasFreed(nsISupports *n);
- PointerSet mExpectedGarbage;
-#endif
-};
-
-
-/**
- * GraphWalker is templatized over a Visitor class that must provide
- * the following two methods:
- *
- * bool ShouldVisitNode(PtrInfo const *pi);
- * void VisitNode(PtrInfo *pi);
- */
-template<class Visitor>
-class GraphWalker
-{
-private:
- Visitor mVisitor;
-
- void DoWalk(nsDeque &aQueue);
-
-public:
- void Walk(PtrInfo *s0);
- void WalkFromRoots(GCGraph &aGraph);
- // copy-constructing the visitor should be cheap, and less
- // indirection than using a reference
- GraphWalker(const Visitor aVisitor) : mVisitor(aVisitor) {}
-};
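-
-// A minimal Visitor sketch (illustrative, not from this file): blacken every
-// node reachable from a start node and count how many were newly blackened.
-// Note that VisitNode must change the state that ShouldVisitNode tests, or
-// DoWalk would revisit the nodes of a cycle forever.
-//
-//   struct BlackenVisitor
-//   {
-//       PRUint32 &mCount;   // shared across the cheap copies DoWalk makes
-//       BlackenVisitor(PRUint32 &aCount) : mCount(aCount) {}
-//       bool ShouldVisitNode(PtrInfo const *pi) { return pi->mColor != black; }
-//       void VisitNode(PtrInfo *pi) { pi->mColor = black; ++mCount; }
-//   };
-//
-//   PRUint32 count = 0;
-//   GraphWalker<BlackenVisitor>(BlackenVisitor(count)).Walk(startNode);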
-
-
-////////////////////////////////////////////////////////////////////////
-// The static collector object
-////////////////////////////////////////////////////////////////////////
-
-
-static nsCycleCollector *sCollector = nsnull;
-
-
-////////////////////////////////////////////////////////////////////////
-// Utility functions
-////////////////////////////////////////////////////////////////////////
-
-class CCRunnableFaultReport : public nsRunnable {
-public:
- CCRunnableFaultReport(const nsCString& report)
- {
- CopyUTF8toUTF16(report, mReport);
- }
-
- NS_IMETHOD Run() {
- nsCOMPtr<nsIObserverService> obs =
- do_GetService(NS_OBSERVERSERVICE_CONTRACTID);
- if (obs) {
- obs->NotifyObservers(nsnull, "cycle-collector-fault",
- mReport.get());
- }
-
- nsCOMPtr<nsIConsoleService> cons =
- do_GetService(NS_CONSOLESERVICE_CONTRACTID);
- if (cons) {
- cons->LogStringMessage(mReport.get());
- }
- return NS_OK;
- }
-
-private:
- nsString mReport;
-};
-
-static void
-Fault(const char *msg, const void *ptr=nsnull)
-{
-#ifdef DEBUG_CC
- // This should be nearly impossible, but just in case.
- if (!sCollector)
- return;
-
- if (sCollector->mParams.mFaultIsFatal) {
-
- if (ptr)
- printf("Fatal fault in cycle collector: %s (ptr: %p)\n", msg, ptr);
- else
- printf("Fatal fault in cycle collector: %s\n", msg);
-
- exit(1);
- }
-#endif
-
- nsPrintfCString str(256, "Fault in cycle collector: %s (ptr: %p)\n",
- msg, ptr);
- NS_NOTREACHED(str.get());
-
- // When faults are not fatal, we assume we're running in a
- // production environment and we therefore want to disable the
- // collector on a fault. This will unfortunately cause the browser
- // to leak pretty fast wherever cyclical garbage is created, but it's
- // probably a better user experience than crashing. Besides, we
- // *should* never hit a fault.
-
- sCollector->mParams.mDoNothing = true;
-
- // Report to observers off an event so we don't run JS under GC
- // (which is where we might be right now).
- nsCOMPtr<nsIRunnable> ev = new CCRunnableFaultReport(str);
- NS_DispatchToMainThread(ev);
-}
-
-#ifdef DEBUG_CC
-static void
-Fault(const char *msg, PtrInfo *pi)
-{
- printf("Fault in cycle collector: %s\n"
- " while operating on pointer %p %s\n",
- msg, pi->mPointer, pi->mName);
- if (pi->mInternalRefs) {
- printf(" which has internal references from:\n");
- NodePool::Enumerator queue(sCollector->mGraph.mNodes);
- while (!queue.IsDone()) {
- PtrInfo *ppi = queue.GetNext();
- for (EdgePool::Iterator e = ppi->FirstChild(),
- e_end = ppi->LastChild();
- e != e_end; ++e) {
- if (*e == pi) {
- printf(" %p %s\n", ppi->mPointer, ppi->mName);
- }
- }
- }
- }
-
- Fault(msg, pi->mPointer);
-}
-#else
-inline void
-Fault(const char *msg, PtrInfo *pi)
-{
- Fault(msg, pi->mPointer);
-}
-#endif
-
-static inline void
-AbortIfOffMainThreadIfCheckFast()
-{
-#if defined(XP_WIN) || defined(NS_TLS)
- if (!NS_IsMainThread() && !NS_IsCycleCollectorThread()) {
- NS_RUNTIMEABORT("Main-thread-only object used off the main thread");
- }
-#endif
-}
-
-static nsISupports *
-canonicalize(nsISupports *in)
-{
- nsISupports* child;
- in->QueryInterface(NS_GET_IID(nsCycleCollectionISupports),
- reinterpret_cast<void**>(&child));
- return child;
-}
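-
-// (Why canonicalize: under multiple inheritance one object can expose
-// several distinct nsISupports addresses, so every pointer is QI'd to
-// nsCycleCollectionISupports to get a single canonical address per object,
-// ensuring the graph has one node per object.)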
-
-static inline void
-ToParticipant(nsISupports *s, nsXPCOMCycleCollectionParticipant **cp)
-{
- // We use QI to move from an nsISupports to an
- // nsXPCOMCycleCollectionParticipant, which is a per-class singleton helper
- // object that implements traversal and unlinking logic for the nsISupports
- // in question.
- CallQueryInterface(s, cp);
-#ifdef DEBUG_CC
- if (cp)
- ++sCollector->mStats.mSuccessfulQI;
- else
- ++sCollector->mStats.mFailedQI;
-#endif
-}
-
-nsCycleCollectionParticipant *
-nsCycleCollectionXPCOMRuntime::ToParticipant(void *p)
-{
- nsXPCOMCycleCollectionParticipant *cp;
- ::ToParticipant(static_cast<nsISupports*>(p), &cp);
- return cp;
-}
-
-
-template<class Visitor>
-void
-GraphWalker<Visitor>::Walk(PtrInfo *s0)
-{
- nsDeque queue;
- queue.Push(s0);
- DoWalk(queue);
-}
-
-template<class Visitor>
-void
-GraphWalker<Visitor>::WalkFromRoots(GCGraph& aGraph)
-{
- nsDeque queue;
- NodePool::Enumerator etor(aGraph.mNodes);
- for (PRUint32 i = 0; i < aGraph.mRootCount; ++i) {
- queue.Push(etor.GetNext());
- }
- DoWalk(queue);
-}
-
-template<class Visitor>
-void
-GraphWalker<Visitor>::DoWalk(nsDeque &aQueue)
-{
- // Use aQueue to match the breadth-first traversal used when we
- // built the graph, for hopefully-better locality.
- while (aQueue.GetSize() > 0) {
- PtrInfo *pi = static_cast<PtrInfo*>(aQueue.PopFront());
-
- if (mVisitor.ShouldVisitNode(pi)) {
- mVisitor.VisitNode(pi);
- for (EdgePool::Iterator child = pi->FirstChild(),
- child_end = pi->LastChild();
- child != child_end; ++child) {
- aQueue.Push(*child);
- }
- }
- }
-
-#ifdef DEBUG_CC
- sCollector->mStats.mWalkedGraph++;
-#endif
-}
-
-
-class nsCycleCollectorLogger : public nsICycleCollectorListener
-{
-public:
- nsCycleCollectorLogger() : mStream(nsnull)
- {
- }
- ~nsCycleCollectorLogger()
- {
- if (mStream) {
- fclose(mStream);
- }
- }
- NS_DECL_ISUPPORTS
-
- NS_IMETHOD Begin()
- {
- char name[255];
- sprintf(name, "cc-edges-%d.%d.log", ++gLogCounter, base::GetCurrentProcId());
- mStream = fopen(name, "w");
-
- return mStream ? NS_OK : NS_ERROR_FAILURE;
- }
- NS_IMETHOD NoteRefCountedObject(PRUint64 aAddress, PRUint32 refCount,
- const char *aObjectDescription)
- {
- fprintf(mStream, "%p [rc=%u] %s\n", (void*)aAddress, refCount,
- aObjectDescription);
-
- return NS_OK;
- }
- NS_IMETHOD NoteGCedObject(PRUint64 aAddress, bool aMarked,
- const char *aObjectDescription)
- {
- fprintf(mStream, "%p [gc%s] %s\n", (void*)aAddress,
- aMarked ? ".marked" : "", aObjectDescription);
-
- return NS_OK;
- }
- NS_IMETHOD NoteEdge(PRUint64 aToAddress, const char *aEdgeName)
- {
- fprintf(mStream, "> %p %s\n", (void*)aToAddress, aEdgeName);
-
- return NS_OK;
- }
- NS_IMETHOD BeginResults()
- {
- fputs("==========\n", mStream);
-
- return NS_OK;
- }
- NS_IMETHOD DescribeRoot(PRUint64 aAddress, PRUint32 aKnownEdges)
- {
- fprintf(mStream, "%p [known=%u]\n", (void*)aAddress, aKnownEdges);
-
- return NS_OK;
- }
- NS_IMETHOD DescribeGarbage(PRUint64 aAddress)
- {
- fprintf(mStream, "%p [garbage]\n", (void*)aAddress);
-
- return NS_OK;
- }
- NS_IMETHOD End()
- {
- fclose(mStream);
- mStream = nsnull;
-
- return NS_OK;
- }
-
-private:
- FILE *mStream;
-
- static PRUint32 gLogCounter;
-};
-
-NS_IMPL_ISUPPORTS1(nsCycleCollectorLogger, nsICycleCollectorListener)
-
-PRUint32 nsCycleCollectorLogger::gLogCounter = 0;
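-
-// Sketch of the cc-edges-N.M.log format the logger produces, reconstructed
-// from the fprintf calls above (addresses and names are illustrative
-// placeholders):
-//
-//   0x12345678 [rc=2] nsSomeClass        <- NoteRefCountedObject
-//   > 0x9abcdef0 mSomeEdge               <- NoteEdge
-//   0x9abcdef0 [gc.marked] JS Object     <- NoteGCedObject
-//   ==========                           <- BeginResults
-//   0x12345678 [known=1]                 <- DescribeRoot
-//   0x9abcdef0 [garbage]                 <- DescribeGarbage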
-
-nsresult
-nsCycleCollectorLoggerConstructor(nsISupports* aOuter,
- const nsIID& aIID,
- void* *aInstancePtr)
-{
- NS_ENSURE_TRUE(!aOuter, NS_ERROR_NO_AGGREGATION);
-
- nsISupports *logger = new nsCycleCollectorLogger();
-
- return logger->QueryInterface(aIID, aInstancePtr);
-}
-
-////////////////////////////////////////////////////////////////////////
-// Bacon & Rajan's |MarkRoots| routine.
-////////////////////////////////////////////////////////////////////////
-
-struct PtrToNodeEntry : public PLDHashEntryHdr
-{
- // The key is mNode->mPointer
- PtrInfo *mNode;
-};
-
-static bool
-PtrToNodeMatchEntry(PLDHashTable *table,
- const PLDHashEntryHdr *entry,
- const void *key)
-{
- const PtrToNodeEntry *n = static_cast<const PtrToNodeEntry*>(entry);
- return n->mNode->mPointer == key;
-}
-
-static PLDHashTableOps PtrNodeOps = {
- PL_DHashAllocTable,
- PL_DHashFreeTable,
- PL_DHashVoidPtrKeyStub,
- PtrToNodeMatchEntry,
- PL_DHashMoveEntryStub,
- PL_DHashClearEntryStub,
- PL_DHashFinalizeStub,
- nsnull
-};
-
-class GCGraphBuilder : public nsCycleCollectionTraversalCallback
-{
-private:
- NodePool::Builder mNodeBuilder;
- EdgePool::Builder mEdgeBuilder;
- nsTArray<WeakMapping> &mWeakMaps;
- PLDHashTable mPtrToNodeMap;
- PtrInfo *mCurrPi;
- nsCycleCollectionLanguageRuntime **mRuntimes; // weak, from nsCycleCollector
- nsCString mNextEdgeName;
- nsICycleCollectorListener *mListener;
-
-public:
- GCGraphBuilder(GCGraph &aGraph,
- nsCycleCollectionLanguageRuntime **aRuntimes,
- nsICycleCollectorListener *aListener);
- ~GCGraphBuilder();
- bool Initialized();
-
- PRUint32 Count() const { return mPtrToNodeMap.entryCount; }
-
-#ifdef DEBUG_CC
- PtrInfo* AddNode(void *s, nsCycleCollectionParticipant *aParticipant,
- PRUint32 aLangID);
-#else
- PtrInfo* AddNode(void *s, nsCycleCollectionParticipant *aParticipant);
- PtrInfo* AddNode(void *s, nsCycleCollectionParticipant *aParticipant,
- PRUint32 aLangID)
- {
- return AddNode(s, aParticipant);
- }
-#endif
- PtrInfo* AddWeakMapNode(void* node);
- void Traverse(PtrInfo* aPtrInfo);
- void SetLastChild();
-
- // nsCycleCollectionTraversalCallback methods.
- NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports *root);
-
-private:
- void DescribeNode(PRUint32 refCount,
- size_t objSz,
- const char *objName)
- {
- mCurrPi->mRefCount = refCount;
-#ifdef DEBUG_CC
- mCurrPi->mBytes = objSz;
- mCurrPi->mName = PL_strdup(objName);
- sCollector->mStats.mVisitedNode++;
-#endif
- }
-
- NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refCount, size_t objSz,
- const char *objName);
- NS_IMETHOD_(void) DescribeGCedNode(bool isMarked, size_t objSz,
- const char *objName);
- NS_IMETHOD_(void) NoteRoot(PRUint32 langID, void *child,
- nsCycleCollectionParticipant* participant);
- NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child);
- NS_IMETHOD_(void) NoteNativeChild(void *child,
- nsCycleCollectionParticipant *participant);
- NS_IMETHOD_(void) NoteScriptChild(PRUint32 langID, void *child);
- NS_IMETHOD_(void) NoteNextEdgeName(const char* name);
- NS_IMETHOD_(void) NoteWeakMapping(void *map, void *key, void *val);
-};
-
-GCGraphBuilder::GCGraphBuilder(GCGraph &aGraph,
- nsCycleCollectionLanguageRuntime **aRuntimes,
- nsICycleCollectorListener *aListener)
- : mNodeBuilder(aGraph.mNodes),
- mEdgeBuilder(aGraph.mEdges),
- mWeakMaps(aGraph.mWeakMaps),
- mRuntimes(aRuntimes),
- mListener(aListener)
-{
- if (!PL_DHashTableInit(&mPtrToNodeMap, &PtrNodeOps, nsnull,
- sizeof(PtrToNodeEntry), 32768))
- mPtrToNodeMap.ops = nsnull;
- // We want all edges and all info if DEBUG_CC is set or if we have a
- // listener. Do we want them all the time?
-#ifndef DEBUG_CC
- if (mListener)
-#endif
- {
- mFlags |= nsCycleCollectionTraversalCallback::WANT_DEBUG_INFO |
- nsCycleCollectionTraversalCallback::WANT_ALL_TRACES;
- }
-}
-
-GCGraphBuilder::~GCGraphBuilder()
-{
- if (mPtrToNodeMap.ops)
- PL_DHashTableFinish(&mPtrToNodeMap);
-}
-
-bool
-GCGraphBuilder::Initialized()
-{
- return !!mPtrToNodeMap.ops;
-}
-
-PtrInfo*
-GCGraphBuilder::AddNode(void *s, nsCycleCollectionParticipant *aParticipant
- IF_DEBUG_CC_PARAM(PRUint32 aLangID)
- )
-{
- PtrToNodeEntry *e = static_cast<PtrToNodeEntry*>(PL_DHashTableOperate(&mPtrToNodeMap, s, PL_DHASH_ADD));
- if (!e)
- return nsnull;
-
- PtrInfo *result;
- if (!e->mNode) {
- // New entry.
- result = mNodeBuilder.Add(s, aParticipant
- IF_DEBUG_CC_PARAM(aLangID)
- );
- if (!result) {
- PL_DHashTableRawRemove(&mPtrToNodeMap, e);
- return nsnull;
- }
- e->mNode = result;
- } else {
- result = e->mNode;
- NS_ASSERTION(result->mParticipant == aParticipant,
- "nsCycleCollectionParticipant shouldn't change!");
- }
- return result;
-}
-
-void
-GCGraphBuilder::Traverse(PtrInfo* aPtrInfo)
-{
- mCurrPi = aPtrInfo;
-
-#ifdef DEBUG_CC
- if (!mCurrPi->mParticipant) {
- Fault("unknown pointer during walk", aPtrInfo);
- return;
- }
-#endif
-
- mCurrPi->SetFirstChild(mEdgeBuilder.Mark());
-
- nsresult rv = aPtrInfo->mParticipant->Traverse(aPtrInfo->mPointer, *this);
- if (NS_FAILED(rv)) {
- Fault("script pointer traversal failed", aPtrInfo);
- }
-}
-
-void
-GCGraphBuilder::SetLastChild()
-{
- mCurrPi->SetLastChild(mEdgeBuilder.Mark());
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteXPCOMRoot(nsISupports *root)
-{
- root = canonicalize(root);
- NS_ASSERTION(root,
- "Don't add objects that don't participate in collection!");
-
-#ifdef DEBUG_CC
- if (nsCycleCollector_shouldSuppress(root))
- return;
-#endif
-
- nsXPCOMCycleCollectionParticipant *cp;
- ToParticipant(root, &cp);
-
- NoteRoot(nsIProgrammingLanguage::CPLUSPLUS, root, cp);
-}
-
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteRoot(PRUint32 langID, void *root,
- nsCycleCollectionParticipant* participant)
-{
- NS_ASSERTION(root, "Don't add a null root!");
-
- if (langID > nsIProgrammingLanguage::MAX || !mRuntimes[langID]) {
- Fault("adding root for unregistered language", root);
- return;
- }
-
- AddNode(root, participant, langID);
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::DescribeRefCountedNode(nsrefcnt refCount, size_t objSz,
- const char *objName)
-{
- if (refCount == 0)
- Fault("zero refcount", mCurrPi);
- if (refCount == PR_UINT32_MAX)
- Fault("overflowing refcount", mCurrPi);
- sCollector->mVisitedRefCounted++;
-
- if (mListener) {
- mListener->NoteRefCountedObject((PRUint64)mCurrPi->mPointer, refCount,
- objName);
- }
-
- DescribeNode(refCount, objSz, objName);
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::DescribeGCedNode(bool isMarked, size_t objSz,
- const char *objName)
-{
- PRUint32 refCount = isMarked ? PR_UINT32_MAX : 0;
- sCollector->mVisitedGCed++;
-
- if (mListener) {
- mListener->NoteGCedObject((PRUint64)mCurrPi->mPointer, isMarked,
- objName);
- }
-
- DescribeNode(refCount, objSz, objName);
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteXPCOMChild(nsISupports *child)
-{
- nsCString edgeName;
- if (WantDebugInfo()) {
- edgeName.Assign(mNextEdgeName);
- mNextEdgeName.Truncate();
- }
- if (!child || !(child = canonicalize(child)))
- return;
-
-#ifdef DEBUG_CC
- if (nsCycleCollector_shouldSuppress(child))
- return;
-#endif
-
- nsXPCOMCycleCollectionParticipant *cp;
- ToParticipant(child, &cp);
- if (cp) {
- PtrInfo *childPi = AddNode(child, cp, nsIProgrammingLanguage::CPLUSPLUS);
- if (!childPi)
- return;
- mEdgeBuilder.Add(childPi);
-#ifdef DEBUG_CC
- mCurrPi->mEdgeNames.AppendElement(edgeName);
-#endif
- if (mListener) {
- mListener->NoteEdge((PRUint64)child, edgeName.get());
- }
- ++childPi->mInternalRefs;
- }
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteNativeChild(void *child,
- nsCycleCollectionParticipant *participant)
-{
- nsCString edgeName;
- if (WantDebugInfo()) {
- edgeName.Assign(mNextEdgeName);
- mNextEdgeName.Truncate();
- }
- if (!child)
- return;
-
- NS_ASSERTION(participant, "Need a nsCycleCollectionParticipant!");
-
- PtrInfo *childPi = AddNode(child, participant, nsIProgrammingLanguage::CPLUSPLUS);
- if (!childPi)
- return;
- mEdgeBuilder.Add(childPi);
-#ifdef DEBUG_CC
- mCurrPi->mEdgeNames.AppendElement(edgeName);
-#endif
- if (mListener) {
- mListener->NoteEdge((PRUint64)child, edgeName.get());
- }
- ++childPi->mInternalRefs;
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteScriptChild(PRUint32 langID, void *child)
-{
- nsCString edgeName;
- if (WantDebugInfo()) {
- edgeName.Assign(mNextEdgeName);
- mNextEdgeName.Truncate();
- }
- if (!child)
- return;
-
- if (langID > nsIProgrammingLanguage::MAX) {
- Fault("traversing pointer for unknown language", child);
- return;
- }
-
- if (!mRuntimes[langID]) {
- NS_WARNING("Not collecting cycles involving objects for scripting "
- "languages that don't participate in cycle collection.");
- return;
- }
-
- // skip over non-grey JS children
- if (langID == nsIProgrammingLanguage::JAVASCRIPT &&
- !xpc_GCThingIsGrayCCThing(child) && !WantAllTraces()) {
- return;
- }
-
- nsCycleCollectionParticipant *cp = mRuntimes[langID]->ToParticipant(child);
- if (!cp)
- return;
-
- PtrInfo *childPi = AddNode(child, cp, langID);
- if (!childPi)
- return;
- mEdgeBuilder.Add(childPi);
-#ifdef DEBUG_CC
- mCurrPi->mEdgeNames.AppendElement(edgeName);
-#endif
- if (mListener) {
- mListener->NoteEdge((PRUint64)child, edgeName.get());
- }
- ++childPi->mInternalRefs;
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteNextEdgeName(const char* name)
-{
- if (WantDebugInfo()) {
- mNextEdgeName = name;
- }
-}
-
-PtrInfo*
-GCGraphBuilder::AddWeakMapNode(void *node)
-{
- nsCycleCollectionParticipant *cp;
- NS_ASSERTION(node, "Weak map node should be non-null.");
-
- if (!xpc_GCThingIsGrayCCThing(node) && !WantAllTraces())
- return nsnull;
-
- cp = mRuntimes[nsIProgrammingLanguage::JAVASCRIPT]->ToParticipant(node);
- NS_ASSERTION(cp, "Javascript runtime participant should be non-null.");
- return AddNode(node, cp);
-}
-
-NS_IMETHODIMP_(void)
-GCGraphBuilder::NoteWeakMapping(void *map, void *key, void *val)
-{
- PtrInfo *valNode = AddWeakMapNode(val);
-
- if (!valNode)
- return;
-
- WeakMapping *mapping = mWeakMaps.AppendElement();
- mapping->mMap = map ? AddWeakMapNode(map) : nsnull;
- mapping->mKey = key ? AddWeakMapNode(key) : nsnull;
- mapping->mVal = valNode;
-}
-
-static bool
-AddPurpleRoot(GCGraphBuilder &builder, nsISupports *root)
-{
- root = canonicalize(root);
- NS_ASSERTION(root,
- "Don't add objects that don't participate in collection!");
-
- nsXPCOMCycleCollectionParticipant *cp;
- ToParticipant(root, &cp);
-
- PtrInfo *pinfo = builder.AddNode(root, cp,
- nsIProgrammingLanguage::CPLUSPLUS);
- if (!pinfo) {
- return false;
- }
-
- cp->UnmarkPurple(root);
-
- return true;
-}
-
-#ifdef DEBUG_CC
-static PLDHashOperator
-noteAllCallback(nsVoidPtrHashKey* key, void* userArg)
-{
- GCGraphBuilder *builder = static_cast<GCGraphBuilder*>(userArg);
- builder->NoteXPCOMRoot(
- static_cast<nsISupports*>(const_cast<void*>(key->GetKey())));
- return PL_DHASH_NEXT;
-}
-
-void
-nsPurpleBuffer::NoteAll(GCGraphBuilder &builder)
-{
- mCompatObjects.EnumerateEntries(noteAllCallback, &builder);
-
- for (Block *b = &mFirstBlock; b; b = b->mNext) {
- for (nsPurpleBufferEntry *e = b->mEntries,
- *eEnd = ArrayEnd(b->mEntries);
- e != eEnd; ++e) {
- if (!(PRUword(e->mObject) & PRUword(1)) && e->mObject) {
- builder.NoteXPCOMRoot(e->mObject);
- }
- }
- }
-}
-#endif
-
-void
-nsCycleCollector::SelectPurple(GCGraphBuilder &builder)
-{
- mPurpleBuf.SelectPointers(builder);
-}
-
-void
-nsCycleCollector::MarkRoots(GCGraphBuilder &builder)
-{
- mGraph.mRootCount = builder.Count();
-
- // read the PtrInfo out of the graph that we are building
- NodePool::Enumerator queue(mGraph.mNodes);
- while (!queue.IsDone()) {
- PtrInfo *pi = queue.GetNext();
- builder.Traverse(pi);
- if (queue.AtBlockEnd())
- builder.SetLastChild();
- }
- if (mGraph.mRootCount > 0)
- builder.SetLastChild();
-}
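-
-// (Note on the AtBlockEnd() calls above: because of the (this + 1) trick in
-// PtrInfo, the last node of each NodePool block, and the very last node
-// overall, need their end-of-children mark written out explicitly.)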
-
-
-////////////////////////////////////////////////////////////////////////
-// Bacon & Rajan's |ScanRoots| routine.
-////////////////////////////////////////////////////////////////////////
-
-
-struct ScanBlackVisitor
-{
- ScanBlackVisitor(PRUint32 &aWhiteNodeCount)
- : mWhiteNodeCount(aWhiteNodeCount)
- {
- }
-
- bool ShouldVisitNode(PtrInfo const *pi)
- {
- return pi->mColor != black;
- }
-
- void VisitNode(PtrInfo *pi)
- {
- if (pi->mColor == white)
- --mWhiteNodeCount;
- pi->mColor = black;
-#ifdef DEBUG_CC
- sCollector->mStats.mSetColorBlack++;
-#endif
- }
-
- PRUint32 &mWhiteNodeCount;
-};
-
-
-struct scanVisitor
-{
- scanVisitor(PRUint32 &aWhiteNodeCount) : mWhiteNodeCount(aWhiteNodeCount)
- {
- }
-
- bool ShouldVisitNode(PtrInfo const *pi)
- {
- return pi->mColor == grey;
- }
-
- void VisitNode(PtrInfo *pi)
- {
- if (pi->mInternalRefs > pi->mRefCount && pi->mRefCount > 0)
- Fault("traversed refs exceed refcount", pi);
-
- if (pi->mInternalRefs == pi->mRefCount || pi->mRefCount == 0) {
- pi->mColor = white;
- ++mWhiteNodeCount;
-#ifdef DEBUG_CC
- sCollector->mStats.mSetColorWhite++;
-#endif
- } else {
- GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount)).Walk(pi);
- NS_ASSERTION(pi->mColor == black,
- "Why didn't ScanBlackVisitor make pi black?");
- }
- }
-
- PRUint32 &mWhiteNodeCount;
-};
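-
-// Worked example of the scan rule (illustrative): take a two-node cycle
-// A <-> B where A also holds one reference from outside the graph. Then A
-// has mRefCount == 2 but mInternalRefs == 1, so A is scanned black along
-// with everything it reaches (here B). Without the external reference, both
-// nodes satisfy mInternalRefs == mRefCount and are colored white, i.e.
-// they are garbage.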
-
-// Iterate over the WeakMaps. If we mark anything while iterating
-// over the WeakMaps, we must iterate over all of the WeakMaps again.
-void
-nsCycleCollector::ScanWeakMaps()
-{
- bool anyChanged;
- do {
- anyChanged = false;
- for (PRUint32 i = 0; i < mGraph.mWeakMaps.Length(); i++) {
- WeakMapping *wm = &mGraph.mWeakMaps[i];
-
- // If mMap or mKey are null, the original object was marked black.
- PRUint32 mColor = wm->mMap ? wm->mMap->mColor : black;
- PRUint32 kColor = wm->mKey ? wm->mKey->mColor : black;
- PtrInfo *v = wm->mVal;
-
- // All non-null weak mapping maps, keys and values are
- // roots (in the sense of WalkFromRoots) in the cycle
- // collector graph, and thus should have been colored
- // either black or white in ScanRoots().
- NS_ASSERTION(mColor != grey, "Uncolored weak map");
- NS_ASSERTION(kColor != grey, "Uncolored weak map key");
- NS_ASSERTION(v->mColor != grey, "Uncolored weak map value");
-
- if (mColor == black && kColor == black && v->mColor != black) {
- GraphWalker<ScanBlackVisitor>(ScanBlackVisitor(mWhiteNodeCount)).Walk(v);
- anyChanged = true;
- }
- }
- } while (anyChanged);
-}
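-
-// (Example of why the loop runs to a fixed point: if an entry's key K is
-// kept alive only by the value V of another entry in the same map, then
-// blackening V on one pass can make the "map black && key black" condition
-// for K's entry become true only on a later pass.)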
-
-void
-nsCycleCollector::ScanRoots()
-{
- mWhiteNodeCount = 0;
-
- // On the assumption that most nodes will be black, it's
- // probably faster to use a GraphWalker than a
- // NodePool::Enumerator.
- GraphWalker<scanVisitor>(scanVisitor(mWhiteNodeCount)).WalkFromRoots(mGraph);
-
- ScanWeakMaps();
-
-#ifdef DEBUG_CC
- // Sanity check: scan should have colored all grey nodes black or
- // white. So we ensure we have no grey nodes at this point.
- NodePool::Enumerator etor(mGraph.mNodes);
- while (!etor.IsDone())
- {
- PtrInfo *pinfo = etor.GetNext();
- if (pinfo->mColor == grey) {
- Fault("valid grey node after scanning", pinfo);
- }
- }
-#endif
-}
-
-
-////////////////////////////////////////////////////////////////////////
-// Bacon & Rajan's |CollectWhite| routine, somewhat modified.
-////////////////////////////////////////////////////////////////////////
-
-bool
-nsCycleCollector::CollectWhite(nsICycleCollectorListener *aListener)
-{
- // Explanation of "somewhat modified": we have no way to collect the
- // set of whites "all at once", we have to ask each of them to drop
- // their outgoing links and assume this will cause the garbage cycle
- // to *mostly* self-destruct (except for the reference we continue
- // to hold).
- //
- // To do this "safely" we must make sure that the white nodes we're
- // operating on are stable for the duration of our operation. So we
- // make 3 sets of calls to language runtimes:
- //
- // - Root(whites), which should pin the whites in memory.
- // - Unlink(whites), which drops outgoing links on each white.
- // - Unroot(whites), which returns the whites to normal GC.
-
- nsresult rv;
-
- NS_ASSERTION(mWhiteNodes->IsEmpty(),
- "FinishCollection wasn't called?");
-
- mWhiteNodes->SetCapacity(mWhiteNodeCount);
-
- NodePool::Enumerator etor(mGraph.mNodes);
- while (!etor.IsDone())
- {
- PtrInfo *pinfo = etor.GetNext();
- if (pinfo->mColor == white && mWhiteNodes->AppendElement(pinfo)) {
- rv = pinfo->mParticipant->Root(pinfo->mPointer);
- if (NS_FAILED(rv)) {
- Fault("Failed root call while unlinking", pinfo);
- mWhiteNodes->RemoveElementAt(mWhiteNodes->Length() - 1);
- }
- }
- }
-
-#if defined(DEBUG_CC) && !defined(__MINGW32__) && defined(WIN32)
- struct _CrtMemState ms1, ms2;
- _CrtMemCheckpoint(&ms1);
-#endif
-
- PRUint32 i, count = mWhiteNodes->Length();
-
- if (aListener) {
- for (i = 0; i < count; ++i) {
- PtrInfo *pinfo = mWhiteNodes->ElementAt(i);
- aListener->DescribeGarbage((PRUint64)pinfo->mPointer);
- }
- aListener->End();
- }
-
- for (i = 0; i < count; ++i) {
- PtrInfo *pinfo = mWhiteNodes->ElementAt(i);
- rv = pinfo->mParticipant->Unlink(pinfo->mPointer);
- if (NS_FAILED(rv)) {
- Fault("Failed unlink call while unlinking", pinfo);
-#ifdef DEBUG_CC
- mStats.mFailedUnlink++;
-#endif
- }
- else {
-#ifdef DEBUG_CC
- ++mStats.mCollectedNode;
-#endif
- }
- }
-
- for (i = 0; i < count; ++i) {
- PtrInfo *pinfo = mWhiteNodes->ElementAt(i);
- rv = pinfo->mParticipant->Unroot(pinfo->mPointer);
- if (NS_FAILED(rv))
- Fault("Failed unroot call while unlinking", pinfo);
- }
-
-#if defined(DEBUG_CC) && !defined(__MINGW32__) && defined(WIN32)
- _CrtMemCheckpoint(&ms2);
- if (ms2.lTotalCount < ms1.lTotalCount)
- mStats.mFreedBytes += (ms1.lTotalCount - ms2.lTotalCount);
-#endif
-
- mCollectedObjects += count;
- return count > 0;
-}
-
-
-#ifdef DEBUG_CC
-////////////////////////////////////////////////////////////////////////
-// Memory-hooking stuff
-// When debugging wild pointers, it sometimes helps to hook malloc and
-// free. This stuff is disabled unless you set an environment variable.
-////////////////////////////////////////////////////////////////////////
-
-static bool hookedMalloc = false;
-
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <malloc.h>
-
-static void* (*old_memalign_hook)(size_t, size_t, const void *);
-static void* (*old_realloc_hook)(void *, size_t, const void *);
-static void* (*old_malloc_hook)(size_t, const void *);
-static void (*old_free_hook)(void *, const void *);
-
-static void* my_memalign_hook(size_t, size_t, const void *);
-static void* my_realloc_hook(void *, size_t, const void *);
-static void* my_malloc_hook(size_t, const void *);
-static void my_free_hook(void *, const void *);
-
-static inline void
-install_old_hooks()
-{
- __memalign_hook = old_memalign_hook;
- __realloc_hook = old_realloc_hook;
- __malloc_hook = old_malloc_hook;
- __free_hook = old_free_hook;
-}
-
-static inline void
-save_old_hooks()
-{
- // Glibc docs recommend re-saving old hooks on
- // return from recursive calls. Strangely when
- // we do this, we find ourselves in infinite
- // recursion.
-
- // old_memalign_hook = __memalign_hook;
- // old_realloc_hook = __realloc_hook;
- // old_malloc_hook = __malloc_hook;
- // old_free_hook = __free_hook;
-}
-
-static inline void
-install_new_hooks()
-{
- __memalign_hook = my_memalign_hook;
- __realloc_hook = my_realloc_hook;
- __malloc_hook = my_malloc_hook;
- __free_hook = my_free_hook;
-}
-
-static void*
-my_realloc_hook(void *ptr, size_t size, const void *caller)
-{
- void *result;
-
- install_old_hooks();
- result = realloc(ptr, size);
- save_old_hooks();
-
- if (sCollector) {
- sCollector->Freed(ptr);
- sCollector->Allocated(result, size);
- }
-
- install_new_hooks();
-
- return result;
-}
-
-
-static void*
-my_memalign_hook(size_t alignment, size_t size, const void *caller)
-{
- void *result;
-
- install_old_hooks();
- result = memalign(alignment, size);
- save_old_hooks();
-
- if (sCollector)
- sCollector->Allocated(result, size);
-
- install_new_hooks();
-
- return result;
-}
-
-
-static void
-my_free_hook (void *ptr, const void *caller)
-{
- install_old_hooks();
- free(ptr);
- save_old_hooks();
-
- if (sCollector)
- sCollector->Freed(ptr);
-
- install_new_hooks();
-}
-
-
-static void*
-my_malloc_hook (size_t size, const void *caller)
-{
- void *result;
-
- install_old_hooks();
- result = malloc (size);
- save_old_hooks();
-
- if (sCollector)
- sCollector->Allocated(result, size);
-
- install_new_hooks();
-
- return result;
-}
-
-
-static void
-InitMemHook(void)
-{
- if (!hookedMalloc) {
- save_old_hooks();
- install_new_hooks();
- hookedMalloc = true;
- }
-}
-
-#elif defined(WIN32)
-#ifndef __MINGW32__
-
-static int
-AllocHook(int allocType, void *userData, size_t size, int blockType,
-          long requestNumber, const unsigned char *filename, int lineNumber)
-{
- if (allocType == _HOOK_FREE)
- sCollector->Freed(userData);
- return 1;
-}
-
-
-static void InitMemHook(void)
-{
- if (!hookedMalloc) {
- _CrtSetAllocHook (AllocHook);
- hookedMalloc = true;
- }
-}
-#endif // __MINGW32__
-
-#elif 0 // defined(XP_MACOSX)
-
-#include <malloc/malloc.h>
-
-static void (*old_free)(struct _malloc_zone_t *zone, void *ptr);
-
-static void
-freehook(struct _malloc_zone_t *zone, void *ptr)
-{
- if (sCollector)
- sCollector->Freed(ptr);
- old_free(zone, ptr);
-}
-
-
-static void
-InitMemHook(void)
-{
- if (!hookedMalloc) {
- malloc_zone_t *default_zone = malloc_default_zone();
- old_free = default_zone->free;
- default_zone->free = freehook;
- hookedMalloc = true;
- }
-}
-
-
-#else
-
-static void
-InitMemHook(void)
-{
-}
-
-#endif // GLIBC / WIN32 / OSX
-#endif // DEBUG_CC
-
-////////////////////////////////////////////////////////////////////////
-// Collector implementation
-////////////////////////////////////////////////////////////////////////
-
-nsCycleCollector::nsCycleCollector() :
- mCollectionInProgress(false),
- mScanInProgress(false),
- mCollectedObjects(0),
- mWhiteNodes(nsnull),
- mWhiteNodeCount(0),
- mVisitedRefCounted(0),
- mVisitedGCed(0),
-#ifdef DEBUG_CC
- mPurpleBuf(mParams, mStats),
- mPtrLog(nsnull)
-#else
- mPurpleBuf(mParams)
-#endif
-{
-#ifdef DEBUG_CC
- mExpectedGarbage.Init();
-#endif
-
- memset(mRuntimes, 0, sizeof(mRuntimes));
- mRuntimes[nsIProgrammingLanguage::CPLUSPLUS] = &mXPCOMRuntime;
-}
-
-
-nsCycleCollector::~nsCycleCollector()
-{
-}
-
-
-void
-nsCycleCollector::RegisterRuntime(PRUint32 langID,
- nsCycleCollectionLanguageRuntime *rt)
-{
- if (mParams.mDoNothing)
- return;
-
- if (langID > nsIProgrammingLanguage::MAX)
- Fault("unknown language runtime in registration");
-
- if (mRuntimes[langID])
- Fault("multiple registrations of language runtime", rt);
-
- mRuntimes[langID] = rt;
-}
-
-nsCycleCollectionLanguageRuntime *
-nsCycleCollector::GetRuntime(PRUint32 langID)
-{
- if (langID > nsIProgrammingLanguage::MAX)
- return nsnull;
-
- return mRuntimes[langID];
-}
-
-void
-nsCycleCollector::ForgetRuntime(PRUint32 langID)
-{
- if (mParams.mDoNothing)
- return;
-
- if (langID > nsIProgrammingLanguage::MAX)
- Fault("unknown language runtime in deregistration");
-
- if (! mRuntimes[langID])
- Fault("forgetting non-registered language runtime");
-
- mRuntimes[langID] = nsnull;
-}
-
-#ifdef DEBUG_CC
-
-class Suppressor :
- public nsCycleCollectionTraversalCallback
-{
-protected:
- static char *sSuppressionList;
- static bool sInitialized;
- bool mSuppressThisNode;
-public:
- Suppressor()
- {
- }
-
- bool shouldSuppress(nsISupports *s)
- {
- if (!sInitialized) {
- sSuppressionList = PR_GetEnv("XPCOM_CC_SUPPRESS");
- sInitialized = true;
- }
- if (sSuppressionList == nsnull) {
- mSuppressThisNode = false;
- } else {
- nsresult rv;
- nsXPCOMCycleCollectionParticipant *cp;
- rv = CallQueryInterface(s, &cp);
- if (NS_FAILED(rv)) {
- Fault("checking suppression on wrong type of pointer", s);
- return true;
- }
- cp->Traverse(s, *this);
- }
- return mSuppressThisNode;
- }
-
- NS_IMETHOD_(void) DescribeRefCountedNode(nsrefcnt refCount, size_t objSz,
- const char *objName)
- {
- mSuppressThisNode = (PL_strstr(sSuppressionList, objName) != nsnull);
- }
-
- NS_IMETHOD_(void) DescribeGCedNode(bool isMarked, size_t objSz,
- const char *objName)
- {
- mSuppressThisNode = (PL_strstr(sSuppressionList, objName) != nsnull);
- }
-
- NS_IMETHOD_(void) NoteXPCOMRoot(nsISupports *root) {};
- NS_IMETHOD_(void) NoteRoot(PRUint32 langID, void *root,
- nsCycleCollectionParticipant* participant) {};
- NS_IMETHOD_(void) NoteXPCOMChild(nsISupports *child) {}
- NS_IMETHOD_(void) NoteScriptChild(PRUint32 langID, void *child) {}
- NS_IMETHOD_(void) NoteNativeChild(void *child,
- nsCycleCollectionParticipant *participant) {}
- NS_IMETHOD_(void) NoteNextEdgeName(const char* name) {}
- NS_IMETHOD_(void) NoteWeakMapping(void *map, void *key, void *val) {}
-};
-
-char *Suppressor::sSuppressionList = nsnull;
-bool Suppressor::sInitialized = false;
-
-static bool
-nsCycleCollector_shouldSuppress(nsISupports *s)
-{
- Suppressor supp;
- return supp.shouldSuppress(s);
-}
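-
-// Usage sketch (hypothetical class name): since the suppression check is a
-// PL_strstr over the object name reported by Traverse, running with
-//
-//   XPCOM_CC_SUPPRESS=nsSomeClass
-//
-// in the environment keeps every object whose description contains
-// "nsSomeClass" out of the purple buffer.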
-#endif
-
-#ifdef DEBUG
-static bool
-nsCycleCollector_isScanSafe(nsISupports *s)
-{
- if (!s)
- return false;
-
- nsXPCOMCycleCollectionParticipant *cp;
- ToParticipant(s, &cp);
-
- return cp != nsnull;
-}
-#endif
-
-bool
-nsCycleCollector::Suspect(nsISupports *n)
-{
- AbortIfOffMainThreadIfCheckFast();
-
- // Re-entering ::Suspect during collection used to be a fault, but
- // we are canonicalizing nsISupports pointers using QI, so we will
- // see some spurious refcount traffic here.
-
- if (mScanInProgress)
- return false;
-
- NS_ASSERTION(nsCycleCollector_isScanSafe(n),
- "suspected a non-scansafe pointer");
-
- if (mParams.mDoNothing)
- return false;
-
-#ifdef DEBUG_CC
- mStats.mSuspectNode++;
-
- if (nsCycleCollector_shouldSuppress(n))
- return false;
-
-#ifndef __MINGW32__
- if (mParams.mHookMalloc)
- InitMemHook();
-#endif
-
- if (mParams.mLogPointers) {
- if (!mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
- fprintf(mPtrLog, "S %p\n", static_cast<void*>(n));
- }
-#endif
-
- return mPurpleBuf.PutCompatObject(n);
-}
-
-
-bool
-nsCycleCollector::Forget(nsISupports *n)
-{
- AbortIfOffMainThreadIfCheckFast();
-
- // Re-entering ::Forget during collection used to be a fault, but
- // we are canonicalizing nsISupports pointers using QI, so we will
- // see some spurious refcount traffic here.
-
- if (mScanInProgress)
- return false;
-
- if (mParams.mDoNothing)
- return true; // it's as good as forgotten
-
-#ifdef DEBUG_CC
- mStats.mForgetNode++;
-
-#ifndef __MINGW32__
- if (mParams.mHookMalloc)
- InitMemHook();
-#endif
-
- if (mParams.mLogPointers) {
- if (!mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
- fprintf(mPtrLog, "F %p\n", static_cast<void*>(n));
- }
-#endif
-
- mPurpleBuf.RemoveCompatObject(n);
- return true;
-}
-
-nsPurpleBufferEntry*
-nsCycleCollector::Suspect2(nsISupports *n)
-{
- AbortIfOffMainThreadIfCheckFast();
-
- // Re-entering ::Suspect during collection used to be a fault, but
- // we are canonicalizing nsISupports pointers using QI, so we will
- // see some spurious refcount traffic here.
-
- if (mScanInProgress)
- return nsnull;
-
- NS_ASSERTION(nsCycleCollector_isScanSafe(n),
- "suspected a non-scansafe pointer");
-
- if (mParams.mDoNothing)
- return nsnull;
-
-#ifdef DEBUG_CC
- mStats.mSuspectNode++;
-
- if (nsCycleCollector_shouldSuppress(n))
- return nsnull;
-
-#ifndef __MINGW32__
- if (mParams.mHookMalloc)
- InitMemHook();
-#endif
-
- if (mParams.mLogPointers) {
- if (!mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
- fprintf(mPtrLog, "S %p\n", static_cast<void*>(n));
- }
-#endif
-
- // Caller is responsible for filling in result's mRefCnt.
- return mPurpleBuf.Put(n);
-}
-
-
-bool
-nsCycleCollector::Forget2(nsPurpleBufferEntry *e)
-{
- AbortIfOffMainThreadIfCheckFast();
-
- // Re-entering ::Forget during collection used to be a fault, but
- // we are canonicalizing nsISupports pointers using QI, so we will
- // see some spurious refcount traffic here.
-
- if (mScanInProgress)
- return false;
-
-#ifdef DEBUG_CC
- mStats.mForgetNode++;
-
-#ifndef __MINGW32__
- if (mParams.mHookMalloc)
- InitMemHook();
-#endif
-
- if (mParams.mLogPointers) {
- if (!mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
- fprintf(mPtrLog, "F %p\n", static_cast<void*>(e->mObject));
- }
-#endif
-
- mPurpleBuf.Remove(e);
- return true;
-}
-
-#ifdef DEBUG_CC
-void
-nsCycleCollector::Allocated(void *n, size_t sz)
-{
-}
-
-void
-nsCycleCollector::Freed(void *n)
-{
- mStats.mFreeCalls++;
-
- if (!n) {
- // Ignore null pointers coming through
- return;
- }
-
- if (mPurpleBuf.Exists(n)) {
- mStats.mForgetNode++;
- mStats.mFreedWhilePurple++;
- Fault("freed while purple", n);
-
- if (mParams.mLogPointers) {
- if (!mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
- fprintf(mPtrLog, "R %p\n", n);
- }
- }
-}
-#endif
-
-// The cycle collector uses the mark bitmap to discover what JS objects
-// were reachable only from XPConnect roots that might participate in
-// cycles. We ask the JS runtime whether we need to force a GC before
-// this CC. It returns true on startup (before the mark bits have been set),
-// and also when UnmarkGray has run out of stack. We also force GCs on shut
-// down to collect cycles involving both DOM and JS.
-void
-nsCycleCollector::GCIfNeeded(bool aForceGC)
-{
- NS_ASSERTION(NS_IsMainThread(),
- "nsCycleCollector::GCIfNeeded() must be called on the main thread.");
-
- if (mParams.mDoNothing)
- return;
-
- if (!mRuntimes[nsIProgrammingLanguage::JAVASCRIPT])
- return;
-
- nsCycleCollectionJSRuntime* rt =
- static_cast<nsCycleCollectionJSRuntime*>
- (mRuntimes[nsIProgrammingLanguage::JAVASCRIPT]);
- if (!rt->NeedCollect() && !aForceGC)
- return;
-
-#ifdef COLLECT_TIME_DEBUG
- PRTime start = PR_Now();
-#endif
- // rt->Collect() must be called from the main thread,
- // because it invokes XPCJSRuntime::GCCallback(cx, JSGC_BEGIN)
- // which returns false if not in the main thread.
- rt->Collect();
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: GC() took %lldms\n", (PR_Now() - start) / PR_USEC_PER_MSEC);
-#endif
-}
-
-bool
-nsCycleCollector::PrepareForCollection(nsTArray<PtrInfo*> *aWhiteNodes)
-{
-#if defined(DEBUG_CC) && !defined(__MINGW32__)
- if (!mParams.mDoNothing && mParams.mHookMalloc)
- InitMemHook();
-#endif
-
- // This can legitimately happen in a few cases. See bug 383651.
- if (mCollectionInProgress)
- return false;
-
- NS_TIME_FUNCTION;
-
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: nsCycleCollector::PrepareForCollection()\n");
-#endif
- mCollectionStart = TimeStamp::Now();
- mVisitedRefCounted = 0;
- mVisitedGCed = 0;
-
- mCollectionInProgress = true;
-
- nsCOMPtr<nsIObserverService> obs =
- mozilla::services::GetObserverService();
- if (obs)
- obs->NotifyObservers(nsnull, "cycle-collector-begin", nsnull);
-
- mFollowupCollection = false;
- mCollectedObjects = 0;
-
- mWhiteNodes = aWhiteNodes;
-
- return true;
-}
-
-void
-nsCycleCollector::CleanupAfterCollection()
-{
- mWhiteNodes = nsnull;
- mCollectionInProgress = false;
-
-#ifdef XP_OS2
- // Now that the cycle collector has freed some memory, we can try to
- // force the C library to give back as much memory to the system as
- // possible.
- _heapmin();
-#endif
-
- PRUint32 interval((TimeStamp::Now() - mCollectionStart).ToMilliseconds());
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: CleanupAfterCollection(), total time %ums\n", interval);
-#endif
- Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR, interval);
- Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_VISITED_REF_COUNTED, mVisitedRefCounted);
- Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_VISITED_GCED, mVisitedGCed);
- Telemetry::Accumulate(Telemetry::CYCLE_COLLECTOR_COLLECTED, mWhiteNodeCount);
-
-#ifdef DEBUG_CC
- ExplainLiveExpectedGarbage();
-#endif
-}
-
-PRUint32
-nsCycleCollector::Collect(PRUint32 aTryCollections,
- nsICycleCollectorListener *aListener)
-{
- nsAutoTArray<PtrInfo*, 4000> whiteNodes;
-
- if (!PrepareForCollection(&whiteNodes))
- return 0;
-
- PRUint32 totalCollections = 0;
- while (aTryCollections > totalCollections) {
- // Synchronous cycle collection. Always force a JS GC beforehand.
- GCIfNeeded(true);
- if (aListener && NS_FAILED(aListener->Begin()))
- aListener = nsnull;
- if (!(BeginCollection(aListener) &&
- FinishCollection(aListener)))
- break;
-
- ++totalCollections;
- }
-
- CleanupAfterCollection();
-
- return mCollectedObjects;
-}
-
-bool
-nsCycleCollector::BeginCollection(nsICycleCollectorListener *aListener)
-{
- // aListener should be Begin()'d before this
- if (mParams.mDoNothing)
- return false;
-
- GCGraphBuilder builder(mGraph, mRuntimes, aListener);
- if (!builder.Initialized())
- return false;
-
-#ifdef COLLECT_TIME_DEBUG
- PRTime now = PR_Now();
-#endif
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i])
- mRuntimes[i]->BeginCycleCollection(builder, false);
- }
-
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: mRuntimes[*]->BeginCycleCollection() took %lldms\n",
- (PR_Now() - now) / PR_USEC_PER_MSEC);
-
- now = PR_Now();
-#endif
-
-#ifdef DEBUG_CC
- PRUint32 purpleStart = builder.Count();
-#endif
- mScanInProgress = true;
- SelectPurple(builder);
-#ifdef DEBUG_CC
- PRUint32 purpleEnd = builder.Count();
-
- if (purpleStart != purpleEnd) {
-#ifndef __MINGW32__
- if (mParams.mHookMalloc)
- InitMemHook();
-#endif
- if (mParams.mLogPointers && !mPtrLog)
- mPtrLog = fopen("pointer_log", "w");
-
- PRUint32 i = 0;
- NodePool::Enumerator queue(mGraph.mNodes);
- while (i++ < purpleStart) {
- queue.GetNext();
- }
- while (i++ < purpleEnd) {
- mStats.mForgetNode++;
- if (mParams.mLogPointers)
- fprintf(mPtrLog, "F %p\n", queue.GetNext()->mPointer);
- }
- }
-#endif
-
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: SelectPurple() took %lldms\n",
- (PR_Now() - now) / PR_USEC_PER_MSEC);
-#endif
-
- if (builder.Count() > 0) {
- // The main Bacon & Rajan collection algorithm.
-
-#ifdef COLLECT_TIME_DEBUG
- now = PR_Now();
-#endif
-
- MarkRoots(builder);
-
-#ifdef COLLECT_TIME_DEBUG
- {
- PRTime then = PR_Now();
- printf("cc: MarkRoots() took %lldms\n",
- (then - now) / PR_USEC_PER_MSEC);
- now = then;
- }
-#endif
-
- ScanRoots();
-
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: ScanRoots() took %lldms\n",
- (PR_Now() - now) / PR_USEC_PER_MSEC);
-#endif
-
- mScanInProgress = false;
-
- if (aListener) {
- aListener->BeginResults();
-
- NodePool::Enumerator etor(mGraph.mNodes);
- while (!etor.IsDone()) {
- PtrInfo *pi = etor.GetNext();
- if (pi->mColor == black &&
- pi->mRefCount > 0 && pi->mRefCount < PR_UINT32_MAX &&
- pi->mInternalRefs != pi->mRefCount) {
- aListener->DescribeRoot((PRUint64)pi->mPointer,
- pi->mInternalRefs);
- }
- }
- }
-
-#ifdef DEBUG_CC
- if (mFollowupCollection && purpleStart != purpleEnd) {
- PRUint32 i = 0;
- NodePool::Enumerator queue(mGraph.mNodes);
- while (i++ < purpleStart) {
- queue.GetNext();
- }
- while (i++ < purpleEnd) {
- PtrInfo *pi = queue.GetNext();
- if (pi->mColor == white) {
- printf("nsCycleCollector: a later shutdown collection collected the additional\n"
- " suspect %p %s\n"
- " (which could be fixed by improving traversal)\n",
- pi->mPointer, pi->mName);
- }
- }
- }
-#endif
-
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i])
- mRuntimes[i]->FinishTraverse();
- }
- }
- else {
- mScanInProgress = false;
- }
-
- return true;
-}
-
-bool
-nsCycleCollector::FinishCollection(nsICycleCollectorListener *aListener)
-{
-#ifdef COLLECT_TIME_DEBUG
- PRTime now = PR_Now();
-#endif
-
- bool collected = CollectWhite(aListener);
-
-#ifdef COLLECT_TIME_DEBUG
- printf("cc: CollectWhite() took %lldms\n",
- (PR_Now() - now) / PR_USEC_PER_MSEC);
-#endif
-
-#ifdef DEBUG_CC
- mStats.mCollection++;
- if (mParams.mReportStats)
- mStats.Dump();
-#endif
-
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i])
- mRuntimes[i]->FinishCycleCollection();
- }
-
- mFollowupCollection = true;
-
-#ifdef DEBUG_CC
- // We wait until after FinishCollection to check the white nodes because
- // some objects may outlive CollectWhite but then be freed by
- // FinishCycleCollection (like XPConnect's deferred release of native
- // objects).
- PRUint32 i, count = mWhiteNodes->Length();
- for (i = 0; i < count; ++i) {
- PtrInfo *pinfo = mWhiteNodes->ElementAt(i);
- if (pinfo->mLangID == nsIProgrammingLanguage::CPLUSPLUS &&
- mPurpleBuf.Exists(pinfo->mPointer)) {
- printf("nsCycleCollector: %s object @%p is still alive after\n"
- " calling RootAndUnlinkJSObjects, Unlink, and Unroot on"
- " it! This probably\n"
- " means the Unlink implementation was insufficient.\n",
- pinfo->mName, pinfo->mPointer);
- }
- }
-#endif
-
- mWhiteNodes->Clear();
- ClearGraph();
-
- mParams.mDoNothing = false;
-
- return collected;
-}
-
-PRUint32
-nsCycleCollector::SuspectedCount()
-{
- return mPurpleBuf.Count();
-}
-
-void
-nsCycleCollector::Shutdown()
-{
- // Here we want to run a final collection and then permanently
- // disable the collector because the program is shutting down.
-
- nsCOMPtr<nsICycleCollectorListener> listener;
- if (mParams.mLogGraphs) {
- listener = new nsCycleCollectorLogger();
- }
- Collect(SHUTDOWN_COLLECTIONS(mParams), listener);
-
-#ifdef DEBUG_CC
- GCGraphBuilder builder(mGraph, mRuntimes, nsnull);
- mScanInProgress = true;
- SelectPurple(builder);
- mScanInProgress = false;
- if (builder.Count() != 0) {
- printf("Might have been able to release more cycles if the cycle collector would "
- "run once more at shutdown.\n");
- }
- ClearGraph();
-#endif
- mParams.mDoNothing = true;
-}
-
-#ifdef DEBUG_CC
-
-static PLDHashOperator
-AddExpectedGarbage(nsVoidPtrHashKey *p, void *arg)
-{
- GCGraphBuilder *builder = static_cast<GCGraphBuilder*>(arg);
- nsISupports *root =
- static_cast<nsISupports*>(const_cast<void*>(p->GetKey()));
- builder->NoteXPCOMRoot(root);
- return PL_DHASH_NEXT;
-}
-
-struct SetSCCVisitor
-{
- SetSCCVisitor(PRUint32 aIndex) : mIndex(aIndex) {}
- bool ShouldVisitNode(PtrInfo const *pi) { return pi->mSCCIndex == 0; }
- void VisitNode(PtrInfo *pi) { pi->mSCCIndex = mIndex; }
-private:
- PRUint32 mIndex;
-};
-
-struct SetNonRootGreyVisitor
-{
- bool ShouldVisitNode(PtrInfo const *pi) { return pi->mColor == white; }
- void VisitNode(PtrInfo *pi) { pi->mColor = grey; }
-};
-
-static void
-PrintPathToExpectedGarbage(PtrInfo *pi)
-{
- printf(" An object expected to be garbage could be "
- "reached from it by the path:\n");
- for (PtrInfo *path = pi, *prev = nsnull; prev != path;
- prev = path,
- path = path->mShortestPathToExpectedGarbage) {
- if (prev) {
- nsCString *edgeName = prev
- ->mShortestPathToExpectedGarbageEdgeName;
- printf(" via %s\n",
- edgeName->IsEmpty() ? ""
- : edgeName->get());
- }
- printf(" %s %p\n", path->mName, path->mPointer);
- }
-}
-
-void
-nsCycleCollector::ExplainLiveExpectedGarbage()
-{
- if (mScanInProgress || mCollectionInProgress)
- Fault("can't explain expected garbage during collection itself");
-
- if (mParams.mDoNothing) {
- printf("nsCycleCollector: not explaining expected garbage since\n"
- " cycle collection disabled\n");
- return;
- }
-
- mCollectionInProgress = true;
- mScanInProgress = true;
-
- {
- GCGraphBuilder builder(mGraph, mRuntimes, nsnull);
-
- // Instead of adding roots from the purple buffer, we add them
- // from the list of nodes we were expected to collect.
- // Put the expected garbage in *before* calling
- // BeginCycleCollection so that we can separate the expected
- // garbage from the NoteRoot calls in such a way that something
- // that's in both is considered expected garbage.
- mExpectedGarbage.EnumerateEntries(&AddExpectedGarbage, &builder);
-
- PRUint32 expectedGarbageCount = builder.Count();
-
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i])
- mRuntimes[i]->BeginCycleCollection(builder, true);
- }
-
- // But just for extra information, add entries from the purple
- // buffer too, since it may give us extra information about
- // traversal deficiencies.
- mPurpleBuf.NoteAll(builder);
-
- MarkRoots(builder);
- ScanRoots();
-
- mScanInProgress = false;
-
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i]) {
- mRuntimes[i]->FinishTraverse();
- }
- }
-
- bool describeExtraRefcounts = false;
- bool findCycleRoots = false;
- {
- NodePool::Enumerator queue(mGraph.mNodes);
- PRUint32 i = 0;
- while (!queue.IsDone()) {
- PtrInfo *pi = queue.GetNext();
- if (pi->mColor == white) {
- findCycleRoots = true;
- }
-
- if (pi->mInternalRefs != pi->mRefCount &&
- (i < expectedGarbageCount || i >= mGraph.mRootCount)) {
- // This check isn't particularly useful anymore
- // given that we need to enter this part for i >=
- // mGraph.mRootCount and there are plenty of
- // NoteRoot roots.
- describeExtraRefcounts = true;
- }
- ++i;
- }
- }
-
- if ((describeExtraRefcounts || findCycleRoots) &&
- CreateReversedEdges()) {
- // Note that the external references may have been external
- // to a different node in the cycle collection that just
- // happened, if that different node was purple and then
- // black.
-
- // Use mSCCIndex temporarily to track whether we've reached
- // nodes in the breadth-first search.
- const PRUint32 INDEX_UNREACHED = 0;
- const PRUint32 INDEX_REACHED = 1;
- NodePool::Enumerator etor_clear(mGraph.mNodes);
- while (!etor_clear.IsDone()) {
- PtrInfo *pi = etor_clear.GetNext();
- pi->mSCCIndex = INDEX_UNREACHED;
- }
-
- nsDeque queue; // for breadth-first search
- NodePool::Enumerator etor_roots(mGraph.mNodes);
- for (PRUint32 i = 0; i < mGraph.mRootCount; ++i) {
- PtrInfo *root_pi = etor_roots.GetNext();
- if (i < expectedGarbageCount) {
- root_pi->mSCCIndex = INDEX_REACHED;
- root_pi->mShortestPathToExpectedGarbage = root_pi;
- queue.Push(root_pi);
- }
- }
-
- while (queue.GetSize() > 0) {
- PtrInfo *pi = (PtrInfo*)queue.PopFront();
- for (ReversedEdge *e = pi->mReversedEdges; e; e = e->mNext) {
- if (e->mTarget->mSCCIndex == INDEX_UNREACHED) {
- e->mTarget->mSCCIndex = INDEX_REACHED;
- PtrInfo *target = e->mTarget;
- if (!target->mShortestPathToExpectedGarbage) {
- target->mShortestPathToExpectedGarbage = pi;
- target->mShortestPathToExpectedGarbageEdgeName =
- e->mEdgeName;
- }
- queue.Push(target);
- }
- }
-
- if (pi->mRefCount == PR_UINT32_MAX ||
- (pi->mInternalRefs != pi->mRefCount && pi->mRefCount > 0)) {
- if (pi->mRefCount == PR_UINT32_MAX) {
- printf("nsCycleCollector: %s %p was not collected due "
- "to \n"
- " external references\n",
- pi->mName, pi->mPointer);
- }
- else {
- printf("nsCycleCollector: %s %p was not collected due "
- "to %d\n"
- " external references (%d total - %d known)\n",
- pi->mName, pi->mPointer,
- pi->mRefCount - pi->mInternalRefs,
- pi->mRefCount, pi->mInternalRefs);
- }
-
- PrintPathToExpectedGarbage(pi);
-
- if (pi->mRefCount == PR_UINT32_MAX) {
- printf(" The known references to it were from:\n");
- }
- else {
- printf(" The %d known references to it were from:\n",
- pi->mInternalRefs);
- }
- for (ReversedEdge *e = pi->mReversedEdges;
- e; e = e->mNext) {
- printf(" %s %p",
- e->mTarget->mName, e->mTarget->mPointer);
- if (!e->mEdgeName->IsEmpty()) {
- printf(" via %s", e->mEdgeName->get());
- }
- printf("\n");
- }
- mRuntimes[pi->mLangID]->PrintAllReferencesTo(pi->mPointer);
- }
- }
-
- if (findCycleRoots) {
- // NOTE: This code changes the white nodes that are not
- // roots to gray.
-
- // Put the nodes in post-order traversal order from a
- // depth-first search.
- nsDeque DFSPostOrder;
-
- {
- // Use mSCCIndex temporarily to track the DFS numbering:
- const PRUint32 INDEX_UNREACHED = 0;
- const PRUint32 INDEX_TRAVERSING = 1;
- const PRUint32 INDEX_NUMBERED = 2;
-
- NodePool::Enumerator etor_clear(mGraph.mNodes);
- while (!etor_clear.IsDone()) {
- PtrInfo *pi = etor_clear.GetNext();
- pi->mSCCIndex = INDEX_UNREACHED;
- }
-
- nsDeque stack;
-
- NodePool::Enumerator etor_roots(mGraph.mNodes);
- for (PRUint32 i = 0; i < mGraph.mRootCount; ++i) {
- PtrInfo *root_pi = etor_roots.GetNext();
- stack.Push(root_pi);
- }
-
- while (stack.GetSize() > 0) {
- PtrInfo *pi = (PtrInfo*)stack.Peek();
- if (pi->mSCCIndex == INDEX_UNREACHED) {
- pi->mSCCIndex = INDEX_TRAVERSING;
- for (EdgePool::Iterator child = pi->FirstChild(),
- child_end = pi->LastChild();
- child != child_end; ++child) {
- stack.Push(*child);
- }
- } else {
- stack.Pop();
- // Somebody else might have numbered it already
- // (since this is depth-first, not breadth-first).
- // This happens if a node is pushed on the stack
- // a second time while it is on the stack in
- // UNREACHED state.
- if (pi->mSCCIndex == INDEX_TRAVERSING) {
- pi->mSCCIndex = INDEX_NUMBERED;
- DFSPostOrder.Push(pi);
- }
- }
- }
- }
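-
- // A minimal sketch of the same two-phase stack trick on a plain
- // adjacency list. std::vector/std::stack are stand-ins here, not
- // the NodePool/EdgePool types used above:
- //
- //   #include <stack>
- //   #include <vector>
- //
- //   enum Mark { UNREACHED, TRAVERSING, NUMBERED };
- //
- //   std::vector<int> DFSPostOrder(const std::vector<std::vector<int> >& g,
- //                                 const std::vector<int>& roots)
- //   {
- //       std::vector<Mark> mark(g.size(), UNREACHED);
- //       std::vector<int> order;
- //       std::stack<int> stack;
- //       for (size_t i = 0; i < roots.size(); ++i)
- //           stack.push(roots[i]);
- //       while (!stack.empty()) {
- //           int n = stack.top();
- //           if (mark[n] == UNREACHED) {
- //               mark[n] = TRAVERSING;   // first visit: expand children
- //               for (size_t c = 0; c < g[n].size(); ++c)
- //                   stack.push(g[n][c]);
- //           } else {
- //               stack.pop();
- //               if (mark[n] == TRAVERSING) {  // second visit: number it
- //                   mark[n] = NUMBERED;
- //                   order.push_back(n);
- //               }
- //               // NUMBERED nodes popped again are duplicates; skip.
- //           }
- //       }
- //       return order;
- //   }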
-
- // Put the nodes into strongly-connected components.
- {
- NodePool::Enumerator etor_clear(mGraph.mNodes);
- while (!etor_clear.IsDone()) {
- PtrInfo *pi = etor_clear.GetNext();
- pi->mSCCIndex = 0;
- }
-
- PRUint32 currentSCC = 1;
-
- while (DFSPostOrder.GetSize() > 0) {
- GraphWalker<SetSCCVisitor>(SetSCCVisitor(currentSCC)).Walk((PtrInfo*)DFSPostOrder.PopFront());
- ++currentSCC;
- }
- }
-
- // Mark any white nodes reachable from other components as
- // grey.
- {
- NodePool::Enumerator queue(mGraph.mNodes);
- while (!queue.IsDone()) {
- PtrInfo *pi = queue.GetNext();
- if (pi->mColor != white)
- continue;
- for (EdgePool::Iterator child = pi->FirstChild(),
- child_end = pi->LastChild();
- child != child_end; ++child) {
- if ((*child)->mSCCIndex != pi->mSCCIndex) {
- GraphWalker<SetNonRootGreyVisitor>(SetNonRootGreyVisitor()).Walk(*child);
- }
- }
- }
- }
-
- {
- NodePool::Enumerator queue(mGraph.mNodes);
- while (!queue.IsDone()) {
- PtrInfo *pi = queue.GetNext();
- if (pi->mColor == white) {
- if (pi->mLangID ==
- nsIProgrammingLanguage::CPLUSPLUS &&
- mPurpleBuf.Exists(pi->mPointer)) {
- printf(
-"nsCycleCollector: %s %p in component %d\n"
-" which was reference counted during the root/unlink/unroot phase of the\n"
-" last collection was not collected due to failure to unlink (see other\n"
-" warnings) or deficiency in traverse that causes cycles referenced only\n"
-" from other cycles to require multiple rounds of cycle collection in which\n"
-" this object was likely the reachable object\n",
- pi->mName, pi->mPointer, pi->mSCCIndex);
- } else {
- printf(
-"nsCycleCollector: %s %p in component %d\n"
-" was not collected due to missing call to suspect, failure to unlink (see\n"
-" other warnings), or deficiency in traverse that causes cycles referenced\n"
-" only from other cycles to require multiple rounds of cycle collection\n",
- pi->mName, pi->mPointer, pi->mSCCIndex);
- }
- if (pi->mShortestPathToExpectedGarbage)
- PrintPathToExpectedGarbage(pi);
- }
- }
- }
- }
-
- DestroyReversedEdges();
- }
- }
-
- ClearGraph();
-
- mCollectionInProgress = false;
-
- for (PRUint32 i = 0; i <= nsIProgrammingLanguage::MAX; ++i) {
- if (mRuntimes[i])
- mRuntimes[i]->FinishCycleCollection();
- }
-}
-
-bool
-nsCycleCollector::CreateReversedEdges()
-{
- // Count the edges in the graph.
- PRUint32 edgeCount = 0;
- NodePool::Enumerator countQueue(mGraph.mNodes);
- while (!countQueue.IsDone()) {
- PtrInfo *pi = countQueue.GetNext();
- for (EdgePool::Iterator e = pi->FirstChild(), e_end = pi->LastChild();
- e != e_end; ++e, ++edgeCount) {
- }
- }
-
- // Allocate a pool to hold all of the edges.
- mGraph.mReversedEdges = new ReversedEdge[edgeCount];
- if (mGraph.mReversedEdges == nsnull) {
- NS_NOTREACHED("allocation failure creating reversed edges");
- return false;
- }
-
- // Fill in the reversed edges by scanning all forward edges.
- ReversedEdge *current = mGraph.mReversedEdges;
- NodePool::Enumerator buildQueue(mGraph.mNodes);
- while (!buildQueue.IsDone()) {
- PtrInfo *pi = buildQueue.GetNext();
- PRInt32 i = 0;
- for (EdgePool::Iterator e = pi->FirstChild(), e_end = pi->LastChild();
- e != e_end; ++e) {
- current->mTarget = pi;
- current->mEdgeName = &pi->mEdgeNames[i];
- current->mNext = (*e)->mReversedEdges;
- (*e)->mReversedEdges = current;
- ++current;
- ++i;
- }
- }
- NS_ASSERTION(current - mGraph.mReversedEdges == ptrdiff_t(edgeCount),
- "misallocation");
- return true;
-}
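-
-// The same count-then-fill reversal on a plain adjacency list, for
-// reference. std::vector is a stand-in for the single ReversedEdge
-// array allocated above:
-//
-//   #include <vector>
-//
-//   std::vector<std::vector<int> >
-//   ReverseEdges(const std::vector<std::vector<int> >& g)
-//   {
-//       // Pass 1: count incoming edges (mirrors the edgeCount loop).
-//       std::vector<size_t> indegree(g.size(), 0);
-//       for (size_t u = 0; u < g.size(); ++u)
-//           for (size_t j = 0; j < g[u].size(); ++j)
-//               ++indegree[g[u][j]];
-//       std::vector<std::vector<int> > rev(g.size());
-//       for (size_t v = 0; v < g.size(); ++v)
-//           rev[v].reserve(indegree[v]);
-//       // Pass 2: for every forward edge u->v, record v->u.
-//       for (size_t u = 0; u < g.size(); ++u)
-//           for (size_t j = 0; j < g[u].size(); ++j)
-//               rev[g[u][j]].push_back(int(u));
-//       return rev;
-//   }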
-
-void
-nsCycleCollector::DestroyReversedEdges()
-{
- NodePool::Enumerator queue(mGraph.mNodes);
- while (!queue.IsDone()) {
- PtrInfo *pi = queue.GetNext();
- pi->mReversedEdges = nsnull;
- }
-
- // array delete: the edges were allocated with new ReversedEdge[edgeCount]
- delete [] mGraph.mReversedEdges;
- mGraph.mReversedEdges = nsnull;
-}
-
-void
-nsCycleCollector::ShouldBeFreed(nsISupports *n)
-{
- if (n) {
- mExpectedGarbage.PutEntry(n);
- }
-}
-
-void
-nsCycleCollector::WasFreed(nsISupports *n)
-{
- if (n) {
- mExpectedGarbage.RemoveEntry(n);
- }
-}
-#endif
-
-
-////////////////////////
-// Memory reporter
-////////////////////////
-
-static PRInt64
-ReportCycleCollectorMem()
-{
- if (!sCollector)
- return 0;
- PRInt64 size = sizeof(nsCycleCollector) +
- sCollector->mPurpleBuf.BlocksSize() +
- sCollector->mGraph.BlocksSize();
- if (sCollector->mWhiteNodes)
- size += sCollector->mWhiteNodes->Capacity() * sizeof(PtrInfo*);
- return size;
-}
-
-NS_MEMORY_REPORTER_IMPLEMENT(CycleCollector,
- "explicit/cycle-collector",
- KIND_HEAP,
- UNITS_BYTES,
- ReportCycleCollectorMem,
- "Memory used by the cycle collector. This "
- "includes the cycle collector structure, the "
- "purple buffer, the graph, and the white nodes. "
- "The latter two are expected to be empty when the "
- "cycle collector is idle.")
-
-
-////////////////////////////////////////////////////////////////////////
-// Module public API (exported in nsCycleCollector.h)
-// Just functions that redirect into the singleton, once it's built.
-////////////////////////////////////////////////////////////////////////
-
-void
-nsCycleCollector_registerRuntime(PRUint32 langID,
- nsCycleCollectionLanguageRuntime *rt)
-{
- static bool regMemReport = true;
- if (sCollector)
- sCollector->RegisterRuntime(langID, rt);
- if (regMemReport) {
- regMemReport = false;
- NS_RegisterMemoryReporter(new NS_MEMORY_REPORTER_NAME(CycleCollector));
- }
-}
-
-nsCycleCollectionLanguageRuntime *
-nsCycleCollector_getRuntime(PRUint32 langID)
-{
- if (sCollector)
- return sCollector->GetRuntime(langID);
- return nsnull;
-}
-
-void
-nsCycleCollector_forgetRuntime(PRUint32 langID)
-{
- if (sCollector)
- sCollector->ForgetRuntime(langID);
-}
-
-
-bool
-NS_CycleCollectorSuspect(nsISupports *n)
-{
- if (sCollector)
- return sCollector->Suspect(n);
- return false;
-}
-
-bool
-NS_CycleCollectorForget(nsISupports *n)
-{
- return sCollector ? sCollector->Forget(n) : true;
-}
-
-nsPurpleBufferEntry*
-NS_CycleCollectorSuspect2(nsISupports *n)
-{
- if (sCollector)
- return sCollector->Suspect2(n);
- return nsnull;
-}
-
-bool
-NS_CycleCollectorForget2(nsPurpleBufferEntry *e)
-{
- return sCollector ? sCollector->Forget2(e) : true;
-}
-
-PRUint32
-nsCycleCollector_suspectedCount()
-{
- return sCollector ? sCollector->SuspectedCount() : 0;
-}
-
-#ifdef DEBUG
-void
-nsCycleCollector_DEBUG_shouldBeFreed(nsISupports *n)
-{
-#ifdef DEBUG_CC
- if (sCollector)
- sCollector->ShouldBeFreed(n);
-#endif
-}
-
-void
-nsCycleCollector_DEBUG_wasFreed(nsISupports *n)
-{
-#ifdef DEBUG_CC
- if (sCollector)
- sCollector->WasFreed(n);
-#endif
-}
-#endif
-
-class nsCycleCollectorRunner : public nsRunnable
-{
- nsCycleCollector *mCollector;
- nsICycleCollectorListener *mListener;
- Mutex mLock;
- CondVar mRequest;
- CondVar mReply;
- bool mRunning;
- bool mShutdown;
- bool mCollected;
-
- nsCycleCollectionJSRuntime *GetJSRuntime()
- {
- return static_cast<nsCycleCollectionJSRuntime*>
- (mCollector->mRuntimes[nsIProgrammingLanguage::JAVASCRIPT]);
- }
-
-public:
- NS_IMETHOD Run()
- {
-#ifdef XP_WIN
- TlsSetValue(gTLSThreadIDIndex,
- (void*) mozilla::threads::CycleCollector);
-#elif defined(NS_TLS)
- gTLSThreadID = mozilla::threads::CycleCollector;
-#else
- gCycleCollectorThread = PR_GetCurrentThread();
-#endif
-
- NS_ASSERTION(NS_IsCycleCollectorThread() && !NS_IsMainThread(),
- "Wrong thread!");
-
- MutexAutoLock autoLock(mLock);
-
- if (mShutdown)
- return NS_OK;
-
- mRunning = true;
-
- while (1) {
- mRequest.Wait();
-
- if (!mRunning) {
- mReply.Notify();
- return NS_OK;
- }
-
- GetJSRuntime()->NotifyEnterCycleCollectionThread();
- mCollected = mCollector->BeginCollection(mListener);
- GetJSRuntime()->NotifyLeaveCycleCollectionThread();
-
- mReply.Notify();
- }
-
- return NS_OK;
- }
-
- nsCycleCollectorRunner(nsCycleCollector *collector)
- : mCollector(collector),
- mListener(nsnull),
- mLock("cycle collector lock"),
- mRequest(mLock, "cycle collector request condvar"),
- mReply(mLock, "cycle collector reply condvar"),
- mRunning(false),
- mShutdown(false),
- mCollected(false)
- {
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
- }
-
- PRUint32 Collect(nsICycleCollectorListener* aListener)
- {
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
-
- mCollector->GCIfNeeded(false);
-
- MutexAutoLock autoLock(mLock);
-
- if (!mRunning)
- return 0;
-
- nsAutoTArray<PtrInfo*, 4000> whiteNodes;
- if (!mCollector->PrepareForCollection(&whiteNodes))
- return 0;
-
- NS_ASSERTION(!mListener, "Should have cleared this already!");
- if (aListener && NS_FAILED(aListener->Begin()))
- aListener = nsnull;
- mListener = aListener;
-
- GetJSRuntime()->NotifyLeaveMainThread();
- mRequest.Notify();
- mReply.Wait();
- GetJSRuntime()->NotifyEnterMainThread();
-
- mListener = nsnull;
-
- if (mCollected) {
- mCollected = mCollector->FinishCollection(aListener);
-
- mCollector->CleanupAfterCollection();
-
- return mCollected ? mCollector->mCollectedObjects : 0;
- }
-
- return 0;
- }
-
- void Shutdown()
- {
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
-
- MutexAutoLock autoLock(mLock);
-
- mShutdown = true;
-
- if (!mRunning)
- return;
-
- mRunning = false;
- mRequest.Notify();
- mReply.Wait();
- }
-};
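-
-// Collect()/Run() above form a synchronous cross-thread call built from
-// two condition variables sharing one lock. A minimal sketch of the same
-// handshake with standard C++ primitives (names are illustrative, not
-// Mozilla APIs):
-//
-//   #include <condition_variable>
-//   #include <mutex>
-//
-//   struct Handshake {
-//       std::mutex lock;
-//       std::condition_variable request, reply;
-//       bool haveRequest, haveReply;
-//       Handshake() : haveRequest(false), haveReply(false) {}
-//
-//       // Caller thread: post a request, block until it is answered.
-//       void Call() {
-//           std::unique_lock<std::mutex> l(lock);
-//           haveRequest = true;
-//           request.notify_one();
-//           while (!haveReply) reply.wait(l);
-//           haveReply = false;
-//       }
-//       // Worker thread: block for a request, do the work, answer.
-//       template <typename F> void Serve(F work) {
-//           std::unique_lock<std::mutex> l(lock);
-//           while (!haveRequest) request.wait(l);
-//           haveRequest = false;
-//           work();
-//           haveReply = true;
-//           reply.notify_one();
-//       }
-//   };
-//
-// The predicate loops in the sketch guard against spurious wakeups; the
-// NSPR-style code above instead relies on there being at most one
-// outstanding request or reply at a time.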
-
-// Holds a reference.
-static nsCycleCollectorRunner* sCollectorRunner;
-
-// Holds a reference.
-static nsIThread* sCollectorThread;
-
-nsresult
-nsCycleCollector_startup()
-{
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
- NS_ASSERTION(!sCollector, "Forgot to call nsCycleCollector_shutdown?");
-
- sCollector = new nsCycleCollector();
-
- nsRefPtr<nsCycleCollectorRunner> runner =
- new nsCycleCollectorRunner(sCollector);
-
- nsCOMPtr<nsIThread> thread;
- nsresult rv = NS_NewThread(getter_AddRefs(thread), runner);
- NS_ENSURE_SUCCESS(rv, rv);
-
- runner.swap(sCollectorRunner);
- thread.swap(sCollectorThread);
-
- return rv;
-}
-
-PRUint32
-nsCycleCollector_collect(nsICycleCollectorListener *aListener)
-{
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
- nsCOMPtr<nsICycleCollectorListener> listener(aListener);
- if (!aListener && sCollector && sCollector->mParams.mLogGraphs) {
- listener = new nsCycleCollectorLogger();
- }
-
- if (sCollectorRunner)
- return sCollectorRunner->Collect(listener);
- return sCollector ? sCollector->Collect(1, listener) : 0;
-}
-
-void
-nsCycleCollector_shutdownThreads()
-{
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
- if (sCollectorRunner) {
- nsRefPtr<nsCycleCollectorRunner> runner;
- runner.swap(sCollectorRunner);
- runner->Shutdown();
- }
-
- if (sCollectorThread) {
- nsCOMPtr<nsIThread> thread;
- thread.swap(sCollectorThread);
- thread->Shutdown();
- }
-}
-
-void
-nsCycleCollector_shutdown()
-{
- NS_ASSERTION(NS_IsMainThread(), "Wrong thread!");
- NS_ASSERTION(!sCollectorRunner, "Should have finished before!");
- NS_ASSERTION(!sCollectorThread, "Should have finished before!");
-
- if (sCollector) {
- sCollector->Shutdown();
- delete sCollector;
- sCollector = nsnull;
- }
-}
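-
-// Expected call order for the module API above, as driven by the
-// embedder (all on the main thread):
-//
-//   nsCycleCollector_startup();         // create sCollector + CC thread
-//   ...
-//   nsCycleCollector_collect(nsnull);   // any number of times
-//   ...
-//   nsCycleCollector_shutdownThreads(); // join the CC thread first
-//   nsCycleCollector_shutdown();        // final collection, delete singleton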
diff --git a/tests/cpp/nsIOThreadPool.cpp b/tests/cpp/nsIOThreadPool.cpp
deleted file mode 100644
index 36d2042..0000000
--- a/tests/cpp/nsIOThreadPool.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is Mozilla.
- *
- * The Initial Developer of the Original Code is IBM Corporation.
- * Portions created by IBM Corporation are Copyright (C) 2003
- * IBM Corporation. All Rights Reserved.
- *
- * Contributor(s):
- * IBM Corp.
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either the GNU General Public License Version 2 or later (the "GPL"), or
- * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-#include "nsIEventTarget.h"
-#include "nsIServiceManager.h"
-#include "nsIObserverService.h"
-#include "nsIObserver.h"
-#include "nsAutoLock.h"
-#include "nsCOMPtr.h"
-#include "prclist.h"
-#include "prlog.h"
-
-#if defined(PR_LOGGING)
-//
-// NSPR_LOG_MODULES=nsIOThreadPool:5
-//
-static PRLogModuleInfo *gIOThreadPoolLog = nsnull;
-#endif
-#define LOG(args) PR_LOG(gIOThreadPoolLog, PR_LOG_DEBUG, args)
-
-// this number specifies the maximum number of threads.
-#define MAX_THREADS 4
-
-// this number specifies how long to wait before killing an idle thread. it's
-// important to pick a large enough value here to minimize thread churn.
-#define IDLE_TIMEOUT PR_SecondsToInterval(60)
-
-#define PLEVENT_FROM_LINK(_link) \
- ((PLEvent*) ((char*) (_link) - offsetof(PLEvent, link)))
-
-//-----------------------------------------------------------------------------
-// pool of joinable threads used for general purpose i/o tasks
-//
-// the main entry point to this class is nsIEventTarget. events posted to
-// the thread pool are dispatched on one of the threads. a variable number
-// of threads are maintained. the threads die off if they remain idle for
- more than IDLE_TIMEOUT. the thread pool shuts down when it receives
-// the "xpcom-shutdown" event.
-//-----------------------------------------------------------------------------
-
-class nsIOThreadPool : public nsIEventTarget
- , public nsIObserver
-{
-public:
- NS_DECL_ISUPPORTS
- NS_DECL_NSIEVENTTARGET
- NS_DECL_NSIOBSERVER
-
- nsresult Init();
- void Shutdown();
-
-private:
- virtual ~nsIOThreadPool();
-
- PR_STATIC_CALLBACK(void) ThreadFunc(void *);
-
- // mLock protects all members below (except during Init and Shutdown)
- PRLock *mLock;
- PRCondVar *mIdleThreadCV; // notified to wake up an idle thread
- PRCondVar *mExitThreadCV; // notified when a thread exits
- PRUint32 mNumThreads; // number of active + idle threads
- PRUint32 mNumIdleThreads; // number of idle threads
- PRCList mEventQ; // queue of PLEvent structs
- PRBool mShutdown; // set to true if shutting down
-};
-
-NS_IMPL_THREADSAFE_ISUPPORTS2(nsIOThreadPool, nsIEventTarget, nsIObserver)
-
-nsresult
-nsIOThreadPool::Init()
-{
-#if defined(PR_LOGGING)
- if (!gIOThreadPoolLog)
- gIOThreadPoolLog = PR_NewLogModule("nsIOThreadPool");
-#endif
-
- mNumThreads = 0;
- mNumIdleThreads = 0;
- mShutdown = PR_FALSE;
-
- mLock = PR_NewLock();
- if (!mLock)
- return NS_ERROR_OUT_OF_MEMORY;
-
- mIdleThreadCV = PR_NewCondVar(mLock);
- if (!mIdleThreadCV)
- return NS_ERROR_OUT_OF_MEMORY;
-
- mExitThreadCV = PR_NewCondVar(mLock);
- if (!mExitThreadCV)
- return NS_ERROR_OUT_OF_MEMORY;
-
- PR_INIT_CLIST(&mEventQ);
-
- // we want to shutdown the i/o thread pool at xpcom-shutdown time...
- nsCOMPtr<nsIObserverService> os = do_GetService("@mozilla.org/observer-service;1");
- if (os)
- os->AddObserver(this, "xpcom-shutdown", PR_FALSE);
- return NS_OK;
-}
-
-nsIOThreadPool::~nsIOThreadPool()
-{
- LOG(("Destroying nsIOThreadPool @%p\n", this));
-
-#ifdef DEBUG
- NS_ASSERTION(PR_CLIST_IS_EMPTY(&mEventQ), "leaking events");
- NS_ASSERTION(mNumThreads == 0, "leaking thread(s)");
-#endif
-
- if (mIdleThreadCV)
- PR_DestroyCondVar(mIdleThreadCV);
- if (mExitThreadCV)
- PR_DestroyCondVar(mExitThreadCV);
- if (mLock)
- PR_DestroyLock(mLock);
-}
-
-void
-nsIOThreadPool::Shutdown()
-{
- LOG(("nsIOThreadPool::Shutdown\n"));
-
- // synchronize with background threads...
- {
- nsAutoLock lock(mLock);
- mShutdown = PR_TRUE;
-
- PR_NotifyAllCondVar(mIdleThreadCV);
-
- while (mNumThreads != 0)
- PR_WaitCondVar(mExitThreadCV, PR_INTERVAL_NO_TIMEOUT);
- }
-}
-
-NS_IMETHODIMP
-nsIOThreadPool::PostEvent(PLEvent *event)
-{
- LOG(("nsIOThreadPool::PostEvent [event=%p]\n", event));
-
- nsAutoLock lock(mLock);
-
- // if we are shutting down, then prevent additional events from being
- // added to the queue...
- if (mShutdown)
- return NS_ERROR_UNEXPECTED;
-
- nsresult rv = NS_OK;
-
- PR_APPEND_LINK(&event->link, &mEventQ);
-
- // now, look for an available idle thread...
- if (mNumIdleThreads)
- PR_NotifyCondVar(mIdleThreadCV); // wake up an idle thread
-
- // or, try to create a new thread unless we have reached our maximum...
- else if (mNumThreads < MAX_THREADS) {
- NS_ADDREF_THIS(); // the thread owns a reference to us
- mNumThreads++;
- PRThread *thread = PR_CreateThread(PR_USER_THREAD,
- ThreadFunc,
- this,
- PR_PRIORITY_NORMAL,
- PR_GLOBAL_THREAD,
- PR_UNJOINABLE_THREAD,
- 0);
- if (!thread) {
- NS_RELEASE_THIS();
- mNumThreads--;
- rv = NS_ERROR_OUT_OF_MEMORY;
- }
- }
- // else, we expect one of the active threads to process the event queue.
-
- return rv;
-}
-
-NS_IMETHODIMP
-nsIOThreadPool::IsOnCurrentThread(PRBool *result)
-{
- // no one should be calling this method. if this assertion gets hit,
- // then we need to think carefully about what this method should be
- // returning.
- NS_NOTREACHED("nsIOThreadPool::IsOnCurrentThread");
-
- // fudging this a bit since we actually cover several threads...
- *result = PR_FALSE;
- return NS_OK;
-}
-
-NS_IMETHODIMP
-nsIOThreadPool::Observe(nsISupports *, const char *topic, const PRUnichar *)
-{
- NS_ASSERTION(strcmp(topic, "xpcom-shutdown") == 0, "unexpected topic");
- Shutdown();
- return NS_OK;
-}
-
-void
-nsIOThreadPool::ThreadFunc(void *arg)
-{
- nsIOThreadPool *pool = (nsIOThreadPool *) arg;
-
- LOG(("entering ThreadFunc\n"));
-
- {
- nsAutoLock lock(pool->mLock);
-
- for (;;) {
- PRIntervalTime start = PR_IntervalNow(), timeout = IDLE_TIMEOUT;
- //
- // wait for one or more of the following to occur:
- // (1) the event queue has an event to process
- // (2) the shutdown flag has been set
- // (3) the thread has been idle for too long
- //
- // PR_WaitCondVar will return when any of these conditions is true.
- //
- while (PR_CLIST_IS_EMPTY(&pool->mEventQ) && !pool->mShutdown) {
- pool->mNumIdleThreads++;
- PR_WaitCondVar(pool->mIdleThreadCV, timeout);
- pool->mNumIdleThreads--;
-
- PRIntervalTime delta = PR_IntervalNow() - start;
- if (delta >= timeout)
- break;
- timeout -= delta;
- start += delta;
- }
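-
- // Example of the bookkeeping above: with IDLE_TIMEOUT = 60s, a
- // thread woken after 25s with the queue still empty computes
- // delta = 25s, shrinks timeout to 35s, advances start, and waits
- // again; the loop only ends once the accumulated idle time reaches
- // the full 60s, an event arrives, or shutdown is flagged.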
-
- // if the queue is still empty, then kill this thread (either we
- // are shutting down or the thread exceeded the idle timeout)...
- if (PR_CLIST_IS_EMPTY(&pool->mEventQ))
- break;
-
- // handle one event at a time: we don't want this one thread to hog
- // all the events while other threads may be able to help out ;-)
- do {
- PLEvent *event = PLEVENT_FROM_LINK(PR_LIST_HEAD(&pool->mEventQ));
- PR_REMOVE_AND_INIT_LINK(&event->link);
-
- LOG(("event:%p\n", event));
-
- // release lock!
- lock.unlock();
- PL_HandleEvent(event);
- lock.lock();
- }
- while (!PR_CLIST_IS_EMPTY(&pool->mEventQ));
- }
-
- // thread is going away...
- pool->mNumThreads--;
- PR_NotifyCondVar(pool->mExitThreadCV);
- }
-
- // release our reference to the pool
- NS_RELEASE(pool);
-
- LOG(("leaving ThreadFunc\n"));
-}
-
-//-----------------------------------------------------------------------------
-
-NS_METHOD
-net_NewIOThreadPool(nsISupports *outer, REFNSIID iid, void **result)
-{
- nsIOThreadPool *pool = new nsIOThreadPool();
- if (!pool)
- return NS_ERROR_OUT_OF_MEMORY;
- NS_ADDREF(pool);
- nsresult rv = pool->Init();
- if (NS_SUCCEEDED(rv))
- rv = pool->QueryInterface(iid, result);
- NS_RELEASE(pool);
- return rv;
-}
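-
-// Refcount walk-through for the factory above: NS_ADDREF takes the new
-// pool to 1, a successful QueryInterface takes it to 2 (the caller's
-// reference in *result), and the trailing NS_RELEASE drops it back to 1,
-// leaving the caller as sole owner. If Init or QueryInterface fails, the
-// same NS_RELEASE takes it to 0 and destroys the pool. The `outer`
-// aggregation parameter is simply ignored here.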
diff --git a/tests/cpp/nsTextFrameThebes.cpp b/tests/cpp/nsTextFrameThebes.cpp
deleted file mode 100644
index 014368a..0000000
--- a/tests/cpp/nsTextFrameThebes.cpp
+++ /dev/null
@@ -1,6873 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* ***** BEGIN LICENSE BLOCK *****
- * Version: MPL 1.1/GPL 2.0/LGPL 2.1
- *
- * The contents of this file are subject to the Mozilla Public License Version
- * 1.1 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- * http://www.mozilla.org/MPL/
- *
- * Software distributed under the License is distributed on an "AS IS" basis,
- * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
- * for the specific language governing rights and limitations under the
- * License.
- *
- * The Original Code is mozilla.org code.
- *
- * The Initial Developer of the Original Code is
- * Netscape Communications Corporation.
- * Portions created by the Initial Developer are Copyright (C) 1998
- * the Initial Developer. All Rights Reserved.
- *
- * Contributor(s):
- * Robert O'Callahan
- * Roger B. Sidje
- * Pierre Phaneuf
- * Prabhat Hegde
- * Tomi Leppikangas
- * Roland Mainz
- * Daniel Glazman
- * Neil Deakin
- * Masayuki Nakano
- * Mats Palmgren
- * Uri Bernstein
- * Stephen Blackheath
- * Michael Ventnor
- * Ehsan Akhgari
- *
- * Alternatively, the contents of this file may be used under the terms of
- * either of the GNU General Public License Version 2 or later (the "GPL"),
- * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
- * in which case the provisions of the GPL or the LGPL are applicable instead
- * of those above. If you wish to allow use of your version of this file only
- * under the terms of either the GPL or the LGPL, and not to allow others to
- * use your version of this file under the terms of the MPL, indicate your
- * decision by deleting the provisions above and replace them with the notice
- * and other provisions required by the GPL or the LGPL. If you do not delete
- * the provisions above, a recipient may use your version of this file under
- * the terms of any one of the MPL, the GPL or the LGPL.
- *
- * ***** END LICENSE BLOCK ***** */
-
-/* rendering object for textual content of elements */
-
-#include "nsCOMPtr.h"
-#include "nsHTMLParts.h"
-#include "nsCRT.h"
-#include "nsSplittableFrame.h"
-#include "nsLineLayout.h"
-#include "nsString.h"
-#include "nsUnicharUtils.h"
-#include "nsPresContext.h"
-#include "nsIContent.h"
-#include "nsStyleConsts.h"
-#include "nsStyleContext.h"
-#include "nsCoord.h"
-#include "nsIFontMetrics.h"
-#include "nsIRenderingContext.h"
-#include "nsIPresShell.h"
-#include "nsITimer.h"
-#include "nsTArray.h"
-#include "nsIDOMText.h"
-#include "nsIDocument.h"
-#include "nsIDeviceContext.h"
-#include "nsCSSPseudoElements.h"
-#include "nsCompatibility.h"
-#include "nsCSSColorUtils.h"
-#include "nsLayoutUtils.h"
-#include "nsDisplayList.h"
-#include "nsFrame.h"
-#include "nsTextFrameUtils.h"
-#include "nsTextRunTransformations.h"
-#include "nsFrameManager.h"
-#include "nsTextFrameTextRunCache.h"
-#include "nsExpirationTracker.h"
-#include "nsTextFrame.h"
-#include "nsICaseConversion.h"
-#include "nsIUGenCategory.h"
-#include "nsUnicharUtilCIID.h"
-
-#include "nsTextFragment.h"
-#include "nsGkAtoms.h"
-#include "nsFrameSelection.h"
-#include "nsISelection.h"
-#include "nsIDOMRange.h"
-#include "nsILookAndFeel.h"
-#include "nsCSSRendering.h"
-#include "nsContentUtils.h"
-#include "nsLineBreaker.h"
-#include "nsIWordBreaker.h"
-#include "nsGenericDOMDataNode.h"
-
-#include "nsILineIterator.h"
-
-#include "nsIServiceManager.h"
-#ifdef ACCESSIBILITY
-#include "nsIAccessible.h"
-#include "nsIAccessibilityService.h"
-#endif
-#include "nsAutoPtr.h"
-#include "nsStyleSet.h"
-
-#include "nsBidiFrames.h"
-#include "nsBidiPresUtils.h"
-#include "nsBidiUtils.h"
-
-#include "nsIThebesFontMetrics.h"
-#include "gfxFont.h"
-#include "gfxContext.h"
-#include "gfxTextRunWordCache.h"
-#include "gfxImageSurface.h"
-
-#ifdef NS_DEBUG
-#undef NOISY_BLINK
-#undef NOISY_REFLOW
-#undef NOISY_TRIM
-#else
-#undef NOISY_BLINK
-#undef NOISY_REFLOW
-#undef NOISY_TRIM
-#endif
-
-// The following flags are set during reflow
-
-// This bit is set on the first frame in a continuation indicating
-// that it was chopped short because of :first-letter style.
-#define TEXT_FIRST_LETTER 0x00100000
-// This bit is set on frames that are logically adjacent to the start of the
-// line (i.e. no prior frame on line with actual displayed in-flow content).
-#define TEXT_START_OF_LINE 0x00200000
-// This bit is set on frames that are logically adjacent to the end of the
- line (i.e. no following frame on line with actual displayed in-flow content).
-#define TEXT_END_OF_LINE 0x00400000
-// This bit is set on frames that end with a hyphenated break.
-#define TEXT_HYPHEN_BREAK 0x00800000
-// This bit is set on frames that trimmed trailing whitespace characters when
-// calculating their width during reflow.
-#define TEXT_TRIMMED_TRAILING_WHITESPACE 0x01000000
-// This bit is set on frames that have justification enabled. We record
-// this in a state bit because we don't always have the containing block
-// easily available to check text-align on.
-#define TEXT_JUSTIFICATION_ENABLED 0x02000000
-// Set this bit if the textframe has overflow area for IME/spellcheck underline.
-#define TEXT_SELECTION_UNDERLINE_OVERFLOWED 0x04000000
-
-#define TEXT_REFLOW_FLAGS \
- (TEXT_FIRST_LETTER|TEXT_START_OF_LINE|TEXT_END_OF_LINE|TEXT_HYPHEN_BREAK| \
- TEXT_TRIMMED_TRAILING_WHITESPACE|TEXT_JUSTIFICATION_ENABLED| \
- TEXT_HAS_NONCOLLAPSED_CHARACTERS|TEXT_SELECTION_UNDERLINE_OVERFLOWED)
-
-// Cache bits for IsEmpty().
-// Set this bit if the textframe is known to be only collapsible whitespace.
-#define TEXT_IS_ONLY_WHITESPACE 0x08000000
-// Set this bit if the textframe is known to be not only collapsible whitespace.
-#define TEXT_ISNOT_ONLY_WHITESPACE 0x10000000
-
-#define TEXT_WHITESPACE_FLAGS 0x18000000
-
-// nsTextFrame.h has
-// #define TEXT_BLINK_ON_OR_PRINTING 0x20000000
-
-// Set when this text frame is mentioned in the userdata for a textrun
-#define TEXT_IN_TEXTRUN_USER_DATA 0x40000000
-
-// nsTextFrame.h has
-// #define TEXT_HAS_NONCOLLAPSED_CHARACTERS 0x80000000
-
-/*
- * Some general notes
- *
- * Text frames delegate work to gfxTextRun objects. The gfxTextRun object
- * transforms text to positioned glyphs. It can report the geometry of the
- * glyphs and paint them. Text frames configure gfxTextRuns by providing text,
- * spacing, language, and other information.
- *
- * A gfxTextRun can cover more than one DOM text node. This is necessary to
- * get kerning, ligatures and shaping for text that spans multiple text nodes
- * but is all the same font. The userdata for a gfxTextRun object is a
- * TextRunUserData* or an nsIFrame*.
- *
- * We go to considerable effort to make sure things work even if in-flow
- * siblings have different style contexts (i.e., first-letter and first-line).
- *
- * Our convention is that unsigned integer character offsets are offsets into
- * the transformed string. Signed integer character offsets are offsets into
- * the DOM string.
- *
- * XXX currently we don't handle hyphenated breaks between text frames where the
- * hyphen occurs at the end of the first text frame, e.g.
- * Kit-
- * ty
- */
-
-/**
- * We use an array of these objects to record which text frames
- * are associated with the textrun. mStartFrame is the start of a list of
- * text frames. Some sequence of its continuations are covered by the textrun.
- * A content textnode can have at most one TextRunMappedFlow associated with it
- * for a given textrun.
- *
- * mDOMOffsetToBeforeTransformOffset is added to DOM offsets for those frames to obtain
- * the offset into the before-transformation text of the textrun. It can be
- * positive (when a text node starts in the middle of a text run) or
- * negative (when a text run starts in the middle of a text node). Of course
- * it can also be zero.
- */
-struct TextRunMappedFlow {
- nsTextFrame* mStartFrame;
- PRInt32 mDOMOffsetToBeforeTransformOffset;
- // The text mapped starts at mStartFrame->GetContentOffset() and is this long
- PRUint32 mContentLength;
-};
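-
-// Worked example of the mapping above (values are illustrative): if a
-// textrun's text begins at DOM offset 6 of a 10-character text node,
-// then mDOMOffsetToBeforeTransformOffset is -6, and DOM offset 8 maps
-// to 8 + (-6) = 2 in the run's before-transformation text. A text node
-// that instead starts in the middle of the run gets a positive value.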
-
-/**
- * This is our user data for the textrun, when textRun->GetFlags() does not
- * have TEXT_SIMPLE_FLOW set. When TEXT_SIMPLE_FLOW is set, there is just one
- * flow, the textrun's user data pointer is a pointer to mStartFrame
- * for that flow, mDOMOffsetToBeforeTransformOffset is zero, and mContentLength
- * is the length of the text node.
- */
-struct TextRunUserData {
- TextRunMappedFlow* mMappedFlows;
- PRInt32 mMappedFlowCount;
-
- PRUint32 mLastFlowIndex;
-};
-
-/**
- * This helper object computes colors used for painting, and also IME
- * underline information. The data is computed lazily and cached as necessary.
- * These live for just the duration of one paint operation.
- */
-class nsTextPaintStyle {
-public:
- nsTextPaintStyle(nsTextFrame* aFrame);
-
- nscolor GetTextColor();
- /**
- * Compute the colors for normally-selected text. Returns false if
- * the normal selection is not being displayed.
- */
- PRBool GetSelectionColors(nscolor* aForeColor,
- nscolor* aBackColor);
- void GetHighlightColors(nscolor* aForeColor,
- nscolor* aBackColor);
- void GetIMESelectionColors(PRInt32 aIndex,
- nscolor* aForeColor,
- nscolor* aBackColor);
- // if this returns PR_FALSE, we don't need to draw underline.
- PRBool GetSelectionUnderlineForPaint(PRInt32 aIndex,
- nscolor* aLineColor,
- float* aRelativeSize,
- PRUint8* aStyle);
-
- // if this returns PR_FALSE, we don't need to draw underline.
- static PRBool GetSelectionUnderline(nsPresContext* aPresContext,
- PRInt32 aIndex,
- nscolor* aLineColor,
- float* aRelativeSize,
- PRUint8* aStyle);
-
- nsPresContext* PresContext() { return mPresContext; }
-
- enum {
- eIndexRawInput = 0,
- eIndexSelRawText,
- eIndexConvText,
- eIndexSelConvText,
- eIndexSpellChecker
- };
-
- static PRInt32 GetUnderlineStyleIndexForSelectionType(PRInt32 aSelectionType)
- {
- switch (aSelectionType) {
- case nsISelectionController::SELECTION_IME_RAWINPUT:
- return eIndexRawInput;
- case nsISelectionController::SELECTION_IME_SELECTEDRAWTEXT:
- return eIndexSelRawText;
- case nsISelectionController::SELECTION_IME_CONVERTEDTEXT:
- return eIndexConvText;
- case nsISelectionController::SELECTION_IME_SELECTEDCONVERTEDTEXT:
- return eIndexSelConvText;
- case nsISelectionController::SELECTION_SPELLCHECK:
- return eIndexSpellChecker;
- default:
- NS_WARNING("non-IME selection type");
- return eIndexRawInput;
- }
- }
-
-protected:
- nsTextFrame* mFrame;
- nsPresContext* mPresContext;
- PRPackedBool mInitCommonColors;
- PRPackedBool mInitSelectionColors;
-
- // Selection data
-
- PRInt16 mSelectionStatus; // see nsIDocument.h SetDisplaySelection()
- nscolor mSelectionTextColor;
- nscolor mSelectionBGColor;
-
- // Common data
-
- PRInt32 mSufficientContrast;
- nscolor mFrameBackgroundColor;
-
- // selection colors and underline info, the colors are resolved colors,
- // i.e., the foreground color and background color are swapped if it's needed.
- // And also line color will be resolved from them.
- struct nsSelectionStyle {
- PRBool mInit;
- nscolor mTextColor;
- nscolor mBGColor;
- nscolor mUnderlineColor;
- PRUint8 mUnderlineStyle;
- float mUnderlineRelativeSize;
- };
- nsSelectionStyle mSelectionStyle[5];
-
- // Color initializations
- void InitCommonColors();
- PRBool InitSelectionColors();
-
- nsSelectionStyle* GetSelectionStyle(PRInt32 aIndex);
- void InitSelectionStyle(PRInt32 aIndex);
-
- PRBool EnsureSufficientContrast(nscolor *aForeColor, nscolor *aBackColor);
-
- nscolor GetResolvedForeColor(nscolor aColor, nscolor aDefaultForeColor,
- nscolor aBackColor);
-};
-
-static void
-DestroyUserData(void* aUserData)
-{
- TextRunUserData* userData = static_cast<TextRunUserData*>(aUserData);
- if (userData) {
- nsMemory::Free(userData);
- }
-}
-
-// Remove the textrun from the frame continuation chain starting at aFrame,
-// which should be marked as a textrun owner.
-static void
-ClearAllTextRunReferences(nsTextFrame* aFrame, gfxTextRun* aTextRun)
-{
- aFrame->RemoveStateBits(TEXT_IN_TEXTRUN_USER_DATA);
- while (aFrame) {
- NS_ASSERTION(aFrame->GetType() == nsGkAtoms::textFrame,
- "Bad frame");
- if (aFrame->GetTextRun() != aTextRun)
- break;
- aFrame->SetTextRun(nsnull);
- aFrame = static_cast<nsTextFrame*>(aFrame->GetNextContinuation());
- }
-}
-
- // Figure out which frames own this textrun and clear their references to it.
-static void
-UnhookTextRunFromFrames(gfxTextRun* aTextRun)
-{
- if (!aTextRun->GetUserData())
- return;
-
- // Kill all references to the textrun. It could be referenced by any of its
- // owners, and all their in-flows.
- if (aTextRun->GetFlags() & nsTextFrameUtils::TEXT_IS_SIMPLE_FLOW) {
- nsIFrame* firstInFlow = static_cast<nsIFrame*>(aTextRun->GetUserData());
- ClearAllTextRunReferences(static_cast<nsTextFrame*>(firstInFlow), aTextRun);
- } else {
- TextRunUserData* userData =
- static_cast<TextRunUserData*>(aTextRun->GetUserData());
- PRInt32 i;
- for (i = 0; i < userData->mMappedFlowCount; ++i) {
- ClearAllTextRunReferences(userData->mMappedFlows[i].mStartFrame, aTextRun);
- }
- DestroyUserData(userData);
- }
- aTextRun->SetUserData(nsnull);
-}
-
-class FrameTextRunCache;
-
-static FrameTextRunCache *gTextRuns = nsnull;
-
-/*
- * Cache textruns and expire them after 3*10 seconds of no use.
- */
- class FrameTextRunCache : public nsExpirationTracker<gfxTextRun,3> {
-public:
- enum { TIMEOUT_SECONDS = 10 };
- FrameTextRunCache()
- : nsExpirationTracker<gfxTextRun,3>(TIMEOUT_SECONDS*1000) {}
- ~FrameTextRunCache() {
- AgeAllGenerations();
- }
-
- void RemoveFromCache(gfxTextRun* aTextRun) {
- if (aTextRun->GetExpirationState()->IsTracked()) {
- RemoveObject(aTextRun);
- }
- if (aTextRun->GetFlags() & gfxTextRunWordCache::TEXT_IN_CACHE) {
- gfxTextRunWordCache::RemoveTextRun(aTextRun);
- }
- }
-
- // This gets called when the timeout has expired on a gfxTextRun
- virtual void NotifyExpired(gfxTextRun* aTextRun) {
- UnhookTextRunFromFrames(aTextRun);
- RemoveFromCache(aTextRun);
- delete aTextRun;
- }
-};
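-
-// With nsExpirationTracker<gfxTextRun,3> and TIMEOUT_SECONDS = 10 the
-// tracker keeps 3 generations and advances one generation every 10
-// seconds, so a textrun that is never marked used again is destroyed
-// after roughly 20-30 seconds (the "3*10 seconds" noted above). Any use
-// that re-marks the run moves it back to the newest generation.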
-
-static gfxTextRun *
-MakeTextRun(const PRUnichar *aText, PRUint32 aLength,
- gfxFontGroup *aFontGroup, const gfxFontGroup::Parameters* aParams,
- PRUint32 aFlags)
-{
- nsAutoPtr<gfxTextRun> textRun;
- if (aLength == 0) {
- textRun = aFontGroup->MakeEmptyTextRun(aParams, aFlags);
- } else if (aLength == 1 && aText[0] == ' ') {
- textRun = aFontGroup->MakeSpaceTextRun(aParams, aFlags);
- } else {
- textRun = gfxTextRunWordCache::MakeTextRun(aText, aLength, aFontGroup,
- aParams, aFlags);
- }
- if (!textRun)
- return nsnull;
- nsresult rv = gTextRuns->AddObject(textRun);
- if (NS_FAILED(rv)) {
- gTextRuns->RemoveFromCache(textRun);
- return nsnull;
- }
- return textRun.forget();
-}
-
-static gfxTextRun *
-MakeTextRun(const PRUint8 *aText, PRUint32 aLength,
- gfxFontGroup *aFontGroup, const gfxFontGroup::Parameters* aParams,
- PRUint32 aFlags)
-{
- nsAutoPtr<gfxTextRun> textRun;
- if (aLength == 0) {
- textRun = aFontGroup->MakeEmptyTextRun(aParams, aFlags);
- } else if (aLength == 1 && aText[0] == ' ') {
- textRun = aFontGroup->MakeSpaceTextRun(aParams, aFlags);
- } else {
- textRun = gfxTextRunWordCache::MakeTextRun(aText, aLength, aFontGroup,
- aParams, aFlags);
- }
- if (!textRun)
- return nsnull;
- nsresult rv = gTextRuns->AddObject(textRun);
- if (NS_FAILED(rv)) {
- gTextRuns->RemoveFromCache(textRun);
- return nsnull;
- }
- return textRun.forget();
-}
-
-nsresult
-nsTextFrameTextRunCache::Init() {
- gTextRuns = new FrameTextRunCache();
- return gTextRuns ? NS_OK : NS_ERROR_OUT_OF_MEMORY;
-}
-
-void
-nsTextFrameTextRunCache::Shutdown() {
- delete gTextRuns;
- gTextRuns = nsnull;
-}
-
-PRInt32 nsTextFrame::GetContentEnd() const {
- nsTextFrame* next = static_cast<nsTextFrame*>(GetNextContinuation());
- return next ? next->GetContentOffset() : GetFragment()->GetLength();
-}
-
-PRInt32 nsTextFrame::GetInFlowContentLength() {
-#ifdef IBMBIDI
- nsTextFrame* nextBidi = nsnull;
- PRInt32 start = -1, end;
-
- if (mState & NS_FRAME_IS_BIDI) {
- nextBidi = static_cast<nsTextFrame*>(GetLastInFlow()->GetNextContinuation());
- if (nextBidi) {
- nextBidi->GetOffsets(start, end);
- return start - mContentOffset;
- }
- }
-#endif //IBMBIDI
- return GetFragment()->GetLength() - mContentOffset;
-}
-
-// Smarter versions of XP_IS_SPACE.
-// Unicode is really annoying; sometimes a space character isn't whitespace ---
- // when it combines with another character.
-// So we have several versions of IsSpace for use in different contexts.
-
-static PRBool IsSpaceCombiningSequenceTail(const nsTextFragment* aFrag, PRUint32 aPos)
-{
- NS_ASSERTION(aPos <= aFrag->GetLength(), "Bad offset");
- if (!aFrag->Is2b())
- return PR_FALSE;
- return nsTextFrameUtils::IsSpaceCombiningSequenceTail(
- aFrag->Get2b() + aPos, aFrag->GetLength() - aPos);
-}
-
-// Check whether aPos is a space for CSS 'word-spacing' purposes
-static PRBool IsCSSWordSpacingSpace(const nsTextFragment* aFrag,
- PRUint32 aPos, const nsStyleText* aStyleText)
-{
- NS_ASSERTION(aPos < aFrag->GetLength(), "No text for IsSpace!");
-
- PRUnichar ch = aFrag->CharAt(aPos);
- switch (ch) {
- case ' ':
- case CH_NBSP:
- return !IsSpaceCombiningSequenceTail(aFrag, aPos + 1);
- case '\t': return !aStyleText->WhiteSpaceIsSignificant();
- case '\n': return !aStyleText->NewlineIsSignificant();
- default: return PR_FALSE;
- }
-}
-
-// Check whether the string aChars/aLength starts with space that's
-// trimmable according to CSS 'white-space:normal/nowrap'.
-static PRBool IsTrimmableSpace(const PRUnichar* aChars, PRUint32 aLength)
-{
- NS_ASSERTION(aLength > 0, "No text for IsSpace!");
-
- PRUnichar ch = *aChars;
- if (ch == ' ')
- return !nsTextFrameUtils::IsSpaceCombiningSequenceTail(aChars + 1, aLength - 1);
- return ch == '\t' || ch == '\f' || ch == '\n';
-}
-
-// Check whether the character aCh is trimmable according to CSS
-// 'white-space:normal/nowrap'
-static PRBool IsTrimmableSpace(char aCh)
-{
- return aCh == ' ' || aCh == '\t' || aCh == '\f' || aCh == '\n';
-}
-
-static PRBool IsTrimmableSpace(const nsTextFragment* aFrag, PRUint32 aPos,
- const nsStyleText* aStyleText)
-{
- NS_ASSERTION(aPos < aFrag->GetLength(), "No text for IsSpace!");
-
- switch (aFrag->CharAt(aPos)) {
- case ' ': return !aStyleText->WhiteSpaceIsSignificant() &&
- !IsSpaceCombiningSequenceTail(aFrag, aPos + 1);
- case '\n': return !aStyleText->NewlineIsSignificant();
- case '\t':
- case '\f': return !aStyleText->WhiteSpaceIsSignificant();
- default: return PR_FALSE;
- }
-}
-
-static PRBool IsSelectionSpace(const nsTextFragment* aFrag, PRUint32 aPos)
-{
- NS_ASSERTION(aPos < aFrag->GetLength(), "No text for IsSpace!");
- PRUnichar ch = aFrag->CharAt(aPos);
- if (ch == ' ' || ch == CH_NBSP)
- return !IsSpaceCombiningSequenceTail(aFrag, aPos + 1);
- return ch == '\t' || ch == '\n' || ch == '\f';
-}
-
-// Count the amount of trimmable whitespace (as per CSS
-// 'white-space:normal/nowrap') in a text fragment. The first
-// character is at offset aStartOffset; the maximum number of characters
-// to check is aLength. aDirection is -1 or 1 depending on whether we should
-// progress backwards or forwards.
-static PRUint32
-GetTrimmableWhitespaceCount(const nsTextFragment* aFrag,
- PRInt32 aStartOffset, PRInt32 aLength,
- PRInt32 aDirection)
-{
- PRInt32 count = 0;
- if (aFrag->Is2b()) {
- const PRUnichar* str = aFrag->Get2b() + aStartOffset;
- PRInt32 fragLen = aFrag->GetLength() - aStartOffset;
- for (; count < aLength; ++count) {
- if (!IsTrimmableSpace(str, fragLen))
- break;
- str += aDirection;
- fragLen -= aDirection;
- }
- } else {
- const char* str = aFrag->Get1b() + aStartOffset;
- for (; count < aLength; ++count) {
- if (!IsTrimmableSpace(*str))
- break;
- str += aDirection;
- }
- }
- return count;
-}
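-
-// Usage sketch (illustrative): a leading trim scans forward from the
-// first character, GetTrimmableWhitespaceCount(frag, start, len, 1);
-// a trailing trim scans backward from the last one,
-// GetTrimmableWhitespaceCount(frag, start + len - 1, len, -1).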
-
-static PRBool
-IsAllWhitespace(const nsTextFragment* aFrag, PRBool aAllowNewline)
-{
- if (aFrag->Is2b())
- return PR_FALSE;
- PRInt32 len = aFrag->GetLength();
- const char* str = aFrag->Get1b();
- for (PRInt32 i = 0; i < len; ++i) {
- char ch = str[i];
- if (ch == ' ' || ch == '\t' || (ch == '\n' && aAllowNewline))
- continue;
- return PR_FALSE;
- }
- return PR_TRUE;
-}
-
-/**
- * This class accumulates state as we scan a paragraph of text. It detects
- * textrun boundaries (changes from text to non-text, hard
- * line breaks, and font changes) and builds a gfxTextRun at each boundary.
- * It also detects linebreaker run boundaries (changes from text to non-text,
- * and hard line breaks) and at each boundary runs the linebreaker to compute
- * potential line breaks. It also records actual line breaks to store them in
- * the textruns.
- */
-class BuildTextRunsScanner {
-public:
- BuildTextRunsScanner(nsPresContext* aPresContext, gfxContext* aContext,
- nsIFrame* aLineContainer) :
- mCurrentFramesAllSameTextRun(nsnull),
- mContext(aContext),
- mLineContainer(aLineContainer),
- mBidiEnabled(aPresContext->BidiEnabled()),
- mSkipIncompleteTextRuns(PR_FALSE),
- mNextRunContextInfo(nsTextFrameUtils::INCOMING_NONE),
- mCurrentRunContextInfo(nsTextFrameUtils::INCOMING_NONE) {
- ResetRunInfo();
- }
- ~BuildTextRunsScanner() {
- NS_ASSERTION(mBreakSinks.IsEmpty(), "Should have been cleared");
- NS_ASSERTION(mTextRunsToDelete.IsEmpty(), "Should have been cleared");
- NS_ASSERTION(mLineBreakBeforeFrames.IsEmpty(), "Should have been cleared");
- NS_ASSERTION(mMappedFlows.IsEmpty(), "Should have been cleared");
- }
-
- void SetAtStartOfLine() {
- mStartOfLine = PR_TRUE;
- mCanStopOnThisLine = PR_FALSE;
- }
- void SetSkipIncompleteTextRuns(PRBool aSkip) {
- mSkipIncompleteTextRuns = aSkip;
- }
- void SetCommonAncestorWithLastFrame(nsIFrame* aFrame) {
- mCommonAncestorWithLastFrame = aFrame;
- }
- PRBool CanStopOnThisLine() {
- return mCanStopOnThisLine;
- }
- nsIFrame* GetCommonAncestorWithLastFrame() {
- return mCommonAncestorWithLastFrame;
- }
- void LiftCommonAncestorWithLastFrameToParent(nsIFrame* aFrame) {
- if (mCommonAncestorWithLastFrame &&
- mCommonAncestorWithLastFrame->GetParent() == aFrame) {
- mCommonAncestorWithLastFrame = aFrame;
- }
- }
- void ScanFrame(nsIFrame* aFrame);
- PRBool IsTextRunValidForMappedFlows(gfxTextRun* aTextRun);
- void FlushFrames(PRBool aFlushLineBreaks, PRBool aSuppressTrailingBreak);
- void FlushLineBreaks(gfxTextRun* aTrailingTextRun);
- void ResetRunInfo() {
- mLastFrame = nsnull;
- mMappedFlows.Clear();
- mLineBreakBeforeFrames.Clear();
- mMaxTextLength = 0;
- mDoubleByteText = PR_FALSE;
- }
- void AccumulateRunInfo(nsTextFrame* aFrame);
- /**
- * @return null to indicate either textrun construction failed or
- * we constructed just a partial textrun to set up linebreaker and other
- * state for following textruns.
- */
- gfxTextRun* BuildTextRunForFrames(void* aTextBuffer);
- void AssignTextRun(gfxTextRun* aTextRun);
- nsTextFrame* GetNextBreakBeforeFrame(PRUint32* aIndex);
- void SetupBreakSinksForTextRun(gfxTextRun* aTextRun, PRBool aIsExistingTextRun,
- PRBool aSuppressSink);
- struct FindBoundaryState {
- nsIFrame* mStopAtFrame;
- nsTextFrame* mFirstTextFrame;
- nsTextFrame* mLastTextFrame;
- PRPackedBool mSeenTextRunBoundaryOnLaterLine;
- PRPackedBool mSeenTextRunBoundaryOnThisLine;
- PRPackedBool mSeenSpaceForLineBreakingOnThisLine;
- };
- enum FindBoundaryResult {
- FB_CONTINUE,
- FB_STOPPED_AT_STOP_FRAME,
- FB_FOUND_VALID_TEXTRUN_BOUNDARY
- };
- FindBoundaryResult FindBoundaries(nsIFrame* aFrame, FindBoundaryState* aState);
-
- PRBool ContinueTextRunAcrossFrames(nsTextFrame* aFrame1, nsTextFrame* aFrame2);
-
- // Like TextRunMappedFlow but with some differences. mStartFrame to mEndFrame
- // (exclusive) are a sequence of in-flow frames (if mEndFrame is null, then
- // continuations starting from mStartFrame are a sequence of in-flow frames).
- struct MappedFlow {
- nsTextFrame* mStartFrame;
- nsTextFrame* mEndFrame;
- // When we consider breaking between elements, the nearest common
- // ancestor of the elements containing the characters is the one whose
- // CSS 'white-space' property governs. So this records the nearest common
- // ancestor of mStartFrame and the previous text frame, or null if there
- // was no previous text frame on this line.
- nsIFrame* mAncestorControllingInitialBreak;
-
- PRInt32 GetContentEnd() {
- return mEndFrame ? mEndFrame->GetContentOffset()
- : mStartFrame->GetFragment()->GetLength();
- }
- };
-
- class BreakSink : public nsILineBreakSink {
- public:
- BreakSink(gfxTextRun* aTextRun, gfxContext* aContext, PRUint32 aOffsetIntoTextRun,
- PRBool aExistingTextRun) :
- mTextRun(aTextRun), mContext(aContext),
- mOffsetIntoTextRun(aOffsetIntoTextRun),
- mChangedBreaks(PR_FALSE), mExistingTextRun(aExistingTextRun) {}
-
- virtual void SetBreaks(PRUint32 aOffset, PRUint32 aLength,
- PRPackedBool* aBreakBefore) {
- if (mTextRun->SetPotentialLineBreaks(aOffset + mOffsetIntoTextRun, aLength,
- aBreakBefore, mContext)) {
- mChangedBreaks = PR_TRUE;
- // Be conservative and assume that some breaks have been set
- mTextRun->ClearFlagBits(nsTextFrameUtils::TEXT_NO_BREAKS);
- }
- }
-
- virtual void SetCapitalization(PRUint32 aOffset, PRUint32 aLength,
- PRPackedBool* aCapitalize) {
- NS_ASSERTION(mTextRun->GetFlags() & nsTextFrameUtils::TEXT_IS_TRANSFORMED,
- "Text run should be transformed!");
- nsTransformedTextRun* transformedTextRun =
- static_cast<nsTransformedTextRun*>(mTextRun);
- transformedTextRun->SetCapitalization(aOffset + mOffsetIntoTextRun, aLength,
- aCapitalize, mContext);
- }
-
- void Finish() {
- NS_ASSERTION(!(mTextRun->GetFlags() &
- (gfxTextRunWordCache::TEXT_UNUSED_FLAGS |
- nsTextFrameUtils::TEXT_UNUSED_FLAG)),
- "Flag set that should never be set! (memory safety error?)");
- if (mTextRun->GetFlags() & nsTextFrameUtils::TEXT_IS_TRANSFORMED) {
- nsTransformedTextRun* transformedTextRun =
- static_cast<nsTransformedTextRun*>(mTextRun);
- transformedTextRun->FinishSettingProperties(mContext);
- }
- }
-
- gfxTextRun* mTextRun;
- gfxContext* mContext;
- PRUint32 mOffsetIntoTextRun;
- PRPackedBool mChangedBreaks;
- PRPackedBool mExistingTextRun;
- };
-
-private:
- nsAutoTArray<MappedFlow,10> mMappedFlows;
- nsAutoTArray<nsTextFrame*,50> mLineBreakBeforeFrames;
- nsAutoTArray<nsAutoPtr<BreakSink>,10> mBreakSinks;
- nsAutoTArray<gfxTextRun*,5> mTextRunsToDelete;
- nsLineBreaker mLineBreaker;
- gfxTextRun* mCurrentFramesAllSameTextRun;
- gfxContext* mContext;
- nsIFrame* mLineContainer;
- nsTextFrame* mLastFrame;
- // The common ancestor of the current frame and the previous leaf frame
- // on the line, or null if there was no previous leaf frame.
- nsIFrame* mCommonAncestorWithLastFrame;
- // mMaxTextLength is an upper bound on the size of the text in all mapped frames
- PRUint32 mMaxTextLength;
- PRPackedBool mDoubleByteText;
- PRPackedBool mBidiEnabled;
- PRPackedBool mStartOfLine;
- PRPackedBool mSkipIncompleteTextRuns;
- PRPackedBool mCanStopOnThisLine;
- PRUint8 mNextRunContextInfo;
- PRUint8 mCurrentRunContextInfo;
-};
-
-static nsIFrame*
-FindLineContainer(nsIFrame* aFrame)
-{
- while (aFrame && aFrame->CanContinueTextRun()) {
- aFrame = aFrame->GetParent();
- }
- return aFrame;
-}
-
-static PRBool
-TextContainsLineBreakerWhiteSpace(const void* aText, PRUint32 aLength,
- PRBool aIsDoubleByte)
-{
- PRUint32 i;
- if (aIsDoubleByte) {
- const PRUnichar* chars = static_cast<const PRUnichar*>(aText);
- for (i = 0; i < aLength; ++i) {
- if (nsLineBreaker::IsSpace(chars[i]))
- return PR_TRUE;
- }
- return PR_FALSE;
- } else {
- const PRUint8* chars = static_cast<const PRUint8*>(aText);
- for (i = 0; i < aLength; ++i) {
- if (nsLineBreaker::IsSpace(chars[i]))
- return PR_TRUE;
- }
- return PR_FALSE;
- }
-}
-
-struct FrameTextTraversal {
- // These fields identify which frames should be recursively scanned
- // The first normal frame to scan (or null, if no such frame should be scanned)
- nsIFrame* mFrameToScan;
- // The first overflow frame to scan (or null, if no such frame should be scanned)
- nsIFrame* mOverflowFrameToScan;
- // Whether to scan the siblings of mFrameToScan/mOverflowFrameToScan
- PRPackedBool mScanSiblings;
-
- // These identify the boundaries of the context required for
- // line breaking or textrun construction
- PRPackedBool mLineBreakerCanCrossFrameBoundary;
- PRPackedBool mTextRunCanCrossFrameBoundary;
-
- nsIFrame* NextFrameToScan() {
- nsIFrame* f;
- if (mFrameToScan) {
- f = mFrameToScan;
- mFrameToScan = mScanSiblings ? f->GetNextSibling() : nsnull;
- } else if (mOverflowFrameToScan) {
- f = mOverflowFrameToScan;
- mOverflowFrameToScan = mScanSiblings ? f->GetNextSibling() : nsnull;
- } else {
- f = nsnull;
- }
- return f;
- }
-};
-
-static FrameTextTraversal
-CanTextCrossFrameBoundary(nsIFrame* aFrame, nsIAtom* aType)
-{
- NS_ASSERTION(aType == aFrame->GetType(), "Wrong type");
-
- FrameTextTraversal result;
-
- PRBool continuesTextRun = aFrame->CanContinueTextRun();
- if (aType == nsGkAtoms::placeholderFrame) {
- // placeholders are "invisible", so a text run should be able to span
- // across one. But don't descend into the out-of-flow.
- result.mLineBreakerCanCrossFrameBoundary = PR_TRUE;
- result.mOverflowFrameToScan = nsnull;
- if (continuesTextRun) {
- // ... Except for first-letter floats, which are really in-flow
- // from the point of view of capitalization etc, so we'd better
- // descend into them. But we actually need to break the textrun for
- // first-letter floats since things look bad if, say, we try to make a
- // ligature across the float boundary.
- result.mFrameToScan =
- (static_cast<nsPlaceholderFrame*>(aFrame))->GetOutOfFlowFrame();
- result.mScanSiblings = PR_FALSE;
- result.mTextRunCanCrossFrameBoundary = PR_FALSE;
- } else {
- result.mFrameToScan = nsnull;
- result.mTextRunCanCrossFrameBoundary = PR_TRUE;
- }
- } else {
- if (continuesTextRun) {
- result.mFrameToScan = aFrame->GetFirstChild(nsnull);
- result.mOverflowFrameToScan = aFrame->GetFirstChild(nsGkAtoms::overflowList);
- NS_WARN_IF_FALSE(!result.mOverflowFrameToScan,
- "Scanning overflow inline frames is something we should avoid");
- result.mScanSiblings = PR_TRUE;
- result.mTextRunCanCrossFrameBoundary = PR_TRUE;
- result.mLineBreakerCanCrossFrameBoundary = PR_TRUE;
- } else {
- result.mFrameToScan = nsnull;
- result.mOverflowFrameToScan = nsnull;
- result.mTextRunCanCrossFrameBoundary = PR_FALSE;
- result.mLineBreakerCanCrossFrameBoundary = PR_FALSE;
- }
- }
- return result;
-}
-
-BuildTextRunsScanner::FindBoundaryResult
-BuildTextRunsScanner::FindBoundaries(nsIFrame* aFrame, FindBoundaryState* aState)
-{
- nsIAtom* frameType = aFrame->GetType();
- nsTextFrame* textFrame = frameType == nsGkAtoms::textFrame
- ? static_cast<nsTextFrame*>(aFrame) : nsnull;
- if (textFrame) {
- if (aState->mLastTextFrame &&
- textFrame != aState->mLastTextFrame->GetNextInFlow() &&
- !ContinueTextRunAcrossFrames(aState->mLastTextFrame, textFrame)) {
- aState->mSeenTextRunBoundaryOnThisLine = PR_TRUE;
- if (aState->mSeenSpaceForLineBreakingOnThisLine)
- return FB_FOUND_VALID_TEXTRUN_BOUNDARY;
- }
- if (!aState->mFirstTextFrame) {
- aState->mFirstTextFrame = textFrame;
- }
- aState->mLastTextFrame = textFrame;
- }
-
- if (aFrame == aState->mStopAtFrame)
- return FB_STOPPED_AT_STOP_FRAME;
-
- if (textFrame) {
- if (!aState->mSeenSpaceForLineBreakingOnThisLine) {
- const nsTextFragment* frag = textFrame->GetFragment();
- PRUint32 start = textFrame->GetContentOffset();
- const void* text = frag->Is2b()
- ? static_cast<const void*>(frag->Get2b() + start)
- : static_cast<const void*>(frag->Get1b() + start);
- if (TextContainsLineBreakerWhiteSpace(text, textFrame->GetContentLength(),
- frag->Is2b())) {
- aState->mSeenSpaceForLineBreakingOnThisLine = PR_TRUE;
- if (aState->mSeenTextRunBoundaryOnLaterLine)
- return FB_FOUND_VALID_TEXTRUN_BOUNDARY;
- }
- }
- return FB_CONTINUE;
- }
-
- FrameTextTraversal traversal =
- CanTextCrossFrameBoundary(aFrame, frameType);
- if (!traversal.mTextRunCanCrossFrameBoundary) {
- aState->mSeenTextRunBoundaryOnThisLine = PR_TRUE;
- if (aState->mSeenSpaceForLineBreakingOnThisLine)
- return FB_FOUND_VALID_TEXTRUN_BOUNDARY;
- }
-
- for (nsIFrame* f = traversal.NextFrameToScan(); f;
- f = traversal.NextFrameToScan()) {
- FindBoundaryResult result = FindBoundaries(f, aState);
- if (result != FB_CONTINUE)
- return result;
- }
-
- if (!traversal.mTextRunCanCrossFrameBoundary) {
- aState->mSeenTextRunBoundaryOnThisLine = PR_TRUE;
- if (aState->mSeenSpaceForLineBreakingOnThisLine)
- return FB_FOUND_VALID_TEXTRUN_BOUNDARY;
- }
-
- return FB_CONTINUE;
-}
-
-// build text runs for the 200 lines following aForFrame, and stop after that
-// when we get a chance.
-#define NUM_LINES_TO_BUILD_TEXT_RUNS 200
-
-/**
- * General routine for building text runs. This is hairy because of the need
- * to build text runs that span content nodes.
- *
- * @param aForFrameLine the line containing aForFrame; if null, we'll figure
- * out the line (slowly)
- * @param aLineContainer the line container containing aForFrame; if null,
- * we'll walk the ancestors to find it. It's required to be non-null when
- * aForFrameLine is non-null.
- */
-static void
-BuildTextRuns(gfxContext* aContext, nsTextFrame* aForFrame,
- nsIFrame* aLineContainer,
- const nsLineList::iterator* aForFrameLine)
-{
- NS_ASSERTION(aForFrame || aLineContainer,
- "One of aForFrame or aLineContainer must be set!");
- NS_ASSERTION(!aForFrameLine || aLineContainer,
- "line but no line container");
-
- if (!aLineContainer) {
- aLineContainer = FindLineContainer(aForFrame);
- } else {
- NS_ASSERTION(!aForFrame ||
- (aLineContainer == FindLineContainer(aForFrame) ||
- (aLineContainer->GetType() == nsGkAtoms::letterFrame &&
- aLineContainer->GetStyleDisplay()->IsFloating())),
- "Wrong line container hint");
- }
-
- nsPresContext* presContext = aLineContainer->PresContext();
- BuildTextRunsScanner scanner(presContext, aContext, aLineContainer);
-
- nsBlockFrame* block = nsLayoutUtils::GetAsBlock(aLineContainer);
-
- if (!block) {
- NS_ASSERTION(!aLineContainer->GetPrevInFlow() && !aLineContainer->GetNextInFlow(),
- "Breakable non-block line containers not supported");
- // Just loop through all the children of the linecontainer ... it's really
- // just one line
- scanner.SetAtStartOfLine();
- scanner.SetCommonAncestorWithLastFrame(nsnull);
- nsIFrame* child = aLineContainer->GetFirstChild(nsnull);
- while (child) {
- scanner.ScanFrame(child);
- child = child->GetNextSibling();
- }
- // Set mStartOfLine so FlushFrames knows its textrun ends a line
- scanner.SetAtStartOfLine();
- scanner.FlushFrames(PR_TRUE, PR_FALSE);
- return;
- }
-
- // Find the line containing aForFrame
-
- PRBool isValid = PR_TRUE;
- nsBlockInFlowLineIterator backIterator(block, &isValid);
- if (aForFrameLine) {
- backIterator = nsBlockInFlowLineIterator(block, *aForFrameLine, PR_FALSE);
- } else {
- backIterator = nsBlockInFlowLineIterator(block, aForFrame, &isValid);
- NS_ASSERTION(isValid, "aForFrame not found in block, someone lied to us");
- NS_ASSERTION(backIterator.GetContainer() == block,
- "Someone lied to us about the block");
- }
- nsBlockFrame::line_iterator startLine = backIterator.GetLine();
-
- // Find a line where we can start building text runs. We choose the last line
- // where:
- // -- there is a textrun boundary between the start of the line and the
- // start of aForFrame
- // -- there is a space between the start of the line and the textrun boundary
- // (this is so we can be sure the line breaks will be set properly
- // on the textruns we construct).
- // The possibly-partial text runs up to and including the first space
- // are not reconstructed. We construct partial text runs for that text ---
- // for the sake of simplifying the code and feeding the linebreaker ---
- // but we discard them instead of assigning them to frames.
- // This is a little awkward because we traverse lines in the reverse direction
- // but we traverse the frames in each line in the forward direction.
- nsBlockInFlowLineIterator forwardIterator = backIterator;
- nsTextFrame* stopAtFrame = aForFrame;
- nsTextFrame* nextLineFirstTextFrame = nsnull;
- PRBool seenTextRunBoundaryOnLaterLine = PR_FALSE;
- PRBool mayBeginInTextRun = PR_TRUE;
- while (PR_TRUE) {
- forwardIterator = backIterator;
- nsBlockFrame::line_iterator line = backIterator.GetLine();
- if (!backIterator.Prev() || backIterator.GetLine()->IsBlock()) {
- mayBeginInTextRun = PR_FALSE;
- break;
- }
-
- BuildTextRunsScanner::FindBoundaryState state = { stopAtFrame, nsnull, nsnull,
- seenTextRunBoundaryOnLaterLine, PR_FALSE, PR_FALSE };
- nsIFrame* child = line->mFirstChild;
- PRBool foundBoundary = PR_FALSE;
- PRInt32 i;
- for (i = line->GetChildCount() - 1; i >= 0; --i) {
- BuildTextRunsScanner::FindBoundaryResult result =
- scanner.FindBoundaries(child, &state);
- if (result == BuildTextRunsScanner::FB_FOUND_VALID_TEXTRUN_BOUNDARY) {
- foundBoundary = PR_TRUE;
- break;
- } else if (result == BuildTextRunsScanner::FB_STOPPED_AT_STOP_FRAME) {
- break;
- }
- child = child->GetNextSibling();
- }
- if (foundBoundary)
- break;
- if (!stopAtFrame && state.mLastTextFrame && nextLineFirstTextFrame &&
- !scanner.ContinueTextRunAcrossFrames(state.mLastTextFrame, nextLineFirstTextFrame)) {
- // Found a usable textrun boundary at the end of the line
- if (state.mSeenSpaceForLineBreakingOnThisLine)
- break;
- seenTextRunBoundaryOnLaterLine = PR_TRUE;
- } else if (state.mSeenTextRunBoundaryOnThisLine) {
- seenTextRunBoundaryOnLaterLine = PR_TRUE;
- }
- stopAtFrame = nsnull;
- if (state.mFirstTextFrame) {
- nextLineFirstTextFrame = state.mFirstTextFrame;
- }
- }
- scanner.SetSkipIncompleteTextRuns(mayBeginInTextRun);
-
- // Now iterate over all text frames starting from the current line. First-in-flow
- // text frames will be accumulated into textRunFrames as we go. When a
- // text run boundary is required we flush textRunFrames ((re)building their
- // gfxTextRuns as necessary).
- PRBool seenStartLine = PR_FALSE;
- PRUint32 linesAfterStartLine = 0;
- do {
- nsBlockFrame::line_iterator line = forwardIterator.GetLine();
- if (line->IsBlock())
- break;
- line->SetInvalidateTextRuns(PR_FALSE);
- scanner.SetAtStartOfLine();
- scanner.SetCommonAncestorWithLastFrame(nsnull);
- nsIFrame* child = line->mFirstChild;
- PRInt32 i;
- for (i = line->GetChildCount() - 1; i >= 0; --i) {
- scanner.ScanFrame(child);
- child = child->GetNextSibling();
- }
- if (line.get() == startLine.get()) {
- seenStartLine = PR_TRUE;
- }
- if (seenStartLine) {
- ++linesAfterStartLine;
- if (linesAfterStartLine >= NUM_LINES_TO_BUILD_TEXT_RUNS && scanner.CanStopOnThisLine()) {
- // Don't flush frames; we may be in the middle of a textrun
- // that we can't end here. That's OK, we just won't build it.
- // Note that we must already have finished the textrun for aForFrame,
- // because we've seen the end of a textrun in a line after the line
- // containing aForFrame.
- scanner.FlushLineBreaks(nsnull);
- // This flushes out mMappedFlows and mLineBreakBeforeFrames, which
- // silences assertions in the scanner destructor.
- scanner.ResetRunInfo();
- return;
- }
- }
- } while (forwardIterator.Next());
-
- // Set mStartOfLine so FlushFrames knows its textrun ends a line
- scanner.SetAtStartOfLine();
- scanner.FlushFrames(PR_TRUE, PR_FALSE);
-}
-
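- // ExpandBuffer inflates 8-bit (Latin-1) text into 16-bit PRUnichars by
- // zero-extending each byte, returning the position just past the last unit
- // written. For example, expanding the three bytes "abc" stores
- // 0x0061 0x0062 0x0063 and returns aDest + 3.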
-static PRUnichar*
-ExpandBuffer(PRUnichar* aDest, PRUint8* aSrc, PRUint32 aCount)
-{
- while (aCount) {
- *aDest = *aSrc;
- ++aDest;
- ++aSrc;
- --aCount;
- }
- return aDest;
-}
-
-PRBool BuildTextRunsScanner::IsTextRunValidForMappedFlows(gfxTextRun* aTextRun)
-{
- if (aTextRun->GetFlags() & nsTextFrameUtils::TEXT_IS_SIMPLE_FLOW)
- return mMappedFlows.Length() == 1 &&
- mMappedFlows[0].mStartFrame == static_cast<nsTextFrame*>(aTextRun->GetUserData()) &&
- mMappedFlows[0].mEndFrame == nsnull;
-
- TextRunUserData* userData = static_cast<TextRunUserData*>(aTextRun->GetUserData());
- if (userData->mMappedFlowCount != PRInt32(mMappedFlows.Length()))
- return PR_FALSE;
- PRUint32 i;
- for (i = 0; i < mMappedFlows.Length(); ++i) {
- if (userData->mMappedFlows[i].mStartFrame != mMappedFlows[i].mStartFrame ||
- PRInt32(userData->mMappedFlows[i].mContentLength) !=
- mMappedFlows[i].GetContentEnd() - mMappedFlows[i].mStartFrame->GetContentOffset())
- return PR_FALSE;
- }
- return PR_TRUE;
-}
-
-/**
- * This gets called when we need to make a text run for the current list of
- * frames.
- */
-void BuildTextRunsScanner::FlushFrames(PRBool aFlushLineBreaks, PRBool aSuppressTrailingBreak)
-{
- gfxTextRun* textRun = nsnull;
- if (!mMappedFlows.IsEmpty()) {
- if (!mSkipIncompleteTextRuns && mCurrentFramesAllSameTextRun &&
- ((mCurrentFramesAllSameTextRun->GetFlags() & nsTextFrameUtils::TEXT_INCOMING_WHITESPACE) != 0) ==
- ((mCurrentRunContextInfo & nsTextFrameUtils::INCOMING_WHITESPACE) != 0) &&
- ((mCurrentFramesAllSameTextRun->GetFlags() & gfxTextRunWordCache::TEXT_INCOMING_ARABICCHAR) != 0) ==
- ((mCurrentRunContextInfo & nsTextFrameUtils::INCOMING_ARABICCHAR) != 0) &&
- IsTextRunValidForMappedFlows(mCurrentFramesAllSameTextRun)) {
- // Optimization: We do not need to (re)build the textrun.
- textRun = mCurrentFramesAllSameTextRun;
-
- // Feed this run's text into the linebreaker to provide context. This also
- // updates mNextRunContextInfo appropriately.
- SetupBreakSinksForTextRun(textRun, PR_TRUE, PR_FALSE);
- mNextRunContextInfo = nsTextFrameUtils::INCOMING_NONE;
- if (textRun->GetFlags() & nsTextFrameUtils::TEXT_TRAILING_WHITESPACE) {
- mNextRunContextInfo |= nsTextFrameUtils::INCOMING_WHITESPACE;
- }
- if (textRun->GetFlags() & gfxTextRunWordCache::TEXT_TRAILING_ARABICCHAR) {
- mNextRunContextInfo |= nsTextFrameUtils::INCOMING_ARABICCHAR;
- }
- } else {
- nsAutoTArray<PRUint8,BIG_TEXT_NODE_SIZE> buffer;
- if (!buffer.AppendElements(mMaxTextLength*(mDoubleByteText ? 2 : 1)))
- return;
- textRun = BuildTextRunForFrames(buffer.Elements());
- }
- }
-
- if (aFlushLineBreaks) {
- FlushLineBreaks(aSuppressTrailingBreak ? nsnull : textRun);
- }
-
- mCanStopOnThisLine = PR_TRUE;
- ResetRunInfo();
-}
-
-void BuildTextRunsScanner::FlushLineBreaks(gfxTextRun* aTrailingTextRun)
-{
- PRBool trailingLineBreak;
- nsresult rv = mLineBreaker.Reset(&trailingLineBreak);
- // textRun may be null for various reasons, including because we constructed
- // a partial textrun just to get the linebreaker and other state set up
- // to build the next textrun.
- if (NS_SUCCEEDED(rv) && trailingLineBreak && aTrailingTextRun) {
- aTrailingTextRun->SetFlagBits(nsTextFrameUtils::TEXT_HAS_TRAILING_BREAK);
- }
-
- PRUint32 i;
- for (i = 0; i < mBreakSinks.Length(); ++i) {
- if (!mBreakSinks[i]->mExistingTextRun || mBreakSinks[i]->mChangedBreaks) {
- // TODO cause frames associated with the textrun to be reflowed, if they
- // aren't being reflowed already!
- }
- mBreakSinks[i]->Finish();
- }
- mBreakSinks.Clear();
-
- for (i = 0; i < mTextRunsToDelete.Length(); ++i) {
- gfxTextRun* deleteTextRun = mTextRunsToDelete[i];
- gTextRuns->RemoveFromCache(deleteTextRun);
- delete deleteTextRun;
- }
- mTextRunsToDelete.Clear();
-}
-
-void BuildTextRunsScanner::AccumulateRunInfo(nsTextFrame* aFrame)
-{
- NS_ASSERTION(mMaxTextLength <= mMaxTextLength + aFrame->GetContentLength(), "integer overflow");
- mMaxTextLength += aFrame->GetContentLength();
- mDoubleByteText |= aFrame->GetFragment()->Is2b();
- mLastFrame = aFrame;
- mCommonAncestorWithLastFrame = aFrame->GetParent();
-
- MappedFlow* mappedFlow = &mMappedFlows[mMappedFlows.Length() - 1];
- NS_ASSERTION(mappedFlow->mStartFrame == aFrame ||
- mappedFlow->GetContentEnd() == aFrame->GetContentOffset(),
- "Overlapping or discontiguous frames => BAD");
- mappedFlow->mEndFrame = static_cast<nsTextFrame*>(aFrame->GetNextContinuation());
- if (mCurrentFramesAllSameTextRun != aFrame->GetTextRun()) {
- mCurrentFramesAllSameTextRun = nsnull;
- }
-
- if (mStartOfLine) {
- mLineBreakBeforeFrames.AppendElement(aFrame);
- mStartOfLine = PR_FALSE;
- }
-}
-
-static nscoord StyleToCoord(const nsStyleCoord& aCoord)
-{
- if (eStyleUnit_Coord == aCoord.GetUnit()) {
- return aCoord.GetCoordValue();
- } else {
- return 0;
- }
-}
-
-static PRBool
-HasTerminalNewline(const nsTextFrame* aFrame)
-{
- if (aFrame->GetContentLength() == 0)
- return PR_FALSE;
- const nsTextFragment* frag = aFrame->GetFragment();
- return frag->CharAt(aFrame->GetContentEnd() - 1) == '\n';
-}
-
-PRBool
-BuildTextRunsScanner::ContinueTextRunAcrossFrames(nsTextFrame* aFrame1, nsTextFrame* aFrame2)
-{
- if (mBidiEnabled &&
- NS_GET_EMBEDDING_LEVEL(aFrame1) != NS_GET_EMBEDDING_LEVEL(aFrame2))
- return PR_FALSE;
-
- nsStyleContext* sc1 = aFrame1->GetStyleContext();
- const nsStyleText* textStyle1 = sc1->GetStyleText();
- // If the first frame ends in a preformatted newline, then we end the textrun
- // here. This avoids creating giant textruns for an entire plain text file.
- // Note that we create a single text frame for a preformatted text node,
- // even if it has newlines in it, so typically we won't see trailing newlines
- // until after reflow has broken up the frame into one (or more) frames per
- // line. That's OK though.
- if (textStyle1->NewlineIsSignificant() && HasTerminalNewline(aFrame1))
- return PR_FALSE;
-
- if (aFrame1->GetContent() == aFrame2->GetContent() &&
- aFrame1->GetNextInFlow() != aFrame2) {
- // aFrame2 must be a non-fluid continuation of aFrame1. This can happen
- // sometimes when the unicode-bidi property is used; the bidi resolver
- // breaks text into different frames even though the text has the same
- // direction. We can't allow these two frames to share the same textrun
- // because that would violate our invariant that two flows in the same
- // textrun have different content elements.
- return PR_FALSE;
- }
-
- nsStyleContext* sc2 = aFrame2->GetStyleContext();
- if (sc1 == sc2)
- return PR_TRUE;
- const nsStyleFont* fontStyle1 = sc1->GetStyleFont();
- const nsStyleFont* fontStyle2 = sc2->GetStyleFont();
- const nsStyleText* textStyle2 = sc2->GetStyleText();
- return fontStyle1->mFont.BaseEquals(fontStyle2->mFont) &&
- sc1->GetStyleVisibility()->mLangGroup == sc2->GetStyleVisibility()->mLangGroup &&
- nsLayoutUtils::GetTextRunFlagsForStyle(sc1, textStyle1, fontStyle1) ==
- nsLayoutUtils::GetTextRunFlagsForStyle(sc2, textStyle2, fontStyle2);
-}
-
-void BuildTextRunsScanner::ScanFrame(nsIFrame* aFrame)
-{
- // First check if we can extend the current mapped frame block. This is common.
- if (mMappedFlows.Length() > 0) {
- MappedFlow* mappedFlow = &mMappedFlows[mMappedFlows.Length() - 1];
- if (mappedFlow->mEndFrame == aFrame &&
- (aFrame->GetStateBits() & NS_FRAME_IS_FLUID_CONTINUATION)) {
- NS_ASSERTION(aFrame->GetType() == nsGkAtoms::textFrame,
- "Flow-sibling of a text frame is not a text frame?");
-
- // Don't do this optimization if mLastFrame has a terminal newline...
- // it's quite likely preformatted and we might want to end the textrun here.
- // This is almost always true:
- if (mLastFrame->GetStyleContext() == aFrame->GetStyleContext() &&
- !HasTerminalNewline(mLastFrame)) {
- AccumulateRunInfo(static_cast<nsTextFrame*>(aFrame));
- return;
- }
- }
- }
-
- nsIAtom* frameType = aFrame->GetType();
- // Now see if we can add a new set of frames to the current textrun
- if (frameType == nsGkAtoms::textFrame) {
- nsTextFrame* frame = static_cast<nsTextFrame*>(aFrame);
-
- if (mLastFrame) {
- if (!ContinueTextRunAcrossFrames(mLastFrame, frame)) {
- FlushFrames(PR_FALSE, PR_FALSE);
- } else {
- if (mLastFrame->GetContent() == frame->GetContent()) {
- AccumulateRunInfo(frame);
- return;
- }
- }
- }
-
- MappedFlow* mappedFlow = mMappedFlows.AppendElement();
- if (!mappedFlow)
- return;
-
- mappedFlow->mStartFrame = frame;
- mappedFlow->mAncestorControllingInitialBreak = mCommonAncestorWithLastFrame;
-
- AccumulateRunInfo(frame);
- if (mMappedFlows.Length() == 1) {
- mCurrentFramesAllSameTextRun = frame->GetTextRun();
- mCurrentRunContextInfo = mNextRunContextInfo;
- }
- return;
- }
-
- FrameTextTraversal traversal =
- CanTextCrossFrameBoundary(aFrame, frameType);
- PRBool isBR = frameType == nsGkAtoms::brFrame;
- if (!traversal.mLineBreakerCanCrossFrameBoundary) {
- // BR frames are special. We do not need or want to record a break opportunity
- // before a BR frame.
- FlushFrames(PR_TRUE, isBR);
- mCommonAncestorWithLastFrame = aFrame;
- mNextRunContextInfo &= ~nsTextFrameUtils::INCOMING_WHITESPACE;
- mStartOfLine = PR_FALSE;
- } else if (!traversal.mTextRunCanCrossFrameBoundary) {
- FlushFrames(PR_FALSE, PR_FALSE);
- }
-
- for (nsIFrame* f = traversal.NextFrameToScan(); f;
- f = traversal.NextFrameToScan()) {
- ScanFrame(f);
- }
-
- if (!traversal.mLineBreakerCanCrossFrameBoundary) {
- // Really if we're a BR frame this is unnecessary since descendInto will be
- // false. In fact this whole "if" statement should move into the descendInto.
- FlushFrames(PR_TRUE, isBR);
- mCommonAncestorWithLastFrame = aFrame;
- mNextRunContextInfo &= ~nsTextFrameUtils::INCOMING_WHITESPACE;
- } else if (!traversal.mTextRunCanCrossFrameBoundary) {
- FlushFrames(PR_FALSE, PR_FALSE);
- }
-
- LiftCommonAncestorWithLastFrameToParent(aFrame->GetParent());
-}
-
-nsTextFrame*
-BuildTextRunsScanner::GetNextBreakBeforeFrame(PRUint32* aIndex)
-{
- PRUint32 index = *aIndex;
- if (index >= mLineBreakBeforeFrames.Length())
- return nsnull;
- *aIndex = index + 1;
- return static_cast<nsTextFrame*>(mLineBreakBeforeFrames.ElementAt(index));
-}
-
-static PRUint32
-GetSpacingFlags(nscoord spacing)
-{
- return spacing ? gfxTextRunFactory::TEXT_ENABLE_SPACING : 0;
-}
-
-static gfxFontGroup*
-GetFontGroupForFrame(nsIFrame* aFrame,
- nsIFontMetrics** aOutFontMetrics = nsnull)
-{
- if (aOutFontMetrics)
- *aOutFontMetrics = nsnull;
-
- nsCOMPtr<nsIFontMetrics> metrics;
- nsLayoutUtils::GetFontMetricsForFrame(aFrame, getter_AddRefs(metrics));
-
- if (!metrics)
- return nsnull;
-
- nsIFontMetrics* metricsRaw = metrics;
- if (aOutFontMetrics) {
- *aOutFontMetrics = metricsRaw;
- NS_ADDREF(*aOutFontMetrics);
- }
- nsIThebesFontMetrics* fm = static_cast<nsIThebesFontMetrics*>(metricsRaw);
- // XXX this is a bit bogus, we're releasing 'metrics' so the returned font-group
- // might actually be torn down, although because of the way the device context
- // caches font metrics, this seems to not actually happen. But we should fix
- // this.
- return fm->GetThebesFontGroup();
-}
-
-static already_AddRefed<gfxContext>
-GetReferenceRenderingContext(nsTextFrame* aTextFrame, nsIRenderingContext* aRC)
-{
- nsCOMPtr<nsIRenderingContext> tmp = aRC;
- if (!tmp) {
- nsresult rv = aTextFrame->PresContext()->PresShell()->
- CreateRenderingContext(aTextFrame, getter_AddRefs(tmp));
- if (NS_FAILED(rv))
- return nsnull;
- }
-
- gfxContext* ctx = tmp->ThebesContext();
- NS_ADDREF(ctx);
- return ctx;
-}
-
-/**
- * The returned textrun must be released via gfxTextRunCache::ReleaseTextRun
- * or gfxTextRunCache::AutoTextRun.
- */
-static gfxTextRun*
-GetHyphenTextRun(gfxTextRun* aTextRun, gfxContext* aContext, nsTextFrame* aTextFrame)
-{
- nsRefPtr<gfxContext> ctx = aContext;
- if (!ctx) {
- ctx = GetReferenceRenderingContext(aTextFrame, nsnull);
- }
- if (!ctx)
- return nsnull;
-
- gfxFontGroup* fontGroup = aTextRun->GetFontGroup();
- PRUint32 flags = gfxFontGroup::TEXT_IS_PERSISTENT;
-
- // only use U+2010 if it is supported by the first font in the group;
- // it's better to use ASCII '-' from the primary font than to fall back to U+2010
- // from some other, possibly poorly-matching face
- static const PRUnichar unicodeHyphen = 0x2010;
- gfxFont *font = fontGroup->GetFontAt(0);
- if (font && font->HasCharacter(unicodeHyphen)) {
- return gfxTextRunCache::MakeTextRun(&unicodeHyphen, 1, fontGroup, ctx,
- aTextRun->GetAppUnitsPerDevUnit(), flags);
- }
-
- static const PRUint8 dash = '-';
- return gfxTextRunCache::MakeTextRun(&dash, 1, fontGroup, ctx,
- aTextRun->GetAppUnitsPerDevUnit(),
- flags);
-}
-
-static gfxFont::Metrics
-GetFirstFontMetrics(gfxFontGroup* aFontGroup)
-{
- if (!aFontGroup)
- return gfxFont::Metrics();
- gfxFont* font = aFontGroup->GetFontAt(0);
- if (!font)
- return gfxFont::Metrics();
- return font->GetMetrics();
-}
-
-PR_STATIC_ASSERT(NS_STYLE_WHITESPACE_NORMAL == 0);
-PR_STATIC_ASSERT(NS_STYLE_WHITESPACE_PRE == 1);
-PR_STATIC_ASSERT(NS_STYLE_WHITESPACE_NOWRAP == 2);
-PR_STATIC_ASSERT(NS_STYLE_WHITESPACE_PRE_WRAP == 3);
-PR_STATIC_ASSERT(NS_STYLE_WHITESPACE_PRE_LINE == 4);
-
-static const nsTextFrameUtils::CompressionMode CSSWhitespaceToCompressionMode[] =
-{
- nsTextFrameUtils::COMPRESS_WHITESPACE_NEWLINE, // normal
- nsTextFrameUtils::COMPRESS_NONE, // pre
- nsTextFrameUtils::COMPRESS_WHITESPACE_NEWLINE, // nowrap
- nsTextFrameUtils::COMPRESS_NONE, // pre-wrap
- nsTextFrameUtils::COMPRESS_WHITESPACE // pre-line
-};
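-
- // For example, 'pre-line' (COMPRESS_WHITESPACE) keeps newlines significant
- // but collapses runs of spaces and tabs, while 'normal' and 'nowrap'
- // (COMPRESS_WHITESPACE_NEWLINE) collapse newlines as well.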
-
-gfxTextRun*
-BuildTextRunsScanner::BuildTextRunForFrames(void* aTextBuffer)
-{
- gfxSkipCharsBuilder builder;
-
- const void* textPtr = aTextBuffer;
- PRBool anySmallcapsStyle = PR_FALSE;
- PRBool anyTextTransformStyle = PR_FALSE;
- PRInt32 endOfLastContent = 0;
- PRUint32 textFlags = nsTextFrameUtils::TEXT_NO_BREAKS;
-
- if (mCurrentRunContextInfo & nsTextFrameUtils::INCOMING_WHITESPACE) {
- textFlags |= nsTextFrameUtils::TEXT_INCOMING_WHITESPACE;
- }
- if (mCurrentRunContextInfo & nsTextFrameUtils::INCOMING_ARABICCHAR) {
- textFlags |= gfxTextRunWordCache::TEXT_INCOMING_ARABICCHAR;
- }
-
- nsAutoTArray<PRInt32,50> textBreakPoints;
- TextRunUserData dummyData;
- TextRunMappedFlow dummyMappedFlow;
-
- TextRunUserData* userData;
- TextRunUserData* userDataToDestroy;
- // If the situation is particularly simple (and common) we don't need to
- // allocate userData.
- if (mMappedFlows.Length() == 1 && !mMappedFlows[0].mEndFrame &&
- mMappedFlows[0].mStartFrame->GetContentOffset() == 0) {
- userData = &dummyData;
- userDataToDestroy = nsnull;
- dummyData.mMappedFlows = &dummyMappedFlow;
- } else {
- userData = static_cast<TextRunUserData*>
- (nsMemory::Alloc(sizeof(TextRunUserData) + mMappedFlows.Length()*sizeof(TextRunMappedFlow)));
- userDataToDestroy = userData;
- userData->mMappedFlows = reinterpret_cast<TextRunMappedFlow*>(userData + 1);
- }
- userData->mMappedFlowCount = mMappedFlows.Length();
- userData->mLastFlowIndex = 0;
-
- PRUint32 currentTransformedTextOffset = 0;
-
- PRUint32 nextBreakIndex = 0;
- nsTextFrame* nextBreakBeforeFrame = GetNextBreakBeforeFrame(&nextBreakIndex);
- PRBool enabledJustification = mLineContainer &&
- mLineContainer->GetStyleText()->mTextAlign == NS_STYLE_TEXT_ALIGN_JUSTIFY;
-
- PRUint32 i;
- const nsStyleText* textStyle = nsnull;
- const nsStyleFont* fontStyle = nsnull;
- nsStyleContext* lastStyleContext = nsnull;
- for (i = 0; i < mMappedFlows.Length(); ++i) {
- MappedFlow* mappedFlow = &mMappedFlows[i];
- nsTextFrame* f = mappedFlow->mStartFrame;
-
- lastStyleContext = f->GetStyleContext();
- // Detect use of text-transform or font-variant anywhere in the run
- textStyle = f->GetStyleText();
- if (NS_STYLE_TEXT_TRANSFORM_NONE != textStyle->mTextTransform) {
- anyTextTransformStyle = PR_TRUE;
- }
- textFlags |= GetSpacingFlags(StyleToCoord(textStyle->mLetterSpacing));
- textFlags |= GetSpacingFlags(textStyle->mWordSpacing);
- nsTextFrameUtils::CompressionMode compression =
- CSSWhitespaceToCompressionMode[textStyle->mWhiteSpace];
- if (enabledJustification && !textStyle->WhiteSpaceIsSignificant()) {
- textFlags |= gfxTextRunFactory::TEXT_ENABLE_SPACING;
- }
- fontStyle = f->GetStyleFont();
- if (NS_STYLE_FONT_VARIANT_SMALL_CAPS == fontStyle->mFont.variant) {
- anySmallcapsStyle = PR_TRUE;
- }
-
- // Figure out what content is included in this flow.
- nsIContent* content = f->GetContent();
- const nsTextFragment* frag = f->GetFragment();
- PRInt32 contentStart = mappedFlow->mStartFrame->GetContentOffset();
- PRInt32 contentEnd = mappedFlow->GetContentEnd();
- PRInt32 contentLength = contentEnd - contentStart;
-
- TextRunMappedFlow* newFlow = &userData->mMappedFlows[i];
- newFlow->mStartFrame = mappedFlow->mStartFrame;
- newFlow->mDOMOffsetToBeforeTransformOffset = builder.GetCharCount() -
- mappedFlow->mStartFrame->GetContentOffset();
- newFlow->mContentLength = contentLength;
-
- while (nextBreakBeforeFrame && nextBreakBeforeFrame->GetContent() == content) {
- textBreakPoints.AppendElement(
- nextBreakBeforeFrame->GetContentOffset() + newFlow->mDOMOffsetToBeforeTransformOffset);
- nextBreakBeforeFrame = GetNextBreakBeforeFrame(&nextBreakIndex);
- }
-
- PRUint32 analysisFlags;
- if (frag->Is2b()) {
- NS_ASSERTION(mDoubleByteText, "Wrong buffer char size!");
- PRUnichar* bufStart = static_cast<PRUnichar*>(aTextBuffer);
- PRUnichar* bufEnd = nsTextFrameUtils::TransformText(
- frag->Get2b() + contentStart, contentLength, bufStart,
- compression, &mNextRunContextInfo, &builder, &analysisFlags);
- aTextBuffer = bufEnd;
- } else {
- if (mDoubleByteText) {
- // Need to expand the text. First transform it into a temporary buffer,
- // then expand.
- nsAutoTArray<PRUint8,BIG_TEXT_NODE_SIZE> tempBuf;
- if (!tempBuf.AppendElements(contentLength)) {
- DestroyUserData(userDataToDestroy);
- return nsnull;
- }
- PRUint8* bufStart = tempBuf.Elements();
- PRUint8* end = nsTextFrameUtils::TransformText(
- reinterpret_cast(frag->Get1b()) + contentStart, contentLength,
- bufStart, compression, &mNextRunContextInfo, &builder, &analysisFlags);
- aTextBuffer = ExpandBuffer(static_cast<PRUnichar*>(aTextBuffer),
- tempBuf.Elements(), end - tempBuf.Elements());
- } else {
- PRUint8* bufStart = static_cast<PRUint8*>(aTextBuffer);
- PRUint8* end = nsTextFrameUtils::TransformText(
- reinterpret_cast(frag->Get1b()) + contentStart, contentLength,
- bufStart, compression, &mNextRunContextInfo, &builder, &analysisFlags);
- aTextBuffer = end;
- }
- }
- textFlags |= analysisFlags;
-
- currentTransformedTextOffset =
- (static_cast<const PRUint8*>(aTextBuffer) - static_cast<const PRUint8*>(textPtr)) >> mDoubleByteText;
-
- endOfLastContent = contentEnd;
- }
-
- // Check for out-of-memory in gfxSkipCharsBuilder
- if (!builder.IsOK()) {
- DestroyUserData(userDataToDestroy);
- return nsnull;
- }
-
- void* finalUserData;
- if (userData == &dummyData) {
- textFlags |= nsTextFrameUtils::TEXT_IS_SIMPLE_FLOW;
- userData = nsnull;
- finalUserData = mMappedFlows[0].mStartFrame;
- } else {
- finalUserData = userData;
- }
-
- PRUint32 transformedLength = currentTransformedTextOffset;
-
- // Now build the textrun
- nsTextFrame* firstFrame = mMappedFlows[0].mStartFrame;
- gfxFontGroup* fontGroup = GetFontGroupForFrame(firstFrame);
- if (!fontGroup) {
- DestroyUserData(userDataToDestroy);
- return nsnull;
- }
-
- if (textFlags & nsTextFrameUtils::TEXT_HAS_TAB) {
- textFlags |= gfxTextRunFactory::TEXT_ENABLE_SPACING;
- }
- if (textFlags & nsTextFrameUtils::TEXT_HAS_SHY) {
- textFlags |= gfxTextRunFactory::TEXT_ENABLE_HYPHEN_BREAKS;
- }
- if (mBidiEnabled && (NS_GET_EMBEDDING_LEVEL(firstFrame) & 1)) {
- textFlags |= gfxTextRunFactory::TEXT_IS_RTL;
- }
- if (mNextRunContextInfo & nsTextFrameUtils::INCOMING_WHITESPACE) {
- textFlags |= nsTextFrameUtils::TEXT_TRAILING_WHITESPACE;
- }
- if (mNextRunContextInfo & nsTextFrameUtils::INCOMING_ARABICCHAR) {
- textFlags |= gfxTextRunWordCache::TEXT_TRAILING_ARABICCHAR;
- }
- // ContinueTextRunAcrossFrames guarantees that it doesn't matter which
- // frame's style is used, so use the last frame's
- textFlags |= nsLayoutUtils::GetTextRunFlagsForStyle(lastStyleContext,
- textStyle, fontStyle);
- // XXX this is a bit of a hack. For performance reasons, if we're favouring
- // performance over quality, don't try to get accurate glyph extents.
- if (!(textFlags & gfxTextRunFactory::TEXT_OPTIMIZE_SPEED)) {
- textFlags |= gfxTextRunFactory::TEXT_NEED_BOUNDING_BOX;
- }
-
- gfxSkipChars skipChars;
- skipChars.TakeFrom(&builder);
- // Convert linebreak coordinates to transformed string offsets
- NS_ASSERTION(nextBreakIndex == mLineBreakBeforeFrames.Length(),
- "Didn't find all the frames to break-before...");
- gfxSkipCharsIterator iter(skipChars);
- nsAutoTArray<PRUint32,50> textBreakPointsAfterTransform;
- for (i = 0; i < textBreakPoints.Length(); ++i) {
- nsTextFrameUtils::AppendLineBreakOffset(&textBreakPointsAfterTransform,
- iter.ConvertOriginalToSkipped(textBreakPoints[i]));
- }
- if (mStartOfLine) {
- nsTextFrameUtils::AppendLineBreakOffset(&textBreakPointsAfterTransform,
- transformedLength);
- }
-
- // Setup factory chain
- nsAutoPtr<nsTransformingTextRunFactory> transformingFactory;
- if (anySmallcapsStyle) {
- transformingFactory = new nsFontVariantTextRunFactory();
- }
- if (anyTextTransformStyle) {
- transformingFactory =
- new nsCaseTransformTextRunFactory(transformingFactory.forget());
- }
- nsTArray<nsStyleContext*> styles;
- if (transformingFactory) {
- iter.SetOriginalOffset(0);
- for (i = 0; i < mMappedFlows.Length(); ++i) {
- MappedFlow* mappedFlow = &mMappedFlows[i];
- nsTextFrame* f;
- for (f = mappedFlow->mStartFrame; f != mappedFlow->mEndFrame;
- f = static_cast<nsTextFrame*>(f->GetNextContinuation())) {
- PRUint32 offset = iter.GetSkippedOffset();
- iter.AdvanceOriginal(f->GetContentLength());
- PRUint32 end = iter.GetSkippedOffset();
- nsStyleContext* sc = f->GetStyleContext();
- PRUint32 j;
- for (j = offset; j < end; ++j) {
- styles.AppendElement(sc);
- }
- }
- }
- textFlags |= nsTextFrameUtils::TEXT_IS_TRANSFORMED;
- NS_ASSERTION(iter.GetSkippedOffset() == transformedLength,
- "We didn't cover all the characters in the text run!");
- }
-
- gfxTextRun* textRun;
- gfxTextRunFactory::Parameters params =
- { mContext, finalUserData, &skipChars,
- textBreakPointsAfterTransform.Elements(), textBreakPointsAfterTransform.Length(),
- firstFrame->PresContext()->AppUnitsPerDevPixel() };
-
- if (mDoubleByteText) {
- const PRUnichar* text = static_cast<const PRUnichar*>(textPtr);
- if (transformingFactory) {
- textRun = transformingFactory->MakeTextRun(text, transformedLength, &params,
- fontGroup, textFlags, styles.Elements());
- if (textRun) {
- // ownership of the factory has passed to the textrun
- transformingFactory.forget();
- }
- } else {
- textRun = MakeTextRun(text, transformedLength, fontGroup, &params, textFlags);
- }
- } else {
- const PRUint8* text = static_cast<const PRUint8*>(textPtr);
- textFlags |= gfxFontGroup::TEXT_IS_8BIT;
- if (transformingFactory) {
- textRun = transformingFactory->MakeTextRun(text, transformedLength, &params,
- fontGroup, textFlags, styles.Elements());
- if (textRun) {
- // ownership of the factory has passed to the textrun
- transformingFactory.forget();
- }
- } else {
- textRun = MakeTextRun(text, transformedLength, fontGroup, &params, textFlags);
- }
- }
- if (!textRun) {
- DestroyUserData(userDataToDestroy);
- return nsnull;
- }
-
- // We have to set these up after we've created the textrun, because
- // the breaks may be stored in the textrun during this very call.
- // This is a bit annoying because it requires another loop over the frames
- // making up the textrun, but I don't see a way to avoid this.
- SetupBreakSinksForTextRun(textRun, PR_FALSE, mSkipIncompleteTextRuns);
-
- if (mSkipIncompleteTextRuns) {
- mSkipIncompleteTextRuns = !TextContainsLineBreakerWhiteSpace(textPtr,
- transformedLength, mDoubleByteText);
- // Arrange for this textrun to be deleted the next time the linebreaker
- // is flushed out
- mTextRunsToDelete.AppendElement(textRun);
- // Since we're going to destroy the user data now, avoid a dangling
- // pointer. Strictly speaking we don't need to do this since it should
- // not be used (since this textrun will not be used and will be
- // itself deleted soon), but it's always better to not have dangling
- // pointers around.
- textRun->SetUserData(nsnull);
- DestroyUserData(userDataToDestroy);
- return nsnull;
- }
-
- // Actually wipe out the textruns associated with the mapped frames and associate
- // those frames with this text run.
- AssignTextRun(textRun);
- return textRun;
-}
-
-static PRBool
-HasCompressedLeadingWhitespace(nsTextFrame* aFrame, const nsStyleText* aStyleText,
- PRInt32 aContentEndOffset,
- const gfxSkipCharsIterator& aIterator)
-{
- if (!aIterator.IsOriginalCharSkipped())
- return PR_FALSE;
-
- gfxSkipCharsIterator iter = aIterator;
- PRInt32 frameContentOffset = aFrame->GetContentOffset();
- const nsTextFragment* frag = aFrame->GetFragment();
- while (frameContentOffset < aContentEndOffset && iter.IsOriginalCharSkipped()) {
- if (IsTrimmableSpace(frag, frameContentOffset, aStyleText))
- return PR_TRUE;
- ++frameContentOffset;
- iter.AdvanceOriginal(1);
- }
- return PR_FALSE;
-}
-
-void
-BuildTextRunsScanner::SetupBreakSinksForTextRun(gfxTextRun* aTextRun,
- PRBool aIsExistingTextRun,
- PRBool aSuppressSink)
-{
- // textruns have uniform language
- nsIAtom* lang = mMappedFlows[0].mStartFrame->GetStyleVisibility()->mLangGroup;
- // We keep this pointed at the skip-chars data for the current mappedFlow.
- // This lets us cheaply check whether the flow has compressed initial
- // whitespace...
- gfxSkipCharsIterator iter(aTextRun->GetSkipChars());
-
- PRUint32 i;
- for (i = 0; i < mMappedFlows.Length(); ++i) {
- MappedFlow* mappedFlow = &mMappedFlows[i];
- PRUint32 offset = iter.GetSkippedOffset();
- gfxSkipCharsIterator iterNext = iter;
- iterNext.AdvanceOriginal(mappedFlow->GetContentEnd() -
- mappedFlow->mStartFrame->GetContentOffset());
-
- nsAutoPtr<BreakSink>* breakSink = mBreakSinks.AppendElement(
- new BreakSink(aTextRun, mContext, offset, aIsExistingTextRun));
- if (!breakSink || !*breakSink)
- return;
-
- PRUint32 length = iterNext.GetSkippedOffset() - offset;
- PRUint32 flags = 0;
- nsIFrame* initialBreakController = mappedFlow->mAncestorControllingInitialBreak;
- if (!initialBreakController) {
- initialBreakController = mLineContainer;
- }
- if (!initialBreakController->GetStyleText()->WhiteSpaceCanWrap()) {
- flags |= nsLineBreaker::BREAK_SUPPRESS_INITIAL;
- }
- nsTextFrame* startFrame = mappedFlow->mStartFrame;
- const nsStyleText* textStyle = startFrame->GetStyleText();
- if (!textStyle->WhiteSpaceCanWrap()) {
- flags |= nsLineBreaker::BREAK_SUPPRESS_INSIDE;
- }
- if (aTextRun->GetFlags() & nsTextFrameUtils::TEXT_NO_BREAKS) {
- flags |= nsLineBreaker::BREAK_SKIP_SETTING_NO_BREAKS;
- }
- if (textStyle->mTextTransform == NS_STYLE_TEXT_TRANSFORM_CAPITALIZE) {
- flags |= nsLineBreaker::BREAK_NEED_CAPITALIZATION;
- }
-
- if (HasCompressedLeadingWhitespace(startFrame, textStyle,
- mappedFlow->GetContentEnd(), iter)) {
- mLineBreaker.AppendInvisibleWhitespace(flags);
- }
-
- if (length > 0) {
- BreakSink* sink = aSuppressSink ? nsnull : (*breakSink).get();
- if (aTextRun->GetFlags() & gfxFontGroup::TEXT_IS_8BIT) {
- mLineBreaker.AppendText(lang, aTextRun->GetText8Bit() + offset,
- length, flags, sink);
- } else {
- mLineBreaker.AppendText(lang, aTextRun->GetTextUnicode() + offset,
- length, flags, sink);
- }
- }
-
- iter = iterNext;
- }
-}
-
-void
-BuildTextRunsScanner::AssignTextRun(gfxTextRun* aTextRun)
-{
- PRUint32 i;
- for (i = 0; i < mMappedFlows.Length(); ++i) {
- MappedFlow* mappedFlow = &mMappedFlows[i];
- nsTextFrame* startFrame = mappedFlow->mStartFrame;
- nsTextFrame* endFrame = mappedFlow->mEndFrame;
- nsTextFrame* f;
- for (f = startFrame; f != endFrame;
- f = static_cast<nsTextFrame*>(f->GetNextContinuation())) {
-#ifdef DEBUG_roc
- if (f->GetTextRun()) {
- gfxTextRun* textRun = f->GetTextRun();
- if (textRun->GetFlags() & nsTextFrameUtils::TEXT_IS_SIMPLE_FLOW) {
- if (mMappedFlows[0].mStartFrame != static_cast<nsTextFrame*>(textRun->GetUserData())) {
- NS_WARNING("REASSIGNING SIMPLE FLOW TEXT RUN!");
- }
- } else {
- TextRunUserData* userData =
- static_cast<TextRunUserData*>(textRun->GetUserData());
-
- if (PRUint32(userData->mMappedFlowCount) >= mMappedFlows.Length() ||
- userData->mMappedFlows[userData->mMappedFlowCount - 1].mStartFrame !=
- mMappedFlows[userData->mMappedFlowCount - 1].mStartFrame) {
- NS_WARNING("REASSIGNING MULTIFLOW TEXT RUN (not append)!");
- }
- }
- }
-#endif
- f->ClearTextRun();
- f->SetTextRun(aTextRun);
- }
- // Set this bit now; we can't set it any earlier because
- // f->ClearTextRun() might clear it out.
- startFrame->AddStateBits(TEXT_IN_TEXTRUN_USER_DATA);
- }
-}
-
-gfxSkipCharsIterator
-nsTextFrame::EnsureTextRun(gfxContext* aReferenceContext, nsIFrame* aLineContainer,
- const nsLineList::iterator* aLine,
- PRUint32* aFlowEndInTextRun)
-{
- if (mTextRun && (!aLine || !(*aLine)->GetInvalidateTextRuns())) {
- if (mTextRun->GetExpirationState()->IsTracked()) {
- gTextRuns->MarkUsed(mTextRun);
- }
- } else {
- nsRefPtr<gfxContext> ctx = aReferenceContext;
- if (!ctx) {
- ctx = GetReferenceRenderingContext(this, nsnull);
- }
- if (ctx) {
- BuildTextRuns(ctx, this, aLineContainer, aLine);
- }
- if (!mTextRun) {
- // A text run was not constructed for this frame. This is bad. The caller
- // will check mTextRun.
- static const gfxSkipChars emptySkipChars;
- return gfxSkipCharsIterator(emptySkipChars, 0);
- }
- }
-
- if (mTextRun->GetFlags() & nsTextFrameUtils::TEXT_IS_SIMPLE_FLOW) {
- if (aFlowEndInTextRun) {
- *aFlowEndInTextRun = mTextRun->GetLength();
- }
- return gfxSkipCharsIterator(mTextRun->GetSkipChars(), 0, mContentOffset);
- }
-
- TextRunUserData* userData = static_cast<TextRunUserData*>(mTextRun->GetUserData());
- // Find the flow that contains us
- PRInt32 direction;
- PRInt32 startAt = userData->mLastFlowIndex;
- // Search first forward and then backward from the current position
- for (direction = 1; direction >= -1; direction -= 2) {
- PRInt32 i;
- for (i = startAt; 0 <= i && i < userData->mMappedFlowCount; i += direction) {
- TextRunMappedFlow* flow = &userData->mMappedFlows[i];
- if (flow->mStartFrame->GetContent() == mContent) {
- // Since textruns can only contain one flow for a given content element,
- // this must be our flow.
- userData->mLastFlowIndex = i;
- gfxSkipCharsIterator iter(mTextRun->GetSkipChars(),
- flow->mDOMOffsetToBeforeTransformOffset, mContentOffset);
- if (aFlowEndInTextRun) {
- if (i + 1 < userData->mMappedFlowCount) {
- gfxSkipCharsIterator end(mTextRun->GetSkipChars());
- *aFlowEndInTextRun = end.ConvertOriginalToSkipped(
- flow[1].mStartFrame->GetContentOffset() + flow[1].mDOMOffsetToBeforeTransformOffset);
- } else {
- *aFlowEndInTextRun = mTextRun->GetLength();
- }
- }
- return iter;
- }
- ++flow;
- }
- startAt = userData->mLastFlowIndex - 1;
- }
- NS_ERROR("Can't find flow containing this frame???");
- static const gfxSkipChars emptySkipChars;
- return gfxSkipCharsIterator(emptySkipChars, 0);
-}
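-
- // The mLastFlowIndex cache makes the flow lookup above cheap in the common
- // case: consecutive EnsureTextRun calls usually come from nearby frames, so
- // the search radiates out from the previously found flow, first forward and
- // then backward.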
-
-static PRUint32
-GetEndOfTrimmedText(const nsTextFragment* aFrag, const nsStyleText* aStyleText,
- PRUint32 aStart, PRUint32 aEnd,
- gfxSkipCharsIterator* aIterator)
-{
- aIterator->SetSkippedOffset(aEnd);
- while (aIterator->GetSkippedOffset() > aStart) {
- aIterator->AdvanceSkipped(-1);
- if (!IsTrimmableSpace(aFrag, aIterator->GetOriginalOffset(), aStyleText))
- return aIterator->GetSkippedOffset() + 1;
- }
- return aStart;
-}
-
-nsTextFrame::TrimmedOffsets
-nsTextFrame::GetTrimmedOffsets(const nsTextFragment* aFrag,
- PRBool aTrimAfter)
-{
- NS_ASSERTION(mTextRun, "Need textrun here");
- // This should not be used during reflow. We need our TEXT_REFLOW_FLAGS
- // to be set correctly.
- NS_ASSERTION(!(GetStateBits() & NS_FRAME_FIRST_REFLOW),
- "Can only call this on frames that have been reflowed");
- NS_ASSERTION(!(GetStateBits() & NS_FRAME_IN_REFLOW),
- "Can only call this on frames that are not being reflowed");
-
- TrimmedOffsets offsets = { GetContentOffset(), GetContentLength() };
- const nsStyleText* textStyle = GetStyleText();
- // Note that pre-line newlines should still allow us to trim spaces
- // for display
- if (textStyle->WhiteSpaceIsSignificant())
- return offsets;
-
- if (GetStateBits() & TEXT_START_OF_LINE) {
- PRInt32 whitespaceCount =
- GetTrimmableWhitespaceCount(aFrag,
- offsets.mStart, offsets.mLength, 1);
- offsets.mStart += whitespaceCount;
- offsets.mLength -= whitespaceCount;
- }
-
- if (aTrimAfter && (GetStateBits() & TEXT_END_OF_LINE)) {
- // This treats a trailing 'pre-line' newline as trimmable. That's fine,
- // it's actually what we want since we want whitespace before it to
- // be trimmed.
- PRInt32 whitespaceCount =
- GetTrimmableWhitespaceCount(aFrag,
- offsets.GetEnd() - 1, offsets.mLength, -1);
- offsets.mLength -= whitespaceCount;
- }
- return offsets;
-}
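-
- // For illustration: a frame mapping "  foo  " that both starts and ends its
- // line (with aTrimAfter true and white-space not significant) comes back
- // with offsets covering just "foo"; the leading and trailing spaces are
- // excluded from display.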
-
-/*
- * Currently only Unicode characters below 0x10000 have their spacing modified
- * by justification. If characters above 0x10000 turn out to need
- * justification spacing, that will require extra work. Currently,
- * this function must not include 0xd800 to 0xdbff because these characters
- * are surrogates.
- */
-static PRBool IsJustifiableCharacter(const nsTextFragment* aFrag, PRInt32 aPos,
- PRBool aLangIsCJ)
-{
- PRUnichar ch = aFrag->CharAt(aPos);
- if (ch == '\n' || ch == '\t')
- return PR_TRUE;
- if (ch == ' ') {
- // Don't justify spaces that are combined with diacriticals
- if (!aFrag->Is2b())
- return PR_TRUE;
- return !nsTextFrameUtils::IsSpaceCombiningSequenceTail(
- aFrag->Get2b() + aPos + 1, aFrag->GetLength() - (aPos + 1));
- }
- if (ch < 0x2150u)
- return PR_FALSE;
- if (aLangIsCJ && (
- (0x2150u <= ch && ch <= 0x22ffu) || // Number Forms, Arrows, Mathematical Operators
- (0x2460u <= ch && ch <= 0x24ffu) || // Enclosed Alphanumerics
- (0x2580u <= ch && ch <= 0x27bfu) || // Block Elements, Geometric Shapes, Miscellaneous Symbols, Dingbats
- (0x27f0u <= ch && ch <= 0x2bffu) || // Supplemental Arrows-A, Braille Patterns, Supplemental Arrows-B,
- // Miscellaneous Mathematical Symbols-B, Supplemental Mathematical Operators,
- // Miscellaneous Symbols and Arrows
- (0x2e80u <= ch && ch <= 0x312fu) || // CJK Radicals Supplement, CJK Radicals Supplement,
- // Ideographic Description Characters, CJK Symbols and Punctuation,
- // Hiragana, Katakana, Bopomofo
- (0x3190u <= ch && ch <= 0xabffu) || // Kanbun, Bopomofo Extended, Katakana Phonetic Extensions,
- // Enclosed CJK Letters and Months, CJK Compatibility,
- // CJK Unified Ideographs Extension A, Yijing Hexagram Symbols,
- // CJK Unified Ideographs, Yi Syllables, Yi Radicals
- (0xf900u <= ch && ch <= 0xfaffu) || // CJK Compatibility Ideographs
- (0xff5eu <= ch && ch <= 0xff9fu) // Halfwidth and Fullwidth Forms(a part)
- ))
- return PR_TRUE;
- return PR_FALSE;
-}
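-
- // For example, with aLangIsCJ true, U+3042 HIRAGANA LETTER A (in the
- // 0x2e80-0x312f range) receives justification spacing, while ASCII 'a' does
- // not; a plain space is justifiable in any language unless it is the base
- // of a combining sequence.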
-
-static void ClearMetrics(nsHTMLReflowMetrics& aMetrics)
-{
- aMetrics.width = 0;
- aMetrics.height = 0;
- aMetrics.ascent = 0;
-}
-
-static PRInt32 FindChar(const nsTextFragment* frag,
- PRInt32 aOffset, PRInt32 aLength, PRUnichar ch)
-{
- PRInt32 i = 0;
- if (frag->Is2b()) {
- const PRUnichar* str = frag->Get2b() + aOffset;
- for (; i < aLength; ++i) {
- if (*str == ch)
- return i + aOffset;
- ++str;
- }
- } else {
- if (PRUint16(ch) <= 0xFF) {
- const char* str = frag->Get1b() + aOffset;
- const void* p = memchr(str, ch, aLength);
- if (p)
- return (static_cast<const char*>(p) - str) + aOffset;
- }
- }
- return -1;
-}
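-
- // Note the asymmetry: 2-byte text is scanned with an explicit loop, while
- // 1-byte text can use memchr. The memchr path is only valid when the sought
- // character itself fits in one byte (ch <= 0xFF); otherwise no match is
- // possible in 8-bit text and -1 falls out.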
-
-static PRBool IsChineseJapaneseLangGroup(nsIFrame* aFrame)
-{
- nsIAtom* langGroup = aFrame->GetStyleVisibility()->mLangGroup;
- return langGroup == nsGkAtoms::Japanese
- || langGroup == nsGkAtoms::Chinese
- || langGroup == nsGkAtoms::Taiwanese
- || langGroup == nsGkAtoms::HongKongChinese;
-}
-
-#ifdef DEBUG
-static PRBool IsInBounds(const gfxSkipCharsIterator& aStart, PRInt32 aContentLength,
- PRUint32 aOffset, PRUint32 aLength) {
- if (aStart.GetSkippedOffset() > aOffset)
- return PR_FALSE;
- if (aContentLength == PR_INT32_MAX)
- return PR_TRUE;
- gfxSkipCharsIterator iter(aStart);
- iter.AdvanceOriginal(aContentLength);
- return iter.GetSkippedOffset() >= aOffset + aLength;
-}
-#endif
-
-class NS_STACK_CLASS PropertyProvider : public gfxTextRun::PropertyProvider {
-public:
- /**
- * Use this constructor for reflow, when we don't know what text is
- * really mapped by the frame and we have a lot of other data around.
- *
- * @param aLength can be PR_INT32_MAX to indicate we cover all the text
- * associated with aFrame up to where its flow chain ends in the given
- * textrun. If PR_INT32_MAX is passed, justification and hyphen-related methods
- * cannot be called, nor can GetOriginalLength().
- */
- PropertyProvider(gfxTextRun* aTextRun, const nsStyleText* aTextStyle,
- const nsTextFragment* aFrag, nsTextFrame* aFrame,
- const gfxSkipCharsIterator& aStart, PRInt32 aLength,
- nsIFrame* aLineContainer,
- nscoord aOffsetFromBlockOriginForTabs)
- : mTextRun(aTextRun), mFontGroup(nsnull),
- mTextStyle(aTextStyle), mFrag(aFrag),
- mLineContainer(aLineContainer),
- mFrame(aFrame), mStart(aStart), mTempIterator(aStart),
- mTabWidths(nsnull), mLength(aLength),
- mWordSpacing(mTextStyle->mWordSpacing),
- mLetterSpacing(StyleToCoord(mTextStyle->mLetterSpacing)),
- mJustificationSpacing(0),
- mHyphenWidth(-1),
- mOffsetFromBlockOriginForTabs(aOffsetFromBlockOriginForTabs),
- mReflowing(PR_TRUE)
- {
- NS_ASSERTION(mStart.IsInitialized(), "Start not initialized?");
- }
-
- /**
- * Use this constructor after the frame has been reflowed and we don't
- * have other data around. Gets everything from the frame. EnsureTextRun
- * *must* be called before this!!!
- */
- PropertyProvider(nsTextFrame* aFrame, const gfxSkipCharsIterator& aStart)
- : mTextRun(aFrame->GetTextRun()), mFontGroup(nsnull),
- mTextStyle(aFrame->GetStyleText()),
- mFrag(aFrame->GetFragment()),
- mLineContainer(nsnull),
- mFrame(aFrame), mStart(aStart), mTempIterator(aStart),
- mTabWidths(nsnull),
- mLength(aFrame->GetContentLength()),
- mWordSpacing(mTextStyle->mWordSpacing),
- mLetterSpacing(StyleToCoord(mTextStyle->mLetterSpacing)),
- mJustificationSpacing(0),
- mHyphenWidth(-1),
- mOffsetFromBlockOriginForTabs(0),
- mReflowing(PR_FALSE)
- {
- NS_ASSERTION(mTextRun, "Textrun not initialized!");
- }
-
- // Call this after construction if you're not going to reflow the text
- void InitializeForDisplay(PRBool aTrimAfter);
-
- virtual void GetSpacing(PRUint32 aStart, PRUint32 aLength, Spacing* aSpacing);
- virtual gfxFloat GetHyphenWidth();
- virtual void GetHyphenationBreaks(PRUint32 aStart, PRUint32 aLength,
- PRPackedBool* aBreakBefore);
-
- void GetSpacingInternal(PRUint32 aStart, PRUint32 aLength, Spacing* aSpacing,
- PRBool aIgnoreTabs);
-
- /**
- * Count the number of justifiable characters in the given DOM range
- */
- PRUint32 ComputeJustifiableCharacters(PRInt32 aOffset, PRInt32 aLength);
- /**
- * Find the start and end of the justifiable characters. Does not depend on the
- * position of aStart or aEnd, although it's most efficient if they are near the
- * start and end of the text frame.
- */
- void FindJustificationRange(gfxSkipCharsIterator* aStart,
- gfxSkipCharsIterator* aEnd);
-
- const nsStyleText* GetStyleText() { return mTextStyle; }
- nsTextFrame* GetFrame() { return mFrame; }
- // This may not be equal to the frame offset/length because we may have
- // adjusted for whitespace trimming according to the state bits set in the frame
- // (for the static provider)
- const gfxSkipCharsIterator& GetStart() { return mStart; }
- // May return PR_INT32_MAX if that was given to the constructor
- PRUint32 GetOriginalLength() {
- NS_ASSERTION(mLength != PR_INT32_MAX, "Length not known");
- return mLength;
- }
- const nsTextFragment* GetFragment() { return mFrag; }
-
- gfxFontGroup* GetFontGroup() {
- if (!mFontGroup)
- InitFontGroupAndFontMetrics();
- return mFontGroup;
- }
-
- nsIFontMetrics* GetFontMetrics() {
- if (!mFontMetrics)
- InitFontGroupAndFontMetrics();
- return mFontMetrics;
- }
-
- gfxFloat* GetTabWidths(PRUint32 aTransformedStart, PRUint32 aTransformedLength);
-
- const gfxSkipCharsIterator& GetEndHint() { return mTempIterator; }
-
-protected:
- void SetupJustificationSpacing();
-
- void InitFontGroupAndFontMetrics() {
- mFontGroup = GetFontGroupForFrame(mFrame, getter_AddRefs(mFontMetrics));
- }
-
- gfxTextRun* mTextRun;
- gfxFontGroup* mFontGroup;
- nsCOMPtr<nsIFontMetrics> mFontMetrics;
- const nsStyleText* mTextStyle;
- const nsTextFragment* mFrag;
- nsIFrame* mLineContainer;
- nsTextFrame* mFrame;
- gfxSkipCharsIterator mStart; // Offset in original and transformed string
- gfxSkipCharsIterator mTempIterator;
-
- // Widths for each transformed string character, 0 for non-tab characters.
- // Either null, or pointing to the frame's tabWidthProperty.
- nsTArray<gfxFloat>* mTabWidths;