Index: src/backend/optimizer/path/costsize.c
===================================================================
RCS file: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v
retrieving revision 1.169
diff -c -r1.169 costsize.c
*** src/backend/optimizer/path/costsize.c	11 Nov 2006 01:14:19 -0000	1.169
--- src/backend/optimizer/path/costsize.c	10 Dec 2006 19:19:56 -0000
***************
*** 614,619 ****
--- 614,626 ----
  	{
  		*cost = ((IndexPath *) path)->indextotalcost;
  		*selec = ((IndexPath *) path)->indexselectivity;
+ 		/*
+ 		 * Charge a small amount per retrieved tuple to reflect the costs of
+ 		 * manipulating the bitmap.  This is mostly to make sure that a bitmap
+ 		 * scan doesn't look to be the same cost as an indexscan to retrieve
+ 		 * a single tuple.
+ 		 */
+ 		*cost += 0.1 * cpu_operator_cost * ((IndexPath *) path)->rows;
  	}
  	else if (IsA(path, BitmapAndPath))
  	{
Index: src/backend/utils/adt/selfuncs.c
===================================================================
RCS file: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v
retrieving revision 1.214
diff -c -r1.214 selfuncs.c
*** src/backend/utils/adt/selfuncs.c	4 Oct 2006 00:29:59 -0000	1.214
--- src/backend/utils/adt/selfuncs.c	10 Dec 2006 19:19:56 -0000
***************
*** 4673,4686 ****
  	 * way in order to get the right answer for partial indexes.
  	 */
  	if (numIndexTuples <= 0.0)
  		numIndexTuples = *indexSelectivity * index->rel->tuples;
  
! 	/*
! 	 * The estimate obtained so far counts all the tuples returned by all
! 	 * scans of ScalarArrayOpExpr calls.  We want to consider the per-scan
! 	 * number, so adjust.  This is a handy place to round to integer, too.
! 	 */
! 	numIndexTuples = rint(numIndexTuples / num_sa_scans);
  
  	/*
  	 * We can bound the number of tuples by the index size in any case. Also,
--- 4673,4690 ----
  	 * way in order to get the right answer for partial indexes.
  	 */
  	if (numIndexTuples <= 0.0)
+ 	{
  		numIndexTuples = *indexSelectivity * index->rel->tuples;
  
! 		/*
! 		 * The above calculation counts all the tuples visited across all
! 		 * scans induced by ScalarArrayOpExpr nodes.  We want to consider the
! 		 * average per-indexscan number, so adjust.  This is a handy place to
! 		 * round to integer, too.  (If caller supplied tuple estimate, it's
! 		 * responsible for handling these considerations.)
! 		 */
! 		numIndexTuples = rint(numIndexTuples / num_sa_scans);
! 	}
  
  	/*
  	 * We can bound the number of tuples by the index size in any case. Also,
***************
*** 4786,4792 ****
  	 * evaluated once at the start of the scan to reduce them to runtime keys
  	 * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
  	 * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
! 	 * indexqual operator.
  	 *
  	 * Note: this neglects the possible costs of rechecking lossy operators
  	 * and OR-clause expressions.  Detecting that that might be needed seems
--- 4790,4798 ----
  	 * evaluated once at the start of the scan to reduce them to runtime keys
  	 * to pass to the index AM (see nodeIndexscan.c).  We model the per-tuple
  	 * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
! 	 * indexqual operator.  Because we have numIndexTuples as a per-scan
! 	 * number, we have to multiply by num_sa_scans to get the correct result
! 	 * for ScalarArrayOpExpr cases.
  	 *
  	 * Note: this neglects the possible costs of rechecking lossy operators
  	 * and OR-clause expressions.  Detecting that that might be needed seems
***************
*** 4801,4807 ****
  		qual_arg_cost = 0;
  	*indexStartupCost = qual_arg_cost;
  	*indexTotalCost += qual_arg_cost;
! 	*indexTotalCost += numIndexTuples * (cpu_index_tuple_cost + qual_op_cost);
  
  	/*
  	 * Generic assumption about index correlation: there isn't any.
--- 4807,4826 ----
  		qual_arg_cost = 0;
  	*indexStartupCost = qual_arg_cost;
  	*indexTotalCost += qual_arg_cost;
! 	*indexTotalCost += numIndexTuples * num_sa_scans * (cpu_index_tuple_cost + qual_op_cost);
! 
! 	/*
! 	 * We also add a CPU-cost component to represent the general costs of
! 	 * starting an indexscan (such as analysis of btree index keys).  This
! 	 * is estimated at 100x cpu_operator_cost, which is a bit arbitrary but
! 	 * seems the right order of magnitude.
! 	 *
! 	 * Although this is startup cost with respect to any one scan, we add
! 	 * it to the "total" cost component because it's only very interesting
! 	 * in the many-ScalarArrayOpExpr-scan case, and there it will be paid
! 	 * over the life of the scan node.
! 	 */
! 	*indexTotalCost += num_sa_scans * 100.0 * cpu_operator_cost;
  
  	/*
  	 * Generic assumption about index correlation: there isn't any.
***************
*** 4829,4834 ****
--- 4848,4854 ----
  	int			indexcol;
  	bool		eqQualHere;
  	bool		found_saop;
+ 	double		num_sa_scans;
  	ListCell   *l;
  
  	/*
***************
*** 4852,4857 ****
--- 4872,4878 ----
  	indexcol = 0;
  	eqQualHere = false;
  	found_saop = false;
+ 	num_sa_scans = 1;
  	foreach(l, indexQuals)
  	{
  		RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
***************
*** 4928,4933 ****
--- 4949,4963 ----
  			Assert(op_strategy != 0);	/* not a member of opclass?? */
  			if (op_strategy == BTEqualStrategyNumber)
  				eqQualHere = true;
+ 			/* count up number of SA scans induced by indexBoundQuals only */
+ 			if (IsA(clause, ScalarArrayOpExpr))
+ 			{
+ 				ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
+ 				int			alength = estimate_array_length(lsecond(saop->args));
+ 
+ 				if (alength > 1)
+ 					num_sa_scans *= alength;
+ 			}
  			indexBoundQuals = lappend(indexBoundQuals, rinfo);
  		}
  
***************
*** 4949,4954 ****
--- 4979,4990 ----
  											  index->rel->relid,
  											  JOIN_INNER);
  		numIndexTuples = btreeSelectivity * index->rel->tuples;
+ 		/*
+ 		 * As in genericcostestimate(), we have to adjust for any
+ 		 * ScalarArrayOpExpr quals included in indexBoundQuals, and then
+ 		 * round to integer.
+ 		 */
+ 		numIndexTuples = rint(numIndexTuples / num_sa_scans);
  	}
  
  	genericcostestimate(root, index, indexQuals, outer_rel, numIndexTuples,
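
For a feel of the magnitude of the new charges, here is a small standalone
sketch (not part of the patch) that plugs hypothetical numbers into the
revised formulas, using the stock default cost parameters
(cpu_index_tuple_cost = 0.005, cpu_operator_cost = 0.0025).  The scenario --
a btree indexqual "x = ANY(array)" with a 10-element array matching 1000
tuples in total, plus a 100-row bitmap path -- is made up purely for
illustration.

/*
 * Illustrative only -- not part of the patch.  Plugs hypothetical numbers
 * into the revised cost formulas using the stock default GUC settings
 * (cpu_index_tuple_cost = 0.005, cpu_operator_cost = 0.0025).
 */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double		cpu_index_tuple_cost = 0.005;
	double		cpu_operator_cost = 0.0025;
	double		qual_op_cost = cpu_operator_cost;	/* one indexqual operator */

	/* hypothetical: x = ANY(array) with 10 elements, 1000 matches in total */
	double		num_sa_scans = 10.0;
	double		tuples_all_scans = 1000.0;

	/* per-indexscan tuple count, as now computed before the CPU charges */
	double		numIndexTuples = rint(tuples_all_scans / num_sa_scans);

	/* per-tuple CPU charge, scaled back up by the number of SA scans */
	double		tuple_cost = numIndexTuples * num_sa_scans *
	(cpu_index_tuple_cost + qual_op_cost);

	/* new per-scan startup charge: 100x cpu_operator_cost per descent */
	double		descent_cost = num_sa_scans * 100.0 * cpu_operator_cost;

	/* new bitmap-manipulation charge from the costsize.c hunk, for 100 rows */
	double		bitmap_overhead = 0.1 * cpu_operator_cost * 100.0;

	printf("tuples per scan: %.0f\n", numIndexTuples);
	printf("tuple CPU cost:  %.4f\n", tuple_cost);		/* 1000 * 0.0075 = 7.5 */
	printf("descent charge:  %.4f\n", descent_cost);	/* 10 * 0.25 = 2.5 */
	printf("bitmap charge:   %.4f\n", bitmap_overhead);	/* 0.025 */
	return 0;
}

Under the old formula the same scenario would have been charged only the 7.5
tuple-CPU term; the extra 2.5 from the per-descent component is what keeps a
many-element = ANY() indexscan from being costed like a single probe.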