case nir_op_umin:
dst[0] = ir3_MIN_U(b, src[0], 0, src[1], 0);
break;
- case nir_op_imul:
- if (bs[0] > 16 || bs[1] > 16) {
- /*
- * dst = (al * bl) + (ah * bl << 16) + (al * bh << 16)
- * mull.u tmp0, a, b ; mul low, i.e. al * bl
- * madsh.m16 tmp1, a, b, tmp0 ; mul-add shift high mix,
- * ; i.e. ah * bl << 16
- * madsh.m16 dst, b, a, tmp1 ; i.e. al * bh << 16
- */
- dst[0] = ir3_MADSH_M16(b, src[1], 0, src[0], 0,
- ir3_MADSH_M16(b, src[0], 0, src[1], 0,
- ir3_MULL_U(b, src[0], 0,
- src[1], 0), 0), 0);
- } else {
- dst[0] = ir3_MUL_S(b, src[0], 0, src[1], 0);
- }
- break;
case nir_op_umul_low:
dst[0] = ir3_MULL_U(b, src[0], 0, src[1], 0);
break;
*/
NIR_PASS_V(ctx->s, nir_lower_bool_to_int32);
NIR_PASS_V(ctx->s, nir_lower_locals_to_regs);
+
+ /* We want to lower nir_op_imul as late as possible, to catch also
+ * those generated by earlier passes (e.g., nir_lower_locals_to_regs).
+ * However, we want a final swing of a few passes to have a chance
+ * at optimizing the result.
+ */
+ bool progress;
+ NIR_PASS(progress, ctx->s, ir3_nir_lower_imul);
+ if (progress) {
+ NIR_PASS_V(ctx->s, nir_opt_algebraic);
+ NIR_PASS_V(ctx->s, nir_opt_copy_prop_vars);
+ NIR_PASS_V(ctx->s, nir_opt_dead_write_vars);
+ NIR_PASS_V(ctx->s, nir_opt_dce);
+ NIR_PASS_V(ctx->s, nir_opt_constant_folding);
+ }
+
NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);
if (ir3_shader_debug & IR3_DBG_DISASM) {