[mlir][NFC] update mlir/Dialect create APIs (14/n) #149920

Merged: 3 commits, Jul 24, 2025
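This patch is one step in a mechanical, no-functional-change migration: every `builder.create<OpTy>(loc, ...)` call in the touched files becomes the static `OpTy::create(builder, loc, ...)` form. A minimal before/after sketch of the rewrite, taken from the first hunk below:

```cpp
// Old form: create<OpTy> is a member template on the builder/rewriter.
Value bitcast = rewriter.create<vector::BitCastOp>(loc, allBitsVecType, val);

// New form: create is a static method on the op class; the builder moves to
// the first argument and everything else stays the same.
Value bitcast = vector::BitCastOp::create(rewriter, loc, allBitsVecType, val);
```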
29 changes: 15 additions & 14 deletions mlir/lib/Dialect/AMDGPU/Transforms/EmulateAtomics.cpp
@@ -99,8 +99,8 @@ static Value flattenVecToBits(ConversionPatternRewriter &rewriter, Location loc,
      vectorType.getElementTypeBitWidth() * vectorType.getNumElements();
  Type allBitsType = rewriter.getIntegerType(bitwidth);
  auto allBitsVecType = VectorType::get({1}, allBitsType);
-  Value bitcast = rewriter.create<vector::BitCastOp>(loc, allBitsVecType, val);
-  Value scalar = rewriter.create<vector::ExtractOp>(loc, bitcast, 0);
+  Value bitcast = vector::BitCastOp::create(rewriter, loc, allBitsVecType, val);
+  Value scalar = vector::ExtractOp::create(rewriter, loc, bitcast, 0);
  return scalar;
}

@@ -118,27 +118,27 @@ LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(

  SmallVector<NamedAttribute> loadAttrs;
  patchOperandSegmentSizes(origAttrs, loadAttrs, DataArgAction::Drop);
-  Value initialLoad =
-      rewriter.create<RawBufferLoadOp>(loc, dataType, invariantArgs, loadAttrs);
+  Value initialLoad = RawBufferLoadOp::create(rewriter, loc, dataType,
+                                              invariantArgs, loadAttrs);
  Block *currentBlock = rewriter.getInsertionBlock();
  Block *afterAtomic =
      rewriter.splitBlock(currentBlock, rewriter.getInsertionPoint());
  Block *loopBlock = rewriter.createBlock(afterAtomic, {dataType}, {loc});

  rewriter.setInsertionPointToEnd(currentBlock);
-  rewriter.create<cf::BranchOp>(loc, loopBlock, initialLoad);
+  cf::BranchOp::create(rewriter, loc, loopBlock, initialLoad);

  rewriter.setInsertionPointToEnd(loopBlock);
  Value prevLoad = loopBlock->getArgument(0);
-  Value operated = rewriter.create<ArithOp>(loc, data, prevLoad);
+  Value operated = ArithOp::create(rewriter, loc, data, prevLoad);
  dataType = operated.getType();

  SmallVector<NamedAttribute> cmpswapAttrs;
  patchOperandSegmentSizes(origAttrs, cmpswapAttrs, DataArgAction::Duplicate);
  SmallVector<Value> cmpswapArgs = {operated, prevLoad};
  cmpswapArgs.append(invariantArgs.begin(), invariantArgs.end());
-  Value atomicRes = rewriter.create<RawBufferAtomicCmpswapOp>(
-      loc, dataType, cmpswapArgs, cmpswapAttrs);
+  Value atomicRes = RawBufferAtomicCmpswapOp::create(rewriter, loc, dataType,
+                                                     cmpswapArgs, cmpswapAttrs);

  // We care about exact bitwise equality here, so do some bitcasts.
  // These will fold away during lowering to the ROCDL dialect, where
@@ -150,14 +150,15 @@ LogicalResult RawBufferAtomicByCasPattern<AtomicOp, ArithOp>::matchAndRewrite(
  if (auto floatDataTy = dyn_cast<FloatType>(dataType)) {
    Type equivInt = rewriter.getIntegerType(floatDataTy.getWidth());
    prevLoadForCompare =
-        rewriter.create<arith::BitcastOp>(loc, equivInt, prevLoad);
+        arith::BitcastOp::create(rewriter, loc, equivInt, prevLoad);
    atomicResForCompare =
-        rewriter.create<arith::BitcastOp>(loc, equivInt, atomicRes);
+        arith::BitcastOp::create(rewriter, loc, equivInt, atomicRes);
  }
-  Value canLeave = rewriter.create<arith::CmpIOp>(
-      loc, arith::CmpIPredicate::eq, atomicResForCompare, prevLoadForCompare);
-  rewriter.create<cf::CondBranchOp>(loc, canLeave, afterAtomic, ValueRange{},
-                                    loopBlock, atomicRes);
+  Value canLeave =
+      arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::eq,
+                            atomicResForCompare, prevLoadForCompare);
+  cf::CondBranchOp::create(rewriter, loc, canLeave, afterAtomic, ValueRange{},
+                           loopBlock, atomicRes);
  rewriter.eraseOp(atomicOp);
  return success();
}
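A note on the bitcast-before-compare step above: the CAS loop must decide whether the atomic wrote back exactly the bits it read, and floating-point `==` is the wrong predicate for that, since it treats +0.0 and -0.0 as equal and never matches NaN with itself. A standalone C++ illustration of the difference (not part of the patch; requires C++20 for `std::bit_cast`):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  float pz = 0.0f, nz = -0.0f;
  // Float equality says equal (prints 1), but the bit patterns differ (0).
  std::printf("%d %d\n", pz == nz,
              std::bit_cast<std::uint32_t>(pz) == std::bit_cast<std::uint32_t>(nz));

  float nan = std::numeric_limits<float>::quiet_NaN();
  // Float equality says unequal (0), but the bit patterns match (1).
  std::printf("%d %d\n", nan == nan,
              std::bit_cast<std::uint32_t>(nan) == std::bit_cast<std::uint32_t>(nan));
}
```

This is why the loop compares `arith.bitcast`-ed integers rather than the float values themselves.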
55 changes: 28 additions & 27 deletions mlir/lib/Dialect/AMDGPU/Transforms/MaskedloadToLoad.cpp
@@ -54,11 +54,11 @@ static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
                                           vector::MaskedLoadOp maskedOp,
                                           bool passthru) {
  VectorType vectorType = maskedOp.getVectorType();
-  Value load = builder.create<vector::LoadOp>(
-      loc, vectorType, maskedOp.getBase(), maskedOp.getIndices());
+  Value load = vector::LoadOp::create(
+      builder, loc, vectorType, maskedOp.getBase(), maskedOp.getIndices());
  if (passthru)
-    load = builder.create<arith::SelectOp>(loc, vectorType, maskedOp.getMask(),
-                                           load, maskedOp.getPassThru());
+    load = arith::SelectOp::create(builder, loc, vectorType, maskedOp.getMask(),
+                                   load, maskedOp.getPassThru());
  return load;
}

@@ -108,7 +108,7 @@ struct MaskedLoadLowering final : OpRewritePattern<vector::MaskedLoadOp> {
    SmallVector<OpFoldResult> indices = maskedOp.getIndices();

    auto stridedMetadata =
-        rewriter.create<memref::ExtractStridedMetadataOp>(loc, src);
+        memref::ExtractStridedMetadataOp::create(rewriter, loc, src);
    SmallVector<OpFoldResult> strides =
        stridedMetadata.getConstifiedMixedStrides();
    SmallVector<OpFoldResult> sizes = stridedMetadata.getConstifiedMixedSizes();
@@ -122,47 +122,47 @@ struct MaskedLoadLowering final : OpRewritePattern<vector::MaskedLoadOp> {

    // delta = bufferSize - linearizedOffset
    Value vectorSizeOffset =
-        rewriter.create<arith::ConstantIndexOp>(loc, vectorSize);
+        arith::ConstantIndexOp::create(rewriter, loc, vectorSize);
    Value linearIndex =
        getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
    Value totalSize = getValueOrCreateConstantIndexOp(
        rewriter, loc, linearizedInfo.linearizedSize);
-    Value delta = rewriter.create<arith::SubIOp>(loc, totalSize, linearIndex);
+    Value delta = arith::SubIOp::create(rewriter, loc, totalSize, linearIndex);

    // 1) check if delta < vectorSize
-    Value isOutofBounds = rewriter.create<arith::CmpIOp>(
-        loc, arith::CmpIPredicate::ult, delta, vectorSizeOffset);
+    Value isOutofBounds = arith::CmpIOp::create(
+        rewriter, loc, arith::CmpIPredicate::ult, delta, vectorSizeOffset);

    // 2) check if (delta % elements_per_word != 0)
-    Value elementsPerWord = rewriter.create<arith::ConstantIndexOp>(
-        loc, llvm::divideCeil(32, elementBitWidth));
-    Value isNotWordAligned = rewriter.create<arith::CmpIOp>(
-        loc, arith::CmpIPredicate::ne,
-        rewriter.create<arith::RemUIOp>(loc, delta, elementsPerWord),
-        rewriter.create<arith::ConstantIndexOp>(loc, 0));
+    Value elementsPerWord = arith::ConstantIndexOp::create(
+        rewriter, loc, llvm::divideCeil(32, elementBitWidth));
+    Value isNotWordAligned = arith::CmpIOp::create(
+        rewriter, loc, arith::CmpIPredicate::ne,
+        arith::RemUIOp::create(rewriter, loc, delta, elementsPerWord),
+        arith::ConstantIndexOp::create(rewriter, loc, 0));

    // We take the fallback of the maskedload default lowering only when it is
    // both out-of-bounds and not word aligned. The fallback ensures correct
    // results when loading at the boundary of the buffer, since buffer load
    // returns inconsistent zeros for the whole word when the boundary is
    // crossed.
    Value ifCondition =
-        rewriter.create<arith::AndIOp>(loc, isOutofBounds, isNotWordAligned);
+        arith::AndIOp::create(rewriter, loc, isOutofBounds, isNotWordAligned);

    auto thenBuilder = [&](OpBuilder &builder, Location loc) {
      Operation *read = builder.clone(*maskedOp.getOperation());
      read->setAttr(kMaskedloadNeedsMask, builder.getUnitAttr());
      Value readResult = read->getResult(0);
-      builder.create<scf::YieldOp>(loc, readResult);
+      scf::YieldOp::create(builder, loc, readResult);
    };

    auto elseBuilder = [&](OpBuilder &builder, Location loc) {
      Value res = createVectorLoadForMaskedLoad(builder, loc, maskedOp,
                                                /*passthru=*/true);
-      rewriter.create<scf::YieldOp>(loc, res);
+      scf::YieldOp::create(rewriter, loc, res);
    };

    auto ifOp =
-        rewriter.create<scf::IfOp>(loc, ifCondition, thenBuilder, elseBuilder);
+        scf::IfOp::create(rewriter, loc, ifCondition, thenBuilder, elseBuilder);

    rewriter.replaceOp(maskedOp, ifOp);

@@ -185,13 +185,13 @@ struct FullMaskedLoadToConditionalLoad
    auto trueBuilder = [&](OpBuilder &builder, Location loc) {
      Value res = createVectorLoadForMaskedLoad(builder, loc, loadOp,
                                                /*passthru=*/false);
-      rewriter.create<scf::YieldOp>(loc, res);
+      scf::YieldOp::create(rewriter, loc, res);
    };
    auto falseBuilder = [&](OpBuilder &builder, Location loc) {
-      rewriter.create<scf::YieldOp>(loc, loadOp.getPassThru());
+      scf::YieldOp::create(rewriter, loc, loadOp.getPassThru());
    };
-    auto ifOp = rewriter.create<scf::IfOp>(loadOp.getLoc(), cond, trueBuilder,
-                                           falseBuilder);
+    auto ifOp = scf::IfOp::create(rewriter, loadOp.getLoc(), cond, trueBuilder,
+                                  falseBuilder);
    rewriter.replaceOp(loadOp, ifOp);
    return success();
  }
@@ -210,11 +210,12 @@ struct FullMaskedStoreToConditionalStore
    Value cond = maybeCond.value();

    auto trueBuilder = [&](OpBuilder &builder, Location loc) {
-      rewriter.create<vector::StoreOp>(loc, storeOp.getValueToStore(),
-                                       storeOp.getBase(), storeOp.getIndices());
-      rewriter.create<scf::YieldOp>(loc);
+      vector::StoreOp::create(rewriter, loc, storeOp.getValueToStore(),
+                              storeOp.getBase(), storeOp.getIndices());
+      scf::YieldOp::create(rewriter, loc);
    };
-    auto ifOp = rewriter.create<scf::IfOp>(storeOp.getLoc(), cond, trueBuilder);
+    auto ifOp =
+        scf::IfOp::create(rewriter, storeOp.getLoc(), cond, trueBuilder);
    rewriter.replaceOp(storeOp, ifOp);
    return success();
  }
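Several patterns in this file hand `scf::IfOp::create` region-builder callbacks rather than populating the branches by hand; each callback runs with the insertion point already inside its region and must terminate the region with an `scf.yield`. A condensed sketch of that shape in the new API (`cond`, `aVal`, and `bVal` are illustrative placeholders, not names from the patch):

```cpp
// Both branches yield a value, so the scf.if has one result.
auto thenBuilder = [&](OpBuilder &b, Location loc) {
  scf::YieldOp::create(b, loc, aVal); // value if cond is true
};
auto elseBuilder = [&](OpBuilder &b, Location loc) {
  scf::YieldOp::create(b, loc, bVal); // value if cond is false
};
auto ifOp = scf::IfOp::create(rewriter, loc, cond, thenBuilder, elseBuilder);
// ifOp.getResults() carries the yielded value of whichever branch executed.
```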
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/AMDGPU/Transforms/ResolveStridedMetadata.cpp
@@ -37,8 +37,8 @@ struct ExtractStridedMetadataOnFatRawBufferCastFolder final
      return rewriter.notifyMatchFailure(metadataOp,
                                         "not a fat raw buffer cast");
    Location loc = castOp.getLoc();
-    auto sourceMetadata = rewriter.create<memref::ExtractStridedMetadataOp>(
-        loc, castOp.getSource());
+    auto sourceMetadata = memref::ExtractStridedMetadataOp::create(
+        rewriter, loc, castOp.getSource());
    SmallVector<Value> results;
    if (metadataOp.getBaseBuffer().use_empty()) {
      results.push_back(nullptr);
@@ -48,13 +48,13 @@ struct ExtractStridedMetadataOnFatRawBufferCastFolder final
      if (baseBufferType == castOp.getResult().getType()) {
        results.push_back(castOp.getResult());
      } else {
-        results.push_back(rewriter.create<memref::ReinterpretCastOp>(
-            loc, baseBufferType, castOp.getResult(), /*offset=*/0,
+        results.push_back(memref::ReinterpretCastOp::create(
+            rewriter, loc, baseBufferType, castOp.getResult(), /*offset=*/0,
            /*sizes=*/ArrayRef<int64_t>{}, /*strides=*/ArrayRef<int64_t>{}));
      }
    }
    if (castOp.getResetOffset())
-      results.push_back(rewriter.create<arith::ConstantIndexOp>(loc, 0));
+      results.push_back(arith::ConstantIndexOp::create(rewriter, loc, 0));
    else
      results.push_back(sourceMetadata.getOffset());
    llvm::append_range(results, sourceMetadata.getSizes());
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/AMX/IR/AMXDialect.cpp
@@ -76,8 +76,8 @@ static SmallVector<Value> getTileSizes(Location loc, amx::TileType tType,
  auto mattr = rewriter.getI16IntegerAttr(tType.getDimSize(0));
  auto nattr = rewriter.getI16IntegerAttr(tType.getDimSize(1) * bytes);
  return SmallVector<Value>{
-      rewriter.create<LLVM::ConstantOp>(loc, llvmInt16Type, mattr),
-      rewriter.create<LLVM::ConstantOp>(loc, llvmInt16Type, nattr)};
+      LLVM::ConstantOp::create(rewriter, loc, llvmInt16Type, mattr),
+      LLVM::ConstantOp::create(rewriter, loc, llvmInt16Type, nattr)};
}

/// Maps the 2-dim memref shape to the 64-bit stride. Note that the buffer
@@ -95,15 +95,15 @@ static Value getStride(Location loc, MemRefType mType, Value base,
    // Dynamic stride needs code to compute the stride at runtime.
    MemRefDescriptor memrefDescriptor(base);
    auto attr = rewriter.getI64IntegerAttr(bytes);
-    Value scale = rewriter.create<LLVM::ConstantOp>(loc, llvmInt64Type, attr);
+    Value scale = LLVM::ConstantOp::create(rewriter, loc, llvmInt64Type, attr);
    return rewriter
        .create<LLVM::MulOp>(loc, llvmInt64Type, scale,
                             memrefDescriptor.stride(rewriter, loc, preLast))
        .getResult();
  }
  // Use direct constant for static stride.
  auto attr = rewriter.getI64IntegerAttr(strides[preLast] * bytes);
-  return rewriter.create<LLVM::ConstantOp>(loc, llvmInt64Type, attr)
+  return LLVM::ConstantOp::create(rewriter, loc, llvmInt64Type, attr)
      .getResult();
}
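The `getStride` helper above converts a memref stride from element units into the byte stride AMX tile loads expect; in the static case that is just `strides[preLast] * bytes`. A tiny sketch of the arithmetic (illustrative names, not the MLIR helper's API):

```cpp
#include <cstdint>

// Byte distance between consecutive rows of a 2-D row-major buffer:
// elemStride is the memref stride in elements, elemBytes the element size.
std::int64_t rowStrideBytes(std::int64_t elemStride, std::int64_t elemBytes) {
  return elemStride * elemBytes;
}
// e.g. 64 bf16 elements per row: rowStrideBytes(64, 2) == 128 bytes.
```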
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
@@ -202,7 +202,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
void AffineDataCopyGeneration::runOnOperation() {
  func::FuncOp f = getOperation();
  OpBuilder topBuilder(f.getBody());
-  zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
+  zeroIndex = arith::ConstantIndexOp::create(topBuilder, f.getLoc(), 0);

  // Nests that are copy-in's or copy-out's; the root AffineForOps of those
  // nests are stored herein.
33 changes: 17 additions & 16 deletions mlir/lib/Dialect/Affine/Transforms/AffineExpandIndexOps.cpp
@@ -58,8 +58,9 @@ static SmallVector<Value> computeStrides(Location loc, RewriterBase &rewriter,
      // Note: basis elements and their products are, definitionally,
      // non-negative, so `nuw` is justified.
      if (dynamicPart)
-        dynamicPart = rewriter.create<arith::MulIOp>(
-            loc, dynamicPart, dynamicBasis[dynamicIndex - 1], ovflags);
+        dynamicPart =
+            arith::MulIOp::create(rewriter, loc, dynamicPart,
+                                  dynamicBasis[dynamicIndex - 1], ovflags);
      else
        dynamicPart = dynamicBasis[dynamicIndex - 1];
      --dynamicIndex;
@@ -74,7 +75,7 @@ static SmallVector<Value> computeStrides(Location loc, RewriterBase &rewriter,
          rewriter.createOrFold<arith::ConstantIndexOp>(loc, staticPart);
      if (dynamicPart)
        stride =
-            rewriter.create<arith::MulIOp>(loc, dynamicPart, stride, ovflags);
+            arith::MulIOp::create(rewriter, loc, dynamicPart, stride, ovflags);
      result.push_back(stride);
    }
  }
@@ -106,20 +107,20 @@ affine::lowerAffineDelinearizeIndexOp(RewriterBase &rewriter,
  Value zero = rewriter.createOrFold<arith::ConstantIndexOp>(loc, 0);

  Value initialPart =
-      rewriter.create<arith::FloorDivSIOp>(loc, linearIdx, strides.front());
+      arith::FloorDivSIOp::create(rewriter, loc, linearIdx, strides.front());
  results.push_back(initialPart);

  auto emitModTerm = [&](Value stride) -> Value {
-    Value remainder = rewriter.create<arith::RemSIOp>(loc, linearIdx, stride);
-    Value remainderNegative = rewriter.create<arith::CmpIOp>(
-        loc, arith::CmpIPredicate::slt, remainder, zero);
+    Value remainder = arith::RemSIOp::create(rewriter, loc, linearIdx, stride);
+    Value remainderNegative = arith::CmpIOp::create(
+        rewriter, loc, arith::CmpIPredicate::slt, remainder, zero);
    // If the correction is relevant, this term is <= stride, which is known
    // to be positive in `index`. Otherwise, while 2 * stride might overflow,
    // this branch won't be taken, so the risk of `poison` is fine.
-    Value corrected = rewriter.create<arith::AddIOp>(
-        loc, remainder, stride, arith::IntegerOverflowFlags::nsw);
-    Value mod = rewriter.create<arith::SelectOp>(loc, remainderNegative,
-                                                 corrected, remainder);
+    Value corrected = arith::AddIOp::create(rewriter, loc, remainder, stride,
+                                            arith::IntegerOverflowFlags::nsw);
+    Value mod = arith::SelectOp::create(rewriter, loc, remainderNegative,
+                                        corrected, remainder);
    return mod;
  };

@@ -131,7 +132,7 @@ affine::lowerAffineDelinearizeIndexOp(RewriterBase &rewriter,
    // We know both inputs are positive, so floorDiv == div.
    // This could potentially be a divui, but it's not clear if that would
    // cause issues.
-    Value divided = rewriter.create<arith::DivSIOp>(loc, modulus, nextStride);
+    Value divided = arith::DivSIOp::create(rewriter, loc, modulus, nextStride);
    results.push_back(divided);
  }

@@ -167,8 +168,8 @@ LogicalResult affine::lowerAffineLinearizeIndexOp(RewriterBase &rewriter,
  // our hands on an `OpOperand&` for the loop invariant counting function.
  for (auto [stride, idxOp] :
       llvm::zip_equal(strides, llvm::drop_end(op.getMultiIndexMutable()))) {
-    Value scaledIdx = rewriter.create<arith::MulIOp>(
-        loc, idxOp.get(), stride, arith::IntegerOverflowFlags::nsw);
+    Value scaledIdx = arith::MulIOp::create(rewriter, loc, idxOp.get(), stride,
+                                            arith::IntegerOverflowFlags::nsw);
    int64_t numHoistableLoops = numEnclosingInvariantLoops(idxOp);
    scaledValues.emplace_back(scaledIdx, numHoistableLoops);
  }
@@ -184,8 +185,8 @@ LogicalResult affine::lowerAffineLinearizeIndexOp(RewriterBase &rewriter,
  Value result = scaledValues.front().first;
  for (auto [scaledValue, numHoistableLoops] : llvm::drop_begin(scaledValues)) {
    std::ignore = numHoistableLoops;
-    result = rewriter.create<arith::AddIOp>(loc, result, scaledValue,
-                                            arith::IntegerOverflowFlags::nsw);
+    result = arith::AddIOp::create(rewriter, loc, result, scaledValue,
+                                   arith::IntegerOverflowFlags::nsw);
  }
  rewriter.replaceOp(op, result);
  return success();
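The `emitModTerm` lambda in the delinearize lowering above rebuilds a non-negative modulus from `arith.remsi`, whose result takes the sign of the dividend, by adding the stride back when the remainder is negative. The same correction in plain C++, where `%` truncates toward zero exactly like `remsi` (a worked illustration, not the MLIR lowering itself):

```cpp
#include <cassert>
#include <cstdint>

// Non-negative modulus for a positive stride, built from truncating %.
std::int64_t floorMod(std::int64_t linearIdx, std::int64_t stride) {
  std::int64_t remainder = linearIdx % stride; // sign follows linearIdx
  // Mirrors the remainderNegative / corrected / select sequence above.
  return remainder < 0 ? remainder + stride : remainder;
}

int main() {
  assert(floorMod(7, 4) == 3);  // positive case: plain remainder
  assert(floorMod(-7, 4) == 1); // -7 % 4 == -3, corrected to -3 + 4 == 1
}
```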
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/Affine/Transforms/DecomposeAffineOps.cpp
@@ -88,8 +88,8 @@ static AffineApplyOp createSubApply(RewriterBase &rewriter,
  auto rhsMap = AffineMap::get(m.getNumDims(), m.getNumSymbols(), expr, ctx);
  SmallVector<Value> rhsOperands = originalOp->getOperands();
  canonicalizeMapAndOperands(&rhsMap, &rhsOperands);
-  return rewriter.create<AffineApplyOp>(originalOp.getLoc(), rhsMap,
-                                        rhsOperands);
+  return AffineApplyOp::create(rewriter, originalOp.getLoc(), rhsMap,
+                               rhsOperands);
}

FailureOr<AffineApplyOp> mlir::affine::decompose(RewriterBase &rewriter,
@@ -160,8 +160,8 @@ FailureOr<AffineApplyOp> mlir::affine::decompose(RewriterBase &rewriter,
  auto current = createSubApply(rewriter, op, subExpressions[0]);
  for (int64_t i = 1, e = subExpressions.size(); i < e; ++i) {
    Value tmp = createSubApply(rewriter, op, subExpressions[i]);
-    current = rewriter.create<AffineApplyOp>(op.getLoc(), binMap,
-                                             ValueRange{current, tmp});
+    current = AffineApplyOp::create(rewriter, op.getLoc(), binMap,
+                                    ValueRange{current, tmp});
    LLVM_DEBUG(DBGS() << "--reassociate into: " << current << "\n");
  }

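The `decompose` loop above reassociates an n-ary affine expression into a chain of two-operand `affine.apply` ops: sub-expression 0 seeds `current`, and each later sub-expression is folded in left to right, giving `(e0 op e1) op e2 ...`. The control flow, abstracted into plain C++ (`combine` is a hypothetical stand-in for `createSubApply` plus the binary apply):

```cpp
#include <cstddef>
#include <vector>

// Left-fold a list of parts with a binary combiner, mirroring the
// reassociation loop in decompose().
template <typename T, typename Fn>
T leftFold(const std::vector<T> &parts, Fn combine) {
  T current = parts.front(); // assumes parts is non-empty, as in decompose()
  for (std::size_t i = 1; i < parts.size(); ++i)
    current = combine(current, parts[i]);
  return current;
}
```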
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
@@ -424,7 +424,7 @@ static Value createPrivateMemRef(AffineForOp forOp,
  // consumer loop nests to reduce their live range. Currently they are added
  // at the beginning of the block, because loop nests can be reordered
  // during the fusion pass.
-  Value newMemRef = top.create<memref::AllocOp>(forOp.getLoc(), newMemRefType);
+  Value newMemRef = memref::AllocOp::create(top, forOp.getLoc(), newMemRefType);

  // Build an AffineMap to remap access functions based on lower bound offsets.
  SmallVector<AffineExpr, 4> remapExprs;