-
Notifications
You must be signed in to change notification settings - Fork 1.6k
Improve async_udf example and docs #16846
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
7 commits
Select commit
Hold shift + click to select a range
643a9af
Improve async_udf example and docs
alamb 1d107df
tweak
alamb f49503f
Merge remote-tracking branch 'apache/main' into alamb/async_udf_example
alamb 1014841
Remove random monospace async and version note
alamb a82ecae
Fix explain plan diff by hard coding parallelism
alamb 9578a73
rename arguments, use as_string_view_array
alamb 42c54f9
request --> reqwest
alamb File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,104 +15,104 @@ | |
// specific language governing permissions and limitations | ||
// under the License. | ||
|
||
use arrow::array::{ArrayIter, ArrayRef, AsArray, Int64Array, RecordBatch, StringArray}; | ||
use arrow::compute::kernels::cmp::eq; | ||
//! This example shows how to create and use "Async UDFs" in DataFusion. | ||
//! | ||
//! Async UDFs allow you to perform asynchronous operations, such as | ||
//! making network requests. This can be used for tasks like fetching | ||
//! data from an external API such as a LLM service or an external database. | ||
|
||
use arrow::array::{ArrayRef, BooleanArray, Int64Array, RecordBatch, StringArray}; | ||
use arrow_schema::{DataType, Field, Schema}; | ||
use async_trait::async_trait; | ||
use datafusion::assert_batches_eq; | ||
use datafusion::common::cast::as_string_view_array; | ||
use datafusion::common::error::Result; | ||
use datafusion::common::types::{logical_int64, logical_string}; | ||
use datafusion::common::not_impl_err; | ||
use datafusion::common::utils::take_function_args; | ||
use datafusion::common::{internal_err, not_impl_err}; | ||
use datafusion::config::ConfigOptions; | ||
use datafusion::execution::SessionStateBuilder; | ||
use datafusion::logical_expr::async_udf::{AsyncScalarUDF, AsyncScalarUDFImpl}; | ||
use datafusion::logical_expr::{ | ||
ColumnarValue, ScalarFunctionArgs, ScalarUDFImpl, Signature, TypeSignature, | ||
TypeSignatureClass, Volatility, | ||
ColumnarValue, ScalarFunctionArgs, ScalarUDFImpl, Signature, Volatility, | ||
}; | ||
use datafusion::logical_expr_common::signature::Coercion; | ||
use datafusion::physical_expr_common::datum::apply_cmp; | ||
use datafusion::prelude::SessionContext; | ||
use log::trace; | ||
use datafusion::prelude::{SessionConfig, SessionContext}; | ||
use std::any::Any; | ||
use std::sync::Arc; | ||
|
||
#[tokio::main] | ||
async fn main() -> Result<()> { | ||
let ctx: SessionContext = SessionContext::new(); | ||
|
||
let async_upper = AsyncUpper::new(); | ||
let udf = AsyncScalarUDF::new(Arc::new(async_upper)); | ||
ctx.register_udf(udf.into_scalar_udf()); | ||
let async_equal = AsyncEqual::new(); | ||
// Use a hard coded parallelism level of 4 so the explain plan | ||
// is consistent across machines. | ||
let config = SessionConfig::new().with_target_partitions(4); | ||
let ctx = | ||
SessionContext::from(SessionStateBuilder::new().with_config(config).build()); | ||
|
||
// Similarly to regular UDFs, you create an AsyncScalarUDF by implementing | ||
// `AsyncScalarUDFImpl` and creating an instance of `AsyncScalarUDF`. | ||
let async_equal = AskLLM::new(); | ||
let udf = AsyncScalarUDF::new(Arc::new(async_equal)); | ||
|
||
// Async UDFs are registered with the SessionContext, using the same | ||
// `register_udf` method as regular UDFs. | ||
ctx.register_udf(udf.into_scalar_udf()); | ||
|
||
// Create a table named 'animal' with some sample data | ||
ctx.register_batch("animal", animal()?)?; | ||
|
||
// use Async UDF in the projection | ||
// +---------------+----------------------------------------------------------------------------------------+ | ||
// | plan_type | plan | | ||
// +---------------+----------------------------------------------------------------------------------------+ | ||
// | logical_plan | Projection: async_equal(a.id, Int64(1)) | | ||
// | | SubqueryAlias: a | | ||
// | | TableScan: animal projection=[id] | | ||
// | physical_plan | ProjectionExec: expr=[__async_fn_0@1 as async_equal(a.id,Int64(1))] | | ||
// | | AsyncFuncExec: async_expr=[async_expr(name=__async_fn_0, expr=async_equal(id@0, 1))] | | ||
// | | CoalesceBatchesExec: target_batch_size=8192 | | ||
// | | DataSourceExec: partitions=1, partition_sizes=[1] | | ||
// | | | | ||
// +---------------+----------------------------------------------------------------------------------------+ | ||
ctx.sql("explain select async_equal(a.id, 1) from animal a") | ||
// You can use the async UDF as normal in SQL queries | ||
// | ||
// Note: Async UDFs can currently be used in the select list and filter conditions. | ||
let results = ctx | ||
.sql("select * from animal a where ask_llm(a.name, 'Is this animal furry?')") | ||
.await? | ||
.show() | ||
.collect() | ||
.await?; | ||
|
||
// +----------------------------+ | ||
// | async_equal(a.id,Int64(1)) | | ||
// +----------------------------+ | ||
// | true | | ||
// | false | | ||
// | false | | ||
// | false | | ||
// | false | | ||
// +----------------------------+ | ||
ctx.sql("select async_equal(a.id, 1) from animal a") | ||
assert_batches_eq!( | ||
[ | ||
"+----+------+", | ||
"| id | name |", | ||
"+----+------+", | ||
"| 1 | cat |", | ||
"| 2 | dog |", | ||
"+----+------+", | ||
], | ||
&results | ||
); | ||
|
||
// While the interface is the same for both normal and async UDFs, you can | ||
// use `EXPLAIN` output to see that the async UDF uses a special | ||
// `AsyncFuncExec` node in the physical plan: | ||
let results = ctx | ||
.sql("explain select * from animal a where ask_llm(a.name, 'Is this animal furry?')") | ||
.await? | ||
.show() | ||
.collect() | ||
.await?; | ||
|
||
// use Async UDF in the filter | ||
// +---------------+--------------------------------------------------------------------------------------------+ | ||
// | plan_type | plan | | ||
// +---------------+--------------------------------------------------------------------------------------------+ | ||
// | logical_plan | SubqueryAlias: a | | ||
// | | Filter: async_equal(animal.id, Int64(1)) | | ||
// | | TableScan: animal projection=[id, name] | | ||
// | physical_plan | CoalesceBatchesExec: target_batch_size=8192 | | ||
// | | FilterExec: __async_fn_0@2, projection=[id@0, name@1] | | ||
// | | RepartitionExec: partitioning=RoundRobinBatch(12), input_partitions=1 | | ||
// | | AsyncFuncExec: async_expr=[async_expr(name=__async_fn_0, expr=async_equal(id@0, 1))] | | ||
// | | CoalesceBatchesExec: target_batch_size=8192 | | ||
// | | DataSourceExec: partitions=1, partition_sizes=[1] | | ||
// | | | | ||
// +---------------+--------------------------------------------------------------------------------------------+ | ||
ctx.sql("explain select * from animal a where async_equal(a.id, 1)") | ||
.await? | ||
.show() | ||
.await?; | ||
|
||
// +----+------+ | ||
// | id | name | | ||
// +----+------+ | ||
// | 1 | cat | | ||
// +----+------+ | ||
ctx.sql("select * from animal a where async_equal(a.id, 1)") | ||
.await? | ||
.show() | ||
.await?; | ||
assert_batches_eq!( | ||
[ | ||
"+---------------+--------------------------------------------------------------------------------------------------------------------------------+", | ||
"| plan_type | plan |", | ||
"+---------------+--------------------------------------------------------------------------------------------------------------------------------+", | ||
"| logical_plan | SubqueryAlias: a |", | ||
"| | Filter: ask_llm(CAST(animal.name AS Utf8View), Utf8View(\"Is this animal furry?\")) |", | ||
"| | TableScan: animal projection=[id, name] |", | ||
"| physical_plan | CoalesceBatchesExec: target_batch_size=8192 |", | ||
"| | FilterExec: __async_fn_0@2, projection=[id@0, name@1] |", | ||
"| | RepartitionExec: partitioning=RoundRobinBatch(4), input_partitions=1 |", | ||
"| | AsyncFuncExec: async_expr=[async_expr(name=__async_fn_0, expr=ask_llm(CAST(name@1 AS Utf8View), Is this animal furry?))] |", | ||
"| | CoalesceBatchesExec: target_batch_size=8192 |", | ||
"| | DataSourceExec: partitions=1, partition_sizes=[1] |", | ||
"| | |", | ||
"+---------------+--------------------------------------------------------------------------------------------------------------------------------+", | ||
], | ||
&results | ||
); | ||
|
||
Ok(()) | ||
} | ||
|
||
/// Returns a sample `RecordBatch` representing an "animal" table with two columns: | ||
fn animal() -> Result<RecordBatch> { | ||
let schema = Arc::new(Schema::new(vec![ | ||
Field::new("id", DataType::Int64, false), | ||
|
@@ -127,118 +127,45 @@ fn animal() -> Result<RecordBatch> { | |
Ok(RecordBatch::try_new(schema, vec![id_array, name_array])?) | ||
} | ||
|
||
/// An async UDF that simulates asking a large language model (LLM) service a | ||
/// question based on the content of two columns. The UDF will return a boolean | ||
/// indicating whether the LLM thinks the first argument matches the question in | ||
/// the second argument. | ||
/// | ||
/// Since this is a simplified example, it does not call an LLM service, but | ||
/// could be extended to do so in a real-world scenario. | ||
#[derive(Debug)] | ||
pub struct AsyncUpper { | ||
signature: Signature, | ||
} | ||
|
||
impl Default for AsyncUpper { | ||
fn default() -> Self { | ||
Self::new() | ||
} | ||
} | ||
|
||
impl AsyncUpper { | ||
pub fn new() -> Self { | ||
Self { | ||
signature: Signature::new( | ||
TypeSignature::Coercible(vec![Coercion::Exact { | ||
desired_type: TypeSignatureClass::Native(logical_string()), | ||
}]), | ||
Volatility::Volatile, | ||
), | ||
} | ||
} | ||
} | ||
|
||
#[async_trait] | ||
impl ScalarUDFImpl for AsyncUpper { | ||
fn as_any(&self) -> &dyn Any { | ||
self | ||
} | ||
|
||
fn name(&self) -> &str { | ||
"async_upper" | ||
} | ||
|
||
fn signature(&self) -> &Signature { | ||
&self.signature | ||
} | ||
|
||
fn return_type(&self, _arg_types: &[DataType]) -> Result<DataType> { | ||
Ok(DataType::Utf8) | ||
} | ||
|
||
fn invoke_with_args(&self, _args: ScalarFunctionArgs) -> Result<ColumnarValue> { | ||
not_impl_err!("AsyncUpper can only be called from async contexts") | ||
} | ||
} | ||
|
||
#[async_trait] | ||
impl AsyncScalarUDFImpl for AsyncUpper { | ||
fn ideal_batch_size(&self) -> Option<usize> { | ||
Some(10) | ||
} | ||
|
||
async fn invoke_async_with_args( | ||
&self, | ||
args: ScalarFunctionArgs, | ||
_option: &ConfigOptions, | ||
) -> Result<ArrayRef> { | ||
trace!("Invoking async_upper with args: {:?}", args); | ||
let value = &args.args[0]; | ||
let result = match value { | ||
ColumnarValue::Array(array) => { | ||
let string_array = array.as_string::<i32>(); | ||
let iter = ArrayIter::new(string_array); | ||
let result = iter | ||
.map(|string| string.map(|s| s.to_uppercase())) | ||
.collect::<StringArray>(); | ||
Arc::new(result) as ArrayRef | ||
} | ||
_ => return internal_err!("Expected a string argument, got {:?}", value), | ||
}; | ||
Ok(result) | ||
} | ||
} | ||
|
||
#[derive(Debug)] | ||
struct AsyncEqual { | ||
struct AskLLM { | ||
signature: Signature, | ||
} | ||
|
||
impl Default for AsyncEqual { | ||
impl Default for AskLLM { | ||
fn default() -> Self { | ||
Self::new() | ||
} | ||
} | ||
|
||
impl AsyncEqual { | ||
impl AskLLM { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. :) |
||
pub fn new() -> Self { | ||
Self { | ||
signature: Signature::new( | ||
TypeSignature::Coercible(vec![ | ||
Coercion::Exact { | ||
desired_type: TypeSignatureClass::Native(logical_int64()), | ||
}, | ||
Coercion::Exact { | ||
desired_type: TypeSignatureClass::Native(logical_int64()), | ||
}, | ||
]), | ||
signature: Signature::exact( | ||
vec![DataType::Utf8View, DataType::Utf8View], | ||
Volatility::Volatile, | ||
), | ||
} | ||
} | ||
} | ||
|
||
#[async_trait] | ||
impl ScalarUDFImpl for AsyncEqual { | ||
/// All async UDFs implement the `ScalarUDFImpl` trait, which provides the basic | ||
/// information for the function, such as its name, signature, and return type. | ||
/// [async_trait] | ||
impl ScalarUDFImpl for AskLLM { | ||
fn as_any(&self) -> &dyn Any { | ||
self | ||
} | ||
|
||
fn name(&self) -> &str { | ||
"async_equal" | ||
"ask_llm" | ||
} | ||
|
||
fn signature(&self) -> &Signature { | ||
|
@@ -249,19 +176,64 @@ impl ScalarUDFImpl for AsyncEqual { | |
Ok(DataType::Boolean) | ||
} | ||
|
||
/// Since this is an async UDF, the `invoke_with_args` method will not be | ||
/// called directly. | ||
fn invoke_with_args(&self, _args: ScalarFunctionArgs) -> Result<ColumnarValue> { | ||
not_impl_err!("AsyncEqual can only be called from async contexts") | ||
not_impl_err!("AskLLM can only be called from async contexts") | ||
} | ||
} | ||
|
||
/// In addition to [`ScalarUDFImpl`], we also need to implement the | ||
/// [`AsyncScalarUDFImpl`] trait. | ||
#[async_trait] | ||
impl AsyncScalarUDFImpl for AsyncEqual { | ||
impl AsyncScalarUDFImpl for AskLLM { | ||
/// The `invoke_async_with_args` method is similar to `invoke_with_args`, | ||
/// but it returns a `Future` that resolves to the result. | ||
/// | ||
/// Since this signature is `async`, it can do any `async` operations, such | ||
/// as network requests. This method is run on the same tokio `Runtime` that | ||
/// is processing the query, so you may wish to make actual network requests | ||
/// on a different `Runtime`, as explained in the `thread_pools.rs` example | ||
/// in this directory. | ||
async fn invoke_async_with_args( | ||
&self, | ||
args: ScalarFunctionArgs, | ||
_option: &ConfigOptions, | ||
) -> Result<ArrayRef> { | ||
let [arg1, arg2] = take_function_args(self.name(), &args.args)?; | ||
apply_cmp(arg1, arg2, eq)?.to_array(args.number_rows) | ||
// in a real UDF you would likely want to special case constant | ||
// arguments to improve performance, but this example converts the | ||
// arguments to arrays for simplicity. | ||
let args = ColumnarValue::values_to_arrays(&args.args)?; | ||
let [content_column, question_column] = take_function_args(self.name(), args)?; | ||
|
||
// In a real function, you would use a library such as `reqwest` here to | ||
// make an async HTTP request. Credentials and other configurations can | ||
// be supplied via the `ConfigOptions` parameter. | ||
|
||
// In this example, we will simulate the LLM response by comparing the two | ||
// input arguments using some static strings | ||
let content_column = as_string_view_array(&content_column)?; | ||
let question_column = as_string_view_array(&question_column)?; | ||
|
||
let result_array: BooleanArray = content_column | ||
.iter() | ||
.zip(question_column.iter()) | ||
.map(|(a, b)| { | ||
// If either value is null, return None | ||
let a = a?; | ||
let b = b?; | ||
// Simulate an LLM response by checking the arguments to some | ||
// hardcoded conditions. | ||
if a.contains("cat") && b.contains("furry") | ||
|| a.contains("dog") && b.contains("furry") | ||
{ | ||
Some(true) | ||
} else { | ||
Some(false) | ||
} | ||
}) | ||
.collect(); | ||
|
||
Ok(Arc::new(result_array)) | ||
} | ||
} |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Follow the trend 🤣
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
💯 I am not above using the latest shiny tech trend to advertise DataFusion :)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think this (external service calls) is actually a nice use-case for async functions.
It's good they allow for batching, since they are invoked on a batch of data (as everything in DF).
Do async functions also allow interleaved execution of batches? I.e. can next call to an async UDF start before the previous completed?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
They are invoked per batch as I understand
batch
-- so internally the implementation could make 8000 concurrent requests (or pipeline the requests, etc). There is no pipelining across batches that I know of -- so the next call to an async UDF will not happen until the first call has completed
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
would it be worth considering, to keep the pipeline busy? (in case the bottleneck is the remote call, which is not unlikely when a query uses async UDFs)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes I do think it is likely a good idea. I think the first thing to do might be to create an example / test case showing what this would look like (maybe fetching urls or something) before we design the API