
Commit: Ordering in the connector for batches
Julius de Bruijn committed Mar 16, 2020
1 parent e22cd50 commit 186979d
Showing 3 changed files with 62 additions and 6 deletions.
14 changes: 14 additions & 0 deletions libs/prisma-models/src/order_by.rs
@@ -17,6 +17,20 @@ pub enum SortOrder {
}

impl SortOrder {
+    pub fn is_ascending(self) -> bool {
+        match self {
+            Self::Ascending => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_descending(self) -> bool {
+        match self {
+            Self::Descending => true,
+            _ => false,
+        }
+    }
+
    pub fn abbreviated(self) -> &'static str {
        match self {
            SortOrder::Ascending => "ASC",
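For readers skimming the change, a minimal standalone sketch of how the new helpers read at a call site. The SortOrder below is a simplified stand-in for the prisma-models enum, and compare is a hypothetical helper, not part of this commit:

use std::cmp::Ordering;

// Simplified stand-in for prisma_models::SortOrder, for illustration only.
#[derive(Clone, Copy)]
enum SortOrder {
    Ascending,
    Descending,
}

impl SortOrder {
    fn is_ascending(self) -> bool {
        match self {
            Self::Ascending => true,
            _ => false,
        }
    }
}

// Pick a comparison direction from the sort order, as the connector change
// further down does for batched results.
fn compare(order: SortOrder, a: i64, b: i64) -> Ordering {
    if order.is_ascending() {
        a.cmp(&b)
    } else {
        b.cmp(&a)
    }
}

fn main() {
    assert_eq!(compare(SortOrder::Ascending, 1, 2), Ordering::Less);
    assert_eq!(compare(SortOrder::Descending, 1, 2), Ordering::Greater);
}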
@@ -83,7 +83,23 @@ class InSelectionBatching extends FlatSpec with Matchers with ApiSpecBase {
    )
  }

-  "ordering of batched IN queries" should "work when having more than the specified amount of items" in {
+  "ascending ordering of batched IN queries" should "work when having more than the specified amount of items" in {
+    val res = server.query(
+      """query idInTest {
+        | findManyArtist(where: { ArtistId_in: [5,4,3,2,1,2,1,1,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }, orderBy: ArtistId_ASC) { ArtistId }
+        |}
+        |""".stripMargin,
+      project = project,
+      legacy = false,
+      batchSize = 2,
+    )
+
+    res.toString should be(
+      """{"data":{"findManyArtist":[{"ArtistId":1},{"ArtistId":2},{"ArtistId":3},{"ArtistId":4},{"ArtistId":5}]}}""".stripMargin
+    )
+  }
+
+  "descending ordering of batched IN queries" should "work when having more than the specified amount of items" in {
    val res = server.query(
      """query idInTest {
        | findManyArtist(where: { ArtistId_in: [5,4,3,2,1,1,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }, orderBy: ArtistId_DESC) { ArtistId }
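A standalone sketch (Rust, illustrative only; the test suite itself is Scala) of the splitting these tests rely on: with batchSize = 2 the 24-element IN list becomes 12 batched queries, and the expected response contains each matching artist once, in order (artists 6 and 7 do not appear in the expected output, so only ids up to 5 match):

use std::collections::BTreeSet;

fn main() {
    // The IN list from the ascending test above.
    let artist_ids: Vec<i64> = vec![
        5, 4, 3, 2, 1, 2, 1, 1, 3, 4, 5, 6, 7, 6, 5, 4, 3, 2, 1, 2, 3, 4, 5, 6,
    ];
    let batch_size = 2;

    // Each chunk corresponds to one batched `IN` query.
    let batches: Vec<&[i64]> = artist_ids.chunks(batch_size).collect();
    assert_eq!(batches.len(), 12);

    // Deduplicated, ordered set of ids that actually match, mirroring the
    // expected response in the test.
    let found: BTreeSet<i64> = artist_ids.iter().copied().filter(|id| *id <= 5).collect();
    assert_eq!(found.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5]);
}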
@@ -3,9 +3,10 @@ use crate::{
    QueryExt, SqlError,
};
use connector_interface::*;
-use futures::future;
+use futures::stream::{FuturesUnordered, StreamExt};
use prisma_models::*;
use quaint::ast::*;
+use std::collections::HashMap;

pub async fn get_single_record(
    conn: &dyn QueryExt,

@@ -35,27 +36,52 @@ pub async fn get_single_record(
pub async fn get_many_records(
    conn: &dyn QueryExt,
    model: &ModelRef,
-    query_arguments: QueryArguments,
+    mut query_arguments: QueryArguments,
    selected_fields: &SelectedFields,
) -> crate::Result<ManyRecords> {
-    let field_names = selected_fields.db_names().map(String::from).collect();
+    let field_names: Vec<String> = selected_fields.db_names().map(String::from).collect();
    let idents: Vec<_> = selected_fields.types().collect();
    let mut records = Vec::new();

    if query_arguments.can_batch() {
+        // We don't need to order in the database due to us ordering in this
+        // function.
+        let order = query_arguments.order_by.take();
+
        let batches = query_arguments.batched();
-        let mut futures = Vec::with_capacity(batches.len());
+        let mut futures = FuturesUnordered::new();

        for args in batches.into_iter() {
            let query = read::get_records(model, selected_fields.columns(), args);
            futures.push(conn.filter(query.into(), idents.as_slice()));
        }

-        for result in future::join_all(futures).await.into_iter() {
+        while let Some(result) = futures.next().await {
            for item in result?.into_iter() {
                records.push(Record::from(item))
            }
        }
+
+        let field_indices: HashMap<&str, usize> = field_names
+            .iter()
+            .enumerate()
+            .map(|(i, name)| (name.as_str(), i))
+            .collect();
+
+        if let Some(order_by) = order {
+            records.sort_by(|a, b| match &order_by.field {
+                Field::Scalar(sf) => {
+                    let index = field_indices[sf.db_name()];
+
+                    if order_by.sort_order.is_ascending() {
+                        a.values[index].cmp(&b.values[index])
+                    } else {
+                        b.values[index].cmp(&a.values[index])
+                    }
+                }
+                Field::Relation(_) => todo!(),
+            })
+        }
    } else {
        let query = read::get_records(model, selected_fields.columns(), query_arguments);

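Taken together, the change reads like the following self-contained sketch (it assumes only the futures crate; plain integers stand in for the connector's records and query types, and the helper names are hypothetical): batches run concurrently on a FuturesUnordered, rows arrive in completion order, and the requested ordering is restored by sorting the merged result in memory.

use futures::stream::{FuturesUnordered, StreamExt};

// Stand-in for one batched `IN` query against the database.
async fn fetch_batch(ids: Vec<i64>) -> Vec<i64> {
    ids
}

async fn fetch_all_sorted(ids: Vec<i64>, ascending: bool) -> Vec<i64> {
    let mut futures = FuturesUnordered::new();

    // One future per batch; they may complete in any order.
    for batch in ids.chunks(2) {
        futures.push(fetch_batch(batch.to_vec()));
    }

    let mut records = Vec::new();
    while let Some(batch_rows) = futures.next().await {
        records.extend(batch_rows);
    }

    // Sort the merged rows here instead of relying on ORDER BY in each
    // individual batched query.
    if ascending {
        records.sort();
    } else {
        records.sort_by(|a, b| b.cmp(a));
    }

    records
}

fn main() {
    let rows = futures::executor::block_on(fetch_all_sorted(vec![5, 3, 1, 4, 2], true));
    assert_eq!(rows, vec![1, 2, 3, 4, 5]);
}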
