I have a sorted JavaScript array, and want to insert one more item into the array such that the resulting array remains sorted. I could certainly implement a simple quicksort-style insertion function:
var array = [1, 2, 3, 4, 5, 6, 7, 8, 9];
var element = 3.5;

function insert(element, array) {
  array.splice(locationOf(element, array) + 1, 0, element);
  return array;
}

function locationOf(element, array, start, end) {
  start = start || 0;
  end = end || array.length;
  var pivot = parseInt(start + (end - start) / 2, 10);
  if (end - start <= 1 || array[pivot] === element) return pivot;
  if (array[pivot] < element) {
    return locationOf(element, array, pivot, end);
  } else {
    return locationOf(element, array, start, pivot);
  }
}

console.log(insert(element, array));
[WARNING] this code has a bug when trying to insert at the beginning of the array: e.g. insert(2, [3, 7, 9]) produces the incorrect result [3, 2, 7, 9].
However, I noticed that implementations of the Array.sort function might potentially do this for me, natively:
var array = [1, 2, 3, 4, 5, 6, 7, 8, 9];
var element = 3.5;

function insert(element, array) {
  array.push(element);
  array.sort(function (a, b) {
    return a - b;
  });
  return array;
}

console.log(insert(element, array));
Is there a good reason to choose the first implementation over the second?
Edit: Note that for the general case, an O(log(n)) insertion (as implemented in the first example) will be faster than a generic sorting algorithm; however, this is not necessarily the case for JavaScript in particular. Note that:
Best case for several sorting algorithms is O(n), which is still significantly different from O(log(n)), but not quite as bad as O(n log(n)) as mentioned below. It would come down to the particular sorting algorithm used (see Javascript Array.sort implementation?).
The sort method in JavaScript is a native function, so it potentially realizes huge benefits: O(log(n)) with a huge coefficient can still be much worse than O(n) for reasonably sized data sets.
splice() (e.g. your 1st example) is already O(n). Even if it doesn't internally create a new copy of the entire array, it potentially has to shunt all n items back 1 position if the element is to be inserted in position 0. Maybe it's fast because it's a native function and the constant is low, but it's O(n) nonetheless.
Regarding parseInt: use Math.floor instead. Math.floor is much faster than parseInt: jsperf.com/test-parseint-and-math-floor
Simple (Demo):
function sortedIndex(array, value) {
  var low = 0,
      high = array.length;

  while (low < high) {
    var mid = (low + high) >>> 1;
    if (array[mid] < value) low = mid + 1;
    else high = mid;
  }
  return low;
}
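For example, inserting with the returned index might look like this (a minimal usage sketch; the splice call is my addition, not part of the original answer):

var arr = [10, 20, 30, 40];
var value = 25;
// sortedIndex returns the first position whose element is >= value
arr.splice(sortedIndex(arr, value), 0, value);
console.log(arr); // [10, 20, 25, 30, 40]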
Just as a single data point, for kicks I tested this out, inserting 1000 random elements into an array of 100,000 pre-sorted numbers with the two methods, using Chrome on Windows 7:
First Method:
~54 milliseconds
Second Method:
~57 seconds
So, at least on this setup, the native method doesn't make up for it. This is true even for small data sets; inserting 100 elements into an array of 1000:
First Method:
~1 millisecond
Second Method:
34 milliseconds
With a custom comparator passed to Array.prototype.sort, you lose the benefits of C++ because the JS comparator function is called so many times.
Very good question with a very interesting discussion! I was also using the Array.sort() function after pushing a single element into an array with some thousands of objects. I had to extend your locationOf function for my purposes, because I have complex objects and therefore need a compare function like the one in Array.sort():
function locationOf(element, array, comparer, start, end) {
  if (array.length === 0)
    return -1;

  start = start || 0;
  end = end || array.length;
  var pivot = (start + end) >> 1; // should be faster than dividing by 2

  var c = comparer(element, array[pivot]);
  if (end - start <= 1) return c == -1 ? pivot - 1 : pivot;

  switch (c) {
    case -1: return locationOf(element, array, comparer, start, pivot);
    case 0: return pivot;
    case 1: return locationOf(element, array, comparer, pivot, end);
  }
}

// sample for objects like {lastName: 'Miller', ...}
var patientCompare = function (a, b) {
  if (a.lastName < b.lastName) return -1;
  if (a.lastName > b.lastName) return 1;
  return 0;
};
Edit: the line handling end - start <= 1 should read return c == -1 ? pivot : pivot + 1; in order to return the correct index; otherwise, for an array of length 1, the function would return -1 or 0.
>> 1 should be faster (or at least not slower) than / 2.

Beware of the comparer function: in this algorithm its result is compared to +-1, but a compare function may return any value < 0 or > 0 (see the documentation on compare functions). The problematic part is not only the switch statement but also the line if (end - start <= 1) return c == -1 ? pivot - 1 : pivot; where c is compared to -1 as well.
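A minimal sketch of how such a comparer could be normalized before being used with this algorithm (my addition, not from the answer):

function normalizeComparer(comparer) {
  return function (a, b) {
    // Math.sign maps any result < 0 / 0 / > 0 to -1 / 0 / 1
    return Math.sign(comparer(a, b));
  };
}

// e.g. locationOf(element, array, normalizeComparer(patientCompare))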
There's a bug in your code. It should read:
function locationOf(element, array, start, end) {
  start = start || 0;
  end = end || array.length;
  var pivot = parseInt(start + (end - start) / 2, 10);
  if (array[pivot] === element) return pivot;
  if (end - start <= 1)
    return array[pivot] > element ? pivot - 1 : pivot;
  if (array[pivot] < element) {
    return locationOf(element, array, pivot, end);
  } else {
    return locationOf(element, array, start, pivot);
  }
}
Without this fix the code will never be able to insert an element at the beginning of the array.
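A quick check against the failing case from the question (my own sketch, reusing the question's insert wrapper):

var arr = [3, 7, 9];
arr.splice(locationOf(2, arr) + 1, 0, 2); // locationOf now returns -1 here
console.log(arr); // [2, 3, 7, 9]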
I know this is an old question that has an answer already, and there are a number of other decent answers. I see some answers that propose that you can solve this problem by looking up the correct insertion index in O(log n) - you can, but you can't insert in that time, because the array needs to be partially copied out to make space.
Bottom line: If you really need O(log n) inserts and deletes into a sorted array, you need a different data structure - not an array. You should use a B-Tree. The performance gains you will get from using a B-Tree for a large data set, will dwarf any of the improvements offered here.
If you must use an array, I offer the following code, based on insertion sort, which works if (and only if) the array is already sorted. This is useful for the case when you need the array sorted again after every insert:
function addAndSort(arr, val) {
  arr.push(val);
  for (var i = arr.length - 1; i > 0 && arr[i] < arr[i - 1]; i--) {
    var tmp = arr[i];
    arr[i] = arr[i - 1];
    arr[i - 1] = tmp;
  }
  return arr;
}
It should operate in O(n), which I think is the best you can do. It would be nicer if JS supported multiple assignment. Here's an example to play with:
Update: this might be faster:
function addAndSort2(arr, val) {
  arr.push(val);
  var i = arr.length - 1;
  var item = arr[i];
  while (i > 0 && item < arr[i - 1]) {
    arr[i] = arr[i - 1];
    i -= 1;
  }
  arr[i] = item;
  return arr;
}
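Side note (my addition, not part of the original answer): modern JavaScript does provide the multiple assignment wished for above, via destructuring, which would tidy the swap in the first version:

function addAndSortES6(arr, val) {
  arr.push(val);
  for (let i = arr.length - 1; i > 0 && arr[i] < arr[i - 1]; i--) {
    // destructuring swap, no tmp variable needed (ES2015+)
    [arr[i], arr[i - 1]] = [arr[i - 1], arr[i]];
  }
  return arr;
}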
Update 2
@terrymorse pointed out in the comments that JavaScript's Array.splice method is crazy fast, and it's more than just a constant improvement in the time complexity. It seems some linked list magic is being used. It means you still do need a different data structure than a plain array; it's just that JavaScript arrays might provide that different data structure natively, at least for N between 100 and 100,000.
Your insertion function assumes that the given array is sorted; it searches directly for the location where the new element can be inserted, usually by looking at only a few of the elements in the array.
The general sort function of an array can't take these shortcuts. Obviously it at least has to inspect all elements in the array to see if they are already correctly ordered. This fact alone makes the general sort slower than the insertion function.
A generic sort algorithm is usually O(n ⋅ log(n)) on average, and depending on the implementation it might actually hit its worst case if the array is already sorted, leading to complexities of O(n²). Directly searching for the insertion position instead has just a complexity of O(log(n)), so it will always be much faster.
For a small number of items, the difference is pretty trivial. However, if you're inserting a lot of items, or working with a very large array, calling .sort() after each insertion will cause a tremendous amount of overhead.
I ended up writing a pretty slick binary search/insert function for this exact purpose, so I thought I'd share it. Since it uses a while loop instead of recursion, there is no overhead from extra function calls, so I think the performance will be even better than either of the originally posted methods. It emulates the default Array.sort() comparator by default, but accepts a custom comparator function if desired.
function insertSorted(arr, item, comparator) {
  if (comparator == null) {
    // emulate the default Array.sort() comparator
    comparator = function (a, b) {
      if (typeof a !== 'string') a = String(a);
      if (typeof b !== 'string') b = String(b);
      return (a > b ? 1 : (a < b ? -1 : 0));
    };
  }

  // get the index we need to insert the item at
  var min = 0;
  var max = arr.length;
  var index = Math.floor((min + max) / 2);
  while (max > min) {
    if (comparator(item, arr[index]) < 0) {
      max = index;
    } else {
      min = index + 1;
    }
    index = Math.floor((min + max) / 2);
  }

  // insert the item
  arr.splice(index, 0, item);
}
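A usage sketch (my addition) with an explicit numeric comparator, since the default comparator compares lexicographically like Array.sort():

var arr = [1, 5, 10];
insertSorted(arr, 7, function (a, b) { return a - b; });
console.log(arr); // [1, 5, 7, 10]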
If you're open to using other libraries, lodash provides sortedIndex and sortedLastIndex functions, which could be used in place of the while loop. The two potential downsides are 1) performance isn't as good as my method (though I'm not sure how much worse it is) and 2) it does not accept a custom comparator function, only a method for getting the value to compare (using the default comparator, I assume).
arr.splice() is surely O(n) time complexity.
Here's a version that uses lodash.
const _ = require('lodash');
sortedArr.splice(_.sortedIndex(sortedArr, valueToInsert), 0, valueToInsert);
note: sortedIndex does a binary search.
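For arrays of objects, lodash also offers sortedIndexBy, which takes an iteratee instead of a comparator (a sketch assuming lodash v4; the people example is hypothetical):

const people = [{ age: 20 }, { age: 30 }];
const newPerson = { age: 25 };
// binary-searches on the value returned by the iteratee
people.splice(_.sortedIndexBy(people, newPerson, p => p.age), 0, newPerson);
console.log(people); // [{ age: 20 }, { age: 25 }, { age: 30 }]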
Here are a few thoughts: Firstly, if you're genuinely concerned about the runtime of your code, be sure to know what happens when you call the built-in functions! I don't know up from down in javascript, but a quick google of the splice function returned this, which seems to indicate that you're creating a whole new array each call! I don't know if it actually matters, but it is certainly related to efficiency. I see that Breton, in the comments, has already pointed this out, but it certainly holds for whatever array-manipulating function you choose.
Anyways, onto actually solving the problem.
When I read that you wanted to sort, my first thought was insertion sort! It is handy because it runs in linear time on sorted, or nearly-sorted, lists. As your arrays will have only 1 element out of order, that counts as nearly-sorted (except for, well, arrays of size 2 or 3 or whatever, but at that point, c'mon). Now, implementing the sort isn't too bad, but it is a hassle you may not want to deal with, and again, I don't know a thing about javascript and whether it will be easy or hard. This removes the need for your lookup function, and you just push (as Breton suggested).
Secondly, your "quicksort-esque" lookup function seems to be a binary search algorithm! It is a very nice algorithm, intuitive and fast, but with one catch: it is notoriously difficult to implement correctly. I won't dare say if yours is correct or not (I hope it is, of course! :)), but be wary if you want to use it.
Anyways, summary: using "push" with insertion sort will work in linear time (assuming the rest of the array is sorted), and avoid any messy binary search algorithm requirements. I don't know if this is the best way (underlying implementation of arrays, maybe a crazy built-in function does it better, who knows), but it seems reasonable to me. :) - Agor.
splice() is already O(n). Even if it doesn't internally create a new copy of the entire array, it potentially has to shunt all n items back 1 position if the element is to be inserted in position 0.
Here's a comparison of four different algorithms for accomplishing this: https://jsperf.com/sorted-array-insert-comparison/1
Algorithms
Naive: just push and sort() afterwards
Linear: iterate over array and insert where appropriate
Binary Search: taken from https://stackoverflow.com/a/20352387/154329
"Quick Sort Like": the refined solution from syntheticzero (https://stackoverflow.com/a/18341744/154329)
Naive is always horrible. It seems that for small array sizes, the other three don't differ too much, but for larger arrays, the last two outperform the simple linear approach.
The best data structure I can think of is an indexed skip list which maintains the insertion properties of linked lists with a hierarchy structure that enables log time operations. On average, search, insertion, and random access lookups can be done in O(log n) time.
An order statistic tree enables log time indexing with a rank function.
If you do not need random access but you need O(log n) insertion and searching for keys, you can ditch the array structure and use any kind of binary search tree.
None of the answers that use array.splice() are efficient at all, since that is on average O(n) time (see What's the time complexity of array.splice() in Google Chrome?).
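A minimal sketch of the binary search tree suggestion above (my illustration: a plain, unbalanced BST, without the rebalancing a production structure would need to guarantee O(log n)):

function Node(key) {
  this.key = key;
  this.left = null;
  this.right = null;
}

// average O(log n); degrades to O(n) without rebalancing
function bstInsert(root, key) {
  if (root === null) return new Node(key);
  if (key < root.key) root.left = bstInsert(root.left, key);
  else root.right = bstInsert(root.right, key);
  return root;
}

function bstContains(root, key) {
  while (root !== null) {
    if (key === root.key) return true;
    root = key < root.key ? root.left : root.right;
  }
  return false;
}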
Is there a good reason to choose [splice into location found] over [push & sort]?
Here is my function; it uses binary search to find the item and then inserts appropriately:
function binaryInsert(val, arr) {
  let mid, start = 0, end = arr.length - 1;
  while (start <= end) {
    mid = Math.floor((start + end) / 2);
    if (val <= arr[mid]) {
      // mid === 0 handles insertion before the first element
      if (mid === 0 || val >= arr[mid - 1]) {
        arr.splice(mid, 0, val);
        return arr;
      }
      end = mid - 1;
    } else {
      // mid === arr.length - 1 handles insertion after the last element
      if (mid === arr.length - 1 || val <= arr[mid + 1]) {
        arr.splice(mid + 1, 0, val);
        return arr;
      }
      start = mid + 1;
    }
  }
  arr.push(val); // empty array
  return arr;
}

console.log(binaryInsert(16, [5, 6, 14, 19, 23, 35, 44, 51, 63, 68, 71, 86, 87, 117]));
Don't re-sort after every item; it's overkill.
If there is only one item to insert, you can find the location to insert using binary search. Then use memcpy or similar to bulk copy the remaining items to make space for the inserted one. The binary search is O(log n), and the copy is O(n), giving O(n + log n) total. Using the methods above, you are doing a re-sort after every insertion, which is O(n log n).
Does it matter? Let's say you are randomly inserting k elements, where k = 1000. The sorted list is 5000 items.
Binary search + Move = k*(n + log n) = 1000*(5000 + 12) = 5,012,000 = ~5 million ops
Re-sort on each = k*(n log n) = ~60 million ops
If the k items to insert arrive whenever, then you must do search+move. However, if you are given a list of k items to insert into a sorted array ahead of time, then you can do even better. Sort the k items separately from the already-sorted n-item array, then do a scan merge, in which you move down both sorted arrays simultaneously, merging one into the other.
One-step merge sort = k log k + n = 9,965 + 5,000 = ~15,000 ops
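A sketch of that one-step merge in JavaScript (my illustration of the idea, not code from the answer):

function mergeInsert(sortedArr, items) {
  items.sort(function (a, b) { return a - b; }); // k log k
  var result = [];
  var i = 0, j = 0;
  // single scan down both sorted arrays: O(n + k)
  while (i < sortedArr.length && j < items.length) {
    result.push(sortedArr[i] <= items[j] ? sortedArr[i++] : items[j++]);
  }
  return result.concat(sortedArr.slice(i), items.slice(j));
}

console.log(mergeInsert([1, 4, 9], [3, 2])); // [1, 2, 3, 4, 9]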
Update: Regarding your question. The first method = binary search + move = O(n + log n). The second method = re-sort = O(n log n). This exactly explains the timings you're getting.
TypeScript version with custom compare method:
const { compare } = new Intl.Collator(undefined, {
numeric: true,
sensitivity: "base"
});
const insert = (items: string[], item: string) => {
let low = 0;
let high = items.length;
while (low < high) {
const mid = (low + high) >> 1;
compare(items[mid], item) > 0
? (high = mid)
: (low = mid + 1);
}
items.splice(low, 0, item);
};
Use:
const items = [];
insert(items, "item 12");
insert(items, "item 1");
insert(items, "item 2");
insert(items, "item 22");
console.log(items);
// ["item 1", "item 2", "item 12", "item 22"]
Had your first code been bug-free, my best guess is that it would be how you do this job in JS. I mean:

Make a binary search to find the index of insertion.
Use splice to perform your insertion.

This is almost always 2x faster than a top-down or bottom-up linear search and insert, as mentioned in domoarigato's answer, which I liked very much and took as a basis for my benchmark, along with, finally, push and sort.

Of course, in many cases you are probably doing this job on some objects in real life, so here I have generated a benchmark test for these three cases on an array of size 100,000 holding some objects. Feel free to play with it.
function insertElementToSorted(arr, ele, start = 0, end = null) {
  var n, mid;
  if (end == null) {
    end = arr.length - 1;
  }
  n = end - start;
  if (n % 2 == 0) {
    mid = start + n / 2;
  } else {
    mid = start + (n - 1) / 2;
  }
  if (start == end) {
    return start;
  }
  if (arr[0] > ele) return 0;
  if (arr[end] < ele) return end + 1; // insert after the last element
  if (arr[mid] >= ele && arr[mid - 1] <= ele) {
    return mid;
  }
  if (arr[mid] > ele && arr[mid - 1] > ele) {
    return insertElementToSorted(arr, ele, start, mid - 1);
  }
  if (arr[mid] <= ele && arr[mid + 1] >= ele) {
    return mid + 1;
  }
  if (arr[mid] < ele && arr[mid - 1] < ele) {
    return insertElementToSorted(arr, ele, mid, end);
  }
  if (arr[mid] < ele && arr[mid + 1] < ele) {
    return insertElementToSorted(arr, ele, mid + 1, end);
  }
}

// Example
var test = [1, 2, 5, 9, 10, 14, 17, 21, 35, 38, 54, 78, 89, 102];
insertElementToSorted(test, 6); // returns 3, the index at which to splice in 6
As a memo to my future self, here is yet another version, findOrAddSorted, with some optimizations for corner cases and a rudimentary test.
// returns BigInt(index) if the item has been found
// or BigInt(index) + BigInt(MAX_SAFE_INTEGER) if it has been inserted
function findOrAddSorted(items, newItem) {
  let from = 0;
  let to = items.length;
  let item;

  // check if the array is empty
  if (to === 0) {
    items.push(newItem);
    return BigInt(Number.MAX_SAFE_INTEGER);
  }

  // compare with the first item
  item = items[0];
  if (newItem === item) {
    return 0;
  }
  if (newItem < item) {
    items.splice(0, 0, newItem);
    return BigInt(Number.MAX_SAFE_INTEGER);
  }

  // compare with the last item
  item = items[to - 1];
  if (newItem === item) {
    return BigInt(to - 1);
  }
  if (newItem > item) {
    items.push(newItem);
    return BigInt(to) + BigInt(Number.MAX_SAFE_INTEGER);
  }

  // binary search
  let where;
  for (;;) {
    where = (from + to) >> 1;
    if (from >= to) {
      break;
    }
    item = items[where];
    if (item === newItem) {
      return BigInt(where);
    }
    if (item < newItem) {
      from = where + 1;
    } else {
      to = where;
    }
  }

  // insert newItem
  items.splice(where, 0, newItem);
  return BigInt(where) + BigInt(Number.MAX_SAFE_INTEGER);
}

// generate a random integer < MAX_SAFE_INTEGER
const generateRandomInt = () => Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);

// fill the array with random numbers
const items = new Array();
const amount = 1000;
let i = 0;
let where = 0;
for (i = 0; i < amount; i++) {
  where = findOrAddSorted(items, generateRandomInt());
  if (where < BigInt(Number.MAX_SAFE_INTEGER)) {
    break;
  }
}

if (where < BigInt(Number.MAX_SAFE_INTEGER)) {
  console.log(`items: ${i}, repeated at ${where}: ${items[Number(where)]}`);
} else {
  const at = Number(where - BigInt(Number.MAX_SAFE_INTEGER));
  console.log(`items: ${i}, last insert at: ${at}: ${items[at]}`);
}
console.log(items);
function insertOrdered(array, elem) {
  let i = 0;
  // linear scan for the first element not less than elem
  while (i < array.length && array[i] < elem) i++;
  array.splice(i, 0, elem);
  return array;
}
x >>> 1 is a binary right shift by 1 position, which is effectively just an integer division by 2. E.g. for 11: 1011 -> 101, which is 5.

What's the difference between >>> 1 and (seen here and there) >> 1?

>>> is an unsigned right shift, whereas >> is sign-extending; it all boils down to the in-memory representation of negative numbers, where the high bit is set if negative. So if you shift 0b1000 right 1 place with >> you'll get 0b1100; if you instead use >>> you'll get 0b0100. While in the case given in the answer it doesn't really matter (the number being shifted will neither be larger than a signed 32-bit positive integer's max value nor negative), it's important to use the right one in those two cases.

"if you shift 0b1000 right 1 place with >> you'll get 0b1100". No, you get 0b0100. The result of the different right shift operators will be the same for all values except negative numbers and numbers greater than 2^31 (i.e., numbers with a 1 in the first bit).
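A quick console check of the difference (my addition):

console.log(8 >> 1, 8 >>> 1); // 4 4 (identical for positive values below 2^31)
console.log(-8 >> 1);  // -4 (sign-extending shift)
console.log(-8 >>> 1); // 2147483644 (unsigned shift)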