// algorithm standard header
#pragma once
#ifndef _ALGORITHM_
#define _ALGORITHM_
#ifndef RC_INVOKED
#include <xmemory>

#pragma pack(push,_CRT_PACKING)
#pragma warning(push,_STL_WARNING_LEVEL)
#pragma warning(disable: _STL_DISABLED_WARNINGS)
_STL_DISABLE_CLANG_WARNINGS
#pragma push_macro("new")
#undef new

#if (defined(_M_IX86) || defined(_M_X64)) && !defined(_M_CEE_PURE) && !defined(_M_HYBRID)
_EXTERN_C
// See note about "noalias" in <xutility>
__declspec(noalias) void __cdecl __std_swap_ranges_trivially_swappable_noalias(
	void * _First1, void * _Last1, void * _First2) noexcept;
_END_EXTERN_C
#endif /* (defined(_M_IX86) || defined(_M_X64)) && !defined(_M_CEE_PURE) && !defined(_M_HYBRID) */

_STD_BEGIN
		// COMMON SORT PARAMETERS
const int _ISORT_MAX = 32;	// maximum size for insertion sort

		// STRUCT TEMPLATE _Optimistic_temporary_buffer
template<class _Diff>
	constexpr ptrdiff_t _Temporary_buffer_size(const _Diff _Value) noexcept
	{	// convert an iterator difference_type to a ptrdiff_t for use in temporary buffers
	using _CT = common_type_t<ptrdiff_t, _Diff>;
	return (static_cast<ptrdiff_t>(_Min_value(static_cast<_CT>(PTRDIFF_MAX), static_cast<_CT>(_Value))));
	}

template<class _Ty>
	struct _Optimistic_temporary_buffer
	{	// temporary storage with _alloca-like attempt
	static constexpr size_t _Optimistic_size = 4096; // default to ~1 page
	static constexpr size_t _Optimistic_count = _Max_value(static_cast<size_t>(1), _Optimistic_size / sizeof(_Ty));

	template<class _Diff>
		explicit _Optimistic_temporary_buffer(const _Diff _Requested_size) noexcept
		{	// get temporary storage
		const auto _Attempt = _Temporary_buffer_size(_Requested_size);
		if (_Requested_size <= _Optimistic_count)
			{	// unconditionally engage stack space
			_Data = reinterpret_cast<_Ty *>(&_Stack_space[0]);
			_Capacity = static_cast<ptrdiff_t>(_Requested_size);	// in bounds due to if condition
			return;
			}

		const pair<_Ty *, ptrdiff_t> _Raw = _Get_temporary_buffer<_Ty>(_Attempt);
		if (_Raw.second > _Optimistic_count)
			{	// engage heap space
			_Data = _Raw.first;
			_Capacity = _Raw.second;
			return;
			}

		// less heap space than stack space, give up and use stack instead
		_Return_temporary_buffer(_Raw.first);
		_Data = reinterpret_cast<_Ty *>(&_Stack_space[0]);
		_Capacity = _Optimistic_count;
		}

	_Optimistic_temporary_buffer(const _Optimistic_temporary_buffer&) = delete;
	_Optimistic_temporary_buffer& operator=(const _Optimistic_temporary_buffer&) = delete;

	~_Optimistic_temporary_buffer() noexcept
		{	// return temporary storage
		if (_Capacity > _Optimistic_count)
			{
			_Return_temporary_buffer(_Data);
			}
		}

	_Ty * _Data;	// points to heap memory iff _Capacity > _Optimistic_count
	ptrdiff_t _Capacity;
	aligned_union_t<0, _Ty> _Stack_space[_Optimistic_count];
	};

		// FUNCTION TEMPLATE for_each
template<class _InIt,
	class _Fn> inline
	_Fn for_each(_InIt _First, _InIt _Last, _Fn _Func)
	{	// perform function for each element [_First, _Last)
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		_Func(*_UFirst);
		}

	return (_Func);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void for_each(_ExPo&& _Exec, _FwdIt _First, _FwdIt _Last, _Fn _Func) noexcept;

		// FUNCTION TEMPLATE for_each_n
template<class _InIt,
	class _Diff,
	class _Fn> inline
	_InIt for_each_n(_InIt _First, const _Diff _Count_raw, _Fn _Func)
	{	// perform function for each element [_First, _First + _Count)
	_Algorithm_int_t<_Diff> _Count = _Count_raw;
	if (0 < _Count)
		{
		auto _UFirst = _Get_unwrapped_n(_First, _Count);
		do
			{
			_Func(*_UFirst);
			--_Count;
			++_UFirst;
			}
		while (0 < _Count);

		_Seek_wrapped(_First, _UFirst);
		}

	return (_First);
	}

template<class _ExPo,
	class _FwdIt,
	class _Diff,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt for_each_n(_ExPo&& _Exec, _FwdIt _First, _Diff _Count_raw, _Fn _Func) noexcept;

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InTy,
	size_t _InSize,
	class _Diff,
	class _Fn> inline
	_InTy * for_each_n(_InTy (&_First)[_InSize], const _Diff _Count_raw, _Fn _Func)
	{	// perform function for each element [_First, _First + _Count)
	_Algorithm_int_t<_Diff> _Count = _Count_raw;
	_STL_VERIFY_ARRAY_SIZE(_First, _Count);
	_InTy * _UFirst = _First;
	for (; 0 < _Count; --_Count, (void)++_UFirst)
		{
		_Func(*_UFirst);
		}

	return (_UFirst);
	}

template<class _ExPo,
	class _SourceTy,
	size_t _SourceSize,
	class _Diff,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_SourceTy * for_each_n(_ExPo&& _Exec, _SourceTy (&_First)[_SourceSize], _Diff _Count_raw, _Fn _Func) noexcept;
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE find_if
template<class _InIt,
	class _Pr>
	_NODISCARD inline _InIt find_if(_InIt _First, const _InIt _Last, _Pr _Pred)
	{	// find first satisfying _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			break;
			}
		}

	_Seek_wrapped(_First, _UFirst);
	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD _FwdIt find_if(_ExPo&& _Exec, _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE find_if_not
template<class _InIt,
	class _Pr>
	_NODISCARD inline _InIt find_if_not(_InIt _First, const _InIt _Last, _Pr _Pred)
	{	// find first element that satisfies !_Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (!_Pred(*_UFirst))
			{
			break;
			}
		}

	_Seek_wrapped(_First, _UFirst);
	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt find_if_not(_ExPo&& _Exec, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE adjacent_find
template<class _FwdIt,
	class _Pr>
	_NODISCARD inline _FwdIt adjacent_find(const _FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find first satisfying _Pred with successor
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	auto _ULast = _Get_unwrapped(_Last);
	if (_UFirst != _ULast)
		{
		for (auto _UNext = _UFirst; ++_UNext != _ULast; _UFirst = _UNext)
			{
			if (_Pred(*_UFirst, *_UNext))
				{
				_ULast = _UFirst;
				break;
				}
			}
		}

	_Seek_wrapped(_Last, _ULast);
	return (_Last);
	}

template<class _FwdIt>
	_NODISCARD inline _FwdIt adjacent_find(const _FwdIt _First, const _FwdIt _Last)
	{	// find first matching successor
	return (_STD adjacent_find(_First, _Last, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt adjacent_find(_ExPo&& _Exec, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept;

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt adjacent_find(_ExPo&& _Exec, const _FwdIt _First, const _FwdIt _Last) noexcept
	{	// find first matching successor
	return (_STD adjacent_find(_STD forward<_ExPo>(_Exec), _First, _Last, equal_to<>()));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE count_if
template<class _InIt,
	class _Pr>
	_NODISCARD inline _Iter_diff_t<_InIt> count_if(_InIt _First, _InIt _Last, _Pr _Pred)
	{	// count elements satisfying _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	_Iter_diff_t<_InIt> _Count = 0;
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			++_Count;
			}
		}

	return (_Count);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _Iter_diff_t<_FwdIt> count_if(_ExPo&& _Exec,
		const _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE mismatch
template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	pair<_InIt1, _InIt2> _Mismatch_unchecked(_InIt1 _First1, const _InIt1 _Last1, _InIt2 _First2, _Pr _Pred)
	{	// return [_First1, _Last1)/[_First2, ...) mismatch using _Pred
	while (_First1 != _Last1 && _Pred(*_First1, *_First2))
		{
		++_First1;
		++_First2;
		}

	return {_First1, _First2};
	}

template<class _InIt1,
	class _InIt2,
	class _Pr>
	_NODISCARD inline pair<_InIt1, _InIt2> mismatch(_InIt1 _First1, const _InIt1 _Last1, _InIt2 _First2, _Pr _Pred)
	{	// return [_First1, _Last1)/[_First2, ...) mismatch using _Pred
	_Adl_verify_range(_First1, _Last1);
	const auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	const auto _UFirst2 = _Get_unwrapped_n(_First2, _Idl_distance<_InIt1>(_UFirst1, _ULast1));
	const auto _Result = _Mismatch_unchecked(_UFirst1, _ULast1, _UFirst2, _Pass_fn(_Pred));
	_Seek_wrapped(_First2, _Result.second);
	_Seek_wrapped(_First1, _Result.first);
	return {_First1, _First2};
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _RightTy,
	size_t _RightSize,
	class _Pr,
	enable_if_t<!is_same_v<_RightTy *, _Pr>, int> = 0>
	_NODISCARD inline pair<_InIt1, _RightTy *> mismatch(const _InIt1 _First1, const _InIt1 _Last1,
		_RightTy (&_First2)[_RightSize], _Pr _Pred)
	{	// return [_First1, _Last1)/[_First2, ...) mismatch using _Pred
	const auto _Result = _STD mismatch(_First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2), _Pass_fn(_Pred));
	return {_Result.first, _Result.second._Unwrapped()};
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt1, _FwdIt2> mismatch(_ExPo&& _Exec, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _Pr _Pred) noexcept;

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _RightTy,
	size_t _RightSize,
	class _Pr,
	enable_if_t<is_execution_policy_v<decay_t<_ExPo>> && !is_same_v<_RightTy *, _Pr>, int> = 0>
	_NODISCARD inline pair<_FwdIt1, _RightTy *> mismatch(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		_RightTy (&_First2)[_RightSize], _Pr _Pred)
	{	// return [_First1, _Last1)/[_First2, ...) mismatch using _Pred
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	const auto _Result = _STD mismatch(_STD forward<_ExPo>(_Exec), _First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2), _Pass_fn(_Pred));
	return {_Result.first, _Result.second._Unwrapped()};
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

template<class _InIt1,
	class _InIt2>
	_NODISCARD inline pair<_InIt1, _InIt2> mismatch(const _InIt1 _First1, const _InIt1 _Last1, const _InIt2 _First2)
	{	// return [_First1, _Last1)/[_First2, ...) mismatch
	return (_STD mismatch(_First1, _Last1, _First2, equal_to<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _RightTy,
	size_t _RightSize>
	_NODISCARD inline pair<_InIt1, _RightTy *> mismatch(const _InIt1 _First1, const _InIt1 _Last1,
		_RightTy (&_First2)[_RightSize])
	{	// return [_First1, _Last1)/[_First2, ...) mismatch, array source
	return (_STD mismatch(_First1, _Last1, _First2, equal_to<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt1, _FwdIt2> mismatch(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2) noexcept
	{	// return [_First1, _Last1)/[_First2, ...) mismatch
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD mismatch(_STD forward<_ExPo>(_Exec), _First1, _Last1, _First2, equal_to<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _RightTy,
	size_t _RightSize,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt1, _RightTy *> mismatch(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		_RightTy (&_First2)[_RightSize]) noexcept
	{	// return [_First1, _Last1)/[_First2, ...) mismatch, array source
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD mismatch(_STD forward<_ExPo>(_Exec), _First1, _Last1, _First2, equal_to<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	pair<_InIt1, _InIt2> _Mismatch_unchecked(_InIt1 _First1, const _InIt1 _Last1,
		_InIt2 _First2, const _InIt2 _Last2, _Pr _Pred,
		input_iterator_tag, input_iterator_tag)
	{	// return [_First1, _Last1)/[_First2, _Last2) mismatch using _Pred, no special optimization
	while (_First1 != _Last1 && _First2 != _Last2 && _Pred(*_First1, *_First2))
		{
		++_First1;
		++_First2;
		}

	return {_First1, _First2};
	}

template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	pair<_InIt1, _InIt2> _Mismatch_unchecked(const _InIt1 _First1, const _InIt1 _Last1,
		const _InIt2 _First2, const _InIt2 _Last2, _Pr _Pred,
		random_access_iterator_tag, random_access_iterator_tag)
	{	// return [_First1, _Last1)/[_First2, _Last2) mismatch using _Pred, random-access iterators
	using _CT = _Common_diff_t<_InIt1, _InIt2>;
	const _CT _Count1 = _Last1 - _First1;
	const _CT _Count2 = _Last2 - _First2;
	const auto _Count = static_cast<_Iter_diff_t<_InIt1>>(_Min_value(_Count1, _Count2));
	return (_Mismatch_unchecked(_First1, _First1 + _Count, _First2, _Pred));
	}

template<class _InIt1,
	class _InIt2,
	class _Pr>
	_NODISCARD inline pair<_InIt1, _InIt2> mismatch(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _Pr _Pred)
	{	// return [_First1, _Last1)/[_First2, _Last2) mismatch using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	const auto _Result = _Mismatch_unchecked(_Get_unwrapped(_First1), _Get_unwrapped(_Last1),
			_Get_unwrapped(_First2), _Get_unwrapped(_Last2), _Pass_fn(_Pred),
			_Iter_cat_t<_InIt1>(), _Iter_cat_t<_InIt2>());
	_Seek_wrapped(_First2, _Result.second);
	_Seek_wrapped(_First1, _Result.first);
	return {_First1, _First2};
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt1, _FwdIt2> mismatch(_ExPo&& _Exec, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

template<class _InIt1,
	class _InIt2>
	_NODISCARD inline pair<_InIt1, _InIt2> mismatch(_InIt1 _First1, _InIt1 _Last1, _InIt2 _First2, _InIt2 _Last2)
	{	// return [_First1, _Last1)/[_First2, _Last2) mismatch
	return (_STD mismatch(_First1, _Last1, _First2, _Last2, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt1, _FwdIt2> mismatch(_ExPo&& _Exec, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2) noexcept
	{	// return [_First1, _Last1)/[_First2, _Last2) mismatch
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD mismatch(_STD forward<_ExPo>(_Exec), _First1, _Last1, _First2, _Last2, equal_to<>()));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE all_of
template<class _InIt,
	class _Pr>
	_NODISCARD inline bool all_of(_InIt _First, _InIt _Last, _Pr _Pred)
	{	// test if all elements satisfy _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (!_Pred(*_UFirst))
			{
			return (false);
			}
		}

	return (true);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool all_of(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE any_of
template<class _InIt,
	class _Pr>
	_NODISCARD inline bool any_of(const _InIt _First, const _InIt _Last, _Pr _Pred)
	{	// test if any element satisfies _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			return (true);
			}
		}

	return (false);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool any_of(_ExPo&&, const _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE none_of
template<class _InIt,
	class _Pr>
	_NODISCARD inline bool none_of(const _InIt _First, const _InIt _Last, _Pr _Pred)
	{	// test if no elements satisfy _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			return (false);
			}
		}

	return (true);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool none_of(_ExPo&&, const _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE copy_if
template<class _InIt,
	class _OutIt,
	class _Pr> inline
	_OutIt copy_if(_InIt _First, _InIt _Last, _OutIt _Dest, _Pr _Pred)
	{	// copy each satisfying _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			*_UDest = *_UFirst;
			++_UDest;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * copy_if(_InIt _First, _InIt _Last, _DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// copy each satisfying _Pred, array dest
	return (_STD copy_if(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _FwdIt2 _Dest, _Pr _Pred) noexcept
	{	// copy each satisfying _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD copy_if(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// copy each satisfying _Pred, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD copy_if(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE partition_copy
template<class _InIt,
	class _OutIt1,
	class _OutIt2,
	class _Pr> inline
	pair<_OutIt1, _OutIt2>
		partition_copy(_InIt _First, _InIt _Last,
			_OutIt1 _Dest_true, _OutIt2 _Dest_false, _Pr _Pred)
	{	// copy true partition to _Dest_true, false to _Dest_false
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest_true = _Get_unwrapped_unverified(_Dest_true);
	auto _UDest_false = _Get_unwrapped_unverified(_Dest_false);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			*_UDest_true = *_UFirst;
			++_UDest_true;
			}
		else
			{
			*_UDest_false = *_UFirst;
			++_UDest_false;
			}
		}

	_Seek_wrapped(_Dest_false, _UDest_false);
	_Seek_wrapped(_Dest_true, _UDest_true);
	return {_Dest_true, _Dest_false};
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTrueTy,
	size_t _DestTrueSize,
	class _OutIt2,
	class _Pr> inline
	pair<_DestTrueTy *, _OutIt2>
		partition_copy(_InIt _First, _InIt _Last,
			_DestTrueTy (&_Dest_true)[_DestTrueSize], _OutIt2 _Dest_false, _Pr _Pred)
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
	const auto _Result = _STD partition_copy(_First, _Last,
		_Array_iterator<_DestTrueTy, _DestTrueSize>(_Dest_true), _Dest_false, _Pass_fn(_Pred));
	return {_Result.first._Unwrapped(), _Result.second};
	}

template<class _InIt,
	class _OutIt1,
	class _DestFalseTy,
	size_t _DestFalseSize,
	class _Pr> inline
	pair<_OutIt1, _DestFalseTy *>
		partition_copy(_InIt _First, _InIt _Last,
			_OutIt1 _Dest_true, _DestFalseTy (&_Dest_false)[_DestFalseSize], _Pr _Pred)
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
	const auto _Result = _STD partition_copy(_First, _Last,
		_Dest_true, _Array_iterator<_DestFalseTy, _DestFalseSize>(_Dest_false), _Pass_fn(_Pred));
	return {_Result.first, _Result.second._Unwrapped()};
	}

template<class _InIt,
	class _DestTrueTy,
	size_t _DestTrueSize,
	class _DestFalseTy,
	size_t _DestFalseSize,
	class _Pr> inline
	pair<_DestTrueTy *, _DestFalseTy *>
		partition_copy(_InIt _First, _InIt _Last,
			_DestTrueTy (&_Dest_true)[_DestTrueSize], _DestFalseTy (&_Dest_false)[_DestFalseSize],
			_Pr _Pred)
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
	const auto _Result = _STD partition_copy(_First, _Last,
		_Array_iterator<_DestTrueTy, _DestTrueSize>(_Dest_true),
		_Array_iterator<_DestFalseTy, _DestFalseSize>(_Dest_false), _Pass_fn(_Pred));
	return {_Result.first._Unwrapped(), _Result.second._Unwrapped()};
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	pair<_FwdIt2, _FwdIt3>
		partition_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
			_FwdIt2 _Dest_true, _FwdIt3 _Dest_false, _Pr _Pred) noexcept
	{	// copy true partition to _Dest_true, false to _Dest_false
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD partition_copy(_First, _Last, _Dest_true, _Dest_false, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTrueTy,
	size_t _DestTrueSize,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	pair<_DestTrueTy *, _FwdIt3>
		partition_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
			_DestTrueTy (&_Dest_true)[_DestTrueSize], _FwdIt3 _Dest_false, _Pr _Pred) noexcept
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD partition_copy(_First, _Last, _Dest_true, _Dest_false, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestFalseTy,
	size_t _DestFalseSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	pair<_FwdIt2, _DestFalseTy *>
		partition_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
			_FwdIt2 _Dest_true, _DestFalseTy (&_Dest_false)[_DestFalseSize], _Pr _Pred) noexcept
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD partition_copy(_First, _Last, _Dest_true, _Dest_false, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt1,
	class _DestTrueTy,
	size_t _DestTrueSize,
	class _DestFalseTy,
	size_t _DestFalseSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	pair<_DestTrueTy *, _DestFalseTy *>
		partition_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
			_DestTrueTy (&_Dest_true)[_DestTrueSize], _DestFalseTy (&_Dest_false)[_DestFalseSize],
			_Pr _Pred) noexcept
	{	// copy true partition to _Dest_true, false to _Dest_false, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD partition_copy(_First, _Last, _Dest_true, _Dest_false, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE is_partitioned
template<class _InIt,
	class _Pr>
	_NODISCARD inline bool is_partitioned(_InIt _First, _InIt _Last, _Pr _Pred)
	{	// test if [_First, _Last) partitioned by _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);

	for (;; ++_UFirst)
		{	// skip true partition
		if (_UFirst == _ULast)
			{
			return (true);
			}

		if (!_Pred(*_UFirst))
			{
			break;
			}
		}

	while (++_UFirst != _ULast)
		{	// verify false partition
		if (_Pred(*_UFirst))
			{
			return (false);	// found out of place element
			}
		}

	return (true);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool is_partitioned(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// test if [_First, _Last) partitioned by _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_partitioned(_First, _Last, _Pass_fn(_Pred)));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE partition_point
template<class _FwdIt,
	class _Pr>
	_NODISCARD inline _FwdIt partition_point(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find beginning of false partition in [_First, _Last)
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _Count = _STD distance(_UFirst, _ULast);
	while (0 < _Count)
		{	// divide and conquer, find half that contains answer
		const auto _Count2 = static_cast<_Iter_diff_t<_FwdIt>>(_Count >> 1);
		const auto _UMid = _STD next(_UFirst, _Count2);

		if (_Pred(*_UMid))
			{	// try top half
			_UFirst = _Next_iter(_UMid);
			_Count -= _Count2;
			--_Count;
			}
		else
			{
			_Count = _Count2;
			}
		}

	_Seek_wrapped(_First, _UFirst);
	return (_First);
	}

		// FUNCTION TEMPLATE _Equal_rev_pred_unchecked
template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	bool _Equal_rev_pred_unchecked1(_InIt1 _First1, _InIt2 _First2, const _InIt2 _Last2, _Pr _Pred, false_type)
	{	// compare [_First1, ...) to [_First2, _Last2) using _Pred, no special optimization
	for (; _First2 != _Last2; ++_First1, (void)++_First2)
		{
		if (!_Pred(*_First1, *_First2))
			{
			return (false);
			}
		}

	return (true);
	}

template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	bool _Equal_rev_pred_unchecked1(const _InIt1 _First1, const _InIt2 _First2, const _InIt2 _Last2, _Pr, true_type)
	{	// compare [_First1, ...) to [_First2, _Last2), memcmp optimization
	const auto _First1_ch = reinterpret_cast<const char *>(_First1);
	const auto _First2_ch = reinterpret_cast<const char *>(_First2);
	const auto _Count = static_cast<size_t>(reinterpret_cast<const char *>(_Last2) - _First2_ch);
	return (_CSTD memcmp(_First1_ch, _First2_ch, _Count) == 0);
	}

template<class _InIt1,
	class _InIt2,
	class _Pr> inline
	bool _Equal_rev_pred_unchecked(const _InIt1 _First1, const _InIt2 _First2, const _InIt2 _Last2, _Pr _Pred)
	{	// compare [_First1, ...) to [_First2, _Last2) using _Pred, choose optimization
	return (_Equal_rev_pred_unchecked1(_First1, _First2, _Last2, _Pred,
		_Equal_memcmp_is_safe(_First1, _First2, _Pred)));
	}

		// FUNCTION TEMPLATE search
template<class _FwdItHaystack,
	class _FwdItPat,
	class _Pr> inline
	_FwdItHaystack _Search_unchecked(_FwdItHaystack _First1, _FwdItHaystack _Last1,
		_FwdItPat _First2, _FwdItPat _Last2, _Pr _Pred,
		forward_iterator_tag, forward_iterator_tag)
	{	// find first [_First2, _Last2) satisfying _Pred, arbitrary iterators
	for (;; ++_First1)
		{	// loop until match or end of a sequence
		_FwdItHaystack _Mid1 = _First1;
		for (_FwdItPat _Mid2 = _First2; ; ++_Mid1, (void)++_Mid2)
			{
			if (_Mid2 == _Last2)
				{
				return (_First1);
				}
			else if (_Mid1 == _Last1)
				{
				return (_Last1);
				}
			else if (!_Pred(*_Mid1, *_Mid2))
				{
				break;
				}
			}
		}
	}

template<class _FwdItHaystack,
	class _FwdItPat,
	class _Pr> inline
	_FwdItHaystack _Search_unchecked(_FwdItHaystack _First1, const _FwdItHaystack _Last1,
		const _FwdItPat _First2, const _FwdItPat _Last2, _Pr _Pred,
		random_access_iterator_tag, random_access_iterator_tag)
	{	// find first [_First2, _Last2) satisfying _Pred, random-access iterators
	_Iter_diff_t<_FwdItPat> _Count2 = _Last2 - _First2;
	if (_Last1 - _First1 >= _Count2)
		{
		const auto _Last_possible = _Last1 - static_cast<_Iter_diff_t<_FwdItHaystack>>(_Count2);
		for (;; ++_First1)
			{
			if (_Equal_rev_pred_unchecked(_First1, _First2, _Last2, _Pred))
				{
				return (_First1);
				}

			if (_First1 == _Last_possible)
				{
				break;
				}
			}
		}

	return (_Last1);
	}

template<class _FwdItHaystack,
	class _FwdItPat,
	class _Pr>
	_NODISCARD inline _FwdItHaystack search(_FwdItHaystack _First1, const _FwdItHaystack _Last1,
		const _FwdItPat _First2, const _FwdItPat _Last2, _Pr _Pred)
	{	// find first [_First2, _Last2) satisfying _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	_Seek_wrapped(_First1,
		_Search_unchecked(_Get_unwrapped(_First1), _Get_unwrapped(_Last1),
			_Get_unwrapped(_First2), _Get_unwrapped(_Last2), _Pass_fn(_Pred),
			_Iter_cat_t<_FwdItHaystack>(), _Iter_cat_t<_FwdItPat>()));
	return (_First1);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdItHaystack,
	class _FwdItPat,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdItHaystack search(_ExPo&& _Exec, _FwdItHaystack _First1, const _FwdItHaystack _Last1,
		const _FwdItPat _First2, const _FwdItPat _Last2, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

template<class _FwdItHaystack,
	class _FwdItPat>
	_NODISCARD inline _FwdItHaystack search(const _FwdItHaystack _First1, const _FwdItHaystack _Last1,
		const _FwdItPat _First2, const _FwdItPat _Last2)
	{	// find first [_First2, _Last2) match
	return (_STD search(_First1, _Last1, _First2, _Last2, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdItHaystack,
	class _FwdItPat,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdItHaystack search(_ExPo&& _Exec, const _FwdItHaystack _First1, const _FwdItHaystack _Last1,
		const _FwdItPat _First2, const _FwdItPat _Last2) noexcept
	{	// find first [_First2, _Last2) match
	return (_STD search(_STD forward<_ExPo>(_Exec), _First1, _Last1, _First2, _Last2, equal_to<>()));
	}
#endif /* _HAS_CXX17 */

template<class _FwdItHaystack,
	class _Searcher>
	_NODISCARD inline _FwdItHaystack search(const _FwdItHaystack _First, const _FwdItHaystack _Last,
		const _Searcher& _Search)
	{	// find _Search's pattern in [_First, _Last)
	return (_Search(_First, _Last).first);
	}

		// FUNCTION TEMPLATE search_n
template<class _FwdIt,
	class _Diff,
	class _Ty,
	class _Pr> inline
	_FwdIt _Search_n_unchecked(_FwdIt _First, const _FwdIt _Last,
		const _Diff _Count, const _Ty& _Val, _Pr _Pred, forward_iterator_tag)
	{	// find first _Count * _Val satisfying _Pred, forward iterators
	if (_Count <= 0)
		return (_First);

	for (; _First != _Last; ++_First)
		{
		if (_Pred(*_First, _Val))
			{	// found start of possible match, check it out
			_FwdIt _Mid = _First;

			for (_Diff _Count1 = _Count;;)
				{
				if (--_Count1 == 0)
					{
					return (_First);	// found rest of match, report it
					}
				else if (++_Mid == _Last)
					{
					return (_Last);	// short match at end
					}
				else if (!_Pred(*_Mid, _Val))
					{	// short match not at end
					break;
					}
				}

			_First = _Mid;	// pick up just beyond failed match
			}
		}

	return (_Last);
	}

template<class _FwdIt,
	class _Diff,
	class _Ty,
	class _Pr> inline
	_FwdIt _Search_n_unchecked(_FwdIt _First, const _FwdIt _Last,
		const _Diff _Count, const _Ty& _Val, _Pr _Pred, random_access_iterator_tag)
	{	// find first _Count * _Val satisfying _Pred, random-access iterators
	if (_Count <= 0)
		{
		return (_First);
		}

	if (static_cast<uintmax_t>(_Count)
		> static_cast<uintmax_t>((numeric_limits<_Iter_diff_t<_FwdIt>>::max)()))
		{	// if the number of _Vals searched for is larger than the longest possible
			// sequence, we can't find it
		return (_Last);
		}

	const auto _Count_diff = static_cast<_Iter_diff_t<_FwdIt>>(_Count);
	_FwdIt _Old_first = _First;
	for (_Iter_diff_t<_FwdIt> _Inc = 0; _Count_diff <= _Last - _Old_first; )
		{	// enough room, look for a match
		_First = _Old_first + _Inc;
		if (_Pred(*_First, _Val))
			{	// found part of possible match, check it out
			_Iter_diff_t<_FwdIt> _Count1 = _Count_diff;
			_FwdIt _Mid = _First;

			for (; _Old_first != _First && _Pred(_First[-1], _Val);
				--_First)
				--_Count1;	// back up over any skipped prefix

			if (_Count1 <= _Last - _Mid)
				{
				for (;;)
					{	// enough left, test suffix
					if (--_Count1 == 0)
						{
						return (_First);	// found rest of match, report it
						}
					else if (!_Pred(*++_Mid, _Val))
						{	// short match not at end
						break;
						}
					}
				}
			_Old_first = ++_Mid;	// failed match, take small jump
			_Inc = 0;
			}
		else
			{	// no match, take big jump and back up as needed
			_Old_first = _First + 1;
			_Inc = _Count_diff - 1;
			}
		}

	return (_Last);
	}

template<class _FwdIt,
	class _Diff,
	class _Ty,
	class _Pr>
	_NODISCARD inline _FwdIt search_n(_FwdIt _First, const _FwdIt _Last,
		const _Diff _Count_raw, const _Ty& _Val, _Pr _Pred)
	{	// find first _Count * _Val satisfying _Pred
	const _Algorithm_int_t<_Diff> _Count = _Count_raw;
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Search_n_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Count, _Val,
			_Pass_fn(_Pred), _Iter_cat_t<_FwdIt>()));

	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Diff,
	class _Ty,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt search_n(_ExPo&& _Exec, _FwdIt _First, const _FwdIt _Last,
		const _Diff _Count_raw, const _Ty& _Val, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

template<class _FwdIt,
	class _Diff,
	class _Ty>
	_NODISCARD inline _FwdIt search_n(const _FwdIt _First, const _FwdIt _Last, const _Diff _Count, const _Ty& _Val)
	{	// find first _Count * _Val match
	return (_STD search_n(_First, _Last, _Count, _Val, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Diff,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt search_n(_ExPo&& _Exec, const _FwdIt _First, const _FwdIt _Last,
		const _Diff _Count, const _Ty& _Val) noexcept
	{	// find first _Count * _Val match
	return (_STD search_n(_STD forward<_ExPo>(_Exec), _First, _Last, _Count, _Val, equal_to<>()));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE find_end
template<class _FwdIt1,
	class _FwdIt2,
	class _Pr> inline
	_FwdIt1 _Find_end_unchecked(_FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2, _Pr _Pred,
		forward_iterator_tag, forward_iterator_tag)
	{	// find last [_First2, _Last2) satisfying _Pred in forward ranges
	_FwdIt1 _Result = _Last1;
	for (;;)
		{	// try a match at _First1
		_FwdIt1 _Next1 = _First1;
		_FwdIt2 _Next2 = _First2;
		for (;;)
			{	// test if [_First2, _Last2) is a prefix of [_First1, _Last1)
			const bool _End_of_needle = static_cast<bool>(_Next2 == _Last2);
			if (_End_of_needle)
				{	// match candidate found
				_Result = _First1;
				}

			if (_Next1 == _Last1)
				{	// trying the next candidate would make [_First1, _Last1) shorter than [_First2, _Last2), done
				return (_Result);
				}

			if (_End_of_needle || !_Pred(*_Next1, *_Next2))
				{	// end of match or counterexample found, go to the next candidate
				break;
				}

			++_Next1;
			++_Next2;
			}

		++_First1;
		}
	}

template<class _BidIt1,
	class _BidIt2,
	class _Pr> inline
	_BidIt1 _Find_end_unchecked(const _BidIt1 _First1, const _BidIt1 _Last1,
		const _BidIt2 _First2, const _BidIt2 _Last2, _Pr _Pred,
		bidirectional_iterator_tag, bidirectional_iterator_tag)
	{	// find last [_First2, _Last2) satisfying _Pred in bidirectional ranges
	for (_BidIt1 _Candidate = _Last1; ; --_Candidate)
		{	// try a match at _Candidate
		_BidIt1 _Next1 = _Candidate;
		_BidIt2 _Next2 = _Last2;
		for (;;)
			{	// test if [_First2, _Last2) is a suffix of [_First1, _Candidate)
			if (_First2 == _Next2)
				{	// match found
				return (_Next1);
				}

			if (_First1 == _Next1)
				{	// [_First1, _Candidate) is shorter than [_First2, _Last2), remaining candidates nonviable
				return (_Last1);
				}

			--_Next1;
			--_Next2;
			if (!_Pred(*_Next1, *_Next2))
				{	// counterexample found
				break;
				}
			}
		}
	}

template<class _RanIt1,
	class _RanIt2,
	class _Pr> inline
	_RanIt1 _Find_end_unchecked(const _RanIt1 _First1, const _RanIt1 _Last1,
		const _RanIt2 _First2, const _RanIt2 _Last2, _Pr _Pred,
		random_access_iterator_tag, random_access_iterator_tag)
	{	// find last [_First2, _Last2) satisfying _Pred in random-access ranges
	const _Iter_diff_t<_RanIt2> _Count2 = _Last2 - _First2;
	if (0 < _Count2 && _Count2 <= _Last1 - _First1)
		{
		for (_RanIt1 _Candidate = _Last1 - static_cast<_Iter_diff_t<_RanIt1>>(_Count2); ; --_Candidate)
			{
			if (_Equal_rev_pred_unchecked(_Candidate, _First2, _Last2, _Pred))
				{
				return (_Candidate);
				}

			if (_First1 == _Candidate)
				{
				break;
				}
			}
		}

	return (_Last1);
	}

template<class _FwdIt1,
	class _FwdIt2,
	class _Pr>
	_NODISCARD inline _FwdIt1 find_end(_FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2, _Pr _Pred)
	{	// find last [_First2, _Last2) satisfying _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	_Seek_wrapped(_First1,
		_Find_end_unchecked(_Get_unwrapped(_First1), _Get_unwrapped(_Last1),
		_Get_unwrapped(_First2), _Get_unwrapped(_Last2),
		_Pass_fn(_Pred), _Iter_cat_t<_FwdIt1>(), _Iter_cat_t<_FwdIt2>()));

	return (_First1);
	}

template<class _FwdIt1,
	class _FwdIt2>
	_NODISCARD inline _FwdIt1 find_end(_FwdIt1 const _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2)
	{	// find last [_First2, _Last2) match
	return (_STD find_end(_First1, _Last1, _First2, _Last2, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt1 find_end(_ExPo&& _Exec, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _Pr _Pred) noexcept;

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt1 find_end(_ExPo&& _Exec, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2) noexcept
	{	// find last [_First2, _Last2) match
	return (_STD find_end(_STD forward<_ExPo>(_Exec), _First1, _Last1, _First2, _Last2, equal_to<>()));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE find_first_of
template<class _FwdIt1,
	class _FwdIt2,
	class _Pr>
	_NODISCARD inline _FwdIt1 find_first_of(_FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2, _Pr _Pred)
	{	// look for one of [_First2, _Last2) satisfying _Pred with element
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	const auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	for (; _UFirst1 != _ULast1; ++_UFirst1)
		{
		for (auto _UMid2 = _UFirst2; _UMid2 != _ULast2; ++_UMid2)
			{
			if (_Pred(*_UFirst1, *_UMid2))
				{
				_Seek_wrapped(_First1, _UFirst1);
				return (_First1);
				}
			}
		}

	_Seek_wrapped(_First1, _UFirst1);
	return (_First1);
	}

template<class _FwdIt1,
	class _FwdIt2>
	_NODISCARD inline _FwdIt1 find_first_of(const _FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2)
	{	// look for one of [_First2, _Last2) that matches element
	return (_STD find_first_of(_First1, _Last1, _First2, _Last2, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt1 find_first_of(_ExPo&& _Exec, const _FwdIt1 _First1, _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2, _Pr _Pred) noexcept;

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt1 find_first_of(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, const _FwdIt2 _Last2) noexcept
	{	// look for one of [_First2, _Last2) that matches element
	return (_STD find_first_of(_STD forward<_ExPo>(_Exec), _First1, _Last1,
		_First2, _Last2, equal_to<>()));
	}
#endif /* _HAS_CXX17 */


		// FUNCTION TEMPLATE swap_ranges
template<class _FwdIt1,
	class _FwdIt2> inline
	_FwdIt2 _Swap_ranges_unchecked(_FwdIt1 _First1, const _FwdIt1 _Last1, _FwdIt2 _First2)
	{	// swap [_First1, _Last1) with [_First2, ...), no special optimization
	for (; _First1 != _Last1; ++_First1, (void)++_First2)
		{
		_STD iter_swap(_First1, _First2);
		}

	return (_First2);
	}

#if (defined(_M_IX86) || defined(_M_X64)) && !defined(_M_CEE_PURE) && !defined(_M_HYBRID)
template<class _Ty,
	enable_if_t<_Is_trivially_swappable_v<_Ty>, int> = 0> inline
	_Ty * _Swap_ranges_unchecked(_Ty * const _First1, _Ty * const _Last1, _Ty * const _First2)
	{	// swap [_First1, _Last1) with [_First2, ...), trivially swappable optimization
	__std_swap_ranges_trivially_swappable_noalias(_First1, _Last1, _First2);
	return (_First2 + (_Last1 - _First1));
	}
#endif /* (defined(_M_IX86) || defined(_M_X64)) && !defined(_M_CEE_PURE) && !defined(_M_HYBRID) */

template<class _FwdIt1,
	class _FwdIt2> inline
	_FwdIt2 swap_ranges(const _FwdIt1 _First1, const _FwdIt1 _Last1, _FwdIt2 _First2)
	{	// swap [_First1, _Last1) with [_First2, ...)
	_Adl_verify_range(_First1, _Last1);
	const auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	const auto _UFirst2 = _Get_unwrapped_n(_First2, _Idl_distance<_FwdIt1>(_UFirst1, _ULast1));
	_Seek_wrapped(_First2, _Swap_ranges_unchecked(_UFirst1, _ULast1, _UFirst2));
	return (_First2);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _FwdIt1,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * swap_ranges(_FwdIt1 _First1, _FwdIt1 _Last1, _DestTy (&_Dest)[_DestSize])
	{	// swap [_First1, _Last1) with [_Dest, ...), array dest
	return (_STD swap_ranges(_First1, _Last1, _Array_iterator<_DestTy, _DestSize>(_Dest))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 swap_ranges(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1, _FwdIt2 _Dest) noexcept
	{	// swap [_First1, _Last1) with [_Dest, ...)
		// not parallelized as benchmarks show it isn't worth it
	return (_STD swap_ranges(_First1, _Last1, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * swap_ranges(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1, _DestTy (&_Dest)[_DestSize]) noexcept
	{	// swap [_First1, _Last1) with [_Dest, ...), array dest
		// not parallelized as benchmarks show it isn't worth it
	return (_STD swap_ranges(_First1, _Last1, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE transform
template<class _InIt,
	class _OutIt,
	class _Fn> inline
	_OutIt transform(const _InIt _First, const _InIt _Last, _OutIt _Dest, _Fn _Func)
	{	// transform [_First, _Last) with _Func
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_distance<_InIt>(_UFirst, _ULast));
	for (; _UFirst != _ULast; ++_UFirst, (void)++_UDest)
		{
		*_UDest = _Func(*_UFirst);
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Fn> inline
	_DestTy * transform(const _InIt _First, const _InIt _Last, _DestTy (&_Dest)[_DestSize], _Fn _Func)
	{	// transform [_First, _Last) with _Func, array dest
	return (_STD transform(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 transform(_ExPo&& _Exec, const _FwdIt1 _First, const _FwdIt1 _Last, _FwdIt2 _Dest, _Fn _Func) noexcept;

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * transform(_ExPo&& _Exec, _FwdIt1 _First, _FwdIt1 _Last, _DestTy (&_Dest)[_DestSize], _Fn _Func) noexcept
	{	// transform [_First, _Last) with _Func, array dest
	return (_STD transform(_STD forward<_ExPo>(_Exec), _First, _Last,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Fn> inline
	_OutIt transform(const _InIt1 _First1, const _InIt1 _Last1,
		const _InIt2 _First2, _OutIt _Dest, _Fn _Func)
	{	// transform [_First1, _Last1) and [_First2, ...) with _Func
	_Adl_verify_range(_First1, _Last1);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	const auto _Count = _Idl_distance<_InIt1>(_UFirst1, _ULast1);
	auto _UFirst2 = _Get_unwrapped_n(_First2, _Count);
	auto _UDest = _Get_unwrapped_n(_Dest, _Count);
	for (; _UFirst1 != _ULast1; ++_UFirst1, (void)++_UFirst2, ++_UDest)
		{
		*_UDest = _Func(*_UFirst1, *_UFirst2);
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _RightTy,
	size_t _RightSize,
	class _OutIt,
	class _Fn> inline
	_OutIt transform(const _InIt1 _First1, const _InIt1 _Last1,
		_RightTy (&_First2)[_RightSize], const _OutIt _Dest, _Fn _Func)
	{	// transform [_First1, _Last1) and [_First2, ...), array source
	return (_STD transform(_First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2), _Dest, _Pass_fn(_Func)));
	}

template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Fn> inline
	_DestTy * transform(const _InIt1 _First1, const _InIt1 _Last1,
		_InIt2 _First2, _DestTy (&_Dest)[_DestSize], _Fn _Func)
	{	// transform [_First1, _Last1) and [_First2, ...), array dest
	return (_STD transform(_First1, _Last1,
			_First2, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))._Unwrapped());
	}

template<class _InIt1,
	class _RightTy,
	size_t _RightSize,
	class _DestTy,
	size_t _DestSize,
	class _Fn> inline
	_DestTy * transform(const _InIt1 _First1, const _InIt1 _Last1,
		_RightTy (&_First2)[_RightSize], _DestTy (&_Dest)[_DestSize], _Fn _Func)
	{	// transform [_First1, _Last1) and [_First2, ...), array source/dest
	return (_STD transform(_First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2),
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 transform(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		const _FwdIt2 _First2, _FwdIt3 _Dest, _Fn _Func) noexcept;

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _RightTy,
	size_t _RightSize,
	class _FwdIt3,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 transform(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		_RightTy (&_First2)[_RightSize], const _FwdIt3 _Dest, _Fn _Func) noexcept
	{	// transform [_First1, _Last1) and [_First2, ...), array source
	return (_STD transform(_STD forward<_ExPo>(_Exec), _First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2), _Dest, _Pass_fn(_Func)));
	}

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * transform(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		_FwdIt2 _First2, _DestTy (&_Dest)[_DestSize], _Fn _Func) noexcept
	{	// transform [_First1, _Last1) and [_First2, ...), array dest
	return (_STD transform(_STD forward<_ExPo>(_Exec), _First1, _Last1,
		_First2, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))._Unwrapped());
	}

template<class _ExPo,
	class _FwdIt1,
	class _RightTy,
	size_t _RightSize,
	class _DestTy,
	size_t _DestSize,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * transform(_ExPo&& _Exec, const _FwdIt1 _First1, const _FwdIt1 _Last1,
		_RightTy (&_First2)[_RightSize], _DestTy (&_Dest)[_DestSize], _Fn _Func) noexcept
	{	// transform [_First1, _Last1) and [_First2, ...), array source/dest
	return (_STD transform(_STD forward<_ExPo>(_Exec), _First1, _Last1,
		_Array_iterator<_RightTy, _RightSize>(_First2),
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Func))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE replace
template<class _FwdIt,
	class _Ty> inline
	void replace(const _FwdIt _First, const _FwdIt _Last, const _Ty& _Oldval, const _Ty& _Newval)
	{	// replace each matching _Oldval with _Newval
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (*_UFirst == _Oldval)
			{
			*_UFirst = _Newval;
			}
		}
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void replace(_ExPo&& _Exec,
		const _FwdIt _First, const _FwdIt _Last, const _Ty& _Oldval, const _Ty& _Newval) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE replace_if
template<class _FwdIt,
	class _Pr,
	class _Ty> inline
	void replace_if(const _FwdIt _First, const _FwdIt _Last, _Pr _Pred, const _Ty& _Val)
	{	// replace each satisfying _Pred with _Val
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (_Pred(*_UFirst))
			{
			*_UFirst = _Val;
			}
		}
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void replace_if(_ExPo&& _Exec,
		_FwdIt _First, _FwdIt _Last, _Pr _Pred, const _Ty& _Val) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE replace_copy
template<class _InIt,
	class _OutIt,
	class _Ty> inline
	_OutIt replace_copy(_InIt _First, _InIt _Last,
		_OutIt _Dest, const _Ty& _Oldval, const _Ty& _Newval)
	{	// copy replacing each matching _Oldval with _Newval
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_distance<_InIt>(_UFirst, _ULast));
	for (; _UFirst != _ULast; ++_UFirst, (void)++_UDest)
		{
		if (*_UFirst == _Oldval)
			{
			*_UDest = _Newval;
			}
		else
			{
			*_UDest = *_UFirst;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Ty> inline
	_DestTy * replace_copy(_InIt _First, _InIt _Last,
		_DestTy (&_Dest)[_DestSize], const _Ty& _Oldval, const _Ty& _Newval)
	{	// copy replacing each matching _Oldval with _Newval, array dest
	return (_STD replace_copy(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Oldval, _Newval)
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 replace_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_FwdIt2 _Dest, const _Ty& _Oldval, const _Ty& _Newval) noexcept
	{	// copy replacing each matching _Oldval with _Newval
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD replace_copy(_First, _Last, _Dest, _Oldval, _Newval));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * replace_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_DestTy (&_Dest)[_DestSize], const _Ty& _Oldval, const _Ty& _Newval) noexcept
	{	// copy replacing each matching _Oldval with _Newval
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD replace_copy(_First, _Last, _Dest, _Oldval, _Newval));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE replace_copy_if
template<class _InIt,
	class _OutIt,
	class _Pr,
	class _Ty> inline
	_OutIt replace_copy_if(_InIt _First, _InIt _Last,
		_OutIt _Dest, _Pr _Pred, const _Ty& _Val)
	{	// copy replacing each satisfying _Pred with _Val
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_distance<_InIt>(_UFirst, _ULast));
	for (; _UFirst != _ULast; ++_UFirst, (void)++_UDest)
		{
		if (_Pred(*_UFirst))
			{
			*_UDest = _Val;
			}
		else
			{
			*_UDest = *_UFirst;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	class _Ty> inline
	_DestTy * replace_copy_if(_InIt _First, _InIt _Last,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred, const _Ty& _Val)
	{	// copy replacing each satisfying _Pred with _Val, array dest
	return (_STD replace_copy_if(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred), _Val)
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 replace_copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_FwdIt2 _Dest, _Pr _Pred, const _Ty& _Val) noexcept
	{	// copy replacing each satisfying _Pred with _Val
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD replace_copy_if(_First, _Last, _Dest, _Pass_fn(_Pred), _Val));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * replace_copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred, const _Ty& _Val) noexcept
	{	// copy replacing each satisfying _Pred with _Val, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD replace_copy_if(_First, _Last, _Dest, _Pass_fn(_Pred), _Val));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE generate
template<class _FwdIt,
	class _Fn> inline
	void generate(_FwdIt _First, _FwdIt _Last, _Fn _Func)
	{	// replace [_First, _Last) with _Func()
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		*_UFirst = _Func();
		}
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void generate(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Fn _Func) noexcept
	{	// replace [_First, _Last) with _Func()
		// not parallelized at present due to unclear parallelism requirements on _Func
	return (_STD generate(_First, _Last, _Pass_fn(_Func)));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE generate_n
template<class _OutIt,
	class _Diff,
	class _Fn> inline
	_OutIt generate_n(_OutIt _Dest, const _Diff _Count_raw, _Fn _Func)
	{	// replace [_Dest, _Dest + _Count) with _Func()
	_Algorithm_int_t<_Diff> _Count = _Count_raw;
	if (0 < _Count)
		{
		auto _UDest = _Get_unwrapped_n(_Dest, _Count);
		do
			{
			*_UDest = _Func();
			--_Count;
			++_UDest;
			}
		while (0 < _Count);

		_Seek_wrapped(_Dest, _UDest);
		}

	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _DestTy,
	size_t _DestSize,
	class _Diff,
	class _Fn> inline
	_DestTy * generate_n(_DestTy (&_Dest)[_DestSize], const _Diff _Count_raw, _Fn _Func)
	{	// replace [_Dest, _Dest + _Count) with _Func(), array dest
	_Algorithm_int_t<_Diff> _Count = _Count_raw;
	_STL_VERIFY_ARRAY_SIZE(_Dest, _Count);
	_DestTy * _UDest = _Dest;
	for (; 0 < _Count; --_Count, (void)++_UDest)
		{
		*_UDest = _Func();
		}

	return (_UDest);
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Diff,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt generate_n(_ExPo&&, const _FwdIt _Dest, const _Diff _Count_raw, _Fn _Func) noexcept
	{	// replace [_Dest, _Dest + _Count) with _Func()
		// not parallelized at present due to unclear parallelism requirements on _Func
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt);
	return (_STD generate_n(_Dest, _Count_raw, _Pass_fn(_Func)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _DestTy,
	size_t _DestSize,
	class _Diff,
	class _Fn,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * generate_n(_ExPo&&, _DestTy (&_Dest)[_DestSize], const _Diff _Count_raw, _Fn _Func) noexcept
	{	// replace [_Dest, _Dest + _Count) with _Func()
		// not parallelized at present due to unclear parallelism requirements on _Func
	return (_STD generate_n(_Dest, _Count_raw, _Pass_fn(_Func)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE remove_copy
template<class _InIt,
	class _OutIt,
	class _Ty> inline
	_OutIt remove_copy(_InIt _First, _InIt _Last, _OutIt _Dest, const _Ty& _Val)
	{	// copy omitting each matching _Val
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (!(*_UFirst == _Val))
			{
			*_UDest = *_UFirst;
			++_UDest;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Ty> inline
	_DestTy * remove_copy(_InIt _First, _InIt _Last,
		_DestTy (&_Dest)[_DestSize], const _Ty& _Val)
	{	// copy omitting each matching _Val, array dest
	return (_STD remove_copy(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Val)
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 remove_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _FwdIt2 _Dest, const _Ty& _Val) noexcept
	{	// copy omitting each matching _Val
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD remove_copy(_First, _Last, _Dest, _Val));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * remove_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_DestTy (&_Dest)[_DestSize], const _Ty& _Val) noexcept
	{	// copy omitting each matching _Val, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD remove_copy(_First, _Last, _Dest, _Val));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE remove_copy_if
template<class _InIt,
	class _OutIt,
	class _Pr> inline
	_OutIt remove_copy_if(_InIt _First, _InIt _Last, _OutIt _Dest, _Pr _Pred)
	{	// copy omitting each element satisfying _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	for (; _UFirst != _ULast; ++_UFirst)
		{
		if (!_Pred(*_UFirst))
			{
			*_UDest = *_UFirst;
			++_UDest;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * remove_copy_if(_InIt _First, _InIt _Last,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// copy omitting each element satisfying _Pred, array dest
	return (_STD remove_copy_if(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 remove_copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _FwdIt2 _Dest, _Pr _Pred) noexcept
	{	// copy omitting each element satisfying _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD remove_copy_if(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * remove_copy_if(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// copy omitting each element satisfying _Pred, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD remove_copy_if(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE remove
template<class _FwdIt,
	class _Ty>
	_NODISCARD inline _FwdIt remove(_FwdIt _First, const _FwdIt _Last, const _Ty& _Val)
	{	// remove each matching _Val
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	_UFirst = _Find_unchecked(_UFirst, _ULast, _Val);
	auto _UNext = _UFirst;
	if (_UFirst != _ULast)
		{
		while (++_UFirst != _ULast)
			{
			if (!(*_UFirst == _Val))
				{
				*_UNext = _STD move(*_UFirst);
				++_UNext;
				}
			}
		}

	_Seek_wrapped(_First, _UNext);
	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Ty,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt remove(_ExPo&& _Exec, const _FwdIt _First, const _FwdIt _Last, const _Ty& _Val) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE remove_if
template<class _FwdIt,
	class _Pr>
	_NODISCARD inline _FwdIt remove_if(_FwdIt _First, const _FwdIt _Last, _Pr _Pred)
	{	// remove each satisfying _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	_UFirst = _STD find_if(_UFirst, _ULast, _Pass_fn(_Pred));
	auto _UNext = _UFirst;
	if (_UFirst != _ULast)
		{
		while (++_UFirst != _ULast)
			{
			if (!_Pred(*_UFirst))
				{
				*_UNext = _STD move(*_UFirst);
				++_UNext;
				}
			}
		}

	_Seek_wrapped(_First, _UNext);
	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt remove_if(_ExPo&& _Exec, _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE unique
template<class _FwdIt,
	class _Pr>
	_NODISCARD inline _FwdIt unique(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// remove each satisfying _Pred with previous
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	if (_UFirst != _ULast)
		{
		for (auto _UFirstb = _UFirst; ++_UFirst != _ULast; _UFirstb = _UFirst)
			{
			if (_Pred(*_UFirstb, *_UFirst))
				{	// copy down
				while (++_UFirst != _ULast)
					{
					if (!_Pred(*_UFirstb, *_UFirst))
						{
						*++_UFirstb = _STD move(*_UFirst);
						}
					}

				_Seek_wrapped(_Last, ++_UFirstb);
				return (_Last);
				}
			}
		}

	_Seek_wrapped(_Last, _ULast);
	return (_Last);
	}

template<class _FwdIt>
	_NODISCARD inline _FwdIt unique(_FwdIt _First, _FwdIt _Last)
	{	// remove each matching previous
	return (_STD unique(_First, _Last, equal_to<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt unique(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// remove each satisfying _Pred with previous
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD unique(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt unique(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// remove each matching previous
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD unique(_First, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE unique_copy
template<class _FwdIt,
	class _OutIt,
	class _Pr> inline
	_OutIt _Unique_copy_unchecked(_FwdIt _First, _FwdIt _Last,
		_OutIt _Dest, _Pr _Pred, true_type, _Any_tag)
	{	// copy compressing pairs satisfying _Pred, forward source iterator
		// (can reread the source for comparison)
	if (_First != _Last)
		{
		_FwdIt _Firstb = _First;

		*_Dest = *_Firstb;
		++_Dest;

		while (++_First != _Last)
			{
			if (!_Pred(*_Firstb, *_First))
				{	// copy unmatched
				_Firstb = _First;
				*_Dest = *_Firstb;
				++_Dest;
				}
			}
		}

	return (_Dest);
	}

template<class _InIt,
	class _FwdIt,
	class _Pr> inline
	_FwdIt _Unique_copy_unchecked(_InIt _First, _InIt _Last,
		_FwdIt _Dest, _Pr _Pred, false_type, true_type)
	{	// copy compressing pairs satisfying _Pred, forward dest iterator with matching T
		// (assignment copies T; can reread dest for comparison)
	if (_First != _Last)
		{
		*_Dest = *_First;

		while (++_First != _Last)
			{
			if (!_Pred(*_Dest, *_First))
				{
				*++_Dest = *_First;
				}
			}

		++_Dest;
		}

	return (_Dest);
	}

template<class _InIt,
	class _OutIt,
	class _Pr> inline
	_OutIt _Unique_copy_unchecked(_InIt _First, _InIt _Last,
		_OutIt _Dest, _Pr _Pred, false_type, false_type)
	{	// copy compressing pairs satisfying _Pred, otherwise
		// (can't reread source or dest, construct a temporary)
	if (_First != _Last)
		{
		_Iter_value_t<_InIt> _Val = *_First;

		*_Dest = _Val;
		++_Dest;

		while (++_First != _Last)
			{
			if (!_Pred(_Val, *_First))
				{	// copy unmatched
				_Val = *_First;
				*_Dest = _Val;
				++_Dest;
				}
			}
		}

	return (_Dest);
	}

template<class _InIt,
	class _OutIt,
	class _Pr> inline
	_OutIt unique_copy(_InIt _First, _InIt _Last,
		_OutIt _Dest, _Pr _Pred)
	{	// copy compressing pairs that match
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_Dest,
		_Unique_copy_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last),
			_Get_unwrapped_unverified(_Dest), _Pass_fn(_Pred),
			bool_constant<_Is_fwd_iter_v<_InIt>>{}, // to avoid ambiguity
			bool_constant<conjunction_v<bool_constant<_Is_fwd_iter_v<_OutIt>>,
				is_same<_Iter_value_t<_InIt>, _Iter_value_t<_OutIt>>>>{}
			));

	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * unique_copy(_InIt _First, _InIt _Last, _DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// copy compressing pairs that match, array dest
	return (_STD unique_copy(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt,
	class _OutIt> inline
	_OutIt unique_copy(_InIt _First, _InIt _Last, _OutIt _Dest)
	{	// copy compressing pairs that match
	return (_STD unique_copy(_First, _Last, _Dest, equal_to<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * unique_copy(_InIt _First, _InIt _Last, _DestTy (&_Dest)[_DestSize])
	{	// copy compressing pairs that match, array dest
	return (_STD unique_copy(_First, _Last, _Dest, equal_to<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 unique_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last,
		_FwdIt2 _Dest, _Pr _Pred) noexcept
	{	// copy compressing pairs that match
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD unique_copy(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * unique_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// copy compressing pairs that match, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD unique_copy(_First, _Last, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 unique_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _FwdIt2 _Dest) noexcept
	{	// copy compressing pairs that match
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD unique_copy(_First, _Last, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * unique_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Last, _DestTy (&_Dest)[_DestSize]) noexcept
	{	// copy compressing pairs that match, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	return (_STD unique_copy(_First, _Last, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE reverse_copy
template<class _BidIt,
	class _OutIt> inline
	_OutIt reverse_copy(_BidIt _First, _BidIt _Last,
		_OutIt _Dest)
	{	// copy reversing elements in [_First, _Last)
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_distance<_BidIt>(_UFirst, _ULast));
	for (; _UFirst != _ULast; ++_UDest)
		{
		*_UDest = *--_ULast;
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _BidIt,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * reverse_copy(_BidIt _First, _BidIt _Last, _DestTy (&_Dest)[_DestSize])
	{	// copy reversing elements in [_First, _Last), array dest
	return (_STD reverse_copy(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest))
		._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _BidIt,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt reverse_copy(_ExPo&&, _BidIt _First, _BidIt _Last, _FwdIt _Dest) noexcept
	{	// copy reversing elements in [_First, _Last)
		// not parallelized as benchmarks show it isn't worth it
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt);
	return (_STD reverse_copy(_First, _Last, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _BidIt,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * reverse_copy(_ExPo&&, _BidIt _First, _BidIt _Last, _DestTy (&_Dest)[_DestSize]) noexcept
	{	// copy reversing elements in [_First, _Last), array dest
		// not parallelized as benchmarks show it isn't worth it
	return (_STD reverse_copy(_First, _Last, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE rotate_copy
template<class _FwdIt,
	class _OutIt> inline
	_OutIt rotate_copy(_FwdIt _First, _FwdIt _Mid, _FwdIt _Last, _OutIt _Dest)
	{	// copy rotating [_First, _Last)
	_Adl_verify_range(_First, _Mid);
	_Adl_verify_range(_Mid, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _UMid = _Get_unwrapped(_Mid);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_distance<_FwdIt>(_UFirst, _ULast));
	_UDest = _Copy_unchecked(_UMid, _ULast, _UDest);
	_Seek_wrapped(_Dest, _Copy_unchecked(_UFirst, _UMid, _UDest));
	return (_Dest);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt2 rotate_copy(_ExPo&&, _FwdIt1 _First, _FwdIt1 _Mid, _FwdIt1 _Last, _FwdIt2 _Dest) noexcept
	{	// copy rotating [_First, _Last)
		// not parallelized as benchmarks show it isn't worth it
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD rotate_copy(_First, _Mid, _Last, _Dest));
	}

		// FUNCTION TEMPLATE sample
template<class _PopIt,
	class _SampleIt,
	class _Diff,
	class _RngFn> inline
	_SampleIt _Sample_reservoir_unchecked(_PopIt _First, const _PopIt _Last, const _SampleIt _Dest,
		const _Diff _Count, _RngFn& _RngFunc)
	{	// source is input: use reservoir sampling (unstable)
		// pre: _SampleIt is random-access && 0 < _Count && the range [_Dest, _Dest + _Count) is valid
	using _Diff_sample = _Iter_diff_t<_SampleIt>;
	const auto _SCount = static_cast<_Diff_sample>(_Count);
	_Iter_diff_t<_PopIt> _PopSize{};
	for (; _PopSize < _SCount; ++_PopSize, (void)++_First)
		{
		// _PopSize is less than _SCount, and [_Dest, _Dest + _SCount) is valid,
		// so [_Dest, _Dest + _PopSize) must be valid, so narrowing to _Diff_sample
		// can't overflow
		const auto _Sample_pop = static_cast<_Diff_sample>(_PopSize);
		if (_First == _Last)
			{
			return (_Dest + _Sample_pop);
			}

		_Dest[_Sample_pop] = *_First;
		}
	for (; _First != _Last; ++_First)
		{
		const auto _Idx = _RngFunc(++_PopSize);
		if (_Idx < _SCount)
			{
			_Dest[static_cast<_Diff_sample>(_Idx)] = *_First; // again, valid narrowing because _Idx < _SCount
			}
		}
	return (_Dest + _SCount);
	}

template<class _PopIt,
	class _SampleIt,
	class _Diff,
	class _RngFn> inline
	_SampleIt _Sample_selection_unchecked(_PopIt _First, const _PopIt _Last,
		_Iter_diff_t<_PopIt> _PopSize, _SampleIt _Dest,
		_Diff _Count, _RngFn& _RngFunc)
	{	// source is forward *and* we know the source range size: use selection sampling (stable)
		// pre: _PopIt is forward && _Count <= _PopSize
	using _CT = common_type_t<_Iter_diff_t<_PopIt>, _Diff>;
	for (; _Count > 0 && _First != _Last; ++_First, (void)--_PopSize)
		{
		if (static_cast<_CT>(_RngFunc(_PopSize)) < static_cast<_CT>(_Count))
			{
			--_Count;
			*_Dest = *_First;
			++_Dest;
			}
		}
	return (_Dest);
	}

template<class _PopIt,
	class _SampleIt,
	class _Diff,
	class _RngFn> inline
	_SampleIt _Sample1(_PopIt _First, _PopIt _Last, _SampleIt _Dest,
		_Diff _Count, _RngFn& _RngFunc, input_iterator_tag)
	{	// source is input: use reservoir sampling (unstable)
		// pre: _Count > 0
	_Seek_wrapped(_Dest,
		_Sample_reservoir_unchecked(_First, _Last, _Get_unwrapped_unverified(_Dest), _Count,
		_RngFunc));
	return (_Dest);
	}

template<class _PopIt,
	class _SampleIt,
	class _Diff,
	class _RngFn> inline
	_SampleIt _Sample1(_PopIt _First, _PopIt _Last, _SampleIt _Dest,
		_Diff _Count, _RngFn& _RngFunc, forward_iterator_tag)
	{	// source is forward: use selection sampling (stable)
		// pre: _Count > 0
	using _PopDiff = _Iter_diff_t<_PopIt>;
	using _CT = common_type_t<_Diff, _PopDiff>;
	const auto _PopSize = _STD distance(_First, _Last);
	if (static_cast<_CT>(_Count) > static_cast<_CT>(_PopSize))
		{
		_Count = static_cast<_Diff>(_PopSize); // narrowing OK because _Count is getting smaller
		}

	_Seek_wrapped(_Dest,
		_Sample_selection_unchecked(_First, _Last, _PopSize, _Get_unwrapped_n(_Dest, _Count),
		_Count, _RngFunc));
	return (_Dest);
	}

template<class _PopIt,
	class _SampleIt,
	class _Diff,
	class _Urng> inline
	_SampleIt sample(_PopIt _First, _PopIt _Last, _SampleIt _Dest,
		_Diff _Count, _Urng&& _Func)
	{	// randomly select _Count elements from [_First, _Last) into _Dest
	static_assert(_Is_fwd_iter_v<_PopIt> || _Is_random_iter_v<_SampleIt>,
		"If the source range is not forward, the destination range must be random-access.");
	static_assert(is_integral_v<_Diff>,
		"The sample size must have an integer type.");
	_Adl_verify_range(_First, _Last);
	if (0 < _Count)
		{
		_Rng_from_urng<_Iter_diff_t<_PopIt>, remove_reference_t<_Urng>> _RngFunc(_Func);
		_Dest = _Sample1(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Dest, _Count,
			_RngFunc, _Iter_cat_t<_PopIt>());
		}

	return (_Dest);
	}

#if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _PopIt,
	class _DestTy,
	size_t _DestSize,
	class _Diff,
	class _Urng> inline
	_DestTy * sample(_PopIt _First, _PopIt _Last, _DestTy (&_Dest)[_DestSize],
		_Diff _Count, _Urng&& _Func)
	{	// randomly select _Count elements from [_First, _Last) into _Dest
	return (_STD sample(_First, _Last, _Array_iterator<_DestTy, _DestSize>(_Dest), _Count, _Func)
		._Unwrapped());
	}
#endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE shuffle WITH URNG
template<class _RanIt,
	class _RngFn> inline
	void _Random_shuffle1(_RanIt _First, _RanIt _Last, _RngFn& _RngFunc)
	{	// shuffle [_First, _Last) using random function _RngFunc
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	if (_UFirst == _ULast)
		{
		return;
		}

	using _Diff = _Iter_diff_t<_RanIt>;
	auto _UTarget = _UFirst;
	_Diff _Target_index = 1;
	for (; ++_UTarget != _ULast; ++_Target_index)
		{	// randomly place an element from [_First, _Target] at _Target
		_Diff _Off = _RngFunc(static_cast<_Diff>(_Target_index + 1));
		_STL_ASSERT(0 <= _Off && _Off <= _Target_index, "random value out of range");
		if (_Off != _Target_index)	// avoid self-move-assignment
			{
			_STD iter_swap(_UTarget, _UFirst + _Off);
			}
		}
	}

template<class _RanIt,
	class _Urng> inline
	void shuffle(_RanIt _First, _RanIt _Last, _Urng&& _Func)
	{	// shuffle [_First, _Last) using URNG _Func
	typedef remove_reference_t<_Urng> _Urng0;
	_Rng_from_urng<_Iter_diff_t<_RanIt>, _Urng0> _RngFunc(_Func);
	_Random_shuffle1(_First, _Last, _RngFunc);
	}

 #if _HAS_AUTO_PTR_ETC
		// FUNCTION TEMPLATE random_shuffle WITH RANDOM FN
template<class _RanIt,
	class _RngFn> inline
	void random_shuffle(_RanIt _First, _RanIt _Last, _RngFn&& _RngFunc)
	{	// shuffle [_First, _Last) using random function _RngFunc
	_Random_shuffle1(_First, _Last, _RngFunc);
	}

	// STRUCT _Rand_urng_from_func
struct _Rand_urng_from_func
	{	// wrap rand() as a URNG
	typedef unsigned int result_type;

	static result_type (min)()
		{	// return minimum possible generated value
		return (0);
		}

	static result_type (max)()
		{	// return maximum possible generated value
		return (RAND_MAX);
		}

	result_type operator()()
		{	// invoke rand()
		return (static_cast<result_type>(_CSTD rand()));
		}
	};

		// FUNCTION TEMPLATE random_shuffle
template<class _RanIt> inline
	void random_shuffle(_RanIt _First, _RanIt _Last)
	{	// shuffle [_First, _Last) using rand()
	_Rand_urng_from_func _Func;
	_STD shuffle(_First, _Last, _Func);
	}
 #endif /* _HAS_AUTO_PTR_ETC */

		// FUNCTION TEMPLATE partition
template<class _FwdIt,
	class _Pr> inline
	_FwdIt _Partition_unchecked(_FwdIt _First, const _FwdIt _Last, _Pr _Pred,
		forward_iterator_tag)
	{	// move elements satisfying _Pred to front, forward iterators
	for (;;)
		{	// skip in-place elements at beginning
		if (_First == _Last)
			{
			return (_First);
			}

		if (!_Pred(*_First))
			{
			break;
			}

		++_First;
		}

	for (_FwdIt _Next = _First; ++_Next != _Last; )
		{
		if (_Pred(*_Next))
			{
			_STD iter_swap(_First, _Next);	// out of place, swap and loop
			++_First;
			}
		}

	return (_First);
	}

template<class _BidIt,
	class _Pr> inline
	_BidIt _Partition_unchecked(_BidIt _First, _BidIt _Last, _Pr _Pred,
		bidirectional_iterator_tag)
	{	// move elements satisfying _Pred to front, bidirectional iterators
	for (;;)
		{	// find any out-of-order pair
		for (;;)
			{	// skip in-place elements at beginning
			if (_First == _Last)
				{
				return (_First);
				}

			if (!_Pred(*_First))
				{
				break;
				}

			++_First;
			}

		do
			{	// skip in-place elements at end
			--_Last;
			if (_First == _Last)
				{
				return (_First);
				}
			}
		while (!_Pred(*_Last));

		_STD iter_swap(_First, _Last);	// out of place, swap and loop
		++_First;
		}
	}

template<class _FwdIt,
	class _Pr> inline
	_FwdIt partition(_FwdIt _First, const _FwdIt _Last, _Pr _Pred)
	{	// move elements satisfying _Pred to beginning of sequence
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Partition_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred),
			_Iter_cat_t<_FwdIt>()));

	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt partition(_ExPo&& _Exec, _FwdIt _First, const _FwdIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE stable_partition
template<class _BidIt> inline
	_BidIt _Buffered_rotate_unchecked(const _BidIt _First, const _BidIt _Mid, const _BidIt _Last,
		const _Iter_diff_t<_BidIt> _Count1, const _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity)
	{	// rotate [_First, _Last) using temp buffer
		// precondition: _Count1 == distance(_First, _Mid)
		// precondition: _Count2 == distance(_Mid, _Last)
	if (_Count1 == 0)
		{
		return (_Last);
		}

	if (_Count2 == 0)
		{
		return (_First);
		}

	if (_Count1 <= _Count2 && _Count1 <= _Capacity)
		{	// buffer left range, then copy parts
		_Uninitialized_backout<_Iter_value_t<_BidIt> *> _Backout{_Temp_ptr,
			_Uninitialized_move_unchecked(_First, _Mid, _Temp_ptr)};
		const _BidIt _New_mid = _Move_unchecked(_Mid, _Last, _First);
		_Move_unchecked(_Backout._First, _Backout._Last, _New_mid);
		return (_New_mid); // _Backout destroys elements in temporary buffer
		}

	if (_Count2 <= _Capacity)
		{	// buffer right range, then copy parts
		_Uninitialized_backout<_Iter_value_t<_BidIt> *> _Backout{_Temp_ptr,
			_Uninitialized_move_unchecked(_Mid, _Last, _Temp_ptr)};
		_Move_backward_unchecked(_First, _Mid, _Last);
		return (_Move_unchecked(_Backout._First, _Backout._Last, _First)); // ditto _Backout destroys elements
		}

	// buffer too small, rotate in place
	return (_Rotate_unchecked(_First, _Mid, _Last));
	}

template<class _BidIt,
	class _Pr> inline
	pair<_BidIt, _Iter_diff_t<_BidIt>> _Stable_partition_unchecked1(_BidIt _First, _BidIt _Last, _Pr _Pred,
		const _Iter_diff_t<_BidIt> _Count, _Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity)
	{	// implement stable_partition of [_First, _Last] (note: closed range)
		// precondition: !_Pred(*_First)
		// precondition: _Pred(*_Last)
		// precondition: distance(_First, _Last) + 1 == _Count
		// note: _Count >= 2 and _First != _Last
		// returns: a pair such that first is the partition point, and second is distance(_First, partition point)
	using _Diff = _Iter_diff_t<_BidIt>;
	if (_Count - static_cast<_Diff>(1) <= _Capacity) // - 1 since we never need to store *_Last
		{
		_Uninitialized_backout<_Iter_value_t<_BidIt> *> _Backout{_Temp_ptr};
		_BidIt _Next = _First;
		_Backout._Emplace_back(_STD move(*_First));
		while (++_First != _Last)
			{	// test each element, copying to _Temp_ptr if it's in the false range, or assigning backwards
				// if it's in the true range
			if (_Pred(*_First))
				{
				*_Next = _STD move(*_First);
				++_Next;
				}
			else
				{
				_Backout._Emplace_back(_STD move(*_First));
				}
			}

		// move the last true element, *_Last, to the end of the true range
		*_Next = _STD move(*_Last);
		++_Next;
		_Move_unchecked(_Backout._First, _Backout._Last, _Next);	// copy back the false range
		_Diff _True_distance = static_cast<_Diff>(_Count - static_cast<_Diff>(_Backout._Last - _Backout._First));
		return (pair<_BidIt, _Diff>(_Next, _True_distance));	// _Backout destroys elements
		}

	const _Diff _Mid_offset = _Count / static_cast<_Diff>(2); // note: >= 1 because _Count >= 2
	const _BidIt _Mid = _STD next(_First, _Mid_offset);

	// form [_First, _Left) true range, [_Left, _Mid) false range
	_BidIt _Left = _Mid;
	_Diff _Left_true_count = _Mid_offset;
	for (;;)
		{	// skip over the trailing false range before _Mid
		--_Left;
		if (_First == _Left)
			{	// the entire left range is false
			--_Left_true_count;	// to exclude *_First
			break;
			}

		if (_Pred(*_Left))
			{	// excluded the false range before _Mid, invariants reestablished, recurse
			const pair<_BidIt, _Diff> _Low =
				_Stable_partition_unchecked1(_First, _Left, _Pred, _Left_true_count, _Temp_ptr, _Capacity);
			_Left = _Low.first;
			_Left_true_count = _Low.second;
			break;
			}

		--_Left_true_count;
		}

	// form [_Mid, _Right) true range, [_Right, next(_Last)) false range
	_BidIt _Right = _Mid;
	_Diff _Right_true_count = 0;
	for (;;)
		{	// skip over the leading true range after and including _Mid
		if (_Right == _Last)
			{	// the entire right range is true
			++_Right;	// to include _Last
			++_Right_true_count;
			break;
			}

		if (!_Pred(*_Right))
			{	// excluded the true range after and including _Mid, invariants reestablished, recurse
			const _Diff _Right_count = _Count - _Mid_offset;
			const _Diff _Remaining = _Right_count - _Right_true_count;
			const pair<_BidIt, _Diff> _High =
				_Stable_partition_unchecked1(_Right, _Last, _Pred, _Remaining, _Temp_ptr, _Capacity);
			_Right = _High.first;
			_Right_true_count += _High.second;
			break;
			}

		++_Right;
		++_Right_true_count;
		}

	// swap the [_Left, _Mid) false range with the [_Mid, _Right) true range
	const _BidIt _Partition_point = _Buffered_rotate_unchecked(_Left, _Mid, _Right,
		static_cast<_Diff>(_Mid_offset - _Left_true_count), _Right_true_count, _Temp_ptr, _Capacity);
	return (pair<_BidIt, _Diff>(_Partition_point, static_cast<_Diff>(_Left_true_count + _Right_true_count)));
	}

template<class _BidIt,
	class _Pr> inline
	_BidIt _Stable_partition_unchecked(_BidIt _First, _BidIt _Last, _Pr _Pred)
	{	// partition preserving order of equivalents, using _Pred
	for (;;)
		{
		if (_First == _Last)
			{	// the input range range is true (already partitioned)
			return (_First);
			}

		if (!_Pred(*_First))
			{	// excluded the leading true range
			break;
			}

		++_First;
		}

	for (;;)
		{
		--_Last;
		if (_First == _Last)
			{	// the input range is already partitioned
			return (_First);
			}

		if (_Pred(*_Last))
			{	// excluded the trailing false range
			break;
			}
		}

	using _Diff = _Iter_diff_t<_BidIt>;
	const _Diff _Temp_count = _STD distance(_First, _Last); // _Total_count - 1 since we never need to store *_Last
	const _Diff _Total_count = _Temp_count + static_cast<_Diff>(1);
	_Optimistic_temporary_buffer<_Iter_value_t<_BidIt>> _Temp_buf{_Temp_count};
	return (_Stable_partition_unchecked1(_First, _Last, _Pred, _Total_count, _Temp_buf._Data, _Temp_buf._Capacity)
		.first);
	}

template<class _BidIt,
	class _Pr> inline
	_BidIt stable_partition(_BidIt _First, _BidIt _Last, _Pr _Pred)
	{	// partition preserving order of equivalents, using _Pred
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Stable_partition_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred)));
	return (_First);
	}

#if _HAS_CXX17
template<class _ExPo,
	class _BidIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_BidIt stable_partition(_ExPo&&, _BidIt _First, _BidIt _Last, _Pr _Pred) noexcept
	{	// partition preserving order of equivalents, using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD stable_partition(_First, _Last, _Pass_fn(_Pred)));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE push_heap
template<class _RanIt,
	class _Ty,
	class _Pr> inline
	void _Push_heap_by_index(_RanIt _First, _Iter_diff_t<_RanIt> _Hole,
		_Iter_diff_t<_RanIt> _Top, _Ty&& _Val, _Pr _Pred)
	{	// percolate _Hole to _Top or where _Val belongs, using _Pred
	for (_Iter_diff_t<_RanIt> _Idx = (_Hole - 1) >> 1;	// TRANSITION, VSO#433486
		_Top < _Hole && _DEBUG_LT_PRED(_Pred, *(_First + _Idx), _Val);
		_Idx = (_Hole - 1) >> 1)	// TRANSITION, VSO#433486
		{	// move _Hole up to parent
		*(_First + _Hole) = _STD move(*(_First + _Idx));
		_Hole = _Idx;
		}

	*(_First + _Hole) = _STD move(_Val);	// drop _Val into final hole
	}

template<class _RanIt,
	class _Pr> inline
	void push_heap(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// push *(_Last - 1) onto heap at [_First, _Last - 1), using _Pred
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	auto _ULast = _Get_unwrapped(_Last);
	using _Diff = _Iter_diff_t<_RanIt>;
	_Diff _Count = _ULast - _UFirst;
	if (2 <= _Count)
		{
		_Iter_value_t<_RanIt> _Val = _STD move(*--_ULast);
		_Push_heap_by_index(_UFirst, --_Count, _Diff(0), _STD move(_Val), _Pass_fn(_Pred));
		}
	}

template<class _RanIt> inline
	void push_heap(_RanIt _First, _RanIt _Last)
	{	// push *(_Last - 1) onto heap at [_First, _Last - 1), using operator<
	_STD push_heap(_First, _Last, less<>());
	}

		// FUNCTION TEMPLATE pop_heap
template<class _RanIt,
	class _Ty,
	class _Pr> inline
	void _Pop_heap_hole_by_index(_RanIt _First, _Iter_diff_t<_RanIt> _Hole, _Iter_diff_t<_RanIt> _Bottom,
		_Ty&& _Val, _Pr _Pred)
	{	// percolate _Hole to _Bottom, then push _Val, using _Pred
		// precondition: _Bottom != 0
	using _Diff = _Iter_diff_t<_RanIt>;
	const _Diff _Top = _Hole;
	_Diff _Idx = _Hole;

	// Check whether _Idx can have a child before calculating that child's index, since
	// calculating the child's index can trigger integer overflows
	const _Diff _Max_sequence_non_leaf = (_Bottom - 1) >> 1;	// TRANSITION, VSO#433486
	while (_Idx < _Max_sequence_non_leaf)
		{	// move _Hole down to larger child
		_Idx = 2 * _Idx + 2;
		if (_DEBUG_LT_PRED(_Pred, *(_First + _Idx), *(_First + (_Idx - 1))))
			--_Idx;
		*(_First + _Hole) = _STD move(*(_First + _Idx));
		_Hole = _Idx;
		}

	if (_Idx == _Max_sequence_non_leaf && _Bottom % 2 == 0)
		{	// only child at bottom, move _Hole down to it
		*(_First + _Hole) = _STD move(*(_First + (_Bottom - 1)));
		_Hole = _Bottom - 1;
		}

	_Push_heap_by_index(_First, _Hole, _Top, _STD move(_Val), _Pred);
	}

template<class _RanIt,
	class _Ty,
	class _Pr> inline
	void _Pop_heap_hole_unchecked(_RanIt _First, _RanIt _Last, _RanIt _Dest,
		_Ty&& _Val, _Pr _Pred)
	{	// pop *_First to *_Dest and reheap, using _Pred
		// precondition: _First != _Last
		// precondition: _First != _Dest
	*_Dest = _STD move(*_First);
	_Pop_heap_hole_by_index(_First, _Iter_diff_t<_RanIt>(0), _Iter_diff_t<_RanIt>(_Last - _First),
		_STD move(_Val), _Pred);
	}

template<class _RanIt,
	class _Pr> inline
	void _Pop_heap_unchecked(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// pop *_First to *(_Last - 1) and reheap, using _Pred
	if (2 <= _Last - _First)
		{
		--_Last;
		_Iter_value_t<_RanIt> _Val = _STD move(*_Last);
		_Pop_heap_hole_unchecked(_First, _Last, _Last, _STD move(_Val), _Pred);
		}
	}

template<class _RanIt,
	class _Pr> inline
	void pop_heap(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// pop *_First to *(_Last - 1) and reheap, using _Pred
	_Adl_verify_range(_First, _Last);
	_Pop_heap_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred));
	}

template<class _RanIt> inline
	void pop_heap(_RanIt _First, _RanIt _Last)
	{	// pop *_First to *(_Last - 1) and reheap, using operator<
	_STD pop_heap(_First, _Last, less<>());
	}

		// FUNCTION TEMPLATE make_heap
template<class _RanIt,
	class _Pr> inline
	void _Make_heap_unchecked(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// make nontrivial [_First, _Last) into a heap, using _Pred
	_Iter_diff_t<_RanIt> _Bottom = _Last - _First;
	for (_Iter_diff_t<_RanIt> _Hole = _Bottom >> 1; 0 < _Hole; )	// TRANSITION, VSO#433486
		{	// reheap top half, bottom to top
		--_Hole;
		_Iter_value_t<_RanIt> _Val = _STD move(*(_First + _Hole));
		_Pop_heap_hole_by_index(_First, _Hole, _Bottom, _STD move(_Val), _Pred);
		}
	}

template<class _RanIt,
	class _Pr> inline
	void make_heap(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// make [_First, _Last) into a heap, using _Pred
	_Adl_verify_range(_First, _Last);
	_Make_heap_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred));
	}

template<class _RanIt> inline
	void make_heap(_RanIt _First, _RanIt _Last)
	{	// make [_First, _Last) into a heap, using operator<
	_STD make_heap(_First, _Last, less<>());
	}

		// FUNCTION TEMPLATES is_heap AND is_heap_until
template<class _RanIt,
	class _Pr> inline
	_RanIt _Is_heap_until_unchecked(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// find extent of range that is a heap ordered by _Pred
	const _Iter_diff_t<_RanIt> _Size = _Last - _First;
	for (_Iter_diff_t<_RanIt> _Off = 1; _Off < _Size; ++_Off)
		{
		if (_DEBUG_LT_PRED(_Pred, _First[(_Off - 1) >> 1], _First[_Off]))	// TRANSITION, VSO#433486
			{
			return (_First + _Off);
			}
		}

	return (_Last);
	}

template<class _RanIt,
	class _Pr>
	_NODISCARD inline _RanIt is_heap_until(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// find extent of range that is a heap ordered by _Pred
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Is_heap_until_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred)));
	return (_First);
	}

template<class _RanIt,
	class _Pr>
	_NODISCARD inline bool is_heap(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// test if range is a heap ordered by _Pred
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	return (_Is_heap_until_unchecked(_UFirst, _ULast, _Pass_fn(_Pred)) == _ULast);
	}

template<class _RanIt>
	_NODISCARD inline _RanIt is_heap_until(_RanIt _First, _RanIt _Last)
	{	// find extent of range that is a heap ordered by operator<
	return (_STD is_heap_until(_First, _Last, less<>()));
	}

template<class _RanIt>
	_NODISCARD inline bool is_heap(_RanIt _First, _RanIt _Last)
	{	// test if range is a heap ordered by operator<
	return (_STD is_heap(_First, _Last, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _RanIt is_heap_until(_ExPo&&, _RanIt _First, _RanIt _Last, _Pr _Pred) noexcept
	{	// find extent of range that is a heap ordered by _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_heap_until(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool is_heap(_ExPo&&, _RanIt _First, _RanIt _Last, _Pr _Pred) noexcept
	{	// test if range is a heap ordered by _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_heap(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _RanIt is_heap_until(_ExPo&&, _RanIt _First, _RanIt _Last) noexcept
	{	// find extent of range that is a heap ordered by operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_heap_until(_First, _Last));
	}

template<class _ExPo,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool is_heap(_ExPo&&, _RanIt _First, _RanIt _Last) noexcept
	{	// test if range is a heap ordered by operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_heap(_First, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE sort_heap
template<class _RanIt,
	class _Pr> inline
	void _Sort_heap_unchecked(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// order heap by repeatedly popping, using _Pred
	for (; 2 <= _Last - _First; --_Last)
		{
		_Pop_heap_unchecked(_First, _Last, _Pred);
		}
	}

template<class _RanIt,
	class _Pr> inline
	void sort_heap(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// order heap by repeatedly popping, using _Pred
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
#if _ITERATOR_DEBUG_LEVEL == 2
	const auto _Counterexample = _Is_heap_until_unchecked(_UFirst, _ULast, _Pass_fn(_Pred));
	if (_Counterexample != _ULast)
		{
		_STL_REPORT_ERROR("invalid heap in sort_heap()");
		}
#endif /* _ITERATOR_DEBUG_LEVEL == 2 */
	_Sort_heap_unchecked(_UFirst, _ULast, _Pass_fn(_Pred));
	}

template<class _RanIt> inline
	void sort_heap(_RanIt _First, _RanIt _Last)
	{	// order heap by repeatedly popping, using operator<
	_STD sort_heap(_First, _Last, less<>());
	}

		// FUNCTION TEMPLATE lower_bound
template<class _FwdIt,
	class _Ty>
	_NODISCARD inline _FwdIt lower_bound(_FwdIt _First, _FwdIt _Last, const _Ty& _Val)
	{	// find first element not before _Val, using operator<
	return (_STD lower_bound(_First, _Last, _Val, less<>()));
	}

		// FUNCTION TEMPLATE upper_bound
template<class _FwdIt,
	class _Ty,
	class _Pr>
	_NODISCARD inline _FwdIt upper_bound(_FwdIt _First, _FwdIt _Last,
		const _Ty& _Val, _Pr _Pred)
	{	// find first element that _Val is before, using _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	_Iter_diff_t<_FwdIt> _Count = _STD distance(_UFirst, _Get_unwrapped(_Last));

	while (0 < _Count)
		{	// divide and conquer, find half that contains answer
		_Iter_diff_t<_FwdIt> _Count2 = _Count >> 1; // TRANSITION, VSO#433486
		const auto _UMid = _STD next(_UFirst, _Count2);
		if (_Pred(_Val, *_UMid))
			{
			_Count = _Count2;
			}
		else
			{	// try top half
			_UFirst = _Next_iter(_UMid);
			_Count -= _Count2 + 1;
			}
		}

	_Seek_wrapped(_First, _UFirst);
	return (_First);
	}

template<class _FwdIt,
	class _Ty>
	_NODISCARD inline _FwdIt upper_bound(_FwdIt _First, _FwdIt _Last, const _Ty& _Val)
	{	// find first element that _Val is before, using operator<
	return (_STD upper_bound(_First, _Last, _Val, less<>()));
	}

		// FUNCTION TEMPLATE equal_range
template<class _FwdIt,
	class _Ty,
	class _Pr>
	_NODISCARD inline pair<_FwdIt, _FwdIt> equal_range(_FwdIt _First, _FwdIt _Last, const _Ty& _Val, _Pr _Pred)
	{	// find range equivalent to _Val, using _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);

	using _Diff = _Iter_diff_t<_FwdIt>;
	_Diff _Count = _STD distance(_UFirst, _ULast);

	for (;;)
		{	// divide and conquer, check midpoint
		if (_Count <= 0)
			{
			_Seek_wrapped(_Last, _UFirst); // empty range
			_Seek_wrapped(_First, _UFirst);
			break;
			}

		_Diff _Count2 = _Count >> 1; // TRANSITION, VSO#433486
		const auto _UMid = _STD next(_UFirst, _Count2);
		if (_DEBUG_LT_PRED(_Pred, *_UMid, _Val))
			{	// range begins above _UMid, loop
			_UFirst = _Next_iter(_UMid);
			_Count -= _Count2 + 1;
			}
		else if (_Pred(_Val, *_UMid))
			{
			_Count = _Count2;	// range in first half, loop
			}
		else
			{	// range straddles _UMid, find each end and return
			auto _UFirst2 = _STD lower_bound(_UFirst, _UMid, _Val, _Pass_fn(_Pred));
			_STD advance(_UFirst, _Count);
			auto _ULast2 = _STD upper_bound(_Next_iter(_UMid), _UFirst, _Val, _Pass_fn(_Pred));
			_Seek_wrapped(_Last, _ULast2);
			_Seek_wrapped(_First, _UFirst2);
			break;
			}
		}

	return {_First, _Last};
	}

template<class _FwdIt,
	class _Ty>
	_NODISCARD inline pair<_FwdIt, _FwdIt> equal_range(_FwdIt _First, _FwdIt _Last, const _Ty& _Val)
	{	// find range equivalent to _Val, using operator<
	return (_STD equal_range(_First, _Last, _Val, less<>()));
	}

		// FUNCTION TEMPLATE binary_search
template<class _FwdIt,
	class _Ty,
	class _Pr>
	_NODISCARD inline bool binary_search(_FwdIt _First, _FwdIt _Last, const _Ty& _Val, _Pr _Pred)
	{	// test if _Val equivalent to some element, using _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	_UFirst = _STD lower_bound(_UFirst, _ULast, _Val, _Pass_fn(_Pred));
	return (_UFirst != _ULast && !_Pred(_Val, *_UFirst));
	}

template<class _FwdIt,
	class _Ty>
	_NODISCARD inline bool binary_search(_FwdIt _First, _FwdIt _Last, const _Ty& _Val)
	{	// test if _Val equivalent to some element, using operator<
	return (_STD binary_search(_First, _Last, _Val, less<>()));
	}

		// FUNCTION TEMPLATE merge
inline _Distance_unknown _Idl_dist_add(_Distance_unknown, _Distance_unknown)
	{	// combine _Idl_distance results (both unknown)
	return {};
	}

template<class _Diff1> inline
	_Distance_unknown _Idl_dist_add(_Diff1, _Distance_unknown)
	{	// combine _Idl_distance results (right unknown)
	return {};
	}

template<class _Diff2> inline
	_Distance_unknown _Idl_dist_add(_Distance_unknown, _Diff2)
	{	// combine _Idl_distance results (left unknown)
	return {};
	}

template<class _Diff1,
	class _Diff2> inline
	auto _Idl_dist_add(_Diff1 _Lhs, _Diff2 _Rhs)
	{	// combine _Idl_distance results (both known)
	return (_Lhs + _Rhs);
	}

template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Pr> inline
	_OutIt merge(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest, _Pr _Pred)
	{	// copy merging ranges, both using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	const auto _Count1 = _Idl_distance<_InIt1>(_UFirst1, _ULast1);
	const auto _Count2 = _Idl_distance<_InIt2>(_UFirst2, _ULast2);
	auto _UDest = _Get_unwrapped_n(_Dest, _Idl_dist_add(_Count1, _Count2));
	if (_UFirst1 != _ULast1 && _UFirst2 != _ULast2)
		{
		for (;;)
			{
			if (_DEBUG_LT_PRED(_Pred, *_UFirst2, *_UFirst1))
				{
				*_UDest = *_UFirst2;
				++_UDest;
				++_UFirst2;

				if (_UFirst2 == _ULast2)
					{
					break;
					}
				}
			else
				{
				*_UDest = *_UFirst1;
				++_UDest;
				++_UFirst1;

				if (_UFirst1 == _ULast1)
					{
					break;
					}
				}
			}
		}

	_UDest = _Copy_unchecked(_UFirst1, _ULast1, _UDest);	// copy any tail
	_Seek_wrapped(_Dest, _Copy_unchecked(_UFirst2, _ULast2, _UDest));
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * merge(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// copy merging ranges, both using _Pred, array dest
	return (_STD merge(_First1, _Last1, _First2, _Last2,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt1,
	class _InIt2,
	class _OutIt> inline
	_OutIt merge(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest)
	{	// copy merging ranges, both using operator<
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * merge(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize])
	{	// copy merging ranges, both using operator<, array dest
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 merge(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest, _Pr _Pred) noexcept
	{	// copy merging ranges, both using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * merge(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// copy merging ranges, both using _Pred, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 merge(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest) noexcept
	{	// copy merging ranges, both using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * merge(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize]) noexcept
	{	// copy merging ranges, both using operator<, array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD merge(_First1, _Last1, _First2, _Last2, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE inplace_merge
		// The "usual invariants" for the inplace_merge helpers below are:
		// [_First, _Mid) and [_Mid, _Last) are sorted
		// _Pred(*_Mid, *_First)             note: this means *_Mid is the "lowest" element
		// _Pred(*prev(_Last), *prev(_Mid))  note: this means *prev(_Mid) is the "highest" element
		// _Count1 == distance(_First, _Mid)
		// _Count2 == distance(_Mid, _Last)
		// _Count1 > 1
		// _Count2 > 1
template<class _BidIt> inline
	void _Rotate_one_right(_BidIt _First, _BidIt _Mid, _BidIt _Last)
	{	// exchanges the range [_First, _Mid) with [_Mid, _Last)
		// pre: distance(_Mid, _Last) is 1
	_Iter_value_t<_BidIt> _Temp(_STD move(*_Mid));
	_Move_backward_unchecked(_First, _Mid, _Last);
	*_First = _STD move(_Temp);
	}

template<class _BidIt> inline
	void _Rotate_one_left(_BidIt _First, _BidIt _Mid, _BidIt _Last)
	{	// exchanges the range [_First, _Mid) with [_Mid, _Last)
		// pre: distance(_First, _Mid) is 1
	_Iter_value_t<_BidIt> _Temp(_STD move(*_First));
	*_Move_unchecked(_Mid, _Last, _First) = _STD move(_Temp);
	}

template<class _BidIt,
	class _Pr> inline
	void _Inplace_merge_buffer_left(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_value_t<_BidIt> * const _Temp_ptr, _Pr _Pred)
	{	// move the range [_First, _Mid) to _Temp_ptr, and merge it with [_Mid, _Last) to _First, using _Pred
		// usual invariants apply
	using _Ptr_ty = _Iter_value_t<_BidIt> *;
	_Uninitialized_backout<_Ptr_ty> _Backout{_Temp_ptr,
		_Uninitialized_move_unchecked(_First, _Mid, _Temp_ptr)};
	_Ptr_ty _Left_first = _Temp_ptr;
	const _Ptr_ty _Left_last = _Backout._Last - 1; // avoid a compare with the highest element
	*_First = _STD move(*_Mid); // the lowest element is now in position
	++_First;
	++_Mid;
	for (;;)
		{
		if (_Pred(*_Mid, *_Left_first))
			{	// take element from the right partition
			*_First = _STD move(*_Mid);
			++_First;
			++_Mid;
			if (_Mid == _Last)
				{
				_Move_unchecked(_Left_first, _Backout._Last, _First);	// move any tail (and the highest element)
				return;
				}
			}
		else
			{	// take element from the left partition
			*_First = _STD move(*_Left_first);
			++_First;
			++_Left_first;
			if (_Left_first == _Left_last)
				{	// move the remaining right partition and highest element, since *_Left_first is highest
				*_Move_unchecked(_Mid, _Last, _First) = _STD move(*_Left_last);
				return;
				}
			}
		}
	}

template<class _BidIt,
	class _Pr> inline
	void _Inplace_merge_buffer_right(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_value_t<_BidIt> * const _Temp_ptr, _Pr _Pred)
	{	// move the range [_Mid, _Last) to _Temp_ptr, and merge it with [_First, _Mid) to _Last, using _Pred
		// usual invariants apply
	using _Ptr_ty = _Iter_value_t<_BidIt> *;
	_Uninitialized_backout<_Ptr_ty> _Backout{_Temp_ptr,
		_Uninitialized_move_unchecked(_Mid, _Last, _Temp_ptr)};
	*--_Last = _STD move(*--_Mid); // move the highest element into position
	const _Ptr_ty _Right_first = _Temp_ptr;
	_Ptr_ty _Right_last = _Backout._Last - 1;
	--_Mid;
	for (;;)
		{
		if (_Pred(*_Right_last, *_Mid))
			{	// merge from the left partition
			*--_Last = _STD move(*_Mid);
			if (_First == _Mid)
				{
				*--_Last = _STD move(*_Right_last);	// to make [_Right_first, _Right_last) a half-open range
				_Move_backward_unchecked(_Right_first, _Right_last, _Last);	// move any head (and lowest element)
				return;
				}

			--_Mid;
			}
		else
			{	// merge from the right partition
			*--_Last = _STD move(*_Right_last);
			--_Right_last;
			if (_Right_first == _Right_last)
				{	// we can't compare with *_Right_first, but we know it is lowest
				*--_Last = _STD move(*_Mid);	// restore half-open range [_First, _Mid)
				_Move_backward_unchecked(_First, _Mid, _Last);
				*_First = _STD move(*_Right_first);
				return;
				}
			}
		}
	}

template<class _BidIt,
	class _Pr> inline
	void _Buffered_inplace_merge_unchecked(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_diff_t<_BidIt> _Count1, _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred);

template<class _BidIt,
	class _Pr> inline
	void _Buffered_inplace_merge_divide_and_conquer2(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_diff_t<_BidIt> _Count1, _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred,
		_BidIt _Firstn, _BidIt _Lastn, _Iter_diff_t<_BidIt> _Count1n, _Iter_diff_t<_BidIt> _Count2n)
	{	// common block of _Buffered_inplace_merge_divide_and_conquer, below
	using _Diff = _Iter_diff_t<_BidIt>;
	_BidIt _Midn = _Buffered_rotate_unchecked(_Firstn, _Mid, _Lastn,
		static_cast<_Diff>(_Count1 - _Count1n), _Count2n, _Temp_ptr, _Capacity);	// rearrange middle
	_Buffered_inplace_merge_unchecked(_First, _Firstn, _Midn,
		_Count1n, _Count2n, _Temp_ptr, _Capacity, _Pred);	// merge each new part
	_Buffered_inplace_merge_unchecked(_Midn, _Lastn, _Last,
		static_cast<_Diff>(_Count1 - _Count1n), static_cast<_Diff>(_Count2 - _Count2n),
		_Temp_ptr, _Capacity, _Pred);
	}

template<class _BidIt,
	class _Pr> inline
	void _Buffered_inplace_merge_divide_and_conquer(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_diff_t<_BidIt> _Count1, _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred)
	{	// merge sorted [_First, _Mid) with sorted [_Mid, _Last), using _Pred
		// usual invariants apply
	if (_Count1 <= _Count2)
		{
		const _Iter_diff_t<_BidIt> _Count1n = _Count1 >> 1;	// TRANSITION, VSO#433486
		const _BidIt _Firstn = _STD next(_First, _Count1n);
		const _BidIt _Lastn = _STD lower_bound(_Mid, _Last, *_Firstn, _Pred);
		_Buffered_inplace_merge_divide_and_conquer2(_First, _Mid, _Last, _Count1, _Count2,
			_Temp_ptr, _Capacity, _Pred,
			_Firstn, _Lastn, _Count1n, _STD distance(_Mid, _Lastn));
		}
	else
		{
		const _Iter_diff_t<_BidIt> _Count2n = _Count2 >> 1;	// TRANSITION, VSO#433486
		const _BidIt _Lastn = _STD next(_Mid, _Count2n);
		const _BidIt _Firstn = _STD upper_bound(_First, _Mid, *_Lastn, _Pred);
		_Buffered_inplace_merge_divide_and_conquer2(_First, _Mid, _Last, _Count1, _Count2,
			_Temp_ptr, _Capacity, _Pred,
			_Firstn, _Lastn, _STD distance(_First, _Firstn), _Count2n);
		}
	}

template<class _BidIt,
	class _Pr> inline
	void _Buffered_inplace_merge_unchecked_impl(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_diff_t<_BidIt> _Count1, _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred)
	{	// merge sorted [_First, _Mid) with sorted [_Mid, _Last), using _Pred
		// usual invariants apply
	if (_Count1 <= _Count2 && _Count1 <= _Capacity)
		{
		_Inplace_merge_buffer_left(_First, _Mid, _Last, _Temp_ptr, _Pred);
		}
	else if (_Count2 <= _Capacity)
		{
		_Inplace_merge_buffer_right(_First, _Mid, _Last, _Temp_ptr, _Pred);
		}
	else
		{
		_Buffered_inplace_merge_divide_and_conquer(_First, _Mid, _Last, _Count1, _Count2, _Temp_ptr, _Capacity, _Pred);
		}
	}

template<class _BidIt,
	class _Pr> inline
	void _Buffered_inplace_merge_unchecked(_BidIt _First, _BidIt _Mid, _BidIt _Last,
		_Iter_diff_t<_BidIt> _Count1, _Iter_diff_t<_BidIt> _Count2,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred)
	{	// merge sorted [_First, _Mid) with sorted [_Mid, _Last), using _Pred
		// usual invariants *do not* apply; only sortedness applies
		// establish the usual invariants (explained in inplace_merge)
	if (_Mid == _Last)
		{
		return;
		}

	for (;;)
		{
		if (_First == _Mid)
			{
			return;
			}

		if (_Pred(*_Mid, *_First))
			{
			break;
			}

		++_First;
		--_Count1;
		}

	const auto _Highest = _Prev_iter(_Mid);
	do
		{
		--_Last;
		--_Count2;
		if (_Mid == _Last)
			{
			_Rotate_one_right(_First, _Mid, ++_Last);
			return;
			}
		}
	while (!_Pred(*_Last, *_Highest));

	++_Last;
	++_Count2;

	if (_Count1 == 1)
		{
		_Rotate_one_left(_First, _Mid, _Last);
		return;
		}

	_Buffered_inplace_merge_unchecked_impl(_First, _Mid, _Last, _Count1, _Count2, _Temp_ptr, _Capacity, _Pred);
	}

template<class _BidIt,
	class _Pr> inline
	void inplace_merge(_BidIt _First, _BidIt _Mid, _BidIt _Last, _Pr _Pred)
	{	// merge [_First, _Mid) with [_Mid, _Last), using _Pred
	_Adl_verify_range(_First, _Mid);
	_Adl_verify_range(_Mid, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	auto _UMid = _Get_unwrapped(_Mid);
	auto _ULast = _Get_unwrapped(_Last);
	_DEBUG_ORDER_UNWRAPPED(_UFirst, _UMid, _Pred);

	// establish the usual invariants:
	if (_UMid == _ULast)
		{
		return;
		}

	for (;;)
		{
		if (_UFirst == _UMid)
			{
			return;
			}

		if (_Pred(*_UMid, *_UFirst))
			{	// found that *_UMid goes in *_UFirst's position
			break;
			}

		++_UFirst;
		}

	const auto _Highest = _Prev_iter(_UMid);
	do
		{
		--_ULast;
		if (_UMid == _ULast)
			{	// rotate only element remaining in right partition to the beginning, without allocating
			_Rotate_one_right(_UFirst, _UMid, ++_ULast);
			return;
			}
		}
	while (!_Pred(*_ULast, *_Highest));	// found that *_Highest goes in *_ULast's position

	++_ULast;

	const _Iter_diff_t<_BidIt> _Count1 = _STD distance(_UFirst, _UMid);
	if (_Count1 == 1)
		{	// rotate only element remaining in left partition to the end, without allocating
		_Rotate_one_left(_UFirst, _UMid, _ULast);
		return;
		}

	const _Iter_diff_t<_BidIt> _Count2 = _STD distance(_UMid, _ULast);
	_Optimistic_temporary_buffer<_Iter_value_t<_BidIt>> _Temp_buf{_Min_value(_Count1, _Count2)};
	_Buffered_inplace_merge_unchecked_impl(_UFirst, _UMid, _ULast,
		_Count1, _Count2, _Temp_buf._Data, _Temp_buf._Capacity, _Pass_fn(_Pred));
	}

template<class _BidIt> inline
	void inplace_merge(_BidIt _First, _BidIt _Mid, _BidIt _Last)
	{	// merge [_First, _Mid) with [_Mid, _Last), using operator<
	_STD inplace_merge(_First, _Mid, _Last, less<>());
	}

#if _HAS_CXX17
template<class _ExPo,
	class _BidIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void inplace_merge(_ExPo&&, _BidIt _First, _BidIt _Mid, _BidIt _Last, _Pr _Pred) noexcept
	{	// merge [_First, _Mid) with [_Mid, _Last), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_STD inplace_merge(_First, _Mid, _Last, _Pass_fn(_Pred));
	}

template<class _ExPo,
	class _BidIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void inplace_merge(_ExPo&&, _BidIt _First, _BidIt _Mid, _BidIt _Last) noexcept
	{	// merge [_First, _Mid) with [_Mid, _Last), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_STD inplace_merge(_First, _Mid, _Last);
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE sort
template<class _BidIt,
	class _Pr> inline
	_BidIt _Insertion_sort_unchecked(_BidIt _First, const _BidIt _Last, _Pr _Pred)
	{	// insertion sort [_First, _Last), using _Pred
	if (_First != _Last)
		{
		for (_BidIt _Next = _First; ++_Next != _Last; )
			{	// order next element
			_BidIt _Next1 = _Next;
			_Iter_value_t<_BidIt> _Val = _STD move(*_Next);

			if (_DEBUG_LT_PRED(_Pred, _Val, *_First))
				{	// found new earliest element, move to front
				_Move_backward_unchecked(_First, _Next, ++_Next1);
				*_First = _STD move(_Val);
				}
			else
				{	// look for insertion point after first
				for (_BidIt _First1 = _Next1;
					_DEBUG_LT_PRED(_Pred, _Val, *--_First1);
					_Next1 = _First1)
					{
					*_Next1 = _STD move(*_First1);	// move hole down
					}

				*_Next1 = _STD move(_Val);	// insert element in hole
				}
			}
		}

	return (_Last);
	}

template<class _RanIt,
	class _Pr> inline
	void _Med3_unchecked(_RanIt _First, _RanIt _Mid, _RanIt _Last, _Pr _Pred)
	{	// sort median of three elements to middle
	if (_DEBUG_LT_PRED(_Pred, *_Mid, *_First))
		{
		_STD iter_swap(_Mid, _First);
		}

	if (_DEBUG_LT_PRED(_Pred, *_Last, *_Mid))
		{	// swap middle and last, then test first again
		_STD iter_swap(_Last, _Mid);

		if (_DEBUG_LT_PRED(_Pred, *_Mid, *_First))
			{
			_STD iter_swap(_Mid, _First);
			}
		}
	}

template<class _RanIt,
	class _Pr> inline
	void _Guess_median_unchecked(_RanIt _First, _RanIt _Mid, _RanIt _Last, _Pr _Pred)
	{	// sort median element to middle
	using _Diff = _Iter_diff_t<_RanIt>;
	const _Diff _Count = _Last - _First;
	if (40 < _Count)
		{	// median of nine
		const _Diff _Step = (_Count + 1) >> 3; // +1 can't overflow because range was made inclusive in caller
		const _Diff _Two_step = _Step << 1; // note: intentionally discards low-order bit
		_Med3_unchecked(_First, _First + _Step, _First + _Two_step, _Pred);
		_Med3_unchecked(_Mid - _Step, _Mid, _Mid + _Step, _Pred);
		_Med3_unchecked(_Last - _Two_step, _Last - _Step, _Last, _Pred);
		_Med3_unchecked(_First + _Step, _Mid, _Last - _Step, _Pred);
		}
	else
		{
		_Med3_unchecked(_First, _Mid, _Last, _Pred);
		}
	}

template<class _RanIt,
	class _Pr> inline
	pair<_RanIt, _RanIt>
		_Partition_by_median_guess_unchecked(_RanIt _First, _RanIt _Last, _Pr _Pred)
	{	// partition [_First, _Last), using _Pred
	_RanIt _Mid = _First + ((_Last - _First) >> 1);	// TRANSITION, VSO#433486
	_Guess_median_unchecked(_First, _Mid, _Last - 1, _Pred);
	_RanIt _Pfirst = _Mid;
	_RanIt _Plast = _Pfirst + 1;

	while (_First < _Pfirst
		&& !_DEBUG_LT_PRED(_Pred, *(_Pfirst - 1), *_Pfirst)
		&& !_Pred(*_Pfirst, *(_Pfirst - 1)))
		{
		--_Pfirst;
		}

	while (_Plast < _Last
		&& !_DEBUG_LT_PRED(_Pred, *_Plast, *_Pfirst)
		&& !_Pred(*_Pfirst, *_Plast))
		{
		++_Plast;
		}

	_RanIt _Gfirst = _Plast;
	_RanIt _Glast = _Pfirst;

	for (;;)
		{	// partition
		for (; _Gfirst < _Last; ++_Gfirst)
			{
			if (_DEBUG_LT_PRED(_Pred, *_Pfirst, *_Gfirst))
				{
				}
			else if (_Pred(*_Gfirst, *_Pfirst))
				{
				break;
				}
			else if (_Plast != _Gfirst)
				{
				_STD iter_swap(_Plast, _Gfirst);
				++_Plast;
				}
			else
				{
				++_Plast;
				}
			}

		for (; _First < _Glast; --_Glast)
			{
			if (_DEBUG_LT_PRED(_Pred, *(_Glast - 1), *_Pfirst))
				{
				}
			else if (_Pred(*_Pfirst, *(_Glast - 1)))
				{
				break;
				}
			else if (--_Pfirst != _Glast - 1)
				{
				_STD iter_swap(_Pfirst, _Glast - 1);
				}
			}

		if (_Glast == _First && _Gfirst == _Last)
			{
			return (pair<_RanIt, _RanIt>(_Pfirst, _Plast));
			}

		if (_Glast == _First)
			{	// no room at bottom, rotate pivot upward
			if (_Plast != _Gfirst)
				{
				_STD iter_swap(_Pfirst, _Plast);
				}

			++_Plast;
			_STD iter_swap(_Pfirst, _Gfirst);
			++_Pfirst;
			++_Gfirst;
			}
		else if (_Gfirst == _Last)
			{	// no room at top, rotate pivot downward
			if (--_Glast != --_Pfirst)
				{
				_STD iter_swap(_Glast, _Pfirst);
				}

			_STD iter_swap(_Pfirst, --_Plast);
			}
		else
			{
			_STD iter_swap(_Gfirst, --_Glast);
			++_Gfirst;
			}
		}
	}

template<class _RanIt,
	class _Pr> inline
	void _Sort_unchecked(_RanIt _First, _RanIt _Last, _Iter_diff_t<_RanIt> _Ideal, _Pr _Pred)
	{	// order [_First, _Last), using _Pred
	_Iter_diff_t<_RanIt> _Count;
	while (_ISORT_MAX < (_Count = _Last - _First) && 0 < _Ideal)
		{	// divide and conquer by quicksort
		auto _Mid = _Partition_by_median_guess_unchecked(_First, _Last, _Pred);
		// TRANSITION, VSO#433486
		_Ideal = (_Ideal >> 1) + (_Ideal >> 2);	// allow 1.5 log2(N) divisions

		if (_Mid.first - _First < _Last - _Mid.second)
			{	// loop on second half
			_Sort_unchecked(_First, _Mid.first, _Ideal, _Pred);
			_First = _Mid.second;
			}
		else
			{	// loop on first half
			_Sort_unchecked(_Mid.second, _Last, _Ideal, _Pred);
			_Last = _Mid.first;
			}
		}

	if (_ISORT_MAX < _Count)
		{	// heap sort if too many divisions
		_Make_heap_unchecked(_First, _Last, _Pred);
		_Sort_heap_unchecked(_First, _Last, _Pred);
		}
	else if (2 <= _Count)
		{
		_Insertion_sort_unchecked(_First, _Last, _Pred);	// small
		}
	}

template<class _RanIt,
	class _Pr> inline
	void sort(const _RanIt _First, const _RanIt _Last, _Pr _Pred)
	{	// order [_First, _Last), using _Pred
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	_Sort_unchecked(_UFirst, _ULast, _ULast - _UFirst, _Pass_fn(_Pred));
	}

template<class _RanIt> inline
	void sort(const _RanIt _First, const _RanIt _Last)
	{	// order [_First, _Last), using operator<
	_STD sort(_First, _Last, less<>());
	}

#if _HAS_CXX17
template<class _ExPo,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void sort(_ExPo&& _Exec, _RanIt _First, _RanIt _Last, _Pr _Pred) noexcept;

template<class _ExPo,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void sort(_ExPo&& _Exec, const _RanIt _First, const _RanIt _Last) noexcept
	{	// order [_First, _Last), using operator<
	_STD sort(_STD forward<_ExPo>(_Exec), _First, _Last, less<>{});
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE stable_sort
template<class _FwdIt,
	class _Ty,
	class _Pr>
	_Ty * _Uninitialized_merge_move(_FwdIt _First, const _FwdIt _Mid, const _FwdIt _Last, _Ty * const _Dest, _Pr _Pred)
	{	// move merging ranges to uninitialized storage, both using _Pred
		// pre: _First != _Mid && _Mid != _Last
	_Uninitialized_backout<_Ty *> _Backout{_Dest};
	_FwdIt _Next = _Mid;
	for (;;)
		{
		if (_DEBUG_LT_PRED(_Pred, *_Next, *_First))
			{
			_Backout._Emplace_back(_STD move(*_Next));
			++_Next;

			if (_Next == _Last)
				{
				_Backout._Last = _Uninitialized_move_unchecked(_First, _Mid, _Backout._Last);
				return (_Backout._Release());
				}
			}
		else
			{
			_Backout._Emplace_back(_STD move(*_First));
			++_First;

			if (_First == _Mid)
				{
				_Backout._Last = _Uninitialized_move_unchecked(_Next, _Last, _Backout._Last);
				return (_Backout._Release());
				}
			}
		}
	}

template<class _InIt,
	class _OutIt,
	class _Pr> inline
	_OutIt _Merge_move(_InIt _First, const _InIt _Mid, const _InIt _Last, _OutIt _Dest, _Pr _Pred)
	{	// move merging adjacent ranges [_First, _Mid) and [_Mid, _Last) to _Dest, both using _Pred
		// pre: _First != _Mid && _Mid != _Last
	_InIt _Next = _Mid;
	for (;;)
		{
		if (_DEBUG_LT_PRED(_Pred, *_Next, *_First))
			{
			*_Dest = _STD move(*_Next);
			++_Dest;
			++_Next;

			if (_Next == _Last)
				{
				return (_Move_unchecked(_First, _Mid, _Dest));
				}
			}
		else
			{
			*_Dest = _STD move(*_First);
			++_Dest;
			++_First;

			if (_First == _Mid)
				{
				return (_Move_unchecked(_Next, _Last, _Dest));
				}
			}
		}
	}

template<class _BidIt,
	class _Ty,
	class _Pr> inline
	void _Uninitialized_chunked_merge_unchecked(_BidIt _First, const _BidIt _Last, _Ty * _Dest,
		const _Iter_diff_t<_BidIt> _Chunk, _Iter_diff_t<_BidIt> _Count, _Pr _Pred)
	{	// move to uninitialized merging adjacent chunks of distance _Chunk, using _Pred
		// pre: _Count == distance(_First, _Last)
		// pre: _Chunk > 0
	_Uninitialized_backout<_Ty *> _Backout{_Dest};
	while (_Chunk < _Count)
		{
		_Count -= _Chunk;
		const _BidIt _Mid1 = _STD next(_First, _Chunk);
		const auto _Chunk2 = _Min_value(_Chunk, _Count);
		_Count -= _Chunk2;
		const _BidIt _Mid2 = _STD next(_Mid1, _Chunk2);
		_Backout._Last = _Uninitialized_merge_move(_First, _Mid1, _Mid2, _Backout._Last, _Pred);
		_First = _Mid2;
		}

	_Uninitialized_move_unchecked(_First, _Last, _Backout._Last);	// copy partial last chunk
	_Backout._Release();
	}

template<class _BidIt,
	class _OutIt,
	class _Pr> inline
	void _Chunked_merge_unchecked(_BidIt _First, const _BidIt _Last, _OutIt _Dest,
		const _Iter_diff_t<_BidIt> _Chunk, _Iter_diff_t<_BidIt> _Count, _Pr _Pred)
	{	// move merging adjacent chunks of distance _Chunk, using _Pred
		// pre: _Count == distance(_First, _Last)
		// pre: _Chunk > 0
	while (_Chunk < _Count)
		{
		_Count -= _Chunk;
		const _BidIt _Mid1 = _STD next(_First, _Chunk);
		const auto _Chunk2 = _Min_value(_Chunk, _Count);
		_Count -= _Chunk2;
		const _BidIt _Mid2 = _STD next(_Mid1, _Chunk2);
		_Dest = _Merge_move(_First, _Mid1, _Mid2, _Dest, _Pred);
		_First = _Mid2;
		}

	_Move_unchecked(_First, _Last, _Dest);	// copy partial last chunk
	}

template<class _BidIt,
	class _Pr>
	void _Insertion_sort_isort_max_chunks(_BidIt _First, const _BidIt _Last, _Iter_diff_t<_BidIt> _Count, _Pr _Pred)
	{	// insertion sort every chunk of distance _ISORT_MAX in [_First, _Last)
		// pre: _Count == distance(_First, _Last)
	constexpr auto _Diffsort_max = static_cast<_Iter_diff_t<_BidIt>>(_ISORT_MAX);
	for (; _Diffsort_max < _Count; _Count -= _Diffsort_max)
		{	// sort chunks
		_First = _Insertion_sort_unchecked(_First, _STD next(_First, _Diffsort_max), _Pred);
		}

	_Insertion_sort_unchecked(_First, _Last, _Pred);	// sort partial last chunk
	}

template<class _BidIt,
	class _Pr> inline
	void _Buffered_merge_sort_unchecked(const _BidIt _First, const _BidIt _Last,
		const _Iter_diff_t<_BidIt> _Count,
		_Iter_value_t<_BidIt> * const _Temp_ptr, _Pr _Pred)
	{	// sort using temp buffer for merges, using _Pred
		// pre: _Last - _First == _Count
		// pre: _Count <= capacity of buffer at _Temp_ptr; also allows safe narrowing to ptrdiff_t
	_Insertion_sort_isort_max_chunks(_First, _Last, _Count, _Pred);
	// merge adjacent pairs of chunks to and from temp buffer
	auto _Chunk = static_cast<_Iter_diff_t<_BidIt>>(_ISORT_MAX);
	if (_Count <= _Chunk)
		{
		return;
		}

	// do the first merge, constructing elements in the temporary buffer
	_Uninitialized_chunked_merge_unchecked(_First, _Last, _Temp_ptr, _Chunk, _Count, _Pred);
	_Uninitialized_backout<_Iter_value_t<_BidIt> *> _Backout{_Temp_ptr, _Temp_ptr + _Count};
	for (;;)
		{
		// unconditionally merge elements back into the source buffer
		_Chunk <<= 1;
		_Chunked_merge_unchecked(_Temp_ptr, _Temp_ptr + _Count, _First,
			static_cast<ptrdiff_t>(_Chunk), static_cast<ptrdiff_t>(_Count), _Pred);
		_Chunk <<= 1;
		if (_Count <= _Chunk)
			{	// if the input would be a single chunk, it's already sorted and we're done
			return;
			}

		// more merges necessary; merge to temporary buffer
		_Chunked_merge_unchecked(_First, _Last, _Temp_ptr, _Chunk, _Count, _Pred);
		}
	}

template<class _BidIt,
	class _Pr> inline
	void _Stable_sort_unchecked(const _BidIt _First, const _BidIt _Last, const _Iter_diff_t<_BidIt> _Count,
		_Iter_value_t<_BidIt> * const _Temp_ptr, const ptrdiff_t _Capacity, _Pr _Pred)
	{	// sort preserving order of equivalents, using _Pred
	using _Diff = _Iter_diff_t<_BidIt>;
	if (_Count <= _ISORT_MAX)
		{
		_Insertion_sort_unchecked(_First, _Last, _Pred);	// small
		}
	else
		{	// sort halves and merge
		const auto _Half_count = static_cast<_Diff>(_Count >> 1);
		const auto _Half_count_ceil = static_cast<_Diff>(_Count - _Half_count);
		const _BidIt _Mid = _STD next(_First, _Half_count_ceil);
		if (_Half_count_ceil <= _Capacity)
			{	// temp buffer big enough, sort each half using buffer
			_Buffered_merge_sort_unchecked(_First, _Mid, _Half_count_ceil, _Temp_ptr, _Pred);
			_Buffered_merge_sort_unchecked(_Mid, _Last, _Half_count, _Temp_ptr, _Pred);
			}
		else
			{	// temp buffer not big enough, divide and conquer
			_Stable_sort_unchecked(_First, _Mid, _Half_count_ceil, _Temp_ptr, _Capacity, _Pred);
			_Stable_sort_unchecked(_Mid, _Last, _Half_count, _Temp_ptr, _Capacity, _Pred);
			}

		_Buffered_inplace_merge_unchecked(_First, _Mid, _Last,
			_Half_count_ceil, _Half_count, _Temp_ptr, _Capacity, _Pred);	// merge halves
		}
	}

template<class _BidIt,
	class _Pr> inline
	void stable_sort(_BidIt _First, _BidIt _Last, _Pr _Pred)
	{	// sort preserving order of equivalents, using _Pred
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	const auto _Count = _STD distance(_UFirst, _ULast);
	if (_Count <= _ISORT_MAX)
		{
		if (_Count > 1)
			{
			_Insertion_sort_unchecked(_First, _Last, _Pass_fn(_Pred));
			}

		return;
		}

	_Optimistic_temporary_buffer<_Iter_value_t<_BidIt>> _Temp_buf{_Count - (_Count >> 1)};
	_Stable_sort_unchecked(_UFirst, _ULast, _Count, _Temp_buf._Data, _Temp_buf._Capacity, _Pass_fn(_Pred));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _BidIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void stable_sort(_ExPo&& _Exec, _BidIt _First, _BidIt _Last, _Pr _Pred) noexcept;
#endif /* _HAS_CXX17 */

template<class _BidIt> inline
	void stable_sort(_BidIt _First, _BidIt _Last)
	{	// sort preserving order of equivalents, using operator<
	_STD stable_sort(_First, _Last, less<>());
	}

#if _HAS_CXX17
template<class _ExPo,
	class _BidIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void stable_sort(_ExPo&& _Exec, _BidIt _First, _BidIt _Last) noexcept
	{	// sort preserving order of equivalents, using operator<
	_STD stable_sort(_STD forward<_ExPo>(_Exec), _First, _Last, less<>());
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE partial_sort
template<class _RanIt,
	class _Pr> inline
	void partial_sort(_RanIt _First, _RanIt _Mid, _RanIt _Last, _Pr _Pred)
	{	// order [_First, _Last) up to _Mid, using _Pred
	_Adl_verify_range(_First, _Mid);
	_Adl_verify_range(_Mid, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _UMid = _Get_unwrapped(_Mid);
	const auto _ULast = _Get_unwrapped(_Last);

	if (_UFirst == _UMid)
		{
		return;	// nothing to do, avoid violating _Pop_heap_hole_unchecked preconditions
		}

	_Make_heap_unchecked(_UFirst, _UMid, _Pass_fn(_Pred));
	for (auto _UNext = _UMid; _UNext < _ULast; ++_UNext)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UNext, *_UFirst))
			{	// replace top with new largest
			_Iter_value_t<_RanIt> _Val = _STD move(*_UNext);
			_Pop_heap_hole_unchecked(_UFirst, _UMid, _UNext, _STD move(_Val), _Pass_fn(_Pred));
			}
		}

	_Sort_heap_unchecked(_UFirst, _UMid, _Pass_fn(_Pred));
	}

template<class _RanIt> inline
	void partial_sort(_RanIt _First, _RanIt _Mid, _RanIt _Last)
	{	// order [_First, _Last) up to _Mid, using operator<
	_STD partial_sort(_First, _Mid, _Last, less<>());
	}

#if _HAS_CXX17
template<class _ExPo,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void partial_sort(_ExPo&&, _RanIt _First, _RanIt _Mid, _RanIt _Last, _Pr _Pred) noexcept
	{	// order [_First, _Last) up to _Mid, using _Pred
		// parallelism suspected to be infeasible
	return (_STD partial_sort(_First, _Mid, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void partial_sort(_ExPo&&, _RanIt _First, _RanIt _Mid, _RanIt _Last) noexcept
	{	// order [_First, _Last) up to _Mid, using operator<
		// parallelism suspected to be infeasible
	return (_STD partial_sort(_First, _Mid, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE partial_sort_copy
template<class _InIt,
	class _RanIt,
	class _Pr> inline
	_RanIt partial_sort_copy(_InIt _First1, _InIt _Last1,
		_RanIt _First2, _RanIt _Last2, _Pr _Pred)
	{	// copy [_First1, _Last1) into [_First2, _Last2) using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	auto _UMid2 = _UFirst2;
	if (_UFirst1 != _ULast1 && _UFirst2 != _ULast2)
		{
		for (; _UFirst1 != _ULast1 && _UMid2 != _ULast2; ++_UFirst1, (void)++_UMid2)
			{
			*_UMid2 = *_UFirst1;	// copy min(_ULast1 - _UFirst1, _ULast2 - _UFirst2)
			}

		_Make_heap_unchecked(_UFirst2, _UMid2, _Pass_fn(_Pred));
		for (; _UFirst1 != _ULast1; ++_UFirst1)
			{
			if (_DEBUG_LT_PRED(_Pred, *_UFirst1, *_UFirst2))
				{
				// replace top with new largest:
				_Pop_heap_hole_by_index(_UFirst2, static_cast<_Iter_diff_t<_RanIt>>(0),
					static_cast<_Iter_diff_t<_RanIt>>(_UMid2 - _UFirst2),
					static_cast<_Iter_value_t<_InIt>>(*_UFirst1), _Pass_fn(_Pred));
				}
			}

		_Sort_heap_unchecked(_UFirst2, _UMid2, _Pass_fn(_Pred));
		}

	_Seek_wrapped(_First2, _UMid2);
	return (_First2);
	}

template<class _InIt,
	class _RanIt> inline
	_RanIt partial_sort_copy(_InIt _First1, _InIt _Last1,
		_RanIt _First2, _RanIt _Last2)
	{	// copy [_First1, _Last1) into [_First2, _Last2), using operator<
	return (_STD partial_sort_copy(_First1, _Last1, _First2, _Last2, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_RanIt partial_sort_copy(_ExPo&&, _FwdIt _First1, _FwdIt _Last1,
		_RanIt _First2, _RanIt _Last2, _Pr _Pred) noexcept
	{	// copy [_First1, _Last1) into [_First2, _Last2) using _Pred
		// parallelism suspected to be infeasible
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt);
	return (_STD partial_sort_copy(_First1, _Last1, _First2, _Last2, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_RanIt partial_sort_copy(_ExPo&&, _FwdIt _First1, _FwdIt _Last1,
		_RanIt _First2, _RanIt _Last2) noexcept
	{	// copy [_First1, _Last1) into [_First2, _Last2), using operator<
		// parallelism suspected to be infeasible
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt);
	return (_STD partial_sort_copy(_First1, _Last1, _First2, _Last2));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE nth_element
template<class _RanIt,
	class _Pr> inline
	void nth_element(_RanIt _First, _RanIt _Nth, _RanIt _Last, _Pr _Pred)
	{	// order Nth element, using _Pred
	_Adl_verify_range(_First, _Nth);
	_Adl_verify_range(_Nth, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _UNth = _Get_unwrapped(_Nth);
	auto _ULast = _Get_unwrapped(_Last);
	if (_UNth == _ULast)
		{
		return;	// nothing to do
		}

	while (_ISORT_MAX < _ULast - _UFirst)
		{	// divide and conquer, ordering partition containing Nth
		auto _UMid = _Partition_by_median_guess_unchecked(_UFirst, _ULast, _Pass_fn(_Pred));

		if (_UMid.second <= _UNth)
			{
			_UFirst = _UMid.second;
			}
		else if (_UMid.first <= _UNth)
			{
			return;	// Nth inside fat pivot, done
			}
		else
			{
			_ULast = _UMid.first;
			}
		}

	_Insertion_sort_unchecked(_UFirst, _ULast, _Pass_fn(_Pred));	// sort any remainder
	}

template<class _RanIt> inline
	void nth_element(_RanIt _First, _RanIt _Nth, _RanIt _Last)
	{	// order Nth element, using operator<
	_STD nth_element(_First, _Nth, _Last, less<>());
	}

#if _HAS_CXX17
template<class _ExPo,
	class _RanIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void nth_element(_ExPo&&, _RanIt _First, _RanIt _Nth, _RanIt _Last, _Pr _Pred) noexcept
	{	// order Nth element, using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_STD nth_element(_First, _Nth, _Last, _Pass_fn(_Pred));
	}

template<class _ExPo,
	class _RanIt,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	void nth_element(_ExPo&&, _RanIt _First, _RanIt _Nth, _RanIt _Last) noexcept
	{	// order Nth element, using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_STD nth_element(_First, _Nth, _Last);
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE includes
template<class _InIt1,
	class _InIt2,
	class _Pr>
	_NODISCARD inline bool includes(_InIt1 _First1, _InIt1 _Last1, _InIt2 _First2, _InIt2 _Last2, _Pr _Pred)
	{	// test if every element in sorted [_First2, _Last2) is in sorted [_First1, _Last1), using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	for (; _UFirst1 != _ULast1 && _UFirst2 != _ULast2; ++_UFirst1)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UFirst2, *_UFirst1))
			{
			return (false);
			}

		if (!_Pred(*_UFirst1, *_UFirst2))
			{
			++_UFirst2;
			}
		}

	return (_UFirst2 == _ULast2);
	}

template<class _InIt1,
	class _InIt2>
	_NODISCARD inline bool includes(_InIt1 _First1, _InIt1 _Last1, _InIt2 _First2, _InIt2 _Last2)
	{	// test if every element in sorted [_First2, _Last2) is in sorted [_First1, _Last1), using operator<
	return (_STD includes(_First1, _Last1, _First2, _Last2, less<>()));
	}

#if _HAS_CXX17
		// FUNCTION TEMPLATE includes
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool includes(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1, _FwdIt2 _First2,
		_FwdIt2 _Last2, _Pr _Pred) noexcept
	{	// test if every element in sorted [_First2, _Last2) is in sorted [_First1, _Last1), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD includes(_First1, _Last1, _First2, _Last2, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool includes(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1, _FwdIt2 _First2,
		_FwdIt2 _Last2) noexcept
	{	// test if every element in sorted [_First2, _Last2) is in sorted [_First1, _Last1), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD includes(_First1, _Last1, _First2, _Last2));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE set_union
template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Pr> inline
	_OutIt set_union(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _OutIt _Dest, _Pr _Pred)
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	for (; _UFirst1 != _ULast1 && _UFirst2 != _ULast2; ++_UDest)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UFirst1, *_UFirst2))
			{	// copy first
			*_UDest = *_UFirst1;
			++_UFirst1;
			}
		else if (_Pred(*_UFirst2, *_UFirst1))
			{	// copy second
			*_UDest = *_UFirst2;
			++_UFirst2;
			}
		else
			{	// advance both
			*_UDest = *_UFirst1;
			++_UFirst1;
			++_UFirst2;
			}
		}

	_UDest = _Copy_unchecked(_UFirst1, _ULast1, _UDest);
	_Seek_wrapped(_Dest, _Copy_unchecked(_UFirst2, _ULast2, _UDest));
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * set_union(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_union(_First1, _Last1, _First2, _Last2,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt1,
	class _InIt2,
	class _OutIt> inline
	_OutIt set_union(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _OutIt _Dest)
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), using operator<
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * set_union(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _DestTy (&_Dest)[_DestSize])
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_union(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _FwdIt3 _Dest, _Pr _Pred) noexcept
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_union(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_union(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _FwdIt3 _Dest) noexcept
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_union(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _DestTy (&_Dest)[_DestSize]) noexcept
	{	// OR sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_union(_First1, _Last1, _First2, _Last2, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE set_intersection
template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Pr> inline
	_OutIt set_intersection(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _OutIt _Dest, _Pr _Pred)
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	while (_UFirst1 != _ULast1 && _UFirst2 != _ULast2)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UFirst1, *_UFirst2))
			{
			++_UFirst1;
			}
		else if (_Pred(*_UFirst2, *_UFirst1))
			{
			++_UFirst2;
			}
		else
			{
			*_UDest = *_UFirst1;
			++_UDest;
			++_UFirst1;
			++_UFirst2;
			}
		}

	_Seek_wrapped(_Dest, _UDest);
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * set_intersection(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt1,
	class _InIt2,
	class _OutIt> inline
	_OutIt set_intersection(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _OutIt _Dest)
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), using operator<
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * set_intersection(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2, _DestTy (&_Dest)[_DestSize])
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_intersection(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _FwdIt3 _Dest, _Pr _Pred) noexcept
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_intersection(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_intersection(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _FwdIt3 _Dest) noexcept
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_intersection(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2, _DestTy (&_Dest)[_DestSize]) noexcept
	{	// AND sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_intersection(_First1, _Last1, _First2, _Last2, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE set_difference
template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Pr> inline
	_OutIt set_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest, _Pr _Pred)
	{	// take set [_First2, _Last2) from [_First1, _Last1), using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	while (_UFirst1 != _ULast1 && _UFirst2 != _ULast2)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UFirst1, *_UFirst2))
			{	// copy first
			*_UDest = *_UFirst1;
			++_UDest;
			++_UFirst1;
			}
		else
			{
			if (!_Pred(*_UFirst2, *_UFirst1))
				{
				++_UFirst1;
				}

			++_UFirst2;
			}
		}

	_Seek_wrapped(_Dest, _Copy_unchecked(_UFirst1, _ULast1, _UDest));
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * set_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// take set [_First2, _Last2) from [_First1, _Last1), array dest
	return (_STD set_difference(_First1, _Last1, _First2, _Last2,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt1,
	class _InIt2,
	class _OutIt> inline
	_OutIt set_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest)
	{	// take set [_First2, _Last2) from [_First1, _Last1), using operator<
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * set_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize])
	{	// take set [_First2, _Last2) from [_First1, _Last1), array dest
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest, _Pr _Pred) noexcept
	{	// take set [_First2, _Last2) from [_First1, _Last1), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// take set [_First2, _Last2) from [_First1, _Last1), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest) noexcept
	{	// take set [_First2, _Last2) from [_First1, _Last1), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize]) noexcept
	{	// take set [_First2, _Last2) from [_First1, _Last1), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_difference(_First1, _Last1, _First2, _Last2, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE set_symmetric_difference
template<class _InIt1,
	class _InIt2,
	class _OutIt,
	class _Pr> inline
	_OutIt set_symmetric_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest, _Pr _Pred)
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), using _Pred
	_Adl_verify_range(_First1, _Last1);
	_Adl_verify_range(_First2, _Last2);
	auto _UFirst1 = _Get_unwrapped(_First1);
	const auto _ULast1 = _Get_unwrapped(_Last1);
	auto _UFirst2 = _Get_unwrapped(_First2);
	const auto _ULast2 = _Get_unwrapped(_Last2);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt2, _UFirst1, _ULast1, _Pred);
	_DEBUG_ORDER_SET_UNWRAPPED(_InIt1, _UFirst2, _ULast2, _Pred);
	auto _UDest = _Get_unwrapped_unverified(_Dest);
	while (_UFirst1 != _ULast1 && _UFirst2 != _ULast2)
		{
		if (_DEBUG_LT_PRED(_Pred, *_UFirst1, *_UFirst2))
			{	// copy first
			*_UDest = *_UFirst1;
			++_UDest;
			++_UFirst1;
			}
		else if (_Pred(*_UFirst2, *_UFirst1))
			{	// copy second
			*_UDest = *_UFirst2;
			++_UDest;
			++_UFirst2;
			}
		else
			{	// advance both
			++_UFirst1;
			++_UFirst2;
			}
		}

	_UDest = _Copy_unchecked(_UFirst1, _ULast1, _UDest);
	_Seek_wrapped(_Dest, _Copy_unchecked(_UFirst2, _ULast2, _UDest));
	return (_Dest);
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr> inline
	_DestTy * set_symmetric_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred)
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2,
		_Array_iterator<_DestTy, _DestSize>(_Dest), _Pass_fn(_Pred))._Unwrapped());
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _InIt1,
	class _InIt2,
	class _OutIt> inline
	_OutIt set_symmetric_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_OutIt _Dest)
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), using operator<
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _InIt1,
	class _InIt2,
	class _DestTy,
	size_t _DestSize> inline
	_DestTy * set_symmetric_difference(_InIt1 _First1, _InIt1 _Last1,
		_InIt2 _First2, _InIt2 _Last2,
		_DestTy (&_Dest)[_DestSize])
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), array dest
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest, less<>()));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_symmetric_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest, _Pr _Pred) noexcept
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_symmetric_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize], _Pr _Pred) noexcept
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest, _Pass_fn(_Pred)));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */

template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _FwdIt3,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_FwdIt3 set_symmetric_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_FwdIt3 _Dest) noexcept
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt3);
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest));
	}

 #if _ITERATOR_DEBUG_ARRAY_OVERLOADS
template<class _ExPo,
	class _FwdIt1,
	class _FwdIt2,
	class _DestTy,
	size_t _DestSize,
	_Enable_if_execution_policy_t<_ExPo> = 0> inline
	_DestTy * set_symmetric_difference(_ExPo&&, _FwdIt1 _First1, _FwdIt1 _Last1,
		_FwdIt2 _First2, _FwdIt2 _Last2,
		_DestTy (&_Dest)[_DestSize]) noexcept
	{	// XOR sets [_First1, _Last1) and [_First2, _Last2), array dest
		// not parallelized at present, parallelism expected to be feasible in a future release
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt1);
	_REQUIRE_PARALLEL_ITERATOR(_FwdIt2);
	return (_STD set_symmetric_difference(_First1, _Last1, _First2, _Last2, _Dest));
	}
 #endif /* _ITERATOR_DEBUG_ARRAY_OVERLOADS */
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE max_element
template<class _FwdIt,
	class _Pr>
	constexpr _FwdIt _Max_element_unchecked(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find largest element, using _Pred
	_FwdIt _Found = _First;
	if (_First != _Last)
		{
		while (++_First != _Last)
			{
			if (_DEBUG_LT_PRED(_Pred, *_Found, *_First))
				{
				_Found = _First;
				}
			}
		}

	return (_Found);
	}

template<class _FwdIt,
	class _Pr>
	_NODISCARD constexpr _FwdIt max_element(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find largest element, using _Pred
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Max_element_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred)));
	return (_First);
	}

template<class _FwdIt>
	_NODISCARD constexpr _FwdIt max_element(_FwdIt _First, _FwdIt _Last)
	{	// find largest element, using operator<
	return (_STD max_element(_First, _Last, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt max_element(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// find largest element, using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD max_element(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt max_element(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// find largest element, using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD max_element(_First, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE min_element
template<class _FwdIt,
	class _Pr>
	constexpr _FwdIt _Min_element_unchecked(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find smallest element, using _Pred
	_FwdIt _Found = _First;
	if (_First != _Last)
		{
		while (++_First != _Last)
			{
			if (_DEBUG_LT_PRED(_Pred, *_First, *_Found))
				{
				_Found = _First;
				}
			}
		}

	return (_Found);
	}

template<class _FwdIt,
	class _Pr>
	_NODISCARD constexpr _FwdIt min_element(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find smallest element, using _Pred
	_Adl_verify_range(_First, _Last);
	_Seek_wrapped(_First,
		_Min_element_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred)));
	return (_First);
	}

template<class _FwdIt>
	_NODISCARD constexpr _FwdIt min_element(_FwdIt _First, _FwdIt _Last)
	{	// find smallest element, using operator<
	return (_STD min_element(_First, _Last, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt min_element(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// find smallest element, using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD min_element(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt min_element(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// find smallest element, using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD min_element(_First, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE minmax_element
template<class _FwdIt,
	class _Pr>
	constexpr pair<_FwdIt, _FwdIt> _Minmax_element_unchecked(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find smallest and largest elements, using _Pred
	pair<_FwdIt, _FwdIt> _Found(_First, _First);

	if (_First != _Last)
		{
		while (++_First != _Last)
			{	// process one or two elements
			_FwdIt _Next = _First;
			if (++_Next == _Last)
				{	// process last element
				if (_DEBUG_LT_PRED(_Pred, *_First, *_Found.first))
					{
					_Found.first = _First;
					}
				else if (!_DEBUG_LT_PRED(_Pred, *_First, *_Found.second))
					{
					_Found.second = _First;
					}
				}
			else
				{	// process next two elements
				if (_DEBUG_LT_PRED(_Pred, *_Next, *_First))
					{	// test _Next for new smallest
					if (_DEBUG_LT_PRED(_Pred, *_Next, *_Found.first))
						{
						_Found.first = _Next;
						}
					if (!_DEBUG_LT_PRED(_Pred, *_First, *_Found.second))
						{
						_Found.second = _First;
						}
					}
				else
					{	// test _First for new smallest
					if (_DEBUG_LT_PRED(_Pred, *_First, *_Found.first))
						{
						_Found.first = _First;
						}
					if (!_DEBUG_LT_PRED(_Pred, *_Next, *_Found.second))
						{
						_Found.second = _Next;
						}
					}
				_First = _Next;
				}
			}
		}

	return (_Found);
	}

template<class _FwdIt,
	class _Pr>
	_NODISCARD constexpr pair<_FwdIt, _FwdIt> minmax_element(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find smallest and largest elements, using _Pred
	_Adl_verify_range(_First, _Last);
	const auto _Result =
		_Minmax_element_unchecked(_Get_unwrapped(_First), _Get_unwrapped(_Last), _Pass_fn(_Pred));
	_Seek_wrapped(_Last, _Result.second);
	_Seek_wrapped(_First, _Result.first);
	return {_First, _Last};
	}

template<class _FwdIt>
	_NODISCARD constexpr pair<_FwdIt, _FwdIt> minmax_element(_FwdIt _First, _FwdIt _Last)
	{	// find smallest and largest elements, using operator<
	return (_STD minmax_element(_First, _Last, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt, _FwdIt> minmax_element(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// find smallest and largest elements, using _Pred
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD minmax_element(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline pair<_FwdIt, _FwdIt> minmax_element(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// find smallest and largest elements, using operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD minmax_element(_First, _Last));
	}
#endif /* _HAS_CXX17 */

		// FUNCTION TEMPLATE max
template<class _Ty,
	class _Pr>
	_NODISCARD constexpr const _Ty& (max)(const _Ty& _Left, const _Ty& _Right, _Pr _Pred)
		_NOEXCEPT_COND(_NOEXCEPT_OPER(_DEBUG_LT_PRED(_Pred, _Left, _Right)))
	{	// return larger of _Left and _Right using _Pred
	return (_DEBUG_LT_PRED(_Pred, _Left, _Right) ? _Right : _Left);
	}

template<class _Ty,
	class _Pr>
	_NODISCARD constexpr _Ty (max)(initializer_list<_Ty> _Ilist, _Pr _Pred)
	{	// return leftmost/largest
	const _Ty *_Res = _Max_element_unchecked(_Ilist.begin(), _Ilist.end(), _Pass_fn(_Pred));
	return (*_Res);
	}

#pragma warning(push)
#pragma warning(disable: 28285)	// (syntax error in SAL annotation, occurs when _Ty is not an integral type)
template<class _Ty>
	_Post_equal_to_(_Left < _Right ? _Right : _Left)
	_NODISCARD constexpr const _Ty& (max)(const _Ty& _Left, const _Ty& _Right)
		_NOEXCEPT_COND(_NOEXCEPT_OPER(_Left < _Right))
	{	// return larger of _Left and _Right
	if (_Left < _Right)
		{
		_STL_ASSERT(!(_Right < _Left), "invalid comparator");
		return (_Right);
		}

	return (_Left);
	}
#pragma warning(pop)

template<class _Ty>
	_NODISCARD constexpr _Ty (max)(initializer_list<_Ty> _Ilist)
	{	// return leftmost/largest
	return ((_STD max)(_Ilist, less<>()));
	}

		// FUNCTION TEMPLATE min
template<class _Ty,
	class _Pr>
	_NODISCARD constexpr const _Ty& (min)(const _Ty& _Left, const _Ty& _Right, _Pr _Pred)
		_NOEXCEPT_COND(_NOEXCEPT_OPER(_DEBUG_LT_PRED(_Pred, _Right, _Left)))
	{	// return smaller of _Left and _Right using _Pred
	return (_DEBUG_LT_PRED(_Pred, _Right, _Left) ? _Right : _Left);
	}

template<class _Ty,
	class _Pr>
	_NODISCARD constexpr _Ty (min)(initializer_list<_Ty> _Ilist, _Pr _Pred)
	{	// return leftmost/smallest
	const _Ty *_Res = _Min_element_unchecked(_Ilist.begin(), _Ilist.end(), _Pass_fn(_Pred));
	return (*_Res);
	}

#pragma warning(push)
#pragma warning(disable: 28285)	// (syntax error in SAL annotation, occurs when _Ty is not an integral type)
template<class _Ty>
	_Post_equal_to_(_Right < _Left ? _Right : _Left)
	_NODISCARD constexpr const _Ty& (min)(const _Ty& _Left, const _Ty& _Right)
		_NOEXCEPT_COND(_NOEXCEPT_OPER(_Right < _Left))
	{	// return smaller of _Left and _Right
	if (_Right < _Left)
		{
		_STL_ASSERT(!(_Left < _Right), "invalid comparator");
		return (_Right);
		}

	return (_Left);
	}
#pragma warning(pop)

template<class _Ty>
	_NODISCARD constexpr _Ty (min)(initializer_list<_Ty> _Ilist)
	{	// return leftmost/smallest
	return ((_STD min)(_Ilist, less<>()));
	}

		// FUNCTION TEMPLATE minmax
template<class _Ty,
	class _Pr>
	_NODISCARD constexpr pair<const _Ty&, const _Ty&>
		minmax(const _Ty& _Left, const _Ty& _Right, _Pr _Pred)
	{	// return pair(leftmost/smaller, rightmost/larger) of _Left and _Right
	return (_Pred(_Right, _Left)
		? pair<const _Ty&, const _Ty&>(_Right, _Left)
		: pair<const _Ty&, const _Ty&>(_Left, _Right));
	}

template<class _Ty,
	class _Pr>
	_NODISCARD constexpr pair<_Ty, _Ty> minmax(initializer_list<_Ty> _Ilist, _Pr _Pred)
	{	// return {leftmost/smallest, rightmost/largest}
	pair<const _Ty *, const _Ty *> _Res = _Minmax_element_unchecked(
		_Ilist.begin(), _Ilist.end(), _Pass_fn(_Pred));
	return (pair<_Ty, _Ty>(*_Res.first, *_Res.second));
	}

template<class _Ty>
	_NODISCARD constexpr pair<const _Ty&, const _Ty&>
		minmax(const _Ty& _Left, const _Ty& _Right)
	{	// return pair(leftmost/smaller, rightmost/larger) of _Left and _Right
	return (_Right < _Left
		? pair<const _Ty&, const _Ty&>(_Right, _Left)
		: pair<const _Ty&, const _Ty&>(_Left, _Right));
	}

template<class _Ty>
	_NODISCARD constexpr pair<_Ty, _Ty> minmax(initializer_list<_Ty> _Ilist)
	{	// return {leftmost/smallest, rightmost/largest}
	return (_STD minmax(_Ilist, less<>()));
	}

		// FUNCTION TEMPLATE next_permutation
template<class _BidIt,
	class _Pr> inline
	bool next_permutation(_BidIt _First, _BidIt _Last, _Pr _Pred)
	{	// permute and test for pure ascending, using _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UNext = _ULast;
	if (_UFirst == _ULast || _UFirst == --_UNext)
		{
		return (false);
		}

	for (;;)
		{	// find rightmost element smaller than successor
		auto _UNext1 = _UNext;
		if (_DEBUG_LT_PRED(_Pred, *--_UNext, *_UNext1))
			{	// swap with rightmost element that's smaller, flip suffix
			auto _UMid = _ULast;
			do
				{
				--_UMid;
				}
			while (!_DEBUG_LT_PRED(_Pred, *_UNext, *_UMid));

			_STD iter_swap(_UNext, _UMid);
			_Reverse_unchecked(_UNext1, _ULast);
			return (true);
			}

		if (_UNext == _UFirst)
			{	// pure descending, flip all
			_Reverse_unchecked(_UFirst, _ULast);
			return (false);
			}
		}
	}

template<class _BidIt> inline
	bool next_permutation(_BidIt _First, _BidIt _Last)
	{	// permute and test for pure ascending, using operator<
	return (_STD next_permutation(_First, _Last, less<>()));
	}

		// FUNCTION TEMPLATE prev_permutation
template<class _BidIt,
	class _Pr> inline
	bool prev_permutation(_BidIt _First, _BidIt _Last, _Pr _Pred)
	{	// reverse permute and test for pure descending, using _Pred
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	auto _UNext = _ULast;
	if (_UFirst == _ULast || _UFirst == --_UNext)
		{
		return (false);
		}

	for (;;)
		{	// find rightmost element not smaller than successor
		auto _UNext1 = _UNext;
		if (_DEBUG_LT_PRED(_Pred, *_UNext1, *--_UNext))
			{	// swap with rightmost element that's not smaller, flip suffix
			auto _UMid = _ULast;
			do
				{
				--_UMid;
				}
			while (!_DEBUG_LT_PRED(_Pred, *_UMid, *_UNext));

			_STD iter_swap(_UNext, _UMid);
			_Reverse_unchecked(_UNext1, _ULast);
			return (true);
			}

		if (_UNext == _UFirst)
			{	// pure ascending, flip all
			_Reverse_unchecked(_UFirst, _ULast);
			return (false);
			}
		}
	}

template<class _BidIt> inline
	bool prev_permutation(_BidIt _First, _BidIt _Last)
	{	// reverse permute and test for pure descending, using operator<
	return (_STD prev_permutation(_First, _Last, less<>()));
	}

		// FUNCTION TEMPLATES is_sorted AND is_sorted_until
template<class _FwdIt,
	class _Pr>
	_NODISCARD inline _FwdIt is_sorted_until(const _FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// find extent of range that is ordered by predicate
	_Adl_verify_range(_First, _Last);
	auto _UFirst = _Get_unwrapped(_First);
	auto _ULast = _Get_unwrapped(_Last);
	if (_UFirst != _ULast)
		{
		for (auto _UNext = _UFirst; ++_UNext != _ULast; ++_UFirst)
			{
			if (_DEBUG_LT_PRED(_Pred, *_UNext, *_UFirst))
				{
				_ULast = _UNext;
				break;
				}
			}
		}

	_Seek_wrapped(_Last, _ULast);
	return (_Last);
	}

template<class _FwdIt,
	class _Pr>
	_NODISCARD inline bool is_sorted(_FwdIt _First, _FwdIt _Last, _Pr _Pred)
	{	// test if range is ordered by predicate
	_Adl_verify_range(_First, _Last);
	const auto _UFirst = _Get_unwrapped(_First);
	const auto _ULast = _Get_unwrapped(_Last);
	return (_STD is_sorted_until(_UFirst, _ULast, _Pass_fn(_Pred)) == _ULast);
	}

template<class _FwdIt>
	_NODISCARD inline _FwdIt is_sorted_until(_FwdIt _First, _FwdIt _Last)
	{	// find extent of range that is ordered by operator<
	return (_STD is_sorted_until(_First, _Last, less<>()));
	}

template<class _FwdIt>
	_NODISCARD inline bool is_sorted(_FwdIt _First, _FwdIt _Last)
	{	// test if range is ordered by operator<
	return (_STD is_sorted(_First, _Last, less<>()));
	}

#if _HAS_CXX17
template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt is_sorted_until(_ExPo&&, const _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// find extent of range that is ordered by predicate
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_sorted_until(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	class _Pr,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool is_sorted(_ExPo&&, _FwdIt _First, _FwdIt _Last, _Pr _Pred) noexcept
	{	// test if range is ordered by predicate
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_sorted(_First, _Last, _Pass_fn(_Pred)));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline _FwdIt is_sorted_until(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// find extent of range that is ordered by operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_sorted_until(_First, _Last));
	}

template<class _ExPo,
	class _FwdIt,
	_Enable_if_execution_policy_t<_ExPo> = 0>
	_NODISCARD inline bool is_sorted(_ExPo&&, _FwdIt _First, _FwdIt _Last) noexcept
	{	// test if range is ordered by operator<
		// not parallelized at present, parallelism expected to be feasible in a future release
	return (_STD is_sorted(_First, _Last));
	}
#endif /* _HAS_CXX17 */

#if _HAS_CXX17
		// FUNCTION TEMPLATE clamp
template<class _Ty,
	class _Pr>
	_NODISCARD constexpr const _Ty& clamp(const _Ty& _Val, const _Ty& _Min_val, const _Ty& _Max_val, _Pr _Pred)
	{	// returns _Val constrained to [_Min_val, _Max_val] ordered by _Pred
#if _ITERATOR_DEBUG_LEVEL == 2
	if (_DEBUG_LT_PRED(_Pred, _Max_val, _Min_val))
		{
		_STL_REPORT_ERROR("invalid bounds arguments passed to std::clamp");
		return (_Val);
		}
#endif /* _ITERATOR_DEBUG_LEVEL == 2 */

	return (_DEBUG_LT_PRED(_Pred, _Max_val, _Val)
			? _Max_val
			: _DEBUG_LT_PRED(_Pred, _Val, _Min_val)
				? _Min_val
				: _Val);
	}

template<class _Ty>
	_NODISCARD constexpr const _Ty& clamp(const _Ty& _Val, const _Ty& _Min_val, const _Ty& _Max_val)
	{	// returns _Val constrained to [_Min_val, _Max_val]
	return (_STD clamp(_Val, _Min_val, _Max_val, less<>()));
	}
#endif /* _HAS_CXX17 */

_STD_END
#pragma pop_macro("new")
_STL_RESTORE_CLANG_WARNINGS
#pragma warning(pop)
#pragma pack(pop)
#endif /* RC_INVOKED */
#endif /* _ALGORITHM_ */

/*
 * Copyright (c) by P.J. Plauger. All rights reserved.
 * Consult your license regarding permissions and restrictions.
V6.50:0009 */
