1#ifndef BMAGGREGATOR__H__INCLUDED__
2#define BMAGGREGATOR__H__INCLUDED__
26#ifndef BM__H__INCLUDED__
29# error missing include (bm.h or bm64.h)
295 unsigned src_and_size,
297 unsigned src_sub_size);
348 int* is_result_full);
361 bool init_clear =
true);
369 unsigned i,
unsigned j,
370 unsigned* arg_blk_count,
375 unsigned i,
unsigned j,
376 unsigned* arg_blk_count,
381 unsigned i,
unsigned j,
unsigned block_count);
417 unsigned k,
unsigned i,
unsigned j)
BMNOEXCEPT;
444 unsigned arg_group0_size = 0;
445 unsigned arg_group1_size = 0;
452 unsigned top_block_size_ = 0;
455 bool range_set_ =
false;
480template<
typename Agg,
typename It>
485 int pipeline_size = 0;
486 for (It it = first; it != last; ++it, ++pipeline_size)
498 for (It it = first; it != last; ++it, ++w)
501 auto op_st = agg.get_operation_status();
502 if (op_st != Agg::op_done)
504 op_st = agg.run_step(i, j);
505 pipeline_size -= (op_st == Agg::op_done);
508 if (pipeline_size <= 0)
544 arg_group0_size = arg_group1_size = operation_ = top_block_size_ = 0;
545 operation_status_ = op_undefined;
555 range_from_ = from; range_to_ = to;
584 BM_ASSERT(arg_group1_size < max_aggregator_cap);
588 return arg_group1_size;
590 ar_->arg_bv1[arg_group1_size++] = bv;
591 return arg_group1_size;
595 BM_ASSERT(arg_group0_size < max_aggregator_cap);
599 return arg_group0_size;
601 ar_->arg_bv0[arg_group0_size++] = bv;
602 return arg_group0_size;
611 combine_or(bv_target, ar_->arg_bv0, arg_group0_size);
619 combine_and(bv_target, ar_->arg_bv0, arg_group0_size);
627 return combine_and_sub(bv_target,
628 ar_->arg_bv0, arg_group0_size,
629 ar_->arg_bv1, arg_group1_size,
false);
637 return combine_and_sub(bv_target,
638 ar_->arg_bv0, arg_group0_size,
639 ar_->arg_bv1, arg_group1_size, any);
647 return find_first_and_sub(idx,
648 ar_->arg_bv0, arg_group0_size,
649 ar_->arg_bv1, arg_group1_size);
657 combine_shift_right_and(bv_target, ar_->arg_bv0, arg_group0_size,
false);
672 unsigned top_blocks = resize_target(bv_target, bv_src, src_size);
673 for (
unsigned i = 0; i < top_blocks; ++i)
675 unsigned set_array_max =
676 find_effective_sub_block_size(i, bv_src, src_size,
false);
677 for (
unsigned j = 0; j < set_array_max; ++j)
679 combine_or(i, j, bv_target, bv_src, src_size);
703 unsigned top_blocks = resize_target(bv_target, bv_src, src_size);
704 for (
unsigned i = 0; i < top_blocks; ++i)
707 unsigned set_array_max =
708 find_effective_sub_block_size(i, bv_src, src_size,
true);
709 for (
unsigned j = 0; j < set_array_max; ++j)
728 bool global_found =
false;
730 if (!bv_src_and || !src_and_size)
738 unsigned top_blocks = resize_target(bv_target, bv_src_and, src_and_size);
739 unsigned top_blocks2 = resize_target(bv_target, bv_src_sub, src_sub_size,
false);
741 if (top_blocks2 > top_blocks)
742 top_blocks = top_blocks2;
744 for (
unsigned i = 0; i < top_blocks; ++i)
746 unsigned set_array_max = find_effective_sub_block_size(i, bv_src_and, src_and_size,
true);
751 unsigned set_array_max2 =
752 find_effective_sub_block_size(i, bv_src_sub, src_sub_size,
false);
753 if (set_array_max2 > set_array_max)
754 set_array_max = set_array_max2;
756 for (
unsigned j = 0; j < set_array_max; ++j)
760 bv_src_and, src_and_size,
761 bv_src_sub, src_sub_size,
765 bman_target.check_alloc_top_subblock(i);
769 bman_target.validate_top_full(i);
779 bman_target.opt_copy_bit_block(i, j, ar_->tb1,
780 opt_mode_, ar_->tb_opt);
784 global_found |= found;
801 if (!bv_src_and || !src_and_size)
804 unsigned top_blocks = max_top_blocks(bv_src_and, src_and_size);
805 unsigned top_blocks2 = max_top_blocks(bv_src_sub, src_sub_size);
807 if (top_blocks2 > top_blocks)
808 top_blocks = top_blocks2;
819 if (nblock_from == nblock_to)
822 unsigned i = top_from;
825 bv_src_and, src_and_size,
826 bv_src_sub, src_sub_size,
831 unsigned block_bit_idx = 0;
840 if (top_to < top_blocks)
841 top_blocks = top_to+1;
848 for (
unsigned i = top_from; i < top_blocks; ++i)
865 set_array_max = find_effective_sub_block_size(i, bv_src_and, src_and_size,
true);
870 unsigned set_array_max2 =
871 find_effective_sub_block_size(i, bv_src_sub, src_sub_size,
false);
872 if (set_array_max2 > set_array_max)
873 set_array_max = set_array_max2;
876 for (; j < set_array_max; ++j)
880 bv_src_and, src_and_size,
881 bv_src_sub, src_sub_size,
885 unsigned block_bit_idx = 0;
913 for (
unsigned k = 0; k < src_size; ++k)
918 bv->get_blocks_manager();
919 const bm::word_t*
const* blk_blk_arg = bman_arg.get_topblock(i);
922 if (top_null_as_zero)
956 unsigned arg_blk_count = 0;
957 unsigned arg_blk_gap_count = 0;
959 sort_input_blocks_or(bv_src, src_size, i, j,
960 &arg_blk_count, &arg_blk_gap_count);
966 bman_target.check_alloc_top_subblock(i);
967 bman_target.set_block_ptr(i, j, blk);
970 bman_target.validate_top_full(i);
976 if (arg_blk_count || arg_blk_gap_count)
979 process_bit_blocks_or(bman_target, i, j, arg_blk_count);
982 if (arg_blk_gap_count)
984 process_gap_blocks_or(arg_blk_gap_count);
987 bman_target.opt_copy_bit_block(i, j, ar_->tb1,
988 opt_mode_, ar_->tb_opt);
1004 unsigned arg_blk_count = 0;
1005 unsigned arg_blk_gap_count = 0;
1007 sort_input_blocks_and(bv_src, src_size,
1009 &arg_blk_count, &arg_blk_gap_count);
1015 if (arg_blk_count || arg_blk_gap_count)
1017 if (!arg_blk_gap_count && (arg_blk_count == 1))
1023 bman_target.check_alloc_top_subblock(i);
1024 bman_target.set_block_ptr(i, j, blk);
1026 bman_target.validate_top_full(i);
1033 digest = process_bit_blocks_and(arg_blk_count, digest);
1039 if (arg_blk_gap_count)
1041 digest = process_gap_blocks_and(arg_blk_gap_count, digest);
1046 bman_target.opt_copy_bit_block(i, j, ar_->tb1,
1047 opt_mode_, ar_->tb_opt);
1054template<
typename BV>
1059 int* is_result_full)
1064 unsigned arg_blk_and_count = 0;
1065 unsigned arg_blk_and_gap_count = 0;
1066 unsigned arg_blk_sub_count = 0;
1067 unsigned arg_blk_sub_gap_count = 0;
1069 *is_result_full = 0;
1070 bm::word_t* blk = sort_input_blocks_and(bv_src_and, src_and_size,
1072 &arg_blk_and_count, &arg_blk_and_gap_count);
1074 if (!blk || !(arg_blk_and_count | arg_blk_and_gap_count))
1079 blk = sort_input_blocks_or(bv_src_sub, src_sub_size,
1081 &arg_blk_sub_count, &arg_blk_sub_gap_count);
1088 if (!arg_blk_and_gap_count && (arg_blk_and_count == 1))
1092 *is_result_full = 1;
1102 digest = process_bit_blocks_and(arg_blk_and_count, digest);
1105 digest = process_bit_blocks_sub(arg_blk_sub_count, digest);
1112 process_gap_blocks_and(arg_blk_and_gap_count, digest);
1116 if (arg_blk_sub_gap_count)
1119 process_gap_blocks_sub(arg_blk_sub_gap_count, digest);
1127template<
typename BV>
1131 for (
unsigned k = 0; k < arg_blk_gap_count; ++k)
1137template<
typename BV>
1143 bool single_bit_found;
1144 unsigned single_bit_idx;
1145 for (
unsigned k = 0; k < arg_blk_gap_count; ++k)
1155 if (single_bit_found)
1157 for (++k; k < arg_blk_gap_count; ++k)
1171template<
typename BV>
1177 bool single_bit_found;
1178 unsigned single_bit_idx;
1179 for (
unsigned k = 0; k < arg_blk_gap_count; ++k)
1190 if (single_bit_found)
1192 for (++k; k < arg_blk_gap_count; ++k)
1206template<
typename BV>
1211 for (
unsigned i = 0; i < arg_blk_gap_count && b; ++i)
1220template<
typename BV>
1225 for (
unsigned i = 0; i < arg_blk_gap_count; ++i)
1237template<
typename BV>
1239 unsigned i,
unsigned j,
1240 unsigned arg_blk_count)
1254 unsigned unroll_factor, len, len_unr;
1257 len = arg_blk_count - k;
1258 len_unr = len - (len % unroll_factor);
1259 for( ;k < len_unr; k+=unroll_factor)
1262 ar_->v_arg_or_blk[k], ar_->v_arg_or_blk[k+1],
1263 ar_->v_arg_or_blk[k+2], ar_->v_arg_or_blk[k+3]);
1274 len = arg_blk_count - k;
1275 len_unr = len - (len % unroll_factor);
1276 for( ;k < len_unr; k+=unroll_factor)
1288 for (; k < arg_blk_count; ++k)
1305template<
typename BV>
1315 if (range_set_ && (nb_from == nb_to))
1325 switch (arg_blk_count)
1335 ar_->v_arg_and_blk[k],
1336 ar_->v_arg_and_blk[k+1],
1343 unsigned unroll_factor, len, len_unr;
1344 unsigned single_bit_idx;
1347 len = arg_blk_count - k;
1348 len_unr = len - (len % unroll_factor);
1349 for (; k < len_unr; k += unroll_factor)
1353 ar_->v_arg_and_blk[k], ar_->v_arg_and_blk[k + 1],
1354 ar_->v_arg_and_blk[k + 2], ar_->v_arg_and_blk[k + 3],
1363 for (++k; k < arg_blk_count; ++k)
1365 const bm::word_t* arg_blk = ar_->v_arg_and_blk[k];
1366 if (!(mask & arg_blk[nword]))
1376 for (; k < arg_blk_count; ++k)
1389template<
typename BV>
1395 unsigned single_bit_idx;
1396 const word_t** args = &ar_->v_arg_or_blk[0];
1397 for (
unsigned k = 0; k < arg_blk_count; ++k)
1413 for (++k; k < arg_blk_count; ++k)
1415 if (mask & args[k][nword])
1429template<
typename BV>
1437 if (bman_target.is_init())
1438 bman_target.deinit_tree();
1441 unsigned top_blocks = bman_target.top_block_size();
1443 bool need_realloc =
false;
1446 for (
unsigned i = 0; i < src_size; ++i)
1451 bv->get_blocks_manager();
1452 unsigned arg_top_blocks = bman_arg.top_block_size();
1453 if (arg_top_blocks > top_blocks)
1455 need_realloc =
true;
1456 top_blocks = arg_top_blocks;
1459 if (arg_size > size)
1465 bman_target.reserve_top_blocks(top_blocks);
1467 if (!bman_target.is_init())
1468 bman_target.init_tree();
1469 if (size > bv_target.size())
1470 bv_target.resize(size);
1477template<
typename BV>
1482 unsigned top_blocks = 1;
1485 for (
unsigned i = 0; i < src_size; ++i)
1490 unsigned arg_top_blocks = bman_arg.top_block_size();
1491 if (arg_top_blocks > top_blocks)
1492 top_blocks = arg_top_blocks;
1499template<
typename BV>
1503 unsigned i,
unsigned j,
1504 unsigned* arg_blk_count,
1508 for (
unsigned k = 0; k < src_size; ++k)
1513 const bm::word_t* arg_blk = bman_arg.get_block_ptr(i, j);
1518 ar_->v_arg_or_blk_gap[*arg_blk_gap_count] =
BMGAP_PTR(arg_blk);
1519 (*arg_blk_gap_count)++;
1526 *arg_blk_gap_count = *arg_blk_count = 0;
1529 ar_->v_arg_or_blk[*arg_blk_count] = arg_blk;
1538template<
typename BV>
1542 unsigned i,
unsigned j,
1543 unsigned* arg_blk_count,
1546 unsigned full_blk_cnt = 0;
1548 for (
unsigned k = 0; k < src_size; ++k)
1553 const bm::word_t* arg_blk = bman_arg.get_block_ptr(i, j);
1557 *arg_blk_gap_count = *arg_blk_count = 0;
1562 ar_->v_arg_and_blk_gap[*arg_blk_gap_count] =
BMGAP_PTR(arg_blk);
1563 (*arg_blk_gap_count)++;
1579 ar_->v_arg_and_blk[*arg_blk_count] = arg_blk;
1593template<
typename BV>
1607 for (
unsigned i = 1; i < src_size; ++i)
1611 bv_target.bit_or(*bv);
1617template<
typename BV>
1631 for (
unsigned i = 1; i < src_size; ++i)
1635 bv_target.bit_and(*bv);
1641template<
typename BV>
1644 unsigned src_and_size,
1646 unsigned src_sub_size)
1650 combine_and_horizontal(bv_target, bv_src_and, src_and_size);
1652 for (
unsigned i = 0; i < src_sub_size; ++i)
1662template<
typename BV>
1667 top_block_size_ = resize_target(bv_target, bv_src, src_size);
1670 for (
unsigned i = 0; i < src_size; ++i)
1671 ar_->carry_overs_[i] = 0;
1676template<
typename BV>
1688 prepare_shift_right_and(bv_target, bv_src_and, src_and_size);
1692 if (i > top_block_size_)
1694 if (!any_carry_overs(&ar_->carry_overs_[0], src_and_size))
1702 combine_shift_right_and(i, j, bv_target, bv_src_and, src_and_size);
1709 return bv_target.any();
1714template<
typename BV>
1720 bm::word_t* blk = temp_blk_ ? temp_blk_ : ar_->tb1;
1721 unsigned char* carry_overs = &(ar_->carry_overs_[0]);
1725 bool blk_zero =
false;
1730 const bm::word_t* arg_blk = bman_arg.get_block(i, j);
1751 for (
unsigned k = 1; k < src_size; ++k)
1753 unsigned carry_over = carry_overs[k];
1754 if (!digest && !carry_over)
1759 blk_zero = !blk_zero;
1761 const bm::word_t* arg_blk = get_arg_block(bv_src, k, i, j);
1762 carry_overs[k] = (
unsigned char)
1763 process_shift_right_and(blk, arg_blk, digest, carry_over);
1764 BM_ASSERT(carry_overs[k] == 0 || carry_overs[k] == 1);
1776 bman_target.opt_copy_bit_block(i, j, blk, opt_mode_, ar_->tb_opt);
1784template<
typename BV>
1791 BM_ASSERT(carry_over == 1 || carry_over == 0);
1803 blk[0] = carry_over;
1824 blk[0] = carry_over & arg_blk[0];
1845template<
typename BV>
1848 unsigned k,
unsigned i,
unsigned j)
BMNOEXCEPT
1850 return bv_src[k]->get_blocks_manager().get_block(i, j);
1855template<
typename BV>
1860 unsigned acc = carry_overs[0];
1861 for (
unsigned i = 1; i < co_size; ++i)
1862 acc |= carry_overs[i];
1871template<
typename BV>
1877 temp_blk_ = temp_block;
1881 case BM_NOT_DEFINED:
1883 case BM_SHIFT_R_AND:
1884 prepare_shift_right_and(*bv_target, ar_->arg_bv0, arg_group0_size);
1885 operation_status_ = op_prepared;
1894template<
typename BV>
1898 BM_ASSERT(operation_status_ == op_prepared || operation_status_ == op_in_progress);
1903 case BM_NOT_DEFINED:
1906 case BM_SHIFT_R_AND:
1908 if (i > top_block_size_)
1910 if (!this->any_carry_overs(&ar_->carry_overs_[0], arg_group0_size))
1912 operation_status_ = op_done;
1913 return operation_status_;
1917 this->combine_shift_right_and(i, j, *bv_target_,
1918 ar_->arg_bv0, arg_group0_size);
1919 operation_status_ = op_in_progress;
1927 return operation_status_;
#define BM_DECLARE_TEMP_BLOCK(x)
#define IS_FULL_BLOCK(addr)
#define BM_ASSERT_THROW(x, xerrcode)
#define FULL_BLOCK_FAKE_ADDR
#define FULL_BLOCK_REAL_ADDR
Bit manipulation primitives (internal)
pre-processor un-defines to avoid global space pollution (internal)
Algorithms for fast aggregation of a group of bit-vectors.
BV::block_idx_type block_idx_type
void set_optimization(typename bvector_type::optmode opt=bvector_type::opt_compress)
set on-the-fly bit-block compression. By default the aggregator does not try to optimize the result,...
bvector_type * check_create_target()
void reset() BMNOEXCEPT
Reset aggregate groups, forget all attached vectors.
void process_gap_blocks_or(unsigned block_count)
max_size
Maximum aggregation capacity in one pass.
const bvector_type * get_target() const
static unsigned process_shift_right_and(bm::word_t *BMRESTRICT blk, const bm::word_t *BMRESTRICT arg_blk, digest_type &BMRESTRICT digest, unsigned carry_over) BMNOEXCEPT
void combine_and_sub_horizontal(bvector_type &bv_target, const bvector_type_const_ptr *bv_src_and, unsigned src_and_size, const bvector_type_const_ptr *bv_src_sub, unsigned src_sub_size)
Horizontal AND-SUB aggregation (potentially slower) method.
static unsigned find_effective_sub_block_size(unsigned i, const bvector_type_const_ptr *bv_src, unsigned src_size, bool top_null_as_zero) BMNOEXCEPT
bm::word_t * sort_input_blocks_or(const bvector_type_const_ptr *bv_src, unsigned src_size, unsigned i, unsigned j, unsigned *arg_blk_count, unsigned *arg_blk_gap_count) BMNOEXCEPT
void combine_shift_right_and(bvector_type &bv_target)
Aggregate added group of vectors using SHIFT-RIGHT and logical AND Operation does NOT perform an expl...
void prepare_shift_right_and(bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
unsigned add(const bvector_type *bv, unsigned agr_group=0) BMNOEXCEPT
Attach source bit-vector to an argument group (0 or 1).
operation_status run_step(unsigned i, unsigned j)
Run a step of the current aggregation operation.
void stage(bm::word_t *temp_block)
Prepare operation, create internal resources, analyse dependencies.
bool combine_and_sub(bvector_type &bv_target)
Aggregate added group of vectors using fused logical AND-SUB Operation does NOT perform an explicit r...
void combine_or(unsigned i, unsigned j, bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
bvector_type::blocks_manager_type blocks_manager_type
digest_type combine_and_sub(unsigned i, unsigned j, const bvector_type_const_ptr *bv_src_and, unsigned src_and_size, const bvector_type_const_ptr *bv_src_sub, unsigned src_sub_size, int *is_result_full)
void combine_or(bvector_type &bv_target)
Aggregate added group of vectors using logical OR Operation does NOT perform an explicit reset of arg...
digest_type process_bit_blocks_sub(unsigned block_count, digest_type digest)
void combine_and(bvector_type &bv_target)
Aggregate added group of vectors using logical AND Operation does NOT perform an explicit reset of ar...
void set_operation(int op_code) BMNOEXCEPT
Set operation code for the aggregator.
const bvector_type * bvector_type_const_ptr
static const bm::word_t * get_arg_block(const bvector_type_const_ptr *bv_src, unsigned k, unsigned i, unsigned j) BMNOEXCEPT
bool test_gap_blocks_and(unsigned block_count, unsigned bit_idx)
void set_range_hint(size_type from, size_type to) BMNOEXCEPT
Set search hint for the range where results need to be searched (experimental, for internal use).
static unsigned max_top_blocks(const bvector_type_const_ptr *bv_src, unsigned src_size) BMNOEXCEPT
bm::word_t * get_temp_block()
void combine_and_horizontal(bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
Horizontal AND aggregation (potentially slower) method.
bool find_first_and_sub(size_type &idx)
operation
Codes for aggregation operations which can be pipelined for efficient execution.
int get_operation() const BMNOEXCEPT
Get current operation code.
bool process_bit_blocks_or(blocks_manager_type &bman_target, unsigned i, unsigned j, unsigned block_count)
static unsigned resize_target(bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size, bool init_clear=true)
bm::word_t * sort_input_blocks_and(const bvector_type_const_ptr *bv_src, unsigned src_size, unsigned i, unsigned j, unsigned *arg_blk_count, unsigned *arg_blk_gap_count) BMNOEXCEPT
bool combine_shift_right_and(unsigned i, unsigned j, bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
bool test_gap_blocks_sub(unsigned block_count, unsigned bit_idx)
void combine_or_horizontal(bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
Horizontal OR aggregation (potentially slower) method.
digest_type process_gap_blocks_and(unsigned block_count, digest_type digest)
bvector_type::allocator_type::allocator_pool_type allocator_pool_type
static bool any_carry_overs(const unsigned char *carry_overs, unsigned co_size) BMNOEXCEPT
void combine_and(unsigned i, unsigned j, bvector_type &bv_target, const bvector_type_const_ptr *bv_src, unsigned src_size)
digest_type process_bit_blocks_and(unsigned block_count, digest_type digest)
digest_type process_gap_blocks_sub(unsigned block_count, digest_type digest)
operation_status get_operation_status() const
Bitvector Bit-vector container with runtime compression of bits.
optmode
Optimization mode Every next level means additional checks (better compression vs time)
@ opt_compress
compress blocks when possible (GAP/prefix sum)
allocator_type::allocator_pool_type allocator_pool_type
blocks_manager< Alloc > blocks_manager_type
bm::id64_t bit_block_and_5way(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src0, const bm::word_t *BMRESTRICT src1, const bm::word_t *BMRESTRICT src2, const bm::word_t *BMRESTRICT src3, bm::id64_t digest) BMNOEXCEPT
digest based bit-block AND 5-way
void bit_block_copy(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src) BMNOEXCEPT
Bitblock copy operation.
void block_init_digest0(bm::word_t *const block, bm::id64_t digest) BMNOEXCEPT
Init block with 000111000 pattern based on digest.
bm::id64_t bit_block_and(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src) BMNOEXCEPT
Plain bitblock AND operation. Function does not analyse availability of source and destination blocks...
bool bit_block_or_5way(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src1, const bm::word_t *BMRESTRICT src2, const bm::word_t *BMRESTRICT src3, const bm::word_t *BMRESTRICT src4) BMNOEXCEPT
5 way (target, source1, source2) bitblock OR operation. Function does not analyse availability of sou...
bool bit_block_shift_r1_and_unr(bm::word_t *BMRESTRICT block, bm::word_t co_flag, const bm::word_t *BMRESTRICT mask_block, bm::id64_t *BMRESTRICT digest) BMNOEXCEPT
Right bit-shift bitblock by 1 bit (reference) + AND.
bm::id64_t calc_block_digest0(const bm::word_t *const block) BMNOEXCEPT
Compute digest for 64 non-zero areas.
bm::id64_t bit_block_and_2way(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src1, const bm::word_t *BMRESTRICT src2, bm::id64_t digest) BMNOEXCEPT
digest based bit-block AND
bool bit_find_first(const bm::word_t *BMRESTRICT block, unsigned *BMRESTRICT pos) BMNOEXCEPT
BIT block find the first set bit.
bool bit_block_or_3way(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src1, const bm::word_t *BMRESTRICT src2) BMNOEXCEPT
3 way (target | source1 | source2) bitblock OR operation. Function does not analyse availability of s...
void bit_block_set(bm::word_t *BMRESTRICT dst, bm::word_t value) BMNOEXCEPT
Bitblock memset operation.
BMFORCEINLINE bm::id64_t digest_mask(unsigned from, unsigned to) BMNOEXCEPT
Compute digest mask for [from..to] positions.
bool bit_block_or(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src) BMNOEXCEPT
Plain bitblock OR operation. Function does not analyse availability of source and destination blocks.
bool bit_is_all_zero(const bm::word_t *BMRESTRICT start) BMNOEXCEPT
Returns "true" if all bits in the block are 0.
bool bit_find_first_if_1(const bm::word_t *BMRESTRICT block, unsigned *BMRESTRICT first, bm::id64_t digest) BMNOEXCEPT
BIT block find the first set bit if only 1 bit is set.
bm::id64_t update_block_digest0(const bm::word_t *const block, bm::id64_t digest) BMNOEXCEPT
Compute digest for 64 non-zero areas based on existing digest (function revalidates zero areas)
bm::id64_t bit_block_sub(bm::word_t *BMRESTRICT dst, const bm::word_t *BMRESTRICT src) BMNOEXCEPT
Plain bitblock SUB (AND NOT) operation. Function does not analyse availability of source and destinat...
bool is_bits_one(const bm::wordop_t *start) BMNOEXCEPT
Returns "true" if all bits in the block are 1.
unsigned gap_test_unr(const T *BMRESTRICT buf, const unsigned pos) BMNOEXCEPT
Tests if bit = pos is true. Analog of bm::gap_test with SIMD unrolling.
void gap_and_to_bitset(unsigned *BMRESTRICT dest, const T *BMRESTRICT pcurr) BMNOEXCEPT
ANDs GAP block to bitblock.
void gap_sub_to_bitset(unsigned *BMRESTRICT dest, const T *BMRESTRICT pcurr) BMNOEXCEPT
SUB (AND NOT) GAP block to bitblock.
void gap_add_to_bitset(unsigned *BMRESTRICT dest, const T *BMRESTRICT pcurr, unsigned len) BMNOEXCEPT
Adds(OR) GAP block to bitblock.
void combine_and(BV &bv, It first, It last)
AND Combine bitvector and the iterable sequence.
void combine_or(BV &bv, It first, It last)
OR Combine bitvector and the iterable sequence.
void aggregator_pipeline_execute(It first, It last)
Experimental method to run multiple aggregators in sync.
const unsigned set_array_mask
const unsigned set_block_mask
void aligned_free(void *ptr) BMNOEXCEPT
Aligned free.
const unsigned set_sub_array_size
void * aligned_new_malloc(size_t size)
Aligned malloc (unlike classic malloc it throws bad_alloc exception)
const unsigned set_word_shift
const unsigned set_block_size
unsigned long long int id64_t
const unsigned set_array_shift
bm::id_t block_to_global_index(unsigned i, unsigned j, unsigned block_idx) BMNOEXCEPT
calculate bvector<> global bit-index from block-local coords
unsigned short gap_word_t
const unsigned set_top_array_size
const unsigned set_block_shift
const unsigned set_word_mask