Drizzled Public API Documentation

btr0sea.cc
/*****************************************************************************

Copyright (C) 1996, 2009, Innobase Oy. All Rights Reserved.
Copyright (C) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/********************************************************************//**
@file btr/btr0sea.cc
The index tree adaptive search

Created 2/17/1996 Heikki Tuuri
*************************************************************************/

#include "btr0sea.h"
#ifdef UNIV_NONINL
#include "btr0sea.ic"
#endif

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"

/** Flag: has the search system been enabled?
Protected by btr_search_latch and btr_search_enabled_mutex. */
UNIV_INTERN bool btr_search_enabled = TRUE;
UNIV_INTERN ibool btr_search_fully_disabled = FALSE;

/** Mutex protecting btr_search_enabled */
static mutex_t btr_search_enabled_mutex;

/** A dummy variable to fool the compiler */
UNIV_INTERN ulint btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
/** Number of successful adaptive hash index lookups */
UNIV_INTERN ulint btr_search_n_succ = 0;
/** Number of failed adaptive hash index lookups */
UNIV_INTERN ulint btr_search_n_hash_fail = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/** padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte btr_sea_pad1[64];

/** The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
indexes. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t* btr_search_latch_temp;

/** padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte btr_sea_pad2[64];

/** The adaptive hash index */
UNIV_INTERN btr_search_sys_t* btr_search_sys;

#ifdef UNIV_PFS_RWLOCK
/* Key to register btr_search_sys with performance schema */
UNIV_INTERN mysql_pfs_key_t btr_search_latch_key;
#endif /* UNIV_PFS_RWLOCK */

/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
#define BTR_SEARCH_PAGE_BUILD_LIMIT	16

/** The global limit for consecutive potentially successful hash searches,
before hash index building is started */
#define BTR_SEARCH_BUILD_LIMIT		100

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side);/*!< in: hash for searches from left side? */

/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check
makes it probable that, when we have reserved the btr search system latch
and we need to allocate a new frame, it succeeds. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		buf_block_t*	block = buf_block_alloc(NULL, 0);

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}

/*****************************************************************//**
Creates and initializes the adaptive search system at a database start. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
	ulint	hash_size)	/*!< in: hash index hash table size */
{
	/* We allocate the search latch from dynamic memory:
	see above at the global variable definition */

	btr_search_latch_temp = static_cast<rw_lock_t *>(mem_alloc(sizeof(rw_lock_t)));

	rw_lock_create(btr_search_latch_key, &btr_search_latch,
		       SYNC_SEARCH_SYS);
	mutex_create(btr_search_enabled_mutex_key,
		     &btr_search_enabled_mutex, SYNC_SEARCH_SYS_CONF);

	btr_search_sys = (btr_search_sys_t *)mem_alloc(sizeof(btr_search_sys_t));

	btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
}

/*****************************************************************//**
Frees the adaptive search system at a database shutdown. */
UNIV_INTERN
void
btr_search_sys_free(void)
/*=====================*/
{
	rw_lock_free(&btr_search_latch);
	mem_free(btr_search_latch_temp);
	btr_search_latch_temp = NULL;
	mem_heap_free(btr_search_sys->hash_index->heap);
	hash_table_free(btr_search_sys->hash_index);
	mem_free(btr_search_sys);
	btr_search_sys = NULL;
}
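
/* Illustrative usage sketch (added for this documentation page, not part of
btr0sea.cc): the expected lifecycle of the search system. At startup the
server sizes the hash table relative to the buffer pool and calls
btr_search_sys_create(); at shutdown it calls btr_search_sys_free(). The
sizing formula and function name below are assumptions for the example. */
#if 0	/* example only */
static
void
example_search_sys_lifecycle(ulint buf_pool_size_in_bytes)
{
	/* Assumed heuristic: one hash cell per 64 pointers' worth of
	buffer pool. */
	btr_search_sys_create(buf_pool_size_in_bytes / sizeof(void*) / 64);

	/* ... normal operation: lookups, hash index builds and drops ... */

	btr_search_sys_free();
}
#endif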

/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	/* Disable access to the hash index; also tell ha_insert_for_fold()
	to stop adding new nodes to the hash index, but still allow updating
	existing nodes */
	btr_search_enabled = FALSE;

	/* Clear all block->is_hashed flags and remove all entries
	from btr_search_sys->hash_index. */
	buf_pool_drop_hash_index();

	/* hash index has been cleaned up, disallow any operation to
	the hash index */
	btr_search_fully_disabled = TRUE;

	/* btr_search_enabled_mutex should guarantee this. */
	ut_ad(!btr_search_enabled);

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}

/********************************************************************//**
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;
	btr_search_fully_disabled = FALSE;

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}

/*****************************************************************//**
Creates and initializes a search info struct.
@return	own: search info struct */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
	mem_heap_t*	heap)	/*!< in: heap where created */
{
	btr_search_t*	info;

	info = (btr_search_t *)mem_heap_alloc(heap, sizeof(btr_search_t));

#ifdef UNIV_DEBUG
	info->magic_n = BTR_SEARCH_MAGIC_N;
#endif /* UNIV_DEBUG */

	info->ref_count = 0;
	info->root_guess = NULL;

	info->hash_analysis = 0;
	info->n_hash_potential = 0;

	info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ = 0;
	info->n_hash_fail = 0;
	info->n_patt_succ = 0;
	info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

	/* Set some sensible values */
	info->n_fields = 1;
	info->n_bytes = 0;

	info->left_side = TRUE;

	return(info);
}

/*****************************************************************//**
Returns the value of ref_count. The value is protected by
btr_search_latch.
@return	ref_count value */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
	btr_search_t*	info)	/*!< in: search info */
{
	ulint	ret;

	ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);
	ret = info->ref_count;
	rw_lock_s_unlock(&btr_search_latch);

	return(ret);
}

/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its
fields are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	dict_index_t*	index;
	ulint		n_unique;
	int		cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (dict_index_is_ibuf(index)) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->low_match, cursor->low_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->up_match, cursor->up_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto increment_potential;
	}

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
			  cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->left_side = TRUE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->left_side = TRUE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->left_side = FALSE;
	}
}
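
/* Illustrative sketch (not part of btr0sea.cc): the semantics assumed of
ut_pair_cmp() above. A recommended hash prefix is the pair (number of
complete fields, number of bytes of the next field), and pairs compare
lexicographically, so (2, 0) < (2, 3) < (3, 0). The helper below mirrors
that contract; the name is hypothetical. */
#if 0	/* example only */
static
int
example_pair_cmp(ulint n1, ulint m1, ulint n2, ulint m2)
{
	if (n1 != n2) {
		return(n1 < n2 ? -1 : 1);
	}
	if (m1 != m2) {
		return(m1 < m2 ? -1 : 1);
	}
	return(0);
}
#endif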

/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return	TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block */
	btr_cur_t*	cursor __attribute__((unused)))
				/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(cursor);

	info->last_hash_succ = FALSE;

	ut_a(buf_block_state_valid(block));
	ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
	    && (block->left_side == info->left_side)) {

		if ((block->is_hashed)
		    && (block->curr_n_fields == info->n_fields)
		    && (block->curr_n_bytes == info->n_bytes)
		    && (block->curr_left_side == info->left_side)) {

			/* The search would presumably have succeeded using
			the hash index */

			info->last_hash_succ = TRUE;
		}

		block->n_hash_helps++;
	} else {
		block->n_hash_helps = 1;
		block->n_fields = info->n_fields;
		block->n_bytes = info->n_bytes;
		block->left_side = info->left_side;
	}

#ifdef UNIV_DEBUG
	if (cursor->index->table->does_not_fit_in_memory) {
		block->n_hash_helps = 0;
	}
#endif /* UNIV_DEBUG */

	if ((block->n_hash_helps > page_get_n_recs(block->frame)
	     / BTR_SEARCH_PAGE_BUILD_LIMIT)
	    && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

		if ((!block->is_hashed)
		    || (block->n_hash_helps
			> 2 * page_get_n_recs(block->frame))
		    || (block->n_fields != block->curr_n_fields)
		    || (block->n_bytes != block->curr_n_bytes)
		    || (block->left_side != block->curr_left_side)) {

			/* Build a new hash index on the page */

			return(TRUE);
		}
	}

	return(FALSE);
}
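
/* Worked example (not part of btr0sea.cc) of the build trigger above. With
BTR_SEARCH_PAGE_BUILD_LIMIT == 16 and BTR_SEARCH_BUILD_LIMIT == 100, a page
holding 320 user records needs more than 320 / 16 == 20 hash helps, and the
index as a whole needs at least 100 consecutive potentially successful hash
searches, before a build is recommended. The function name is hypothetical. */
#if 0	/* example only */
static
ibool
example_would_recommend_build(ulint n_recs, ulint n_hash_helps,
			      ulint n_hash_potential)
{
	return(n_hash_helps > n_recs / BTR_SEARCH_PAGE_BUILD_LIMIT
	       && n_hash_potential >= BTR_SEARCH_BUILD_LIMIT);
}
#endif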

/*********************************************************************//**
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check what
happens at page boundaries, and therefore there can be misleading hash nodes.
Also, collisions in the fold value can lead to misleading references. This
function lazily fixes these imperfections in the hash index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block where cursor positioned */
	btr_cur_t*	cursor)	/*!< in: cursor */
{
	ulint		fold;
	rec_t*		rec;
	index_id_t	index_id;

	ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(page_align(btr_cur_get_rec(cursor))
	      == buf_block_get_frame(block));

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	if ((info->n_hash_potential > 0)
	    && (block->curr_n_fields == info->n_fields)
	    && (block->curr_n_bytes == info->n_bytes)
	    && (block->curr_left_side == info->left_side)) {
		mem_heap_t*	heap		= NULL;
		ulint		offsets_[REC_OFFS_NORMAL_SIZE];
		rec_offs_init(offsets_);

		rec = btr_cur_get_rec(cursor);

		if (!page_rec_is_user_rec(rec)) {

			return;
		}

		index_id = cursor->index->id;
		fold = rec_fold(rec,
				rec_get_offsets(rec, cursor->index, offsets_,
						ULINT_UNDEFINED, &heap),
				block->curr_n_fields,
				block->curr_n_bytes, index_id);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
#ifdef UNIV_SYNC_DEBUG
		ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

		ha_insert_for_fold(btr_search_sys->hash_index, fold,
				   block, rec);
	}
}

/*********************************************************************//**
Updates the search info. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	buf_block_t*	block;
	ibool		build_index;
	ulint*		params;
	ulint*		params2;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	block = btr_cur_get_block(cursor);

	/* NOTE that the following two function calls do NOT protect
	info or block->n_fields etc. with any semaphore, to save CPU time!
	We cannot assume the fields are consistent when we return from
	those functions! */

	btr_search_info_update_hash(info, cursor);

	build_index = btr_search_update_block_hash_info(info, block, cursor);

	if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

		btr_search_check_free_space_in_heap();
	}

	if (cursor->flag == BTR_CUR_HASH_FAIL) {
		/* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
		btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

		rw_lock_x_lock(&btr_search_latch);

		btr_search_update_hash_ref(info, block, cursor);

		rw_lock_x_unlock(&btr_search_latch);
	}

	if (build_index) {
		/* Note that since we did not protect block->n_fields etc.
		with any semaphore, the values can be inconsistent. We have
		to check inside the function call that they make sense. We
		also malloc an array and store the values there to make sure
		the compiler does not let the function call parameters change
		inside the called function. It might be that the compiler
		would optimize the call just to pass pointers to block. */

		params = (ulint *)mem_alloc(3 * sizeof(ulint));
		params[0] = block->n_fields;
		params[1] = block->n_bytes;
		params[2] = block->left_side;

		/* Make sure the compiler cannot deduce the values and do
		optimizations */

		params2 = params + btr_search_this_is_zero;

		btr_search_build_page_hash_index(cursor->index,
						 block,
						 params2[0],
						 params2[1],
						 params2[2]);
		mem_free(params);
	}
}

/******************************************************************//**
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values.
@return	TRUE if success */
static
ibool
btr_search_check_guess(
/*===================*/
	btr_cur_t*	cursor,	/*!< in: guessed cursor position */
	ibool		can_only_compare_to_cursor_rec,
				/*!< in: if we do not have a latch on the
				page of cursor, but only a latch on
				btr_search_latch, then ONLY the columns
				of the record UNDER the cursor are
				protected, not the next or previous record
				in the chain: we cannot look at the next or
				previous record to check our guess! */
	const dtuple_t*	tuple,	/*!< in: data tuple */
	ulint		mode,	/*!< in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
				or PAGE_CUR_GE */
	mtr_t*		mtr)	/*!< in: mtr */
{
	rec_t*		rec;
	ulint		n_unique;
	ulint		match;
	ulint		bytes;
	int		cmp;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	ibool		success		= FALSE;
	rec_offs_init(offsets_);

	n_unique = dict_index_get_n_unique_in_tree(cursor->index);

	rec = btr_cur_get_rec(cursor);

	ut_ad(page_rec_is_user_rec(rec));

	match = 0;
	bytes = 0;

	offsets = rec_get_offsets(rec, cursor->index, offsets,
				  n_unique, &heap);
	cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
					     offsets, &match, &bytes);

	if (mode == PAGE_CUR_GE) {
		if (cmp == 1) {
			goto exit_func;
		}

		cursor->up_match = match;

		if (match >= n_unique) {
			success = TRUE;
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_LE) {
		if (cmp == -1) {
			goto exit_func;
		}

		cursor->low_match = match;

	} else if (mode == PAGE_CUR_G) {
		if (cmp != -1) {
			goto exit_func;
		}
	} else if (mode == PAGE_CUR_L) {
		if (cmp != 1) {
			goto exit_func;
		}
	}

	if (can_only_compare_to_cursor_rec) {
		/* Since we could not determine if our guess is right just by
		looking at the record under the cursor, return FALSE */
		goto exit_func;
	}

	match = 0;
	bytes = 0;

	if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
		rec_t*	prev_rec;

		ut_ad(!page_rec_is_infimum(rec));

		prev_rec = page_rec_get_prev(rec);

		if (page_rec_is_infimum(prev_rec)) {
			success = btr_page_get_prev(page_align(prev_rec), mtr)
				== FIL_NULL;

			goto exit_func;
		}

		offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_GE) {
			success = cmp == 1;
		} else {
			success = cmp != -1;
		}

		goto exit_func;
	} else {
		rec_t*	next_rec;

		ut_ad(!page_rec_is_supremum(rec));

		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {
			if (btr_page_get_next(page_align(next_rec), mtr)
			    == FIL_NULL) {

				cursor->up_match = 0;
				success = TRUE;
			}

			goto exit_func;
		}

		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_unique, &heap);
		cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
						     offsets, &match, &bytes);
		if (mode == PAGE_CUR_LE) {
			success = cmp == -1;
			cursor->up_match = match;
		} else {
			success = cmp != 1;
		}
	}
exit_func:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(success);
}

/******************************************************************//**
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values.
@return	TRUE if succeeded */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
	dict_index_t*	index,		/*!< in: index */
	btr_search_t*	info,		/*!< in: index search info */
	const dtuple_t*	tuple,		/*!< in: logical record */
	ulint		mode,		/*!< in: PAGE_CUR_L, ... */
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ...;
					NOTE that only if has_search_latch
					is 0, we will have a latch set on
					the cursor page, otherwise we assume
					the caller uses his search latch
					to protect the record! */
	btr_cur_t*	cursor,		/*!< out: tree cursor */
	ulint		has_search_latch,/*!< in: latch mode the caller
					currently has on btr_search_latch:
					RW_S_LATCH, RW_X_LATCH, or 0 */
	mtr_t*		mtr)		/*!< in: mtr */
{
	buf_pool_t*	buf_pool;
	buf_block_t*	block;
	rec_t*		rec;
	ulint		fold;
	index_id_t	index_id;
#ifdef notdefined
	btr_cur_t	cursor2;
	btr_pcur_t	pcur;
#endif
	ut_ad(index && info && tuple && cursor && mtr);
	ut_ad((latch_mode == BTR_SEARCH_LEAF)
	      || (latch_mode == BTR_MODIFY_LEAF));

	/* Note that, for efficiency, the struct info may not be protected by
	any latch here! */

	if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

		return(FALSE);
	}

	cursor->n_fields = info->n_fields;
	cursor->n_bytes = info->n_bytes;

	if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
			  < cursor->n_fields + (cursor->n_bytes > 0))) {

		return(FALSE);
	}

	index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_succ++;
#endif
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

	cursor->fold = fold;
	cursor->flag = BTR_CUR_HASH;

	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_lock(&btr_search_latch);

		if (UNIV_UNLIKELY(!btr_search_enabled)) {
			goto failure_unlock;
		}
	}

	ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
	ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);

	rec = (rec_t *)ha_search_and_get_data(btr_search_sys->hash_index, fold);

	if (UNIV_UNLIKELY(!rec)) {
		goto failure_unlock;
	}

	block = buf_block_align(rec);

	if (UNIV_LIKELY(!has_search_latch)) {

		if (UNIV_UNLIKELY(
			    !buf_page_get_known_nowait(latch_mode, block,
						       BUF_MAKE_YOUNG,
						       __FILE__, __LINE__,
						       mtr))) {
			goto failure_unlock;
		}

		rw_lock_s_unlock(&btr_search_latch);

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
	}

	if (UNIV_UNLIKELY(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE)) {

		if (UNIV_LIKELY(!has_search_latch)) {

			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	ut_ad(page_rec_is_user_rec(rec));

	btr_cur_position(index, rec, block, cursor);

	/* Check the validity of the guess within the page */

	/* If we only have the latch on btr_search_latch, not on the
	page, it only protects the columns of the record the cursor
	is positioned on. We cannot look at the next of the previous
	record to determine if our guess for the cursor position is
	right. */
	if (UNIV_UNLIKELY(index_id != btr_page_get_index_id(block->frame))
	    || !btr_search_check_guess(cursor,
				       has_search_latch,
				       tuple, mode, mtr)) {
		if (UNIV_LIKELY(!has_search_latch)) {
			btr_leaf_page_release(block, latch_mode, mtr);
		}

		goto failure;
	}

	if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

		info->n_hash_potential++;
	}

#ifdef notdefined
	/* These lines of code can be used in a debug version to check
	the correctness of the searched cursor position: */

	info->last_hash_succ = FALSE;

	/* Currently, does not work if the following fails: */
	ut_ad(!has_search_latch);

	btr_leaf_page_release(block, latch_mode, mtr);

	btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
				    &cursor2, 0, mtr);
	if (mode == PAGE_CUR_GE
	    && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

		/* If mode is PAGE_CUR_GE, then the binary search
		in the index tree may actually take us to the supremum
		of the previous page */

		info->last_hash_succ = FALSE;

		btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
					  &pcur, mtr);
		ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
	} else {
		ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
	}

	/* NOTE that it is theoretically possible that the above assertions
	fail if the page of the cursor gets removed from the buffer pool
	meanwhile! Thus it might not be a bug. */
#endif
	info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
	btr_search_n_succ++;
#endif
	if (UNIV_LIKELY(!has_search_latch)
	    && buf_page_peek_if_too_old(&block->page)) {

		buf_page_make_young(&block->page);
	}

	/* Increment the page get statistics though we did not really
	fix the page: for user info only */
	buf_pool = buf_pool_from_bpage(&block->page);
	buf_pool->stat.n_page_gets++;

	return(TRUE);

	/*-------------------------------------------*/
failure_unlock:
	if (UNIV_LIKELY(!has_search_latch)) {
		rw_lock_s_unlock(&btr_search_latch);
	}
failure:
	cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
	info->n_hash_fail++;

	if (info->n_hash_succ > 0) {
		info->n_hash_succ--;
	}
#endif
	info->last_hash_succ = FALSE;

	return(FALSE);
}
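
/* Illustrative caller sketch (not part of btr0sea.cc): how a tree search is
expected to consult the guess. The caller tries the adaptive hash index
first and falls back to a normal B-tree descent when the guess fails; the
additional checks that the real caller in btr0cur.cc performs are omitted
here, and example_search_leaf() is a hypothetical name. */
#if 0	/* example only */
static
void
example_search_leaf(dict_index_t* index, const dtuple_t* tuple,
		    btr_cur_t* cursor, mtr_t* mtr)
{
	btr_search_t*	info = index->search_info;

	if (btr_search_guess_on_hash(index, info, tuple, PAGE_CUR_LE,
				     BTR_SEARCH_LEAF, cursor, 0, mtr)) {
		/* Hash hit: cursor is already positioned on the leaf. */
		return;
	}

	/* Hash miss: cursor->flag is now BTR_CUR_HASH_FAIL; descend the
	tree normally. btr_search_info_update_slow() may later repair or
	rebuild the hash index for the page. */
	btr_cur_search_to_nth_level(index, 0, tuple, PAGE_CUR_LE,
				    BTR_SEARCH_LEAF, cursor, 0, mtr);
}
#endif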

/********************************************************************//**
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
	buf_block_t*	block)	/*!< in: block containing index page,
				s- or x-latched, or an index page
				for which we know that
				block->buf_fix_count == 0 */
{
	hash_table_t*		table;
	ulint			n_fields;
	ulint			n_bytes;
	const page_t*		page;
	const rec_t*		rec;
	ulint			fold;
	ulint			prev_fold;
	index_id_t		index_id;
	ulint			n_cached;
	ulint			n_recs;
	ulint*			folds;
	ulint			i;
	mem_heap_t*		heap;
	const dict_index_t*	index;
	ulint*			offsets;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
	rw_lock_s_lock(&btr_search_latch);
	page = block->frame;

	if (UNIV_LIKELY(!block->is_hashed)) {

		rw_lock_s_unlock(&btr_search_latch);

		return;
	}

	table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX)
	      || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	index = block->index;
	ut_a(!dict_index_is_ibuf(index));

	/* NOTE: The fields of block must not be accessed after
	releasing btr_search_latch, as the index page might only
	be s-latched! */

	rw_lock_s_unlock(&btr_search_latch);

	ut_a(n_fields + n_bytes > 0);

	n_recs = page_get_n_recs(page);

	/* Calculate and cache fold values into an array for fast deletion
	from the hash index */

	folds = (ulint *)mem_alloc(n_recs * sizeof(ulint));

	n_cached = 0;

	rec = page_get_infimum_rec(page);
	rec = page_rec_get_next_low(rec, page_is_comp(page));

	index_id = btr_page_get_index_id(page);

	ut_a(index_id == index->id);

	prev_fold = 0;

	heap = NULL;
	offsets = NULL;

	while (!page_rec_is_supremum(rec)) {
		offsets = rec_get_offsets(rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

		if (fold == prev_fold && prev_fold != 0) {

			goto next_rec;
		}

		/* Remove all hash nodes pointing to this page from the
		hash chain */

		folds[n_cached] = fold;
		n_cached++;
next_rec:
		rec = page_rec_get_next_low(rec, page_rec_is_comp(rec));
		prev_fold = fold;
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(!block->is_hashed)) {
		/* Someone else has meanwhile dropped the hash index */

		goto cleanup;
	}

	ut_a(block->index == index);

	if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
	    || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

		/* Someone else has meanwhile built a new hash index on the
		page, with different parameters */

		rw_lock_x_unlock(&btr_search_latch);

		mem_free(folds);
		goto retry;
	}

	for (i = 0; i < n_cached; i++) {

		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	ut_a(index->search_info->ref_count > 0);
	index->search_info->ref_count--;

	block->is_hashed = FALSE;
	block->index = NULL;

cleanup:
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (UNIV_UNLIKELY(block->n_pointers)) {
		/* Corruption */
		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Corruption of adaptive hash index."
			" After dropping\n"
			"InnoDB: the hash index to a page of %s,"
			" still %lu hash nodes remain.\n",
			index->name, (ulong) block->n_pointers);
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_validate();
	} else {
		rw_lock_x_unlock(&btr_search_latch);
	}
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	mem_free(folds);
}

/********************************************************************//**
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
	ulint	space,		/*!< in: space id */
	ulint	zip_size,	/*!< in: compressed page size in bytes
				or 0 for uncompressed pages */
	ulint	page_no)	/*!< in: page number */
{
	buf_block_t*	block;
	mtr_t		mtr;

	if (!buf_page_peek_if_search_hashed(space, page_no)) {

		return;
	}

	mtr_start(&mtr);

	/* We assume that if the caller has a latch on the page, then the
	caller has already dropped the hash index for the page, and we never
	get here. Therefore we can acquire the s-latch to the page without
	having to fear a deadlock. */

	block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
				 BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
				 &mtr);
	/* Because the buffer pool mutex was released by
	buf_page_peek_if_search_hashed(), it is possible that the
	block was removed from the buffer pool by another thread
	before buf_page_get_gen() got a chance to acquire the buffer
	pool mutex again. Thus, we must check for a NULL return. */

	if (UNIV_LIKELY(block != NULL)) {

		buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);

		btr_search_drop_page_hash_index(block);
	}

	mtr_commit(&mtr);
}

/********************************************************************//**
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
	dict_index_t*	index,	/*!< in: index for which to build */
	buf_block_t*	block,	/*!< in: index page, s- or x-latched */
	ulint		n_fields,/*!< in: hash this many full fields */
	ulint		n_bytes,/*!< in: hash this many bytes from the next
				field */
	ibool		left_side)/*!< in: hash for searches from left side? */
{
	hash_table_t*	table;
	page_t*		page;
	rec_t*		rec;
	rec_t*		next_rec;
	ulint		fold;
	ulint		next_fold;
	index_id_t	index_id;
	ulint		n_cached;
	ulint		n_recs;
	ulint*		folds;
	rec_t**		recs;
	ulint		i;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	ut_ad(index);
	ut_a(!dict_index_is_ibuf(index));

	table = btr_search_sys->hash_index;
	page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);
	} else {
		rw_lock_s_unlock(&btr_search_latch);
	}

	n_recs = page_get_n_recs(page);

	if (n_recs == 0) {

		return;
	}

	/* Check that the values for hash index build are sensible */

	if (n_fields + n_bytes == 0) {

		return;
	}

	if (dict_index_get_n_unique_in_tree(index) < n_fields
	    || (dict_index_get_n_unique_in_tree(index) == n_fields
		&& n_bytes > 0)) {
		return;
	}

	/* Calculate and cache fold values and corresponding records into
	an array for fast insertion to the hash index */

	folds = (ulint *)mem_alloc(n_recs * sizeof(ulint));
	recs = (rec_t **)mem_alloc(n_recs * sizeof(rec_t*));

	n_cached = 0;

	index_id = btr_page_get_index_id(page);

	rec = page_rec_get_next(page_get_infimum_rec(page));

	offsets = rec_get_offsets(rec, index, offsets,
				  n_fields + (n_bytes > 0), &heap);

	if (!page_rec_is_supremum(rec)) {
		ut_a(n_fields <= rec_offs_n_fields(offsets));

		if (n_bytes > 0) {
			ut_a(n_fields < rec_offs_n_fields(offsets));
		}
	}

	fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

	if (left_side) {

		folds[n_cached] = fold;
		recs[n_cached] = rec;
		n_cached++;
	}

	for (;;) {
		next_rec = page_rec_get_next(rec);

		if (page_rec_is_supremum(next_rec)) {

			if (!left_side) {

				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}

			break;
		}

		offsets = rec_get_offsets(next_rec, index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);

		if (fold != next_fold) {
			/* Insert an entry into the hash index */

			if (left_side) {

				folds[n_cached] = next_fold;
				recs[n_cached] = next_rec;
				n_cached++;
			} else {
				folds[n_cached] = fold;
				recs[n_cached] = rec;
				n_cached++;
			}
		}

		rec = next_rec;
		fold = next_fold;
	}

	btr_search_check_free_space_in_heap();

	rw_lock_x_lock(&btr_search_latch);

	if (UNIV_UNLIKELY(btr_search_fully_disabled)) {
		goto exit_func;
	}

	if (block->is_hashed && ((block->curr_n_fields != n_fields)
				 || (block->curr_n_bytes != n_bytes)
				 || (block->curr_left_side != left_side))) {
		goto exit_func;
	}

	/* This counter is decremented every time we drop page
	hash index entries and is incremented here. Since we can
	rebuild hash index for a page that is already hashed, we
	have to take care not to increment the counter in that
	case. */
	if (!block->is_hashed) {
		index->search_info->ref_count++;
	}

	block->is_hashed = TRUE;
	block->n_hash_helps = 0;

	block->curr_n_fields = n_fields;
	block->curr_n_bytes = n_bytes;
	block->curr_left_side = left_side;
	block->index = index;

	for (i = 0; i < n_cached; i++) {

		ha_insert_for_fold(table, folds[i], block, recs[i]);
	}

exit_func:
	rw_lock_x_unlock(&btr_search_latch);

	mem_free(folds);
	mem_free(recs);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}
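
/* Worked example (not part of btr0sea.cc) of the fold caching above.
Suppose the user records on a page fold, in order, to 5, 5, 7, 7, 9 (values
invented for the example). With left_side == TRUE the first record of each
group is cached, so a lookup lands on the leftmost record with a given
prefix; with left_side == FALSE the last record of each group is cached
instead. The function name is hypothetical. */
#if 0	/* example only */
static
void
example_fold_grouping(void)
{
	static const ulint	folds[5] = {5, 5, 7, 7, 9};
	ulint			cached[5];
	ulint			n_cached = 0;
	ulint			i;
	ibool			left_side = TRUE;

	/* Mirror of the caching loop above, on plain integers. */
	if (left_side) {
		cached[n_cached++] = folds[0];
	}
	for (i = 0; i + 1 < 5; i++) {
		if (folds[i] != folds[i + 1]) {
			cached[n_cached++] = left_side
				? folds[i + 1] : folds[i];
		}
	}
	if (!left_side) {
		cached[n_cached++] = folds[4];
	}

	/* With left_side == TRUE this caches {5, 7, 9}: one entry per
	fold group, pointing at the group's leftmost record. */
}
#endif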

/********************************************************************//**
Moves or deletes hash entries for moved records. If new_page is already
hashed, then the hash index for page, if any, is dropped. If new_page is not
hashed, and page is hashed, then a new hash index is built to new_page with
the same parameters as page (this often happens when a page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
	buf_block_t*	new_block,	/*!< in: records are copied
					to this page */
	buf_block_t*	block,		/*!< in: index page from which
					records were copied, and the
					copied records will be deleted
					from this page */
	dict_index_t*	index)		/*!< in: record descriptor */
{
	ulint	n_fields;
	ulint	n_bytes;
	ibool	left_side;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
	ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_a(!new_block->is_hashed || new_block->index == index);
	ut_a(!block->is_hashed || block->index == index);
	ut_a(!(new_block->is_hashed || block->is_hashed)
	     || !dict_index_is_ibuf(index));

	rw_lock_s_lock(&btr_search_latch);

	if (new_block->is_hashed) {

		rw_lock_s_unlock(&btr_search_latch);

		btr_search_drop_page_hash_index(block);

		return;
	}

	if (block->is_hashed) {

		n_fields = block->curr_n_fields;
		n_bytes = block->curr_n_bytes;
		left_side = block->curr_left_side;

		new_block->n_fields = block->curr_n_fields;
		new_block->n_bytes = block->curr_n_bytes;
		new_block->left_side = left_side;

		rw_lock_s_unlock(&btr_search_latch);

		ut_a(n_fields + n_bytes > 0);

		btr_search_build_page_hash_index(index, new_block, n_fields,
						 n_bytes, left_side);
		ut_ad(n_fields == block->curr_n_fields);
		ut_ad(n_bytes == block->curr_n_bytes);
		ut_ad(left_side == block->curr_left_side);
		return;
	}

	rw_lock_s_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned on the
				record to delete using btr_cur_search_...,
				the record is not yet deleted */
{
	hash_table_t*	table;
	buf_block_t*	block;
	rec_t*		rec;
	ulint		fold;
	index_id_t	index_id;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	mem_heap_t*	heap		= NULL;
	rec_offs_init(offsets_);

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
	ut_a(!dict_index_is_ibuf(cursor->index));

	table = btr_search_sys->hash_index;

	index_id = cursor->index->id;
	fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
					     ULINT_UNDEFINED, &heap),
			block->curr_n_fields, block->curr_n_bytes, index_id);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	rw_lock_x_lock(&btr_search_latch);

	ha_search_and_delete_if_found(table, fold, rec);

	rw_lock_x_unlock(&btr_search_latch);
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t*	table;
	buf_block_t*	block;
	rec_t*		rec;

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	rw_lock_x_lock(&btr_search_latch);

	if ((cursor->flag == BTR_CUR_HASH)
	    && (cursor->n_fields == block->curr_n_fields)
	    && (cursor->n_bytes == block->curr_n_bytes)
	    && !block->curr_left_side) {

		table = btr_search_sys->hash_index;

		ha_search_and_update_if_found(table, cursor->fold, rec,
					      block, page_rec_get_next(rec));

		rw_lock_x_unlock(&btr_search_latch);
	} else {
		rw_lock_x_unlock(&btr_search_latch);

		btr_search_update_hash_on_insert(cursor);
	}
}

/********************************************************************//**
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
	btr_cur_t*	cursor)	/*!< in: cursor which was positioned to the
				place to insert using btr_cur_search_...,
				and the new record has been inserted next
				to the cursor */
{
	hash_table_t*	table;
	buf_block_t*	block;
	rec_t*		rec;
	rec_t*		ins_rec;
	rec_t*		next_rec;
	index_id_t	index_id;
	ulint		fold;
	ulint		ins_fold;
	ulint		next_fold = 0; /* remove warning (??? bug ???) */
	ulint		n_fields;
	ulint		n_bytes;
	ibool		left_side;
	ibool		locked		= FALSE;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	table = btr_search_sys->hash_index;

	btr_search_check_free_space_in_heap();

	rec = btr_cur_get_rec(cursor);

	block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!block->is_hashed) {

		return;
	}

	ut_a(block->index == cursor->index);
	ut_a(!dict_index_is_ibuf(cursor->index));

	index_id = cursor->index->id;

	n_fields = block->curr_n_fields;
	n_bytes = block->curr_n_bytes;
	left_side = block->curr_left_side;

	ins_rec = page_rec_get_next(rec);
	next_rec = page_rec_get_next(ins_rec);

	offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
				  ULINT_UNDEFINED, &heap);
	ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);

	if (!page_rec_is_supremum(next_rec)) {
		offsets = rec_get_offsets(next_rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		next_fold = rec_fold(next_rec, offsets, n_fields,
				     n_bytes, index_id);
	}

	if (!page_rec_is_infimum(rec)) {
		offsets = rec_get_offsets(rec, cursor->index, offsets,
					  n_fields + (n_bytes > 0), &heap);
		fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
	} else {
		if (left_side) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto check_next_rec;
	}

	if (fold != ins_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {
			ha_insert_for_fold(table, fold, block, rec);
		} else {
			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}
	}

check_next_rec:
	if (page_rec_is_supremum(next_rec)) {

		if (!left_side) {

			if (!locked) {
				rw_lock_x_lock(&btr_search_latch);

				locked = TRUE;
			}

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
		}

		goto function_exit;
	}

	if (ins_fold != next_fold) {

		if (!locked) {

			rw_lock_x_lock(&btr_search_latch);

			locked = TRUE;
		}

		if (!left_side) {

			ha_insert_for_fold(table, ins_fold, block, ins_rec);
			/*
			fputs("Hash insert for ", stderr);
			dict_index_name_print(stderr, cursor->index);
			fprintf(stderr, " fold %lu\n", ins_fold);
			*/
		} else {
			ha_insert_for_fold(table, next_fold, block, next_rec);
		}
	}

function_exit:
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	if (locked) {
		rw_lock_x_unlock(&btr_search_latch);
	}
}

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/********************************************************************//**
Validates the search system.
@return	TRUE if ok */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
{
	ha_node_t*	node;
	ulint		n_page_dumps	= 0;
	ibool		ok		= TRUE;
	ulint		i;
	ulint		cell_count;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;

	/* How many cells to check before temporarily releasing
	btr_search_latch. */
	ulint		chunk_size = 10000;

	rec_offs_init(offsets_);

	rw_lock_x_lock(&btr_search_latch);
	buf_pool_page_hash_x_lock_all();

	cell_count = hash_get_n_cells(btr_search_sys->hash_index);

	for (i = 0; i < cell_count; i++) {
		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if ((i != 0) && ((i % chunk_size) == 0)) {
			buf_pool_page_hash_x_unlock_all();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_page_hash_x_lock_all();
		}

		node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

		for (; node != NULL; node = node->next) {
			const buf_block_t*	block
				= buf_block_align(node->data);
			const buf_block_t*	hash_block;
			buf_pool_t*		buf_pool;
			index_id_t		page_index_id;

			buf_pool = buf_pool_from_bpage((buf_page_t*) block);

			if (UNIV_LIKELY(buf_block_get_state(block)
					== BUF_BLOCK_FILE_PAGE)) {

				/* The space and offset are only valid
				for file blocks. It is possible that
				the block is being freed
				(BUF_BLOCK_REMOVE_HASH, see the
				assertion and the comment below) */
				hash_block = buf_block_hash_get(
					buf_pool,
					buf_block_get_space(block),
					buf_block_get_page_no(block));
			} else {
				hash_block = NULL;
			}

			if (hash_block) {
				ut_a(hash_block == block);
			} else {
				/* When a block is being freed,
				buf_LRU_search_and_free_block() first
				removes the block from
				buf_pool->page_hash by calling
				buf_LRU_block_remove_hashed_page().
				After that, it invokes
				btr_search_drop_page_hash_index() to
				remove the block from
				btr_search_sys->hash_index. */

				ut_a(buf_block_get_state(block)
				     == BUF_BLOCK_REMOVE_HASH);
			}

			ut_a(!dict_index_is_ibuf(block->index));

			offsets = rec_get_offsets((const rec_t*) node->data,
						  block->index, offsets,
						  block->curr_n_fields
						  + (block->curr_n_bytes > 0),
						  &heap);

			page_index_id = btr_page_get_index_id(block->frame);

			if (UNIV_UNLIKELY
			    (!block->is_hashed || node->fold
			     != rec_fold((rec_t*)(node->data),
					 offsets,
					 block->curr_n_fields,
					 block->curr_n_bytes,
					 page_index_id))) {
				const page_t*	page = block->frame;

				ok = FALSE;
				ut_print_timestamp(stderr);

				fprintf(stderr,
					"  InnoDB: Error in an adaptive hash"
					" index pointer to page %lu\n"
					"InnoDB: ptr mem address %p"
					" index id %llu,"
					" node fold %lu, rec fold %lu\n",
					(ulong) page_get_page_no(page),
					node->data,
					(ullint) page_index_id,
					(ulong) node->fold,
					(ulong) rec_fold((rec_t*)(node->data),
							 offsets,
							 block->curr_n_fields,
							 block->curr_n_bytes,
							 page_index_id));

				fputs("InnoDB: Record ", stderr);
				rec_print_new(stderr, (rec_t*)node->data,
					      offsets);
				fprintf(stderr, "\nInnoDB: on that page."
					" Page mem address %p, is hashed %lu,"
					" n fields %lu, n bytes %lu\n"
					"InnoDB: side %lu\n",
					(void*) page, (ulong) block->is_hashed,
					(ulong) block->curr_n_fields,
					(ulong) block->curr_n_bytes,
					(ulong) block->curr_left_side);

				if (n_page_dumps < 20) {
					buf_page_print(page, 0);
					n_page_dumps++;
				}
			}
		}
	}

	for (i = 0; i < cell_count; i += chunk_size) {
		ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

		/* We release btr_search_latch every once in a while to
		give other queries a chance to run. */
		if (i != 0) {
			buf_pool_page_hash_x_unlock_all();
			rw_lock_x_unlock(&btr_search_latch);
			os_thread_yield();
			rw_lock_x_lock(&btr_search_latch);
			buf_pool_page_hash_x_lock_all();
		}

		if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
			ok = FALSE;
		}
	}

	buf_pool_page_hash_x_unlock_all();
	rw_lock_x_unlock(&btr_search_latch);
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */