@@ -34,100 +34,6 @@ static void vertex_hash_readnext(TableScanDesc scan, Buffer *bufp,
 								 Page *pagep, HashPageOpaque *opaquep);
 static void vertex_hash_kill_items(TableScanDesc scan);
 
-/*
- * vertex_hash_next() -- Get the next item in a scan.
- *
- * On entry, so->currPos describes the current page, which may
- * be pinned but not locked, and so->currPos.itemIndex identifies
- * which item was previously returned.
- *
- * On successful exit, scan->xs_ctup.t_self is set to the TID
- * of the next heap tuple.  so->currPos is updated as needed.
- *
- * On failure exit (no more tuples), we return false with a pin
- * held on the bucket page but no pins or locks held on the
- * overflow page.
- */
-bool
-vertex_hash_next(TableScanDesc scan, ScanDirection dir)
-{
-	Relation	rel = scan->rs_rd;
-	VertexHeapScanDesc so = (VertexHeapScanDesc) scan;
-	HashScanPosItem *currItem;
-	BlockNumber blkno;
-	Buffer		buf;
-	bool		end_of_scan = false;
-
-	/*
-	 * Advance to the next tuple on the current page; or, if done, try to
-	 * read data from the next or previous page based on the scan direction.
-	 * Before moving to the next or previous page, make sure we deal with
-	 * all the killed items.
-	 */
-	if (ScanDirectionIsForward(dir))
-	{
-		if (++so->currPos.itemIndex > so->currPos.lastItem)
-		{
-			if (so->numKilled > 0)
-				vertex_hash_kill_items(scan);
-
-			blkno = so->currPos.nextPage;
-			if (BlockNumberIsValid(blkno))
-			{
-				buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
-				TestForOldSnapshot(scan->rs_snapshot, rel, BufferGetPage(buf));
-				if (!vertex_hash_readpage(scan, &buf, dir))
-					end_of_scan = true;
-			}
-			else
-				end_of_scan = true;
-		}
-	}
-	else
-	{
-		if (--so->currPos.itemIndex < so->currPos.firstItem)
-		{
-			if (so->numKilled > 0)
-				vertex_hash_kill_items(scan);
-
-			blkno = so->currPos.prevPage;
-			if (BlockNumberIsValid(blkno))
-			{
-				buf = _hash_getbuf(rel, blkno, HASH_READ,
-								   LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
-				TestForOldSnapshot(scan->rs_snapshot, rel, BufferGetPage(buf));
-
-				/*
-				 * We always maintain the pin on the bucket page for the
-				 * whole scan operation, so release the additional pin we
-				 * have acquired here.
-				 */
-				if (buf == so->hashso_bucket_buf ||
-					buf == so->hashso_split_bucket_buf)
-					_hash_dropbuf(rel, buf);
-
-				if (!vertex_hash_readpage(scan, &buf, dir))
-					end_of_scan = true;
-			}
-			else
-				end_of_scan = true;
-		}
-	}
-
-	if (end_of_scan)
-	{
-		//_hash_dropscanbuf(rel, (TableScanDesc) so);  TODO
-		HashScanPosInvalidate(so->currPos);
-		return false;
-	}
-
-	/* OK, itemIndex says what to return */
-	currItem = &so->currPos.items[so->currPos.itemIndex];
-	//scan->xs_heaptid = currItem->heapTid;
-
-	return true;
-}
-
 /*
  * Advance to next page in a bucket, if any. If we are scanning the bucket
  * being populated during split operation then this function advances to the
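For context: the removed vertex_hash_next() mirrors the contract of PostgreSQL's _hash_next(), so a caller pumps it once per tuple and stops when it returns false. Below is a minimal, hypothetical driver sketch; the scan_all_vertices name, the table_beginscan()/table_endscan() framing, and the TID hand-off are assumptions for illustration, not part of this commit.

#include "postgres.h"
#include "access/tableam.h"

/* Hypothetical sketch: drive a forward scan through the (now removed)
 * vertex_hash_next().  Each true return means the scan state holds the
 * next matching item; false means the scan is exhausted, with only the
 * bucket-page pin still held. */
static void
scan_all_vertices(Relation rel, Snapshot snapshot)
{
	TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);

	while (vertex_hash_next(scan, ForwardScanDirection))
	{
		/* Consume the current item, e.g. fetch the heap tuple by TID.
		 * Note the removed code had the xs_heaptid assignment commented
		 * out, so how the TID was published here is an assumption. */
	}

	table_endscan(scan);
}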