New file: src/share/vm/oops/instanceKlass.inline.hpp
/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The iteration over the oops in objects is a hot path in the GC code.
// By force inlining the following functions, we get similar GC performance
// as the previous macro based implementation.
#ifdef TARGET_COMPILER_visCPP
#define INLINE __forceinline
#else
#define INLINE inline
#endif

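// All iterators below are parameterized over:
//  - nv: when true, Devirtualizer<nv> binds statically to the closure's
//        non-virtual do_oop_nv entry point, avoiding a virtual call per
//        visited field; when false, the virtual do_oop is used.
//  - T:  the in-memory representation of an oop field, narrowOop when
//        compressed oops are in use, otherwise oop.

// Iterate over the oop fields described by a single OopMapBlock, in
// increasing address order.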
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

#if INCLUDE_ALL_GCS
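// As above, but visit the oop fields of one map block in decreasing
// address order. Only needed by the collectors guarded by INCLUDE_ALL_GCS.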
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  T*       p     = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
#endif

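// As the forward version, but clamp the iteration to the oop fields that
// fall within the memory region mr.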
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  T* end = p + map->count();

  T* const l = (T*)mr.start();
  T* const h = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

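// Visit all oop fields of obj by walking each of its nonstatic oop maps.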
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  }
}

#if INCLUDE_ALL_GCS
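// Walk the oop maps, and the fields within each map, in reverse order.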
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
  }
}
#endif

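// Walk all oop maps, visiting only the fields that lie within mr.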
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
  }
}

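// Select the field representation once per object: narrowOop when compressed
// oops are enabled, oop otherwise.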
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
  }
}

#if INCLUDE_ALL_GCS
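// Reverse counterpart of oop_oop_iterate_oop_maps.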
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
  }
}
#endif

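// Bounded counterpart of oop_oop_iterate_oop_maps.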
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
  } else {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
  }
}

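// Forward iteration entry point: optionally visits the object's metadata
// (its klass), then all oop fields. Returns the object size in heap words.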
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, obj->klass());
  }

  oop_oop_iterate_oop_maps<nv>(obj, closure);

  return size_helper();
}

#if INCLUDE_ALL_GCS
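// Reverse iteration entry point. Closures that want their metadata visited
// are not supported here, as the assert below documents.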
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer<nv>::do_metadata(closure),
      "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);

  return size_helper();
}
#endif

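// Bounded iteration entry point: the klass is only visited if obj itself
// lies within mr, and field iteration is clamped to mr.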
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv>::do_klass(closure, obj->klass());
    }
  }

  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);

  return size_helper();
}

#undef INLINE


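// The macros below stamp out the suffix-named member functions declared for
// InstanceKlass (nv_suffix is _nv or _v; nvs_to_bool maps it to the boolean
// template argument). They are expected to be expanded once per concrete
// closure type, e.g. from instanceKlass.cpp via the closure enumeration
// macros (a sketch, assuming the ALL_OOP_OOP_ITERATE_CLOSURES_* macros from
// memory/specialized_oop_closures.hpp):
//
//   ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN)
//   ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN)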
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)              \
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                    \
}

#if INCLUDE_ALL_GCS
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {  \
  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);                      \
}
#else
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif

#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)                              \
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) {  \
  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);                          \
}

#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  InstanceKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix)   \
  InstanceKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix)   \
  InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP