blob: dcf5357de7e7cdbfcbde16bd4bc6dd4765bf4646 [file] [log] [blame]
Ian Rogers1d54e732013-05-02 21:10:01 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
18#define ART_RUNTIME_GC_SPACE_SPACE_H_
Ian Rogers1d54e732013-05-02 21:10:01 -070019
20#include <string>
21
22#include "UniquePtr.h"
23#include "base/macros.h"
24#include "base/mutex.h"
25#include "gc/accounting/space_bitmap.h"
26#include "globals.h"
27#include "image.h"
28#include "mem_map.h"
29
30namespace art {
31namespace mirror {
32 class Object;
33} // namespace mirror
34
35namespace gc {
36
Ian Rogers1d54e732013-05-02 21:10:01 -070037class Heap;
38
39namespace space {
40
// Forward declarations of the concrete space types declared in this file and in the
// per-space headers (bump_pointer_space.h, dlmalloc_space.h, image_space.h, etc.).
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class ContinuousSpace;
class DiscontinuousSpace;
class MallocSpace;
class DlMallocSpace;
class RosAllocSpace;
class ImageSpace;
class LargeObjectSpace;
class ZygoteSpace;

// Extra space sanity checking (bounds checks, DCHECK-style validation in the space
// implementations) is enabled only in debug builds.
static constexpr bool kDebugSpaces = kIsDebugBuild;
Ian Rogers1d54e732013-05-02 21:10:01 -070054
// See Space::GetGcRetentionPolicy.
enum GcRetentionPolicy {
  // Objects are retained forever with this policy for a space.
  kGcRetentionPolicyNeverCollect,
  // Every GC cycle will attempt to collect objects in this space.
  kGcRetentionPolicyAlwaysCollect,
  // Objects will be considered for collection only in "full" GC cycles, ie faster partial
  // collections won't scan these areas such as the Zygote.
  kGcRetentionPolicyFullCollect,
};
// Pretty-printer for logging/debug output; defined in the corresponding .cc file.
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
66
// Discriminator returned by Space::GetType(); used by the Is*/As* helpers below to
// identify a space without RTTI.
enum SpaceType {
  kSpaceTypeImageSpace,        // Memory-mapped boot image (see IsImageSpace).
  kSpaceTypeMallocSpace,       // Malloc-backed allocation space (see IsMallocSpace).
  kSpaceTypeZygoteSpace,       // Space allocated into by the Zygote (see IsZygoteSpace).
  kSpaceTypeBumpPointerSpace,  // Bump-pointer allocation space (see IsBumpPointerSpace).
  kSpaceTypeLargeObjectSpace,  // Discontinuous large-object space (see IsLargeObjectSpace).
};
// Pretty-printer for logging/debug output; defined in the corresponding .cc file.
std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
75
// A space contains memory allocated for managed objects.
class Space {
 public:
  // Dump space. Also key method for C++ vtables.
  virtual void Dump(std::ostream& os) const;

  // Name of the space. May vary, for example before/after the Zygote fork.
  const char* GetName() const {
    return name_.c_str();
  }

  // The policy of when objects are collected associated with this space.
  GcRetentionPolicy GetGcRetentionPolicy() const {
    return gc_retention_policy_;
  }

  // Is the given object contained within this space?
  virtual bool Contains(const mirror::Object* obj) const = 0;

  // The kind of space this is: image, alloc, zygote, large object.
  virtual SpaceType GetType() const = 0;

  // Is this an image space, ie one backed by a memory mapped image file.
  bool IsImageSpace() const {
    return GetType() == kSpaceTypeImageSpace;
  }
  // Down-cast to ImageSpace; defined out of line. Callers check IsImageSpace() first.
  ImageSpace* AsImageSpace();

  // Is this a malloc backed allocation space (type kSpaceTypeMallocSpace)?
  bool IsMallocSpace() const {
    SpaceType type = GetType();
    return type == kSpaceTypeMallocSpace;
  }
  MallocSpace* AsMallocSpace();

  // Default false; presumably overridden to return true by the dlmalloc-backed space
  // (see DlMallocSpace) — the override lives outside this header.
  virtual bool IsDlMallocSpace() const {
    return false;
  }
  virtual DlMallocSpace* AsDlMallocSpace();

  // Default false; presumably overridden to return true by the rosalloc-backed space
  // (see RosAllocSpace) — the override lives outside this header.
  virtual bool IsRosAllocSpace() const {
    return false;
  }
  virtual RosAllocSpace* AsRosAllocSpace();

  // Is this the space allocated into by the Zygote and no-longer in use for allocation?
  bool IsZygoteSpace() const {
    return GetType() == kSpaceTypeZygoteSpace;
  }
  virtual ZygoteSpace* AsZygoteSpace();

  // Is this space a bump pointer space?
  bool IsBumpPointerSpace() const {
    return GetType() == kSpaceTypeBumpPointerSpace;
  }
  virtual BumpPointerSpace* AsBumpPointerSpace();

  // Does this space hold large objects and implement the large object space abstraction?
  bool IsLargeObjectSpace() const {
    return GetType() == kSpaceTypeLargeObjectSpace;
  }
  LargeObjectSpace* AsLargeObjectSpace();

  // True if this space has a contiguous address range (see ContinuousSpace, which
  // overrides this to return true).
  virtual bool IsContinuousSpace() const {
    return false;
  }
  ContinuousSpace* AsContinuousSpace();

  // True for spaces whose objects are scattered through virtual memory (see
  // DiscontinuousSpace, which overrides this to return true).
  virtual bool IsDiscontinuousSpace() const {
    return false;
  }
  DiscontinuousSpace* AsDiscontinuousSpace();

  // True if this space also implements the AllocSpace allocation interface
  // (see ContinuousMemMapAllocSpace, which overrides this to return true).
  virtual bool IsAllocSpace() const {
    return false;
  }
  virtual AllocSpace* AsAllocSpace();

  // True only for ContinuousMemMapAllocSpace and its subclasses (it overrides this).
  virtual bool IsContinuousMemMapAllocSpace() const {
    return false;
  }
  virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace();

  // Returns true if objects in the space are movable.
  virtual bool CanMoveObjects() const = 0;

  virtual ~Space() {}

 protected:
  Space(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Mutable because the policy is changed at runtime, e.g. around the Zygote fork.
  void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
    gc_retention_policy_ = gc_retention_policy;
  }

  // Name of the space that may vary due to the Zygote fork.
  std::string name_;

 protected:
  // Context passed through the per-object sweep callbacks; records what was freed.
  struct SweepCallbackContext {
   public:
    SweepCallbackContext(bool swap_bitmaps, space::Space* space);
    const bool swap_bitmaps;       // Whether live/mark bitmaps were swapped for this sweep.
    space::Space* const space;     // The space being swept.
    Thread* const self;            // Thread performing the sweep (set by the constructor).
    size_t freed_objects;          // Accumulated count of objects freed.
    size_t freed_bytes;            // Accumulated bytes freed.
  };

  // When should objects within this space be reclaimed? Not constant as we vary it in the case
  // of Zygote forking.
  GcRetentionPolicy gc_retention_policy_;

 private:
  friend class art::gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
194
// AllocSpace interface. Pure-virtual mixin implemented by spaces that support
// allocation and freeing of managed objects.
class AllocSpace {
 public:
  // Number of bytes currently allocated.
  virtual uint64_t GetBytesAllocated() = 0;
  // Number of objects currently allocated.
  virtual uint64_t GetObjectsAllocated() = 0;

  // Allocate num_bytes without allowing growth. If the allocation
  // succeeds, the output parameter bytes_allocated will be set to the
  // actually allocated bytes which is >= num_bytes.
  // Alloc can be called from multiple threads at the same time and must be thread-safe.
  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) = 0;

  // Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
  // Default implementation simply forwards to the thread-safe Alloc.
  virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                            size_t* usable_size)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Alloc(self, num_bytes, bytes_allocated, usable_size);
  }

  // Return the storage space required by obj.
  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;

  // Returns how many bytes were freed.
  virtual size_t Free(Thread* self, mirror::Object* ptr) = 0;

  // Returns how many bytes were freed (bulk variant of Free for num_ptrs objects).
  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for the given
  // thread, if the alloc space implementation uses any.
  virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;

  // Revoke any sort of thread-local buffers that are used to speed up allocations for all the
  // threads, if the alloc space implementation uses any.
  virtual void RevokeAllThreadLocalBuffers() = 0;

 protected:
  AllocSpace() {}
  virtual ~AllocSpace() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(AllocSpace);
};
241
// Continuous spaces have bitmaps, and an address range. Although not required, objects within
// continuous spaces can be marked in the card table.
class ContinuousSpace : public Space {
 public:
  // Address at which the space begins.
  byte* Begin() const {
    return begin_;
  }

  // Current address at which the space ends, which may vary as the space is filled.
  byte* End() const {
    return end_;
  }

  // The end of the address range covered by the space.
  byte* Limit() const {
    return limit_;
  }

  // Change the end of the space. Be careful with use since changing the end of a space to an
  // invalid value may break the GC.
  void SetEnd(byte* end) {
    end_ = end;
  }

  // Change the upper bound of the address range. Same caution as SetEnd applies.
  void SetLimit(byte* limit) {
    limit_ = limit;
  }

  // Current size of space: bytes between Begin() and End().
  size_t Size() const {
    return End() - Begin();
  }

  // Bitmaps tracking live/marked objects; supplied by concrete subclasses.
  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;

  // Maximum which the mapped space can grow to.
  virtual size_t Capacity() const {
    return Limit() - Begin();
  }

  // Is object within this space? We check to see if the pointer is beyond the end first as
  // continuous spaces are iterated over from low to high.
  // Note: the upper bound used is Limit(), not End(), i.e. the whole reserved range.
  bool HasAddress(const mirror::Object* obj) const {
    const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
    return byte_ptr >= Begin() && byte_ptr < Limit();
  }

  // Containment for continuous spaces is a simple address-range check.
  bool Contains(const mirror::Object* obj) const {
    return HasAddress(obj);
  }

  virtual bool IsContinuousSpace() const {
    return true;
  }

  virtual ~ContinuousSpace() {}

 protected:
  ContinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy,
                  byte* begin, byte* end, byte* limit) :
      Space(name, gc_retention_policy), begin_(begin), end_(end), limit_(limit) {
  }

  // The beginning of the storage for fast access.
  byte* begin_;

  // Current end of the space.
  // volatile: end_ is read/updated without a lock — NOTE(review): relies on raw
  // volatile rather than atomics; confirm the synchronization story with callers.
  byte* volatile end_;

  // Limit of the space.
  byte* limit_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
};
319
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
// the card table can't cover these objects and so the write barrier shouldn't be triggered. This
// is suitable for use for large primitive arrays.
class DiscontinuousSpace : public Space {
 public:
  // Bitmap of objects currently considered live in this space.
  accounting::LargeObjectBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Bitmap of objects marked during the current GC.
  accounting::LargeObjectBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  virtual bool IsDiscontinuousSpace() const OVERRIDE {
    return true;
  }

  virtual ~DiscontinuousSpace() {}

 protected:
  DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);

  // Owned live/mark bitmaps; accessors above return non-owning pointers.
  UniquePtr<accounting::LargeObjectBitmap> live_bitmap_;
  UniquePtr<accounting::LargeObjectBitmap> mark_bitmap_;

 private:
  DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
};
348
// A continuous space whose storage is backed by a single MemMap (anonymous mmap).
class MemMapSpace : public ContinuousSpace {
 public:
  // Size of the space without a limit on its growth. By default this is just the Capacity, but
  // for the allocation space we support starting with a small heap and then extending it.
  virtual size_t NonGrowthLimitCapacity() const {
    return Capacity();
  }

  // Non-owning access to the underlying mapping.
  MemMap* GetMemMap() {
    return mem_map_.get();
  }

  const MemMap* GetMemMap() const {
    return mem_map_.get();
  }

  // Transfers ownership of the mapping to the caller; this space no longer owns it.
  MemMap* ReleaseMemMap() {
    return mem_map_.release();
  }

 protected:
  // Takes ownership of mem_map.
  MemMapSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit,
              GcRetentionPolicy gc_retention_policy)
      : ContinuousSpace(name, gc_retention_policy, begin, end, limit),
        mem_map_(mem_map) {
  }

  // Underlying storage of the space
  UniquePtr<MemMap> mem_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
};
382
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
 public:
  bool IsAllocSpace() const OVERRIDE {
    return true;
  }
  AllocSpace* AsAllocSpace() OVERRIDE {
    return this;
  }

  bool IsContinuousMemMapAllocSpace() const OVERRIDE {
    return true;
  }
  ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() {
    return this;
  }

  // Bitmap bind/unbind used around collections; all require the heap bitmap lock.
  bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void BindLiveToMarkBitmap()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
  void SwapBitmaps();

  // Clear the space back to an empty space.
  virtual void Clear() = 0;

  // Implements ContinuousSpace::GetLiveBitmap with the bitmap owned below.
  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Implements ContinuousSpace::GetMarkBitmap with the bitmap owned below.
  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
    return mark_bitmap_.get();
  }

  // Sweep dead objects; accumulates freed counts into the out-parameters.
  void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes);
  // Callback invoked per swept object; supplied by concrete subclasses.
  virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;

 protected:
  // Owned bitmaps; temp_bitmap_ presumably holds a stashed bitmap during
  // bind/unbind (see HasBoundBitmaps/UnBindBitmaps) — implementation is out of line.
  UniquePtr<accounting::ContinuousSpaceBitmap> live_bitmap_;
  UniquePtr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
  UniquePtr<accounting::ContinuousSpaceBitmap> temp_bitmap_;

  ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
                             byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
      : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) {
  }

 private:
  friend class gc::Heap;
  DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
};
435
Ian Rogers1d54e732013-05-02 21:10:01 -0700436} // namespace space
437} // namespace gc
438} // namespace art
439
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700440#endif // ART_RUNTIME_GC_SPACE_SPACE_H_