/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "transaction.h"

#include <android-base/logging.h>

#include "aot_class_linker.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "dex/descriptors_names.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"

#include <list>

namespace art HIDDEN {

// TODO: remove (only used for debugging purposes).
static constexpr bool kEnableTransactionStats = false;

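// A transaction records undo information for the heap side effects of running a class
// initializer ahead of time: object and array field writes, intern table insertions and
// removals, and dex cache string/method-type resolutions. Rollback() uses these logs to
// restore the pre-transaction state after an abort.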
Transaction::Transaction(bool strict,
                         mirror::Class* root,
                         ArenaStack* arena_stack,
                         ArenaPool* arena_pool)
    : arena_stack_(std::nullopt),
      allocator_(arena_stack != nullptr ? arena_stack : &arena_stack_.emplace(arena_pool)),
      object_logs_(std::less<mirror::Object*>(), allocator_.Adapter(kArenaAllocTransaction)),
      array_logs_(std::less<mirror::Array*>(), allocator_.Adapter(kArenaAllocTransaction)),
      intern_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_method_type_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      aborted_(false),
      rolling_back_(false),
      heap_(Runtime::Current()->GetHeap()),
      strict_(strict),
      root_(root),
      last_allocated_object_(nullptr) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK_NE(arena_stack != nullptr, arena_pool != nullptr);
}

Transaction::~Transaction() {
  if (kEnableTransactionStats) {
    size_t objects_count = object_logs_.size();
    size_t field_values_count = 0;
    for (const auto& it : object_logs_) {
      field_values_count += it.second.Size();
    }
    size_t array_count = array_logs_.size();
    size_t array_values_count = 0;
    for (const auto& it : array_logs_) {
      array_values_count += it.second.Size();
    }
    size_t intern_string_count =
        std::distance(intern_string_logs_.begin(), intern_string_logs_.end());
    size_t resolve_string_count =
        std::distance(resolve_string_logs_.begin(), resolve_string_logs_.end());
    size_t resolve_method_type_count =
        std::distance(resolve_method_type_logs_.begin(), resolve_method_type_logs_.end());
    LOG(INFO) << "Transaction::~Transaction"
              << ": objects_count=" << objects_count
              << ", field_values_count=" << field_values_count
              << ", array_count=" << array_count
              << ", array_values_count=" << array_values_count
              << ", intern_string_count=" << intern_string_count
              << ", resolve_string_count=" << resolve_string_count
              << ", resolve_method_type_count=" << resolve_method_type_count;
  }
}

void Transaction::Abort(const std::string& abort_message) {
  // We may abort more than once if the exception thrown at the time of the
  // previous abort has been caught during execution of a class initializer.
  // We just keep the message of the first abort because it will cause the
  // transaction to be rolled back anyway.
  if (!aborted_) {
    aborted_ = true;
    abort_message_ = abort_message;
  }
}

void Transaction::ThrowAbortError(Thread* self, const std::string* abort_message) {
  const bool rethrow = (abort_message == nullptr);
  if (kIsDebugBuild && rethrow) {
    CHECK(IsAborted()) << "Rethrow " << DescriptorToDot(kTransactionAbortErrorDescriptor)
                       << " while transaction is not aborted";
  }
  if (rethrow) {
    // Rethrow an exception with the earlier abort message stored in the transaction.
    self->ThrowNewWrappedException(kTransactionAbortErrorDescriptor, GetAbortMessage().c_str());
  } else {
    // Throw an exception with the given abort message.
    self->ThrowNewWrappedException(kTransactionAbortErrorDescriptor, abort_message->c_str());
  }
}

const std::string& Transaction::GetAbortMessage() const {
  return abort_message_;
}

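// Returns true if a write to `obj` violates the transaction's constraints and must be
// rejected: writes into boot image spaces are always disallowed, and in strict mode only
// static fields of the transaction's root class (`root_`) may be modified.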
bool Transaction::WriteConstraint(ObjPtr<mirror::Object> obj) const {
  DCHECK(obj != nullptr);

  // Prevent changes in boot image spaces for app or boot image extension.
  // For boot image there are no boot image spaces and this condition evaluates to false.
  if (heap_->ObjectIsInBootImageSpace(obj)) {
    return true;
  }

  // For apps, also prevent writing to other classes.
  return IsStrict() &&
         obj->IsClass() &&  // no constraint updating instances or arrays
         obj != root_;      // modifying other classes' static field, fail
}

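// Returns true if storing `value` into a field would violate the transaction's value
// constraints. Only boot image extension compilation restricts values: the class of
// `value` must be referenceable from the image being compiled (see
// AotClassLinker::CanReferenceInBootImageExtensionOrAppImage()).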
bool Transaction::WriteValueConstraint(ObjPtr<mirror::Object> value) const {
  if (value == nullptr) {
    return false;  // We can always store null values.
  }
  if (IsStrict()) {
    // TODO: Should we restrict writes the same way as for boot image extension?
    return false;
  } else if (heap_->GetBootImageSpaces().empty()) {
    return false;  // No constraints for boot image.
  } else {
    // Boot image extension.
    ObjPtr<mirror::Class> klass = value->IsClass() ? value->AsClass() : value->GetClass();
    return !AotClassLinker::CanReferenceInBootImageExtensionOrAppImage(klass, heap_);
  }
}

bool Transaction::ReadConstraint(ObjPtr<mirror::Object> obj) const {
  // Read constraints are checked only for static field reads as there are
  // no constraints on reading instance fields and array elements.
  DCHECK(obj->IsClass());
  if (IsStrict()) {
    return obj != root_;  // fail if not self-updating
  } else {
    // For boot image and boot image extension, allow reading any field.
    return false;
  }
}

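// Record that `obj` was allocated inside the transaction. Objects logged as new have no
// pre-transaction state, so subsequent field writes to them are not recorded (see
// ObjectLog::LogValue()).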
void Transaction::RecordNewObject(ObjPtr<mirror::Object> obj) {
  last_allocated_object_ = obj.Ptr();
  ObjectLog log(&allocator_);
  log.MarkAsNewObject();
  object_logs_.Put(obj.Ptr(), std::move(log));
}

void Transaction::RecordNewArray(ObjPtr<mirror::Array> array) {
  if (array->IsObjectArray()) {
    // `ObjectArray<T>::SetWithoutChecks()` uses `SetFieldObject()` which records value
    // changes in `object_logs_`, so we need to record new object arrays as normal objects.
    RecordNewObject(array);
    return;
  }
  last_allocated_object_ = array.Ptr();
  ArrayLog log(&allocator_);
  log.MarkAsNewArray();
  array_logs_.Put(array.Ptr(), std::move(log));
}

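// Returns true if writes to `obj` still need to be logged, i.e. the object was not
// allocated inside the transaction (neither the most recently allocated object nor an
// object already marked as new in `object_logs_`).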
bool Transaction::ObjectNeedsTransactionRecords(ObjPtr<mirror::Object> obj) {
  if (obj == last_allocated_object_) {
    return false;
  }
  auto it = object_logs_.find(obj.Ptr());
  return it == object_logs_.end() || !it->second.IsNewObject();
}

bool Transaction::ArrayNeedsTransactionRecords(ObjPtr<mirror::Array> array) {
  if (array == last_allocated_object_) {
    return false;
  }
  auto it = array_logs_.find(array.Ptr());
  return it == array_logs_.end() || !it->second.IsNewArray();
}

inline Transaction::ObjectLog& Transaction::GetOrCreateObjectLog(mirror::Object* obj) {
  return object_logs_.GetOrCreate(obj, [&]() { return ObjectLog(&allocator_); });
}

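// The RecordWriteField* helpers below log a field value the first time the field is
// written in the transaction; UndoFieldWrite() later writes the recorded value back, so
// callers are expected to pass the field's value prior to the write. Writes to the most
// recently allocated object are skipped since it was created inside the transaction.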
void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
                                          MemberOffset field_offset,
                                          uint8_t value,
                                          bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogBooleanValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldByte(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       int8_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogByteValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldChar(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       uint16_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogCharValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldShort(mirror::Object* obj,
                                        MemberOffset field_offset,
                                        int16_t value,
                                        bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogShortValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteField32(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint32_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.Log32BitsValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteField64(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint64_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.Log64BitsValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldReference(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            mirror::Object* value,
                                            bool is_volatile) {
  DCHECK(obj != nullptr);
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogReferenceValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  DCHECK(!array->IsObjectArray());
  if (array != last_allocated_object_) {
    ArrayLog& array_log = array_logs_.GetOrCreate(array, [&]() { return ArrayLog(&allocator_); });
    array_log.LogValue(index, value);
  }
}

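// Log dex cache resolutions performed inside the transaction so the corresponding
// entries can be cleared again on rollback.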
void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
                                      dex::StringIndex string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
  resolve_string_logs_.emplace_front(dex_cache, string_idx);
}

void Transaction::RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache,
                                          dex::ProtoIndex proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx.index_, dex_cache->GetDexFile()->NumProtoIds());
  resolve_method_type_logs_.emplace_front(dex_cache, proto_idx);
}

void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::LogInternedString(InternStringLog&& log) {
  Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
  intern_string_logs_.push_front(std::move(log));
}

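// Undo all recorded modifications: object and array field writes, intern table changes,
// and dex cache resolutions. Must be called once the transaction is no longer the
// runtime's active transaction.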
void Transaction::Rollback() {
  Thread* self = Thread::Current();
  self->AssertNoPendingException();
  MutexLock mu(self, *Locks::intern_table_lock_);
  rolling_back_ = true;
  CHECK(!Runtime::Current()->IsActiveTransaction());
  UndoObjectModifications();
  UndoArrayModifications();
  UndoInternStringTableModifications();
  UndoResolveStringModifications();
  UndoResolveMethodTypeModifications();
  rolling_back_ = false;
}

void Transaction::UndoObjectModifications() {
  // TODO: we may not need to restore objects allocated during this transaction. Or we
  // could directly remove them from the heap.
  for (const auto& it : object_logs_) {
    it.second.Undo(it.first);
  }
  object_logs_.clear();
}

void Transaction::UndoArrayModifications() {
  // TODO: we may not need to restore arrays allocated during this transaction. Or we
  // could directly remove them from the heap.
  for (const auto& it : array_logs_) {
    it.second.Undo(it.first);
  }
  array_logs_.clear();
}

void Transaction::UndoInternStringTableModifications() {
  InternTable* const intern_table = Runtime::Current()->GetInternTable();
  // We want to undo each operation from the most recent to the oldest. The list was
  // filled with the most recent operation at its front, so we just iterate over it.
  for (const InternStringLog& string_log : intern_string_logs_) {
    string_log.Undo(intern_table);
  }
  intern_string_logs_.clear();
}

void Transaction::UndoResolveStringModifications() {
  for (ResolveStringLog& string_log : resolve_string_logs_) {
    string_log.Undo();
  }
  resolve_string_logs_.clear();
}

void Transaction::UndoResolveMethodTypeModifications() {
  for (ResolveMethodTypeLog& method_type_log : resolve_method_type_logs_) {
    method_type_log.Undo();
  }
  resolve_method_type_logs_.clear();
}

void Transaction::VisitRoots(RootVisitor* visitor) {
  // Transactions are used for single-threaded initialization.
  // This is the only function that should be called from a different thread,
  // namely the GC thread, and it is called with the mutator lock held exclusively,
  // so the data structures in the `Transaction` are protected from concurrent use.
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));

  visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&root_), RootInfo(kRootUnknown));
  visitor->VisitRoot(&last_allocated_object_, RootInfo(kRootUnknown));
  {
    // Create a separate `ArenaStack` for this thread.
    ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
    VisitObjectLogs(visitor, &arena_stack);
    VisitArrayLogs(visitor, &arena_stack);
  }
  VisitInternStringLogs(visitor);
  VisitResolveStringLogs(visitor);
  VisitResolveMethodTypeLogs(visitor);
}

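// Re-key `container` after the GC has moved some of the logged objects: for each
// (old root, new root) pair, extract the map node keyed by the old pointer and re-insert
// it under the new pointer.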
template <typename MovingRoots, typename Container>
void UpdateKeys(const MovingRoots& moving_roots, Container& container) {
  for (const auto& pair : moving_roots) {
    auto* old_root = pair.first;
    auto* new_root = pair.second;
    auto node = container.extract(old_root);
    CHECK(!node.empty());
    node.key() = new_root;
    bool inserted = container.insert(std::move(node)).inserted;
    CHECK(inserted);
  }
}

void Transaction::VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
  ScopedArenaForwardList<ObjectPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  // Visit roots.
  for (auto& it : object_logs_) {
    it.second.VisitRoots(visitor);
    mirror::Object* old_root = it.first;
    mirror::Object* new_root = old_root;
    visitor->VisitRoot(&new_root, RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update object logs with moving roots.
  UpdateKeys(moving_roots, object_logs_);
}

void Transaction::VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
  ScopedArenaForwardList<ArrayPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  for (auto& it : array_logs_) {
    mirror::Array* old_root = it.first;
    mirror::Array* new_root = old_root;
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update array logs with moving roots.
  UpdateKeys(moving_roots, array_logs_);
}

void Transaction::VisitInternStringLogs(RootVisitor* visitor) {
  for (InternStringLog& log : intern_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveStringLogs(RootVisitor* visitor) {
  for (ResolveStringLog& log : resolve_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveMethodTypeLogs(RootVisitor* visitor) {
  for (ResolveMethodTypeLog& log : resolve_method_type_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
  LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) {
  LogValue(ObjectLog::kByte, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) {
  LogValue(ObjectLog::kChar, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) {
  LogValue(ObjectLog::kShort, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) {
  LogValue(ObjectLog::k32Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) {
  LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
                                               mirror::Object* obj,
                                               bool is_volatile) {
  LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}

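// Store the first value recorded for a given field offset; later writes to the same
// field keep the original entry, so Undo() restores the value the field had when it was
// first written in the transaction. Objects created inside the transaction are skipped.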
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
                                      MemberOffset offset,
                                      uint64_t value,
                                      bool is_volatile) {
  if (is_new_object_) {
    return;
  }
  auto it = field_values_.find(offset.Uint32Value());
  if (it == field_values_.end()) {
    ObjectLog::FieldValue field_value;
    field_value.value = value;
    field_value.is_volatile = is_volatile;
    field_value.kind = kind;
    field_values_.emplace(offset.Uint32Value(), std::move(field_value));
  }
}

void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
  for (auto& it : field_values_) {
    // The garbage collector needs to access the object's class and the array's length,
    // so we do not roll back these values.
    MemberOffset field_offset(it.first);
    if (field_offset.Uint32Value() == mirror::Class::ClassOffset().Uint32Value()) {
      // Skip Object::class field.
      continue;
    }
    if (obj->IsArrayInstance() &&
        field_offset.Uint32Value() == mirror::Array::LengthOffset().Uint32Value()) {
      // Skip Array::length field.
      continue;
    }
    const FieldValue& field_value = it.second;
    UndoFieldWrite(obj, field_offset, field_value);
  }
}

void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            const FieldValue& field_value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this case,
  // we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (field_value.kind) {
    case kBoolean:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      } else {
        obj->SetFieldBoolean<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      }
      break;
    case kByte:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldByteVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      } else {
        obj->SetFieldByte<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      }
      break;
    case kChar:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldCharVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      } else {
        obj->SetFieldChar<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      }
      break;
    case kShort:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldShortVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      } else {
        obj->SetFieldShort<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      }
      break;
    case k32Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField32Volatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      } else {
        obj->SetField32<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      }
      break;
    case k64Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField64Volatile<false, kCheckTransaction>(field_offset, field_value.value);
      } else {
        obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value);
      }
      break;
    case kReference:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldObjectVolatile<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      } else {
        obj->SetFieldObject<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      }
      break;
  }
}

void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
  for (auto& it : field_values_) {
    FieldValue& field_value = it.second;
    if (field_value.kind == ObjectLog::kReference) {
      visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
                                  RootInfo(kRootUnknown));
    }
  }
}

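// Reverse an intern table operation recorded during the transaction: strings that were
// inserted are removed again and strings that were removed are re-inserted, preserving
// their strong/weak kind.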
void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  DCHECK(intern_table != nullptr);
  ObjPtr<mirror::String> s = str_.Read();
  uint32_t hash = static_cast<uint32_t>(s->GetStoredHashCode());
  switch (string_op_) {
    case InternStringLog::kInsert: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->RemoveStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->RemoveWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    case InternStringLog::kRemove: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->InsertStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->InsertWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown interned string op";
      UNREACHABLE();
  }
}

void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
  str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}

void Transaction::ResolveStringLog::Undo() const {
  dex_cache_.Read()->ClearString(string_idx_);
}

Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
                                                dex::StringIndex string_idx)
    : dex_cache_(dex_cache),
      string_idx_(string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx_.index_, dex_cache->GetDexFile()->NumStringIds());
}

void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

void Transaction::ResolveMethodTypeLog::Undo() const {
  dex_cache_.Read()->ClearMethodType(proto_idx_);
}

Transaction::ResolveMethodTypeLog::ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache,
                                                        dex::ProtoIndex proto_idx)
    : dex_cache_(dex_cache),
      proto_idx_(proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx_.index_, dex_cache->GetDexFile()->NumProtoIds());
}

void Transaction::ResolveMethodTypeLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

Transaction::InternStringLog::InternStringLog(ObjPtr<mirror::String> s,
                                              StringKind kind,
                                              StringOp op)
    : str_(s),
      string_kind_(kind),
      string_op_(op) {
  DCHECK(s != nullptr);
}

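// Record the value at `index` the first time that element is written in the transaction;
// Undo() writes the recorded values back. Arrays created inside the transaction are not
// tracked.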
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
  if (is_new_array_) {
    return;
  }
  // Add a mapping if there is none yet.
  array_values_.FindOrAdd(index, value);
}

void Transaction::ArrayLog::Undo(mirror::Array* array) const {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
  for (auto it : array_values_) {
    UndoArrayWrite(array, type, it.first, it.second);
  }
}

void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
                                           Primitive::Type array_type,
                                           size_t index,
                                           uint64_t value) const {
  // TODO: We may want to abort a transaction while still being in transaction mode. In this case,
  // we'd need to disable the check.
  constexpr bool kCheckTransaction = false;
  switch (array_type) {
    case Primitive::kPrimBoolean:
      array->AsBooleanArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint8_t>(value));
      break;
    case Primitive::kPrimByte:
      array->AsByteArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int8_t>(value));
      break;
    case Primitive::kPrimChar:
      array->AsCharArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint16_t>(value));
      break;
    case Primitive::kPrimShort:
      array->AsShortArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int16_t>(value));
      break;
    case Primitive::kPrimInt:
      array->AsIntArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int32_t>(value));
      break;
    case Primitive::kPrimFloat:
      array->AsFloatArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<float>(value));
      break;
    case Primitive::kPrimLong:
      array->AsLongArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int64_t>(value));
      break;
    case Primitive::kPrimDouble:
      array->AsDoubleArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<double>(value));
      break;
    case Primitive::kPrimNot:
      LOG(FATAL) << "ObjectArray should be treated as Object";
      UNREACHABLE();
    default:
      LOG(FATAL) << "Unsupported type " << array_type;
      UNREACHABLE();
  }
}

}  // namespace art