/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"

#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.
 */
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
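  // There is no dedicated large-sparse-switch lowering on x86: the small-case
  // lowering emits a compare-and-branch per entry and works for any table
  // size, so simply delegate to it.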
  GenSmallSparseSwitch(mir, table_offset, rl_src);
}

/*
 * Code pattern will look something like:
 *
 * mov  r_val, ..
 * call 0
 * pop  r_start_of_method
 * sub  r_start_of_method, ..
 * mov  r_key_reg, r_val
 * sub  r_key_reg, low_key
 * cmp  r_key_reg, size-1  ; bound check
 * ja   done
 * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add  r_start_of_method, r_disp
 * jmp  r_start_of_method
 * done:
 */
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
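  // Dex packed-switch payload layout: table[0] is the ident (0x0100), table[1]
  // is the number of entries, table[2..3] hold the first (lowest) key as a
  // 32-bit value, followed by `size` 32-bit relative branch targets.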
  // Add the table to the list - we'll process it later.
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->switch_mir = mir;
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  switch_tables_.push_back(tab_rec);

  // Get the switch value.
  rl_src = LoadValue(rl_src, kCoreReg);

  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary.
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }

  // Bounds check - if < 0 or >= size, continue with the code following the
  // switch. The unsigned compare ('ja') catches negative keys as well.
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

  RegStorage addr_for_jump;
  if (cu_->target64) {
    RegStorage table_base = AllocTempWide();
    // Load the address of the table into table_base.
    LIR* lea = RawLIR(current_dalvik_offset_, kX86Lea64RM, table_base.GetReg(), kRIPReg,
                      256, 0, WrapPointer(tab_rec));
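    // The displacement of 256 is only a placeholder; the kFixupSwitchTable
    // fixup set below patches in the real RIP-relative offset of the switch
    // table once final addresses are known.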
    lea->flags.fixup = kFixupSwitchTable;
    AppendLIR(lea);

    // Load the offset from the table out of the table.
    addr_for_jump = AllocTempWide();
    NewLIR5(kX86MovsxdRA, addr_for_jump.GetReg(), table_base.GetReg(), keyReg.GetReg(), 2, 0);
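    // This emits movsxd addr_for_jump, [table_base + keyReg * 4]: each table
    // entry is a 32-bit displacement, sign-extended to 64 bits (the scale
    // field of 2 means 1 << 2 = 4 bytes per entry).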

    // Add the offset from the table to the table base.
    OpRegReg(kOpAdd, addr_for_jump, table_base);
  } else {
    // Materialize a pointer to the switch table.
    RegStorage start_of_method_reg;
    if (base_of_code_ != nullptr) {
      // We can use the saved value.
      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
      rl_method = LoadValue(rl_method, kCoreReg);
      start_of_method_reg = rl_method.reg;
      store_method_addr_used_ = true;
    } else {
      start_of_method_reg = AllocTempRef();
      NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
    }
    // Load the displacement from the switch table.
    addr_for_jump = AllocTemp();
    NewLIR5(kX86PcRelLoadRA, addr_for_jump.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
            2, WrapPointer(tab_rec));
    // Add displacement to start of method.
    OpRegReg(kOpAdd, addr_for_jump, start_of_method_reg);
  }

  // ..and go!
  tab_rec->anchor = NewLIR1(kX86JmpR, addr_for_jump.GetReg());

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
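  // Load Thread::exception_ into the result register through the thread
  // segment register, then store 0 back to clear the pending exception.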
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
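  // Mark the card containing tgt_addr: card_table_base[tgt_addr >> kCardShift]
  // is set to the low byte of the biased card table base, which is arranged so
  // that this byte equals the dirty-card value.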
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with no spare temps.
   */
  const RegStorage arg0 = TargetReg32(kArg0);
  const RegStorage arg1 = TargetReg32(kArg1);
  const RegStorage arg2 = TargetReg32(kArg2);
  LockTemp(arg0);
  LockTemp(arg1);
  LockTemp(arg2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;

  // If we are doing an implicit stack overflow check, perform the load
  // immediately before the stack pointer is decremented and anything is saved.
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
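    // The test only reads memory; it faults if the page `overflow` bytes
    // below the current stack pointer is unmapped, and the fault handler
    // turns a fault at this PC into a StackOverflowError.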
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
                              GetInstructionSetPointerSize(cu_->instruction_set));

  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
        m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
        m2l_->ClobberCallerSave();
        // The throw entrypoint does not return, so there is no safepoint PC
        // to mark, and x86 has no link register to use.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      //   spill ebp
      //   lea ebp, [esp + frame_size]
      //   cmp ebp, fs:[stack_end_]
      //   jcc stack_overflow_exception
      //   mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
      }
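      // Unsigned SP < stack_end_ means SP has dipped into the reserved region
      // at the end of the stack, so take the overflow slow path.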
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rSP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(arg0);
  FreeTemp(arg1);
  FreeTemp(arg2);
}

void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  stack_increment_ = OpRegImm(kOpAdd, rs_rSP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialEntryForSuspend() {
  // Keep 16-byte stack alignment, there's already the return address, so
  //   - for 32-bit push EAX, i.e. ArtMethod*, ESI, EDI,
  //   - for 64-bit push RAX, i.e. ArtMethod*.
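  // 32-bit: 4 (return address) + 3 * 4 (pushes) = 16 bytes.
  // 64-bit: 8 (return address) + 8 (push) = 16 bytes.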
  if (!cu_->target64) {
    DCHECK(!IsTemp(rs_rSI));
    DCHECK(!IsTemp(rs_rDI));
    core_spill_mask_ =
        (1u << rs_rDI.GetRegNum()) | (1u << rs_rSI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 3u;
  } else {
    core_spill_mask_ = (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 1u;
  }
  fp_spill_mask_ = 0u;
  num_fp_spills_ = 0u;
  frame_size_ = 16u;
  core_vmap_table_.clear();
  fp_vmap_table_.clear();
  if (!cu_->target64) {
    NewLIR1(kX86Push32R, rs_rDI.GetReg());
    NewLIR1(kX86Push32R, rs_rSI.GetReg());
  }
  NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
}

void X86Mir2Lir::GenSpecialExitForSuspend() {
  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
  NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
  if (!cu_->target64) {
    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
  }
}

void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
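  // Reading [reg + 0] faults when reg is null; the fault handler recognizes
  // the PC recorded below and raises a NullPointerException instead.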
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
  MarkPossibleNullPointerException(opt_flags);
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                             int state, const MethodReference& target_method,
                             uint32_t,
                             uintptr_t direct_code, uintptr_t direct_method,
                             InvokeType type) {
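  // Called once per invoke-setup step: each call emits at most one
  // instruction for the current `state` and returns the next state, or -1
  // when the sequence is complete.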
  UNUSED(info, direct_code);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_method != static_cast<uintptr_t>(-1)) {
          auto target_reg = cg->TargetReg(kArg0, kRef);
          if (target_reg.Is64Bit()) {
            cg->LoadConstantWide(target_reg, direct_method);
          } else {
            cg->LoadConstant(target_reg, direct_method);
          }
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(arg0_ref);
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadRefDisp(arg0_ref,
                        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadRefDisp(arg0_ref,
                        mirror::ObjectArray<mirror::Object>::OffsetOfElement(
                            target_method.dex_method_index).Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

NextCallInsn X86Mir2Lir::GetNextSDCallInsn() {
  return X86NextSDCallInsn;
}

}  // namespace art