GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
/*
 * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "jvm.h"
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_arm.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "signals_posix.hpp"
#include "utilities/debug.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
#ifndef __ANDROID__
# include <fpu_control.h>
#else
# include "fpu_control.h" // include the local header
#endif
# include <asm/ptrace.h>

#define SPELL_REG_SP "sp"

// Don't #define SPELL_REG_FP for Thumb: fp is not safe to use there, so
// leaving it undefined ensures we never fetch it.
#ifndef __thumb__
#define SPELL_REG_FP "fp"
#endif

address os::current_stack_pointer() {
#if defined(__clang__) || defined(__llvm__)
  void *sp;
  __asm__("mov %0, " SPELL_REG_SP : "=r"(sp));
  return (address) sp;
#else
  register address sp __asm__ (SPELL_REG_SP);
  return sp;
#endif
}
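
// Note on the two asm variants above: the GCC build binds a local variable
// directly to the sp register (the register-asm idiom), while the Clang/LLVM
// build reads sp with an explicit "mov". The split presumably exists because
// Clang does not reliably honor register-asm locals for the stack pointer;
// that rationale is an assumption, the code itself only shows the two idioms.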

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory
  return (char*) -1;
}


#if NGREG == 16
// These definitions are based on the observation that until
// a certain version of GCC, mcontext_t was defined as a structure
// containing a gregs[NGREG] array with 16 elements.
// In later GCC versions mcontext_t was redefined as struct sigcontext,
// and the NGREG constant was changed to 18.
#define arm_pc gregs[15]
#define arm_sp gregs[13]
#define arm_fp gregs[11]
#define arm_r0 gregs[0]
#endif
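
// For reference: the gregs indices above follow the ARM core register
// numbering, where r15 is the program counter, r13 the stack pointer,
// r11 the ARM-mode frame pointer, and r0 the first argument/return
// value register.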

#define ARM_REGS_IN_CONTEXT  16


address os::Posix::ucontext_get_pc(const ucontext_t* uc) {
  return (address)uc->uc_mcontext.arm_pc;
}

void os::Posix::ucontext_set_pc(ucontext_t* uc, address pc) {
  uc->uc_mcontext.arm_pc = (uintx)pc;
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_sp;
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t* uc) {
  return (intptr_t*)uc->uc_mcontext.arm_fp;
}

bool is_safe_for_fp(address pc) {
#ifdef __thumb__
  if (CodeCache::find_blob(pc) != NULL) {
    return true;
  }
  // For Thumb C frames, given an fp we have no idea how to access the frame contents.
  return false;
#else
  // Calling os::address_is_in_vm() here leads to a dladdr call. Calling any libc
  // function during os::get_native_stack() can result in a deadlock if JFR is
  // enabled. For now, be more lenient and allow all pc's. There are other
  // frame sanity checks in shared code, and to date they have been sufficient
  // for other platforms.
  //return os::address_is_in_vm(pc);
  return true;
#endif
}

address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {

  address epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Posix::ucontext_get_pc(uc);
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) {
      intptr_t* fp = os::Linux::ucontext_get_fp(uc);
#ifndef __thumb__
      if (CodeCache::find_blob(epc) == NULL) {
        // It's a C frame. We need to adjust the fp.
        fp += os::C_frame_offset;
      }
#endif
      // Clear FP when stack walking is dangerous so that
      // the frame created will not be walked.
      // However, ensure FP is set correctly when reliable and
      // potentially necessary.
      if (!is_safe_for_fp(epc)) {
        // FP unreliable
        fp = (intptr_t *)NULL;
      }
      *ret_fp = fp;
    }
  } else {
    epc = NULL;
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}
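
// The overload below packages the same pc/sp/fp triple into a frame object.
// Callers pass the ucontext captured at signal time; a NULL context therefore
// produces a frame built from NULLs, which stack walkers treat as invalid.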

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc);
}

frame os::get_sender_for_C_frame(frame* fr) {
#ifdef __thumb__
  // We can't reliably get anything from a Thumb C frame.
  return frame();
#else
  address pc = fr->sender_pc();
  if (!is_safe_for_fp(pc)) {
    return frame(fr->sender_sp(), (intptr_t *)NULL, pc);
  } else {
    return frame(fr->sender_sp(), fr->link() + os::C_frame_offset, pc);
  }
#endif
}
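
// os::C_frame_offset (defined elsewhere in the ARM port) appears to
// compensate for the slot the C compiler's prologue leaves fp pointing at,
// which differs from the link position HotSpot's frame code expects; it is
// applied only to C frames, never to frames found in the code cache.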

//
// This actually returns two frames up. It does not return os::current_frame(),
// which is the actual current frame. Nor does it return our caller,
// os::get_native_stack(). It returns whoever called os::get_native_stack().
// Not very intuitive, but consistent with how this API is implemented on
// other platforms.
//
frame os::current_frame() {
#ifdef __thumb__
  // We can't reliably get anything from a Thumb C frame.
  return frame();
#else
  register intptr_t* fp __asm__ (SPELL_REG_FP);
  // fp is for os::current_frame. We want the fp for our caller.
  frame myframe((intptr_t*)os::current_stack_pointer(), fp + os::C_frame_offset,
                CAST_FROM_FN_PTR(address, os::current_frame));
  frame caller_frame = os::get_sender_for_C_frame(&myframe);

  if (os::is_first_C_frame(&caller_frame)) {
    // Stack is not walkable.
    // The assert below was added because it does not seem like this can ever
    // happen: how could this frame be the first C frame when it is called
    // from C code? If it ever does happen, remove the assert and document
    // here when and why it happens.
    assert(false, "this should never happen");
    return frame();
  }

  // return frame for our caller's caller
  return os::get_sender_for_C_frame(&caller_frame);
#endif
}

extern "C" address check_vfp_fault_instr;
extern "C" address check_vfp3_32_fault_instr;
extern "C" address check_simd_fault_instr;
extern "C" address check_mp_ext_fault_instr;

address check_vfp_fault_instr = NULL;
address check_vfp3_32_fault_instr = NULL;
address check_simd_fault_instr = NULL;
address check_mp_ext_fault_instr = NULL;
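
// These globals are filled in by the CPU feature detection code in the ARM
// port: each holds the address of a probe instruction that raises SIGILL
// when the corresponding VFP/SIMD/multiprocessor extension is absent. The
// signal handler below matches si_addr against them so a failed probe
// reports "feature missing" instead of crashing the VM.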

bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
                                             ucontext_t* uc, JavaThread* thread) {

  if (sig == SIGILL &&
      ((info->si_addr == (caddr_t)check_simd_fault_instr)
       || info->si_addr == (caddr_t)check_vfp_fault_instr
       || info->si_addr == (caddr_t)check_vfp3_32_fault_instr
       || info->si_addr == (caddr_t)check_mp_ext_fault_instr)) {
    // Skip the faulting instruction and the following instruction that sets
    // the return value to success, then set the return value to failure.
    os::Posix::ucontext_set_pc(uc, (address)info->si_addr + 8);
    uc->uc_mcontext.arm_r0 = 0;
    return true;
  }
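  // The fixed +8 skip above assumes each probe is a pair of 4-byte ARM
  // instructions, illustratively something like:
  //   <probe instruction>    @ SIGILLs if the feature is absent
  //   mov   r0, #1           @ reached only when the probe succeeds
  // Forcing r0 (the AAPCS return register) to 0 then reports failure.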

  address stub = NULL;
  address pc = NULL;
  bool unsafe_access = false;

  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->is_in_full_stack(addr)) {
        // stack overflow
        StackOverflow* overflow_state = thread->stack_overflow_state();
        if (overflow_state->in_stack_yellow_reserved_zone(addr)) {
          overflow_state->disable_stack_yellow_reserved_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception. Guard pages will be
            // re-enabled while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the VM or in native code. Return and try to finish.
            return true;
          }
        } else if (overflow_state->in_stack_red_zone(addr)) {
          // Fatal red zone violation. Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          overflow_state->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing a stack address below sp may cause SEGV if the current
          // thread has a MAP_GROWSDOWN stack. This should only happen when
          // the current thread was created by user code with the
          // MAP_GROWSDOWN flag and then attached to the VM. See notes in
          // os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          } else {
            fatal("recursive segv. expanding stack.");
          }
        }
      }
    }
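    // The expanding_stack flag above makes manual expansion re-entrancy
    // safe: a second SEGV arriving while the stack is already being
    // expanded cannot be recovered from and is treated as fatal.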

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // (a fault inside compiled code, the interpreter, or a stub).

      if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
        if ((nm != NULL && nm->has_unsafe_access()) || (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc))) {
          unsafe_access = true;
        }
      } else if (sig == SIGSEGV &&
                 MacroAssembler::uses_implicit_null_check(info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        if (cb != NULL) {
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
        }
      } else if (sig == SIGILL && *(int *)pc == NativeInstruction::zombie_illegal_instruction) {
        // Zombie
        stub = SharedRuntime::get_handle_wrong_method_stub();
      }
    } else if ((thread->thread_state() == _thread_in_vm ||
                thread->thread_state() == _thread_in_native) &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      unsafe_access = true;
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }
  }

  if (unsafe_access && stub == NULL) {
    // It can be an unsafe access, and we haven't found any other suitable
    // exception reason, so assume it is an unsafe access.
    address next_pc = pc + Assembler::InstructionSize;
    if (UnsafeCopyMemory::contains_pc(pc)) {
      next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
    }
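    // ARM/Thumb interworking: a continuation address must have bit 0 set
    // for the CPU to resume in the Thumb instruction set. PSR_T_BIT in the
    // saved CPSR records which state the faulting code was running in.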
#ifdef __thumb__
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      next_pc = (address)((intptr_t)next_pc | 0x1);
    }
#endif

    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
  }

  if (stub != NULL) {
#ifdef __thumb__
    if (uc->uc_mcontext.arm_cpsr & PSR_T_BIT) {
      intptr_t p = (intptr_t)pc | 0x1;
      pc = (address)p;

      // Clear the Thumb mode bit if we're redirected into ARM ISA code.
      if (((intptr_t)stub & 0x1) == 0) {
        uc->uc_mcontext.arm_cpsr &= ~PSR_T_BIT;
      }
    } else {
      // No Thumb2-compiled stubs are triggered from ARM ISA compiled JIT'd
      // code today. The support needs to be added if that changes.
      assert((((intptr_t)stub & 0x1) == 0), "can't return to Thumb code");
    }
#endif

    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
    return true;
  }

  return false;
}

void os::Linux::init_thread_fpu_state(void) {
  os::setup_fpu();
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
  // Nothing to do
}
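
// setup_fpu() below zeroes the VFP FPSCR: the rounding mode bits become
// round-to-nearest, and the flush-to-zero and exception-trap enable bits
// are cleared, which is the IEEE-754 compliant mode the JVM requires.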

void os::setup_fpu() {
#if !defined(__SOFTFP__) && defined(__VFP_FP__)
  // Turn on IEEE-754 compliant VFP mode
  __asm__ volatile (
    "mov r0, #0;"
    "fmxr fpscr, r0"
    : /* no output */ : /* no input */ : "r0"
  );
#endif
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

// Minimum usable stack sizes required to get to user code. Space for
// HotSpot guard pages is added later.
size_t os::Posix::_compiler_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_java_thread_min_stack_allowed = (32 DEBUG_ONLY(+ 4)) * K;
size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
  return s;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;
  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
  intx* reg_area = (intx*)&uc->uc_mcontext.arm_r0;
  for (int r = 0; r < ARM_REGS_IN_CONTEXT; r++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
  }
#define U64_FORMAT "0x%016llx"
  // now print flag register
  st->print_cr(" %-4s = 0x%08lx", "cpsr", uc->uc_mcontext.arm_cpsr);
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Posix::ucontext_get_pc(uc);
  print_instructions(st, pc, Assembler::InstructionSize);
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  intx* reg_area = (intx*)&uc->uc_mcontext.arm_r0;

  st->print_cr("Register to memory mapping:");
  st->cr();
  for (int r = 0; r < ARM_REGS_IN_CONTEXT; r++) {
    st->print_cr(" %-3s = " INTPTR_FORMAT, as_Register(r)->name(), reg_area[r]);
    print_location(st, reg_area[r]);
    st->cr();
  }
  st->cr();
}


typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(compare_value, exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
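
// Bootstrap pattern shared by the atomic_* functions in this file: while the
// StubRoutines entry is still NULL the VM is single-threaded (see the assert
// above), so a plain load/store stands in for the atomic operation. Once the
// stub has been generated, the function pointer is patched so every later
// call goes straight to the stub and the bootstrap path is never taken again.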

typedef int64_t load_long_func_t(const volatile int64_t*);

load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;

int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
  // try to use the stub:
  load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());

  if (func != NULL) {
    os::atomic_load_long_func = func;
    return (*func)(src);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int64_t old_value = *src;
  return old_value;
}

typedef void store_long_func_t(int64_t, volatile int64_t*);

store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;

void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
  // try to use the stub:
  store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());

  if (func != NULL) {
    os::atomic_store_long_func = func;
    return (*func)(val, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  *dest = val;
}

typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);

atomic_add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;

int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
  atomic_add_func_t* func = CAST_TO_FN_PTR(atomic_add_func_t*,
                                           StubRoutines::atomic_add_entry());
  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }

  int32_t old_value = *dest;
  *dest = old_value + add_value;
  return (old_value + add_value);
}

typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);

atomic_xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;

int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
  atomic_xchg_func_t* func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
                                            StubRoutines::atomic_xchg_entry());
  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }

  int32_t old_value = *dest;
  *dest = exchange_value;
  return (old_value);
}

typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);

cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;

int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(compare_value, exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  int32_t old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}


#ifndef PRODUCT
void os::verify_stack_alignment() {
}
#endif

int os::extra_bang_size_in_bytes() {
  // ARM does not require an additional stack bang.
  return 0;
}