/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif

/* -icount align implementation. */
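/*
 * With -icount align, guest execution is throttled so that the guest's
 * instruction-count clock does not run away from the host's real-time
 * clock.  SyncClocks below tracks the comparison: diff_clk is the
 * difference in ns between the guest clock and the host clock,
 * last_cpu_icount is the instruction count observed at the previous
 * alignment, and realtime_clock caches QEMU_CLOCK_VIRTUAL_RT at
 * initialization.  When the guest runs more than VM_CLOCK_ADVANCE ahead,
 * align_clocks() sleeps the host thread so real time can catch up.
 */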

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
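
/* The return value of cpu_tb_exec() packs two things into one word: the
 * low bits (TB_EXIT_MASK) carry the exit condition (a TB_EXIT_* value)
 * and the remaining bits point at the last TranslationBlock executed.
 * Callers split it with the pattern used in cpu_exec() below (with
 * illustrative variable names):
 *
 *     uintptr_t next_tb = cpu_tb_exec(cpu, tb->tc_ptr);
 *     TranslationBlock *last_tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     int exit_flag = next_tb & TB_EXIT_MASK;
 */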

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE);
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
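
/* Note that the TB generated here is deliberately transient: it is created
 * with CF_NOCACHE, run once for at most max_cycles instructions, and then
 * immediately invalidated and freed rather than entering the TB caches.
 */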

static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* add the TB to the virtual PC hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}
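
/* TB lookup is thus two-level: tb_find_fast() probes the per-CPU
 * tb_jmp_cache array hashed by virtual PC; on a miss, tb_find_slow()
 * falls back to the physical-address hash table and, failing that, has
 * tb_gen_code() translate a fresh block.
 */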

static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */
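/*
 * cpu_exec() is built from two nested loops: the outer loop re-enters
 * after every sigsetjmp() landing (cpu_loop_exit() longjmps back here
 * when translated code hits an exception or exit request), while the
 * inner loop services interrupt_request flags, finds or translates the
 * next TB, chains it to the previous TB when that is safe, and runs it.
 */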

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;
    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       or it doesn't return at all and instead exits via
                       longjmp through cpu_loop_exit().  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
            cpu->can_do_io = 1;
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}