<rt id="bn8ez"></rt>
<label id="bn8ez"></label>

  • <span id="bn8ez"></span>

    <label id="bn8ez"><meter id="bn8ez"></meter></label>

    so true

    心懷未來,開創未來!
    隨筆 - 160, 文章 - 0, 評論 - 40, 引用 - 0
    數據加載中……

    bash實現線程池

    #!/bin/bash
    # Simple thread pool: limit concurrent background jobs to THREAD_NUM using
    # a FIFO as a token bucket — one newline in the FIFO equals one free slot.
    THREAD_NUM=5
    # Make a unique FIFO; unlink the path right after opening it so only the
    # open descriptor (fd 9) keeps it alive and no cleanup is needed on exit.
    tfile=$(mktemp /tmp/.foo.XXXXXXXX)
    rm -f "$tfile"
    mkfifo "$tfile"
    exec 9<>"$tfile"
    rm -f "$tfile"
    # Seed the bucket with THREAD_NUM tokens.
    for ((i = 0; i < THREAD_NUM; i++)); do
      echo >&9
    done
    # For each line on stdin: take a token (blocks when the pool is full),
    # run the job in the background, and put the token back when it finishes.
    while IFS= read -r line; do
      read -u 9
      {
        echo "$(date +%F_%T) $line" && sleep $((RANDOM % 5)) # do your work
        echo >&9
      } &
    done
    wait
    =========================================================
    # Job pooling for bash shell scripts
    # This script provides a job pooling functionality where you can keep up to n
    # processes/functions running in parallel so that you don't saturate a system
    # with concurrent processes.
    #
    # Got inspiration from http://stackoverflow.com/questions/6441509/how-to-write-a-process-pool-bash-shell
    #
    # Copyright (c) 2012 Vince Tse
    # with changes by Geoff Clements (c) 2014
    #
    # Permission is hereby granted, free of charge, to any person obtaining a copy
    # of this software and associated documentation files (the "Software"), to deal
    # in the Software without restriction, including without limitation the rights
    # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    # copies of the Software, and to permit persons to whom the Software is
    # furnished to do so, subject to the following conditions:
    #
    # The above copyright notice and this permission notice shall be included in
    # all copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    # THE SOFTWARE.
    # end-of-jobs marker: sentinel "command" that tells workers to exit
    job_pool_end_of_jobs="JOBPOOL_END_OF_JOBS"
    # job queue (a fifo) used to send jobs to the workers
    job_pool_job_queue=/tmp/job_pool_job_queue_$$
    # file where each job's exit status is written (one line per job)
    job_pool_result_log=/tmp/job_pool_result_log_$$
    # toggle command echoing: 1 = echo each command as it runs, 0 = quiet
    job_pool_echo_command=0
    # number of parallel jobs allowed. also used to determine if job_pool_init
    # has been called when jobs are queued (-1 means "not initialized yet").
    job_pool_pool_size=-1
    # \brief number of jobs that exited non-zero (set by _job_pool_print_result_log)
    job_pool_nerrors=0
    ################################################################################
    # private functions
    ################################################################################
    # \brief debug output: echoes its arguments only when command echoing is on
    # \param[in] $@ the message to print
    function _job_pool_echo()
    {
        if [[ "${job_pool_echo_command}" == "1" ]]; then
            # quote "$@" so arguments are not re-split or glob-expanded
            echo "$@"
        fi
    }
    # \brief cleans up: removes the job queue fifo and the result log
    function _job_pool_cleanup()
    {
        # quote the paths and use -- so nothing is word-split or parsed as options
        rm -f -- "${job_pool_job_queue}" "${job_pool_result_log}"
    }
    # \brief signal handler: stop the workers, then remove the temp files.
    # Intended to be installed with `trap` so the pool shuts down cleanly.
    function _job_pool_exit_handler()
    {
    _job_pool_stop_workers
    _job_pool_cleanup
    }
    # \brief prints the exit-code log and counts failures.
    # Sets the global job_pool_nerrors to the number of jobs that exited
    # non-zero (log lines prefixed with "ERROR"), then prints the log with
    # the ERROR prefix stripped. Reads the global job_pool_result_log.
    function _job_pool_print_result_log()
    {
        # grep -c counts matching lines directly (no need for grep | wc -l);
        # it prints 0 when there are no matches.
        job_pool_nerrors=$(grep -c '^ERROR' "${job_pool_result_log}")
        # sed reads the file itself — no useless cat
        sed -e 's/^ERROR//' "${job_pool_result_log}"
    }
    # \brief the worker function that is called when we fork off worker processes
    # \param[in] id the worker ID
    # \param[in] job_queue the fifo to read jobs from
    # \param[in] result_log the temporary log file to write exit codes to
    function _job_pool_worker()
    {
    local id=$1
    local job_queue=$2
    local result_log=$3
    local cmd=
    local args=
    # hold a read-write descriptor on the fifo; it also serves as the flock target
    exec 7<> ${job_queue}
    while [[ "${cmd}" != "${job_pool_end_of_jobs}" && -e "${job_queue}" ]]; do
    # workers block on the exclusive lock to read the job queue
    flock --exclusive 7
    # records are \v-delimited (see job_pool_run): first field is the command,
    # the remainder of the line is its arguments
    IFS=$'\v'
    read cmd args <${job_queue}
    set -- ${args}
    unset IFS
    flock --unlock 7
    # the worker should exit if it sees the end-of-job marker or run the
    # job otherwise and save its exit code to the result log.
    if [[ "${cmd}" == "${job_pool_end_of_jobs}" ]]; then
    # write it one more time for the next sibling so that everyone
    # will know we are exiting.
    echo "${cmd}" >&7
    else
    _job_pool_echo "### _job_pool_worker-${id}: ${cmd}"
    # run the job
    { ${cmd} "$@" ; }
    # now check the exit code and prepend "ERROR" to the result log entry
    # which we will use to count errors and then strip out later.
    local result=$?
    local status=
    if [[ "${result}" != "0" ]]; then
    status=ERROR
    fi
    # now write the error to the log, making sure multiple processes
    # don't trample over each other.
    exec 8<> ${result_log}
    flock --exclusive 8
    # BUGFIX: use a plain, unconditional echo here. The original used
    # _job_pool_echo, which is a no-op when job_pool_echo_command=0, so the
    # result log stayed empty and job_pool_nerrors was always 0. The log is
    # parsed by _job_pool_print_result_log and must always be written.
    echo "${status}job_pool: exited ${result}: ${cmd} $@" >> ${result_log}
    flock --unlock 8
    exec 8>&-
    _job_pool_echo "### _job_pool_worker-${id}: exited ${result}: ${cmd} $@"
    fi
    done
    exec 7>&-
    }
    # \brief sends message to worker processes to stop.
    # Appends the end-of-jobs marker to the queue; the first worker that reads
    # it re-posts it for its siblings (see _job_pool_worker) and exits, so one
    # marker drains the whole pool. `wait` then reaps all worker processes.
    function _job_pool_stop_workers()
    {
    # send message to workers to exit, and wait for them to stop before
    # doing cleanup.
    echo ${job_pool_end_of_jobs} >> ${job_pool_job_queue}
    wait
    }
    # \brief fork off the workers (job_pool_pool_size background processes)
    # \param[in] job_queue the fifo used to send jobs to the workers
    # \param[in] result_log the temporary log file to write exit codes to
    function _job_pool_start_workers()
    {
        local job_queue=$1
        local result_log=$2
        # keep the loop counter local so it cannot clobber a caller's "i"
        local i
        for ((i = 0; i < job_pool_pool_size; i++)); do
            _job_pool_worker "${i}" "${job_queue}" "${result_log}" &
        done
    }
    ################################################################################
    # public functions
    ################################################################################
    # \brief initializes the job pool
    # \param[in] pool_size number of parallel jobs allowed (defaults to 1)
    # \param[in] echo_command 1 to turn on echo, 0 to turn off (defaults to 0)
    function job_pool_init()
    {
        local pool_size=$1
        local echo_command=$2
        # set the global attributes (:= supplies the documented defaults)
        job_pool_pool_size=${pool_size:=1}
        job_pool_echo_command=${echo_command:=0}
        # create the fifo job queue and create the exit code log;
        # quote the paths so rm -rf can never word-split into stray targets
        rm -rf -- "${job_pool_job_queue}" "${job_pool_result_log}"
        mkfifo "${job_pool_job_queue}"
        touch "${job_pool_result_log}"
        # fork off the workers
        _job_pool_start_workers "${job_pool_job_queue}" "${job_pool_result_log}"
    }
    # \brief waits for all queued up jobs to complete and shuts down the job pool.
    # Prints the result log (which sets job_pool_nerrors) and removes the
    # queue fifo and the log file.
    function job_pool_shutdown()
    {
    _job_pool_stop_workers
    _job_pool_print_result_log
    _job_pool_cleanup
    }
    # \brief run a job in the job pool
    # \param[in] $@ the command and its arguments
    # Lazily initializes the pool with defaults if job_pool_init was not called.
    # Each argument is terminated with \v so workers can split the record back
    # apart without breaking on embedded spaces; a newline ends the record.
    function job_pool_run()
    {
        if [[ "${job_pool_pool_size}" == "-1" ]]; then
            job_pool_init
        fi
        # Write the whole record through a single open/append. The original
        # used two separate appends, so two concurrent producers could
        # interleave partial records in the fifo.
        { printf "%s\v" "$@"; echo; } >> "${job_pool_job_queue}"
    }
    # \brief waits for all queued up jobs to complete before starting new jobs.
    # This function actually fakes a wait by telling the workers to exit
    # when done with the jobs and then restarting them — the stop call blocks
    # until every in-flight job has finished, then a fresh pool is forked.
    function job_pool_wait()
    {
    _job_pool_stop_workers
    _job_pool_start_workers ${job_pool_job_queue} ${job_pool_result_log}
    }
    #########################################
    # End of Job Pool
    #########################################

    #!/bin/bash
    . job_pool.sh

    function foobar()
    {
        # do something
        true
    }

    # initialize the job pool to allow 3 parallel jobs, command echoing off
    job_pool_init 3 0

    # run jobs
    job_pool_run sleep 1
    job_pool_run sleep 2
    job_pool_run sleep 3
    job_pool_run foobar
    job_pool_run foobar
    job_pool_run /bin/false

    # wait until all jobs complete before continuing
    job_pool_wait

    # more jobs
    job_pool_run /bin/false
    job_pool_run sleep 1
    job_pool_run sleep 2
    job_pool_run foobar

    # don't forget to shut down the job pool
    job_pool_shutdown

    # check the $job_pool_nerrors for the number of jobs that exited non-zero
    echo "job_pool_nerrors: ${job_pool_nerrors}"

    posted on 2017-07-25 20:19 so true 閱讀(261) 評論(0)  編輯  收藏 所屬分類: Linux

    主站蜘蛛池模板: 国产免费69成人精品视频| 免费一级特黄特色大片在线观看| 亚洲欧洲视频在线观看| 免费精品国产自产拍在 | 全免费一级毛片在线播放| 美女黄网站人色视频免费| 亚洲中文字幕在线第六区| 在线免费观看你懂的| 亚洲avav天堂av在线网毛片| 久久久久国产成人精品亚洲午夜 | 最近中文字幕国语免费完整| 亚洲最大av资源站无码av网址| 亚洲伊人成无码综合网 | 亚洲六月丁香六月婷婷蜜芽| 免费少妇a级毛片人成网| 最近免费中文字幕大全免费| 四虎永久在线精品免费一区二区| 亚洲成AV人片在线观看ww| 午夜老司机免费视频| 久久99热精品免费观看动漫| 丰满亚洲大尺度无码无码专线 | 性xxxxx免费视频播放| 久久久免费的精品| 特级毛片在线大全免费播放| 亚洲综合色区中文字幕| 亚洲免费在线视频| 亚洲男人av香蕉爽爽爽爽| 午夜一区二区免费视频| 91免费在线播放| 免费看搞黄视频网站| 久久精品无码专区免费| 女bbbbxxxx另类亚洲| 亚洲精品美女久久久久久久| 亚洲人成影院在线高清| 亚洲黄色在线观看网站| 亚洲国产成人久久精品影视| 亚洲国产精品乱码一区二区 | 国产亚洲一区二区三区在线不卡| 日本一道高清不卡免费| 成人午夜大片免费7777| 毛片a级毛片免费播放100|