An Overview of How xinetd Works

   Many Linux distributions ship with xinetd (the eXtended InterNET services daemon), also known as the super Internet server; it is the successor to inetd.

   This article is a brief, code-level look at how xinetd works. It does not explain how to configure xinetd; if you need configuration details, please search for them yourself.

xinetd does its work in three main steps:

Step 1: Initialize the services

/*
 * Initialize all services
 *
 * This function is either successful in starting some services
 * or it terminates the program.
 */
void init_services( void )
{
   struct configuration conf ;
   const char *func = "init_services" ;
   if ( cnf_get( &conf ) == FAILED )  /* read the configuration file */
   {
      msg( LOG_CRIT, func, "couldn't get configuration. Exiting..." ) ;
      exit( 1 ) ;
   }
   DEFAULTS( ps ) = CNF_DEFAULTS( &conf ) ;
   (void) cnf_start_services( &conf ) ;   /* start services according to the configuration */
   CNF_DEFAULTS( &conf ) = NULL ;      /* to avoid the free by cnf_free */
   cnf_free( &conf ) ;
   /*
    * The number of available/active services is kept by the service functions
    */
   if ( stayalive_option == 0 ) {
      if ( ps.rws.available_services == 0 )
      {
         msg( LOG_CRIT, func, "no services. Exiting..." ) ;
         if ( ps.ros.pid_file ) {
            unlink(ps.ros.pid_file);
         }
         exit( 1 ) ;
      }
   }
   spec_include() ;      /* include special services */
}

   The configuration file defaults to /etc/xinetd.conf; a different file can be supplied on the command line (the -f option).
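   Although configuration is out of scope in this article, for reference a minimal service entry of the kind cnf_get() parses looks roughly like the following (the internal daytime service is only an example):

service daytime
{
        type            = INTERNAL
        id              = daytime-stream
        socket_type     = stream
        protocol        = tcp
        user            = root
        wait            = no
        disable         = no
}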

Step 2: Loop and listen for client requests

/*
 * What main_loop does:
 *
 *      select on all active services
 *      for each socket where a request is pending
 *         try to start a server
 */
static void main_loop(void)
{
   const char      *func = "main_loop" ;
   struct timeval   tv, *tvptr = NULL;
   FD_SET(signals_pending[0], &ps.rws.socket_mask);
   if ( signals_pending[0] > ps.rws.mask_max )
      ps.rws.mask_max = signals_pending[0] ;
   if ( signals_pending[1] > ps.rws.mask_max )
      ps.rws.mask_max = signals_pending[1] ;
   for ( ;; )
   {
      fd_set read_mask ;
      int n_active ;
      unsigned u ;
      if ( debug.on )
         msg( LOG_DEBUG, func,
               "active_services = %d", ps.rws.active_services ) ;
      /* get the next timer value, if there is one, and select for that time */
      if( (tv.tv_sec = xtimer_nexttime()) >= 0 ) {
         tv.tv_usec = 0;
         tvptr = &tv;
      } else {
         tvptr = NULL;
      }
      read_mask = ps.rws.socket_mask ;  /* the set of service sockets to monitor */
      n_active = select( ps.rws.mask_max+1, &read_mask,
                        FD_SET_NULL, FD_SET_NULL, tvptr ) ;
      if ( n_active == -1 )
      {
         if ( errno == EINTR ) {
            continue ;
         } else if ( errno == EBADF )
            find_bad_fd() ;
         continue ;
      }
      else if ( n_active == 0 ) {
         xtimer_poll();
         continue ;
      }
      if ( debug.on )
         msg( LOG_DEBUG, func, "select returned %d", n_active ) ;
      xtimer_poll();  /* periodic check; clean up expired services */
      if( FD_ISSET(signals_pending[0], &read_mask) ) {
         check_pipe();
         if ( --n_active == 0 )
            continue ;
      }
#ifdef HAVE_MDNS
      if( xinetd_mdns_poll() == 0 )
         if ( --n_active == 0 )
            continue ;
#endif
      for ( u = 0 ; u < pset_count( SERVICES( ps ) ) ; u++ )
      {
         struct service *sp ;
         sp = SP( pset_pointer( SERVICES( ps ), u ) ) ;
         if ( ! SVC_IS_ACTIVE( sp ) )
            continue ;
         if ( FD_ISSET( SVC_FD( sp ), &read_mask ) )
         {
            svc_request( sp ) ;  /* a client request has arrived; this call starts the service */
            if ( --n_active == 0 )
               break ;
         }
      }
      if ( n_active > 0 )
         msg( LOG_ERR, func, "%d descriptors still set", n_active ) ;
   }
}

   xinetd uses select() for this monitoring; if you are interested, poll() and epoll are also worth a look.
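   To make the pattern concrete, here is a minimal, self-contained sketch of a select()-based accept loop. It is not xinetd code: the single listening socket and the port number are assumptions for illustration, whereas main_loop() above multiplexes the sockets of every active service.

/* A minimal select()-based listen loop (illustrative sketch, not xinetd code;
 * the port 10007 and the single listening socket are assumptions). */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>

int main(void)
{
   int lfd = socket(AF_INET, SOCK_STREAM, 0);   /* one listening socket */
   struct sockaddr_in addr;

   memset(&addr, 0, sizeof(addr));
   addr.sin_family = AF_INET;
   addr.sin_addr.s_addr = htonl(INADDR_ANY);
   addr.sin_port = htons(10007);

   if (lfd < 0 || bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
       listen(lfd, 16) < 0) {
      perror("setup");
      exit(1);
   }

   for ( ;; ) {
      fd_set read_mask;
      int n_active;

      FD_ZERO(&read_mask);
      FD_SET(lfd, &read_mask);                  /* like ps.rws.socket_mask */

      n_active = select(lfd + 1, &read_mask, NULL, NULL, NULL);
      if (n_active == -1) {
         if (errno == EINTR)
            continue;                           /* interrupted by a signal */
         perror("select");
         exit(1);
      }

      if (FD_ISSET(lfd, &read_mask)) {
         int cfd = accept(lfd, NULL, NULL);     /* a request is pending */
         if (cfd >= 0) {
            /* xinetd would start a server here (see step 3); we just close */
            close(cfd);
         }
      }
   }
}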

Step 3: Start the server process

/*
 *  Try to fork a server process.
 *  Actually, we won't fork if tcpmux_child is set, because we have
 *  already forked to keep the xinetd parent from blocking on the
 *  read of the service name.
 */
status_e server_start( struct server *serp )
{
   struct service   *sp = SERVER_SERVICE(serp) ;
   const char       *func = "server_start" ;
   if( debug.on )
      msg( LOG_DEBUG, func, "Starting service %s", SC_NAME( SVC_CONF( sp ) ) );
   SERVER_LOGUSER(serp) = SVC_LOGS_USERID_ON_SUCCESS( sp ) ;

   SERVER_PID(serp) = do_fork() ;  /* fork a child process to run the server */
   switch ( SERVER_PID(serp) )
   {
      case 0:  /* child: run the service */
         ps.rws.env_is_valid = FALSE ;
         child_process( serp ) ;
         msg( LOG_ERR, func, "INTERNAL ERROR: child_process returned" ) ;
         _exit( 0 ) ;
         /* NOTREACHED */

      case -1:  /* fork() failed */
         msg( LOG_ERR, func, "%s: fork failed: %m", SVC_ID( sp ) ) ;
         SERVER_FORK_FAILURES(serp)++ ;
         return( FAILED ) ;
      default: /* parent: continue */
         (void) time( &SERVER_STARTTIME(serp) ) ;
         SVC_INC_RUNNING_SERVERS( sp ) ;
         /*
          * Log the start of another server (if it is not an interceptor).
          * Determine if the server writes to the log (because in that case
          * we will have to check the log size).
          */
         if ( ! SVC_IS_INTERCEPTED( sp ) )
            svc_log_success( sp, SERVER_CONNECTION(serp), SERVER_PID(serp) ) ;
         else
            SERVER_WRITES_TO_LOG(serp) = SVC_IS_LOGGING( sp ) ;
         SERVER_WRITES_TO_LOG(serp) |= SERVER_LOGUSER(serp) ;
         return( OK ) ;
   }
}

   Why fork() a separate process to run the service here? Because an external server has to be run via execve(), and a successful execve() does not create a new process: it replaces the text, data, heap, and stack of the calling process with the new program (and never returns). If xinetd called execve() directly, xinetd itself would be replaced. For more on the exec() family, see Section 8.10 of Advanced Programming in the UNIX Environment.
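   As a minimal illustration of this fork-then-exec pattern (the server path /usr/sbin/in.ftpd below is only a placeholder, not something taken from xinetd), a caller that wants to keep running must fork() first and let only the child call execve():

/* An illustrative fork + execve sketch; /usr/sbin/in.ftpd is only a
 * placeholder path, not taken from xinetd. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
   pid_t pid = fork();

   if (pid == 0) {
      /* child: execve() replaces this process image with the server program */
      char *argv[] = { "in.ftpd", NULL };
      char *envp[] = { NULL };

      execve("/usr/sbin/in.ftpd", argv, envp);
      perror("execve");          /* reached only if execve() failed */
      _exit(1);
   } else if (pid < 0) {
      perror("fork");            /* fork failed; no child was created */
      return 1;
   }

   /* parent: keeps running its own code, just as xinetd keeps looping */
   waitpid(pid, NULL, 0);
   return 0;
}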


/*
 * This function is invoked in a forked process to run a server.
 * If the service is internal the appropriate function is invoked
 * otherwise the server program is exec'ed.
 * This function also logs the remote user id if appropriate
 */
void child_process( struct server *serp )
{
   struct service          *sp  = SERVER_SERVICE( serp ) ;
   connection_s            *cp  = SERVER_CONNECTION( serp ) ;
   struct service_config   *scp = SVC_CONF( sp ) ;
   const char              *func = "child_process" ;
   signal_default_state();
   if ((signals_pending[0] >= 0 && Sclose(signals_pending[0])) ||
       (signals_pending[1] >= 0 && Sclose(signals_pending[1])))
   {
      msg(LOG_ERR, func, "Failed to close the signal pipe: %m");
      _exit(1);
   }
   signals_pending[0] = -1;
   signals_pending[1] = -1;
   /* close the process's fds 0/1/2 (stdin, stdout, stderr) */
   Sclose(0);
   Sclose(1);
   Sclose(2);
#ifdef DEBUG_SERVER
   if ( debug.on )
   {
      msg( LOG_DEBUG, func, "Process %d is sleeping", getpid() ) ;
      sleep( 10 ) ;
   }
#endif
   if ( ! SC_IS_INTERCEPTED( scp ) )
   {
      set_credentials( scp ) ;
      if ( SC_SPECIFIED( scp, A_NICE ) )
         (void) nice( SC_NICE( scp ) ) ;
   }
   if ( svc_child_access_control(sp, cp) != OK )
      exit(0);
   if ( SERVER_LOGUSER( serp ) )
   {
      unsigned   timeout ;
      idresult_e result ;

      /*
       * We use LOGUSER_SUCCESS_TIMEOUT unless the service requires
       * identification, in which case we use an infinite timeout
       */
      timeout = SC_MUST_IDENTIFY( scp ) ? 0 : LOGUSER_SUCCESS_TIMEOUT ;
      result = log_remote_user( serp, timeout ) ;
      if ( result != IDR_OK && SC_MUST_IDENTIFY( scp ) )
      {
         svc_logprint( sp, NOID_ENTRY, "%s %s",
                  conn_addrstr( SERVER_CONNECTION( serp ) ),
                     idresult_explain( result ) ) ;
         _exit( 0 ) ;
      }
   }
   /* this is where the server gets executed  -bbraun */
   if ( ! SC_IS_INTERNAL( scp ) )
   {
      if( SC_REDIR_ADDR(scp) != NULL )
      {
         redir_handler( serp );
      }
      else
      {
#if defined(HAVE_SETENV)
         char buff[1024];
         strx_sprint(buff, sizeof(buff)-1, "REMOTE_HOST=%s", conn_addrstr(cp));
         if( env_addstr(SC_ENV(scp)->env_handle, buff) != ENV_OK ) {
            msg( LOG_ERR, func, "Error adding REMOTE_HOST variable for %s: %m", SC_NAME(scp) );
            _exit( 1 ) ;
         }
#endif
         exec_server( serp ) ; /* exec the external server program */
      }
   }
   else
   {
      char name[ 180 ] ;
      /*
       * We don't bother to disassociate from the controlling terminal
       *   (we have a controlling terminal only if debug.on is TRUE)
       *
       * Also, for interceptor processes, we give them the name:
       *            <program_name> <service-id> interceptor
       */
      if ( SC_IS_INTERCEPTED( scp ) )
         strx_print( INT_NULL, name, sizeof( name ) - 1,
                           "%s %s interceptor", program_name, SC_ID( scp ) ) ;
      else
      {
         int namelen = sizeof( name ) - 1 ;      /* leave space for the NUL */
         char host[NI_MAXHOST];
         size_t hostlen = NI_MAXHOST;
         socklen_t addrlen = 0;
         union xsockaddr *sinp = CONN_XADDRESS(SERVER_CONNECTION(serp));
         int len;
         if( sinp == NULL )
            exit(0);
         if( SC_IPV6(scp) ) addrlen = sizeof(struct sockaddr_in6);
         else if( SC_IPV4(scp) ) addrlen = sizeof(struct sockaddr_in);
         len = strx_nprint(name, namelen, "(%s service) %s", program_name,
            SC_ID( scp ) ) ;
         if( getnameinfo( SA(sinp), addrlen, host, hostlen, NULL, 0, 0) != 0 )
               strcpy(host, "unknown");
         if ( SC_IPV6(scp) && SC_ACCEPTS_CONNECTIONS( scp ) &&
               !IN6_IS_ADDR_UNSPECIFIED(&sinp->sa_in6.sin6_addr) )
            strx_print( INT_NULL, &name[ len ], namelen - len, " %s" , host ) ;
         if ( SC_IPV4(scp) && SC_ACCEPTS_CONNECTIONS( scp ) )
            strx_print( INT_NULL, &name[ len ], namelen - len, " %s", host ) ;
      }
      rename_process( name ) ;
      SVC_INTERNAL( sp, serp ) ;  /* run the internal service */
   }
   _exit( 0 ) ;
   /* NOTREACHED */
}

   In exec_server(), the dup2() system call duplicates the connection's socket descriptor onto the child's fds 0/1/2, so that from then on the server only needs to operate on fds 0 and 1 to receive and send data.
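   A minimal sketch of that redirection (connection_fd stands for an already-accepted socket, and the server path is again a placeholder):

/* Illustrative sketch: duplicate a connected socket onto fds 0/1/2 and exec
 * the server; connection_fd and the server path are placeholders. */
#include <stdio.h>
#include <unistd.h>

void run_server_on(int connection_fd)
{
   /* after these calls, the server's stdin/stdout/stderr are the socket */
   if (dup2(connection_fd, 0) < 0 ||
       dup2(connection_fd, 1) < 0 ||
       dup2(connection_fd, 2) < 0) {
      perror("dup2");
      _exit(1);
   }
   if (connection_fd > 2)
      (void) close(connection_fd);   /* the original descriptor is no longer needed */

   {
      char *argv[] = { "in.ftpd", NULL };
      char *envp[] = { NULL };

      execve("/usr/sbin/in.ftpd", argv, envp);
   }
   perror("execve");                 /* reached only if execve() failed */
   _exit(1);
}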

   This article originally appeared on the "cizyzhang" blog; please keep this attribution when reposting: http://cizyzhang.blog.51cto.com/1529108/1392426
