/* 
 * (C) 2001 Clemson University and The University of Chicago 
 *
 * See COPYING in top-level directory.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include <assert.h>

#include "pvfs2-server.h"
#include "pint-perf-counter.h"

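/* Scratch buffers used to snapshot the server's performance counters.
 * static_value_matrix is indexed as [key][history interval]; the time
 * arrays hold one entry per history interval, with index 0 being the most
 * recent interval.  All of them are reallocated on demand when the
 * counter's history size grows.
 */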
static uint64_t* static_start_time_array_ms = NULL;
static uint64_t* static_interval_array_ms = NULL;
static int64_t** static_value_matrix = NULL;
static int static_history_size = 0;
static int static_key_count = 0;

static int reallocate_static_arrays_if_needed(void);

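/* Interval ids sent to clients are derived from the interval start time:
 * id = start_time_ms % MAX_NEXT_ID.  Ids therefore wrap roughly every
 * 10^9 ms (about 11.6 days); the rollover check in perf_mon_do_work()
 * accounts for this.
 */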
#define MAX_NEXT_ID 1000000000

%%

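/* State flow: the standard request prelude runs first, then do_work
 * gathers counter samples and builds the response, final_response sends
 * the response back to the client, and cleanup releases resources before
 * the machine terminates.
 */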
machine pvfs2_perf_mon_sm
{
    state prelude
    {
        jump pvfs2_prelude_sm;
        default => do_work;
    }

    state do_work
    {
        run perf_mon_do_work;
        default => final_response;
    }

    state final_response
    {
        jump pvfs2_final_response_sm;
        default => cleanup;
    }

    state cleanup
    {
        run perf_mon_cleanup;
        default => terminate;
    }
}

%%

/* perf_mon_cleanup()
 *
 * cleans up any resources consumed by this state machine and ends
 * execution of the machine
 */
static PINT_sm_action perf_mon_cleanup(
        struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);

    if(s_op->resp.u.mgmt_perf_mon.perf_array)
    {
        free(s_op->resp.u.mgmt_perf_mon.perf_array);
    }

    return(server_state_machine_complete(smcb));
}

/* perf_mon_do_work()
 *
 * gathers statistics and builds response
 */
static PINT_sm_action perf_mon_do_work(
        struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    struct timeval tv;
    int i;
    int valid_count = 0;
    uint32_t tmp_next_id;
    int ret = -1;
    int idx;

#ifdef __PVFS2_DISABLE_PERF_COUNTERS__
    gossip_err("Error: perf_mon request received, but perf counters are disabled.\n");
    js_p->error_code = -PVFS_ENOSYS;
    return SM_ACTION_COMPLETE;
#endif

    /* allocate memory to hold statistics */
    s_op->resp.u.mgmt_perf_mon.perf_array =
        (struct PVFS_mgmt_perf_stat*)malloc(s_op->req->u.mgmt_perf_mon.count *
        sizeof(struct PVFS_mgmt_perf_stat));
    if(!s_op->resp.u.mgmt_perf_mon.perf_array)
    {
        js_p->error_code = -PVFS_ENOMEM;
        return SM_ACTION_COMPLETE;
    }

    /* fill in some of the response */
    gettimeofday(&tv, NULL);
    s_op->resp.u.mgmt_perf_mon.cur_time_ms = tv.tv_sec*1000 +
        tv.tv_usec/1000;
    s_op->resp.u.mgmt_perf_mon.suggested_next_id =
        s_op->req->u.mgmt_perf_mon.next_id;
    s_op->resp.u.mgmt_perf_mon.perf_array_count =
        s_op->req->u.mgmt_perf_mon.count;

    /* make sure we have scratch memory to use as an intermediate buffer for
     * performance counters
     */
    ret = reallocate_static_arrays_if_needed();
    if(ret < 0)
    {
        free(s_op->resp.u.mgmt_perf_mon.perf_array);
        s_op->resp.u.mgmt_perf_mon.perf_array = NULL;
        js_p->error_code = ret;
        return SM_ACTION_COMPLETE;
    }

    PINT_perf_retrieve(PINT_server_pc,
        static_value_matrix,
        static_start_time_array_ms,
        static_interval_array_ms,
        static_key_count,
        static_history_size);

    /* work through the start times, and find the oldest one that is new
     * enough to satisfy next_id
     * NOTE: we encode the low-order digits of the timestamp
     * (start_time_ms % MAX_NEXT_ID) as id values.  That should be
     * sufficient to maintain compatibility.
     */
    for(i=static_history_size-1; i>=0; i--)
    {
        tmp_next_id = 0;
        tmp_next_id += (uint32_t)(static_start_time_array_ms[i] % MAX_NEXT_ID);

        /* check three conditions:
         * 1) that this interval from the perf counter is valid (start time
         * not zero)
         * 2) that the interval is equal to or more recent than what was
         * suggested by the client
         * 3) whether the start time has rolled over within MAX_NEXT_ID
         */
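        /* Worked example of the rollover test (an illustration, not from
         * the original source): if the client's next_id is 999999990 and a
         * newer interval started just after the id space wrapped, its id
         * might be 5.  Then next_id - tmp_next_id = 999999985, which is
         * greater than MAX_NEXT_ID/2, so the interval is still treated as
         * new enough.
         */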
        if(tmp_next_id != 0 &&
            ((tmp_next_id >= s_op->req->u.mgmt_perf_mon.next_id) ||
            ((s_op->req->u.mgmt_perf_mon.next_id-tmp_next_id)>(MAX_NEXT_ID/2))))
        {
            /* found the first valid timestamp */
            valid_count = i+1;
            /* compute a next id to suggest that the client use next time
             * (newest time plus 1)
             */
            tmp_next_id = 0;
            tmp_next_id += (uint32_t)(static_start_time_array_ms[0] %
                MAX_NEXT_ID);
            tmp_next_id += 1;
            s_op->resp.u.mgmt_perf_mon.suggested_next_id = tmp_next_id;
            break;
        }
    }
    if(valid_count > s_op->req->u.mgmt_perf_mon.count)
    {
        valid_count = s_op->req->u.mgmt_perf_mon.count;
    }

    /* fill in all of the valid values */
    for(i=0; i<s_op->req->u.mgmt_perf_mon.count; i++)
    {
        if(i<valid_count && static_start_time_array_ms[i] != 0)
        {
            /* we fill perf_array in backwards (oldest interval first),
             * which is the order that karma and other programs collecting
             * data over multiple intervals expect */
            idx = valid_count - i - 1;
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].valid_flag = 1;
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].id = 0;
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].id +=
                (uint32_t)(static_start_time_array_ms[i] % MAX_NEXT_ID);
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].start_time_ms =
                static_start_time_array_ms[i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].read =
                static_value_matrix[PINT_PERF_READ][i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].write =
                static_value_matrix[PINT_PERF_WRITE][i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].metadata_read =
                static_value_matrix[PINT_PERF_METADATA_READ][i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].metadata_write =
                static_value_matrix[PINT_PERF_METADATA_WRITE][i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].dspace_queue =
                static_value_matrix[PINT_PERF_METADATA_DSPACE_OPS][i];
            s_op->resp.u.mgmt_perf_mon.perf_array[idx].keyval_queue =
                static_value_matrix[PINT_PERF_METADATA_KEYVAL_OPS][i];
        }
        else
        {
            s_op->resp.u.mgmt_perf_mon.perf_array[i].valid_flag = 0;
        }
    }

    /* set final end time */
    if(valid_count > 0)
    {
        s_op->resp.u.mgmt_perf_mon.end_time_ms =
            static_start_time_array_ms[0] +
            static_interval_array_ms[0];
    }

    js_p->error_code = 0;
    return SM_ACTION_COMPLETE;
}
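
/* Protocol summary (as implemented above): the client sends next_id and
 * count in the mgmt_perf_mon request; the server replies with up to count
 * history intervals whose ids are at least next_id, ordered oldest first,
 * along with cur_time_ms, end_time_ms, and a suggested_next_id (newest
 * interval id plus one) for the client's next poll.
 */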

/* reallocate_static_arrays_if_needed()
 *
 * allocates new arrays for temporary storage of performance counter data,
 * freeing old memory if needed
 *
 * returns 0 on success, -PVFS_error on failure
 */
static int reallocate_static_arrays_if_needed(void)
{
    unsigned int history_size;
    unsigned int key_count;
    int ret = -1;
    int i;

    /* how many keys and history intervals do we have in the perf counter? */
    ret = PINT_perf_get_info(PINT_server_pc, PINT_PERF_KEY_COUNT,
        &key_count);
    if(ret < 0)
    {
        return(ret);
    }
    ret = PINT_perf_get_info(PINT_server_pc, PINT_PERF_HISTORY_SIZE,
        &history_size);
    if(ret < 0)
    {
        return(ret);
    }

    if(history_size > (unsigned int)static_history_size)
    {
        /* free the old time arrays; reset the pointers to NULL so that a
         * malloc failure below cannot leave them dangling
         */
        if(static_start_time_array_ms)
        {
            free(static_start_time_array_ms);
            static_start_time_array_ms = NULL;
        }
        if(static_interval_array_ms)
        {
            free(static_interval_array_ms);
            static_interval_array_ms = NULL;
        }

        /* reallocate time arrays */
        static_start_time_array_ms =
            (uint64_t*)malloc(history_size*sizeof(uint64_t));
        if(!static_start_time_array_ms)
        {
            return(-PVFS_ENOMEM);
        }
        static_interval_array_ms =
            (uint64_t*)malloc(history_size*sizeof(uint64_t));
        if(!static_interval_array_ms)
        {
            free(static_start_time_array_ms);
            static_start_time_array_ms = NULL;
            return(-PVFS_ENOMEM);
        }

        /* the key count shouldn't change once acquired */
        assert((static_key_count == 0) ||
            (key_count == (unsigned int)static_key_count));

        /* allocate value matrix */
        if(!static_value_matrix)
        {
            static_value_matrix =
                (int64_t**)malloc(key_count*sizeof(int64_t*));
            if(!static_value_matrix)
            {
                free(static_start_time_array_ms);
                static_start_time_array_ms = NULL;
                free(static_interval_array_ms);
                static_interval_array_ms = NULL;
                return(-PVFS_ENOMEM);
            }
            memset(static_value_matrix, 0, key_count*sizeof(int64_t*));
        }

        for(i=0; i<(int)key_count; i++)
        {
            /* free any row left over from a smaller history size before
             * allocating the larger replacement
             */
            if(static_value_matrix[i])
            {
                free(static_value_matrix[i]);
            }
            static_value_matrix[i] =
                (int64_t*)malloc(history_size*sizeof(int64_t));
            if(!static_value_matrix[i])
            {
                for(i=i-1; i>=0; i--)
                {
                    free(static_value_matrix[i]);
                    static_value_matrix[i] = NULL;
                }
                free(static_start_time_array_ms);
                static_start_time_array_ms = NULL;
                free(static_interval_array_ms);
                static_interval_array_ms = NULL;
                /* force a full reallocation on the next call */
                static_history_size = 0;
                return(-PVFS_ENOMEM);
            }
        }
    }

    /* NOTE: we update the static counts even if they are decreasing; it is
     * fine for the arrays to be bigger than needed.
     */
    static_history_size = history_size;
    static_key_count = key_count;

    return(0);
}

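/* Registration entry for this request type: the server dispatches incoming
 * mgmt_perf_mon requests to pvfs2_perf_mon_sm, with no permission check
 * applied (PINT_SERVER_CHECK_NONE).
 */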
struct PINT_server_req_params pvfs2_perf_mon_params =
{
    .string_name = "mgmt_perf_mon",
    .perm = PINT_SERVER_CHECK_NONE,
    .state_machine = &pvfs2_perf_mon_sm
};

/*
 * Local variables:
 *  mode: c
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */