/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/** \file
 *  \ingroup mgmtint
 *
 *  PVFS2 management routines for iterating through handles of objects
 *  stored on servers. These routines are used primarily for file system
 *  check and repair purposes.
 */
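
/* Each MGMT_ITERATE_HANDLES request returns one batch of handles together
 * with an opaque position token; the caller feeds the token back in on the
 * next call to continue the scan, and a server reports PVFS_ITERATE_END
 * once it has no more handles to return, much like a readdir cursor.
 */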
#include <string.h>
#include "client-state-machine.h"
#include "pvfs2-types.h"
#include "pvfs2-mgmt.h"
#include "server-config.h"
extern job_context_id pint_client_sm_context;

static int iterate_handles_list_comp_fn(
    void *v_p, struct PVFS_server_resp *resp_p, int i);

%%

machine pvfs2_client_mgmt_iterate_handles_list_sm
{
    state setup_msgpair
    {
        run mgmt_iterate_handles_list_setup_msgpair;
        success => xfer_msgpair;
        default => cleanup;
    }

    state xfer_msgpair
    {
        jump pvfs2_msgpairarray_sm;
        default => cleanup;
    }

    state cleanup
    {
        run mgmt_iterate_handles_list_cleanup;
        default => terminate;
    }
}

%%
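
/* The section between the two %% markers above is not C; it is a state
 * machine description that PVFS2's statecomp preprocessor translates into
 * C code before compilation. It wires the three states together: set up
 * the message pair array, hand it to the shared pvfs2_msgpairarray_sm
 * machine for the network exchange, then clean up.
 */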
/** Initiate retrieval of a list of handles in use on a collection of
 *  servers.
 *
 *  \return 0 on success, -PVFS_error on failure.
 */
PVFS_error PVFS_imgmt_iterate_handles_list(
    PVFS_fs_id fs_id,
    PVFS_credentials *credentials,
    PVFS_handle **handle_matrix,
    int *handle_count_array,
    PVFS_ds_position *position_array,
    PVFS_BMI_addr_t *addr_array,
    int server_count,
    int flags,
    PVFS_error_details *details,
    PVFS_hint hints,
    PVFS_mgmt_op_id *op_id,
    void *user_ptr)
{
    PINT_smcb *smcb = NULL;
    PINT_client_sm *sm_p = NULL;
    int ret;

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PVFS_imgmt_iterate_handles_list() entered.\n");

    if (server_count < 1 || !handle_matrix || !position_array
        || !handle_count_array || !addr_array)
    {
        return -PVFS_EINVAL;
    }

    PINT_smcb_alloc(&smcb, PVFS_MGMT_ITERATE_HANDLES_LIST,
                    sizeof(struct PINT_client_sm),
                    client_op_state_get_machine,
                    client_state_machine_terminate,
                    pint_client_sm_context);
    if (smcb == NULL)
    {
        return -PVFS_ENOMEM;
    }
    sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);

    PINT_init_msgarray_params(sm_p, fs_id);
    PINT_init_sysint_credentials(sm_p->cred_p, credentials);
    sm_p->u.iterate_handles_list.fs_id = fs_id;
    sm_p->u.iterate_handles_list.server_count = server_count;
    sm_p->u.iterate_handles_list.addr_array = addr_array;
    sm_p->u.iterate_handles_list.handle_matrix = handle_matrix;
    sm_p->u.iterate_handles_list.handle_count_array = handle_count_array;
    sm_p->u.iterate_handles_list.position_array = position_array;
    sm_p->u.iterate_handles_list.details = details;
    PVFS_hint_copy(hints, &sm_p->hints);
    sm_p->u.iterate_handles_list.flags = flags;

    ret = PINT_msgpairarray_init(&sm_p->msgarray_op, server_count);
    if (ret != 0)
    {
        PINT_smcb_free(smcb);
        return ret;
    }

    return PINT_client_state_machine_post(smcb, op_id, user_ptr);
}
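
/* Illustrative sketch (not part of the original file): posting the
 * operation and waiting on it by hand looks like the following, which is
 * essentially what the blocking wrapper below does:
 *
 *     PVFS_mgmt_op_id op_id;
 *     PVFS_error error = 0;
 *     int ret = PVFS_imgmt_iterate_handles_list(
 *         fs_id, &creds, handle_matrix, handle_count_array,
 *         position_array, addr_array, server_count, 0, NULL,
 *         PVFS_HINT_NULL, &op_id, NULL);
 *     if (ret == 0)
 *     {
 *         ret = PVFS_mgmt_wait(op_id, "iterate_handles_list", &error);
 *         PINT_mgmt_release(op_id);
 *     }
 */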
/** Obtain a list of handles in use on a collection of servers, blocking
 *  until the operation completes.
 *
 *  \return 0 on success, -PVFS_error on failure.
 */
PVFS_error PVFS_mgmt_iterate_handles_list(
    PVFS_fs_id fs_id,
    PVFS_credentials *credentials,
    PVFS_handle **handle_matrix,
    int *handle_count_array,
    PVFS_ds_position *position_array,
    PVFS_BMI_addr_t *addr_array,
    int server_count,
    int flags,
    PVFS_error_details *details,
    PVFS_hint hints)
{
    PVFS_error ret = -PVFS_EINVAL, error = 0;
    PVFS_mgmt_op_id op_id;

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PVFS_mgmt_iterate_handles_list entered\n");

    ret = PVFS_imgmt_iterate_handles_list(
        fs_id, credentials, handle_matrix, handle_count_array,
        position_array, addr_array, server_count, flags, details,
        hints, &op_id, NULL);
    if (ret)
    {
        PVFS_perror_gossip("PVFS_imgmt_iterate_handles_list call", ret);
        error = ret;
    }
    else
    {
        ret = PVFS_mgmt_wait(op_id, "iterate_handles_list", &error);
        if (ret)
        {
            PVFS_perror_gossip("PVFS_mgmt_wait call", ret);
            error = ret;
        }
    }

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PVFS_mgmt_iterate_handles_list completed\n");

    PINT_mgmt_release(op_id);
    return error;
}
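
/* Illustrative sketch (not part of the original file): a typical caller
 * such as a fsck tool gives every server a fixed-size handle buffer,
 * starts each cursor at PVFS_ITERATE_START, and loops until every server
 * has reported PVFS_ITERATE_END. HANDLE_BATCH, the allocation scheme, and
 * the some_server_not_at_end() helper are assumptions for the example:
 *
 *     enum { HANDLE_BATCH = 1024 };
 *     for (i = 0; i < server_count; i++)
 *     {
 *         handle_matrix[i] = malloc(HANDLE_BATCH * sizeof(PVFS_handle));
 *         handle_count_array[i] = HANDLE_BATCH;
 *         position_array[i] = PVFS_ITERATE_START;
 *     }
 *     do
 *     {
 *         ret = PVFS_mgmt_iterate_handles_list(
 *             fs_id, &creds, handle_matrix, handle_count_array,
 *             position_array, addr_array, server_count, 0, NULL,
 *             PVFS_HINT_NULL);
 *
 *         process the returned handles, then reset each
 *         handle_count_array[i] to HANDLE_BATCH for the next round
 *
 *     } while (ret == 0 && some_server_not_at_end(position_array));
 */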

static PINT_sm_action mgmt_iterate_handles_list_setup_msgpair(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int i = 0, j = 0;
    PINT_sm_msgpair_state *msg_p;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "iterate_handles_list state: "
                 "mgmt_iterate_handles_list_setup_msgpair\n");

    /* setup msgpair array; i indexes msgpairs and j indexes servers, and
     * the two drift apart whenever a finished server is skipped below
     */
    j = 0;
    foreach_msgpair(&sm_p->msgarray_op, msg_p, i)
    {
  skipped:
        /* skip servers that have already reached end */
        /* TODO: use a better #define or something for ITERATE_END */
        if (sm_p->u.iterate_handles_list.position_array[j]
            == PVFS_ITERATE_END)
        {
            sm_p->msgarray_op.count--;
            sm_p->u.iterate_handles_list.handle_count_array[j] = 0;
            j++;
            goto skipped;
        }
        else
        {
            PINT_SERVREQ_MGMT_ITERATE_HANDLES_FILL(
                msg_p->req,
                *sm_p->cred_p,
                sm_p->u.iterate_handles_list.fs_id,
                sm_p->u.iterate_handles_list.handle_count_array[j],
                sm_p->u.iterate_handles_list.position_array[j],
                sm_p->u.iterate_handles_list.flags,
                sm_p->hints);

            msg_p->fs_id = sm_p->u.iterate_handles_list.fs_id;
            msg_p->handle = PVFS_HANDLE_NULL;
            msg_p->retry_flag = PVFS_MSGPAIR_RETRY;
            msg_p->comp_fn = iterate_handles_list_comp_fn;
            msg_p->svr_addr = sm_p->u.iterate_handles_list.addr_array[j];
            j++;
        }
    }

    /* TODO: be nicer about this, user called function too many times */
    assert(sm_p->msgarray_op.count > 0);

    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);

    /* immediate return: next state jumps to msgpairarray machine */
    js_p->error_code = 0;
    return SM_ACTION_COMPLETE;
}

static PINT_sm_action mgmt_iterate_handles_list_cleanup(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int i = 0, errct = 0;
    PVFS_error error = js_p->error_code;

    /* store server-specific errors if requested and present */
    if ((error != 0) && (sm_p->u.iterate_handles_list.details != NULL))
    {
        sm_p->u.iterate_handles_list.details->count_exceeded = 0;

        for (i = 0; i < sm_p->u.iterate_handles_list.server_count; i++)
        {
            if (sm_p->msgarray_op.msgarray[i].op_status != 0)
            {
                if (errct <
                    sm_p->u.iterate_handles_list.details->count_allocated)
                {
                    sm_p->u.iterate_handles_list.details->error[
                        errct].error = sm_p->msgarray_op.msgarray[i].op_status;
                    sm_p->u.iterate_handles_list.details->error[
                        errct].addr = sm_p->msgarray_op.msgarray[i].svr_addr;
                    errct++;
                }
                else
                {
                    sm_p->u.iterate_handles_list.details->count_exceeded = 1;
                }
            }
        }
        sm_p->u.iterate_handles_list.details->count_used = errct;
        error = -PVFS_EDETAIL;
    }

    PINT_msgpairarray_destroy(&sm_p->msgarray_op);

    sm_p->error_code = error;
    PINT_SET_OP_COMPLETE;
    return SM_ACTION_TERMINATE;
}
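
/* Illustrative sketch (not part of the original file): when the caller
 * receives -PVFS_EDETAIL, the per-server failures recorded above are
 * available through the PVFS_error_details structure it passed in:
 *
 *     if (ret == -PVFS_EDETAIL && details != NULL)
 *     {
 *         for (i = 0; i < details->count_used; i++)
 *         {
 *             PVFS_perror_gossip("server error", details->error[i].error);
 *         }
 *         if (details->count_exceeded)
 *         {
 *             gossip_err("more servers failed than details could hold\n");
 *         }
 *     }
 */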

static int iterate_handles_list_comp_fn(void *v_p,
                                        struct PVFS_server_resp *resp_p,
                                        int i)
{
    int j = 0;
    PINT_smcb *smcb = v_p;
    PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_MSGPAIR_PARENT_SM);

    /* if this particular request was successful, then collect info from
     * response
     */
    if (sm_p->msgarray_op.msgarray[i].op_status == 0)
    {
        /* first, we have to match this up with the correct array entry;
         * msgpair index i need not equal server index j, because finished
         * servers were skipped when the msgpair array was set up
         */
        for (j = 0; j < sm_p->u.iterate_handles_list.server_count; j++)
        {
            if (sm_p->msgarray_op.msgarray[i].svr_addr
                == sm_p->u.iterate_handles_list.addr_array[j])
            {
                break;
            }
        }
        assert(j != sm_p->u.iterate_handles_list.server_count);

        sm_p->u.iterate_handles_list.handle_count_array[j] =
            resp_p->u.mgmt_iterate_handles.handle_count;
        sm_p->u.iterate_handles_list.position_array[j] =
            resp_p->u.mgmt_iterate_handles.position;
        memcpy(sm_p->u.iterate_handles_list.handle_matrix[j],
               resp_p->u.mgmt_iterate_handles.handle_array,
               resp_p->u.mgmt_iterate_handles.handle_count
               * sizeof(PVFS_handle));
    }

    /* if this is the last response, check all of the status values and
     * return an error code if any of the requests failed
     */
    if (i == (sm_p->msgarray_op.count - 1))
    {
        return PINT_msgarray_status(&sm_p->msgarray_op);
    }
    return 0;
}

/*
 * Local variables:
 *  mode: c
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 *
 * vim: ft=c ts=8 sts=4 sw=4 expandtab
 */