diff --git a/src/client/sysint/sys-create.sm b/src/client/sysint/sys-create.sm
index 6317dc6..ef6925a 100644
--- a/src/client/sysint/sys-create.sm
+++ b/src/client/sysint/sys-create.sm
@@ -79,7 +79,6 @@ machine pvfs2_client_create_sm
     state parent_getattr_inspect
     {
         run create_parent_getattr_inspect;
-        OSD_POST_CREATE => cleanup;
         success => create_setup_msgpair;
         default => cleanup;
     }
@@ -140,8 +139,7 @@ machine pvfs2_client_create_sm
     {
         run create_setattr_setup_msgpair;
         OSD_MSGPAIR => create_setattr_xfer_osd_msgpair;
-        success => create_setattr_xfer_msgpair;
-        default => cleanup;
+        default => crdirent_setup_msgpair;
     }
 
     state create_setattr_xfer_osd_msgpair
@@ -151,13 +149,6 @@ machine pvfs2_client_create_sm
         default => crdirent_failure;
     }
 
-    state create_setattr_xfer_msgpair
-    {
-        jump pvfs2_msgpairarray_sm;
-        success => crdirent_setup_msgpair;
-        default => crdirent_failure;
-    }
-
     state crdirent_xfer_msgpair
     {
         jump pvfs2_msgpairarray_sm;
@@ -738,7 +729,9 @@ static PINT_sm_action create_create_setup_msgpair(
         sm_p->u.create.dist;
     msg_p->req.u.create.attr.u.meta.dist_size =
         PINT_DIST_PACK_SIZE(sm_p->u.create.dist);
-
+    msg_p->req.u.create.attr.cid = sm_p->getattr.attr.cid;
+    sm_p->u.create.attr.cid = sm_p->getattr.attr.cid;
+
     PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
     return SM_ACTION_COMPLETE;
 }
@@ -746,7 +739,6 @@ static PINT_sm_action create_create_setup_msgpair(
 static PINT_sm_action create_datafiles_setup_msgpair_array(
     struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("create_datafiles_setup_msgpair_array\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     int ret = -PVFS_EINVAL;
     struct server_configuration_s *server_config;
@@ -885,6 +877,9 @@ static PINT_sm_action create_setattr_setup_msgpair(
         "create state: setattr_setup_msgpair\n");
 
     js_p->error_code = 0;
+
+    if (!is_osd_md && !is_osd_meta)
+        return SM_ACTION_COMPLETE;
 
     gossip_debug(GOSSIP_CLIENT_DEBUG," create: posting setattr req\n");
 
@@ -976,24 +971,24 @@ static PINT_sm_action create_setattr_setup_msgpair(
             return 1;
         }
     } else {
-        PINT_SERVREQ_SETATTR_FILL(
-            msg_p->req,
-            *sm_p->cred_p,
-            sm_p->object_ref.fs_id,
-            sm_p->u.create.metafile_handle,
-            PVFS_TYPE_METAFILE,
-            sm_p->u.create.attr,
-            PVFS_ATTR_META_ALL,
-            sm_p->hints);
-
-        msg_p->req.u.setattr.attr.u.meta.dfile_array =
-            sm_p->u.create.datafile_handles;
-        msg_p->req.u.setattr.attr.u.meta.dfile_count =
-            sm_p->u.create.num_data_files;
-        msg_p->req.u.setattr.attr.u.meta.dist =
-            sm_p->u.create.dist;
-        msg_p->req.u.setattr.attr.u.meta.dist_size =
-            PINT_DIST_PACK_SIZE(sm_p->u.create.dist);
+/*        PINT_SERVREQ_SETATTR_FILL(*/
+/*            msg_p->req,*/
+/*            *sm_p->cred_p,*/
+/*            sm_p->object_ref.fs_id,*/
+/*            sm_p->u.create.metafile_handle,*/
+/*            PVFS_TYPE_METAFILE,*/
+/*            sm_p->u.create.attr,*/
+/*            PVFS_ATTR_META_ALL,*/
+/*            sm_p->hints);*/
+
+/*        msg_p->req.u.setattr.attr.u.meta.dfile_array =*/
+/*            sm_p->u.create.datafile_handles;*/
+/*        msg_p->req.u.setattr.attr.u.meta.dfile_count =*/
+/*            sm_p->u.create.num_data_files;*/
+/*        msg_p->req.u.setattr.attr.u.meta.dist =*/
+/*            sm_p->u.create.dist;*/
+/*        msg_p->req.u.setattr.attr.u.meta.dist_size =*/
+/*            PINT_DIST_PACK_SIZE(sm_p->u.create.dist);*/
     }
 
     msg_p->fs_id = sm_p->object_ref.fs_id;
@@ -1108,6 +1103,7 @@ static PINT_sm_action create_cleanup(
     {
         metafile_ref.handle = sm_p->u.create.metafile_handle;
         metafile_ref.fs_id = sm_p->object_ref.fs_id;
+        metafile_ref.cid = sm_p->getattr.attr.cid;
 
         /* fill in outgoing response fields */
         sm_p->u.create.create_resp->ref = metafile_ref;
diff --git a/src/client/sysint/sys-getattr.sm b/src/client/sysint/sys-getattr.sm
index 0605e7f..cc91814 100644
--- a/src/client/sysint/sys-getattr.sm
+++ b/src/client/sysint/sys-getattr.sm
@@ -55,8 +55,7 @@ enum
     GETATTR_ACACHE_MISS = 1,
     GETATTR_NEED_DATAFILE_SIZES = 2,
     GETATTR_IO_RETRY = 3,
-    OSD_MSGPAIR = 2001,
-    IO_DO_OSD = 2002
+    OSD_MSGPAIR = 2001
 };
 
 /* completion function prototypes */
@@ -111,7 +110,6 @@ nested machine pvfs2_client_getattr_sm
         run getattr_acache_lookup;
         GETATTR_ACACHE_MISS => object_getattr_setup_msgpair;
         GETATTR_NEED_DATAFILE_SIZES => datafile_get_sizes;
-        IO_DO_OSD => cleanup;
         default => cleanup;
     }
 
@@ -1451,7 +1449,7 @@ static PINT_sm_action getattr_cleanup(
         free(getattr->size_array);
 
     /* cleanup getattr when an error occurs */
-    if (js_p->error_code && js_p->error_code != IO_DO_OSD)
+    if (js_p->error_code)
     {
         if (getattr->attr.mask & PVFS_ATTR_META_DFILES)
         {
diff --git a/src/client/sysint/sys-io.sm b/src/client/sysint/sys-io.sm
index 02cc916..2ca40d4 100644
--- a/src/client/sysint/sys-io.sm
+++ b/src/client/sysint/sys-io.sm
@@ -34,8 +34,7 @@ extern job_context_id pint_client_sm_context;
 
 enum
 {
-    IO_DO_OSD_IO = 2001,
-    IO_OSD_NOT_CREATED = 2002
+    IO_DO_OSD_IO = 2001
 };
 
 enum
@@ -172,7 +171,6 @@ machine pvfs2_client_io_sm
     {
         jump pvfs2_client_getattr_sm;
         success => inspect_attr;
-        IO_OSD_NOT_CREATED => osd_io;
         default => io_cleanup;
     }
 
@@ -437,7 +435,6 @@ PVFS_error PVFS_sys_io(
 static PINT_sm_action io_init(
     struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("io_init\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     job_id_t tmp_id;
 
@@ -483,7 +480,6 @@ static PINT_sm_action io_init(
 static PINT_sm_action io_inspect_attr(
     struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("io_inspect_attr\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     int is_osd_meta = fsid_is_osd_meta(sm_p->getattr.object_ref.fs_id);
     int is_osd_md = fsid_is_osd_md(sm_p->getattr.object_ref.fs_id);
@@ -493,7 +489,7 @@ static PINT_sm_action io_inspect_attr(
         js_p->error_code = -PVFS_ECANCEL;
         return SM_ACTION_COMPLETE;
     }
-    
+
     /* determine if we need to unstuff or not to service this request */
     js_p->error_code = unstuff_needed(
         sm_p->u.io.mem_req,
diff --git a/src/client/sysint/sys-mkdir.sm b/src/client/sysint/sys-mkdir.sm
index 57e1db5..bd3dfb5 100644
--- a/src/client/sysint/sys-mkdir.sm
+++ b/src/client/sysint/sys-mkdir.sm
@@ -35,7 +35,8 @@ enum
     MKDIR_RETRY = 180,
     MKDIR_SKIP_EATTR = 181,
     OSD_MKDIR_MSGPAIR = 2001,
-    CREATE_COLLECTION = 2002
+    CREATE_COLLECTION = 2002,
+    SKIP_COLLECTION_CREATE = 2003
 };
 
 static int mkdir_msg_comp_fn(
@@ -96,6 +97,7 @@ machine pvfs2_client_mkdir_sm
     {
         run create_collection_setup_msgpair;
         success => create_collection_xfer_msgpair;
+        SKIP_COLLECTION_CREATE => mkdir_seteattr_setup_msgpair;
         default => mkdir_crdirent_failure;
     }
 
@@ -432,6 +434,7 @@ static PINT_sm_action create_collection_setup_msgpair(
     js_p->error_code = 0;
 
     if (!is_osd) {
+        js_p->error_code = SKIP_COLLECTION_CREATE;
         return SM_ACTION_COMPLETE;
     }
 
diff --git a/src/client/sysint/sys-osd-io.sm b/src/client/sysint/sys-osd-io.sm
index 81cd64c..1109221 100755
--- a/src/client/sysint/sys-osd-io.sm
+++ b/src/client/sysint/sys-osd-io.sm
@@ -35,20 +35,12 @@
 #define KERNEL_BUFSIZE (400*1024)
 
 enum {
-    LOOP_NEXT_CHUNK = 1012,
-    OSD_CREATE_WRITE = 2001,
-    CREATE_AND_WRITE_DONE = 2002,
-    CREATE_AND_WRITE_NOT_DONE = 2003
+    LOOP_NEXT_CHUNK = 1012
 };
 
 static int osd_io_completion_fn(void *user_args,
                                 struct PVFS_server_resp *resp_p, int index);
-static int create_and_write_comp_fn(void *v_p,
-                                    struct PVFS_server_resp *resp_p,
-                                    int index);
-static int setattr_comp_fn(void *v_p,
-                           struct PVFS_server_resp *resp_p,
-                           int index);
+
 %%
 
 nested machine pvfs2_client_osd_io_sm
@@ -60,32 +52,6 @@ nested machine pvfs2_client_osd_io_sm
         default => return;
     }
 
-    state create_and_write
-    {
-        run create_and_write_setup_msgpair;
-        default => xfer_create_and_write;
-    }
-
-    state xfer_create_and_write
-    {
-        jump pvfs2_osd_msgpairarray_sm;
-        success => maybe_xfer_more;
-        default => cleanup;
-    }
-
-    state setattr_setup_msgpair
-    {
-        run setattr_setup_msgpair;
-        success => setattr_xfer_msgpair;
-        default => cleanup;
-    }
-
-    state setattr_xfer_msgpair
-    {
-        jump pvfs2_msgpairarray_sm;
-        default => cleanup;
-    }
-
     state setup_msgpairs
     {
         run osd_io_setup_msgpairs;
@@ -125,7 +91,6 @@ nested machine pvfs2_client_osd_io_sm
 
 static int osd_io_init(struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("osd_io_init\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     struct PINT_client_io_sm *io = &sm_p->u.io;
     PVFS_object_attr *attr = &sm_p->getattr.attr;
@@ -174,7 +139,6 @@ static int osd_io_init(struct PINT_smcb *smcb, job_status_s *js_p)
     for (i=0; i<io->datafile_count; i++) {
         PVFS_handle datafile_handle = attr->u.meta.dfile_array[
             io->datafile_index_array[i]];
-        gossip_err("datafile_handle %d\n", datafile_handle);
         gossip_debug(GOSSIP_IO_DEBUG, "%s: server %d/%d handle %llu\n",
                      __func__, i, io->datafile_count,
                      llu(datafile_handle));
@@ -182,26 +146,20 @@ static int osd_io_init(struct PINT_smcb *smcb, job_status_s *js_p)
         ret = PINT_cached_config_map_to_server(
             &sm_p->msgarray_op.msgarray[i].svr_addr, datafile_handle,
             sm_p->object_ref.fs_id);
-        gossip_err("svr_addr: %d\n", sm_p->msgarray_op.msgarray[i].svr_addr);
-        gossip_err("sm_p->object_ref.fs_id: %d\n", sm_p->object_ref.fs_id);
         if (ret)
             goto out;
 
         io->file_req_state[i].target_offset = io->file_req_offset;
         io->file_req_state[i].final_offset =
             io->file_req_offset + io->mem_req->aggregate_size;
-        gossip_err("io->file_req_offset: %d\n", io->file_req_offset);
-        gossip_err("io->mem_req->aggregate_size: %d\n", io->mem_req->aggregate_size);
         if (i > 0)
             memcpy(&io->file_data[i], &io->file_data[0],
                    sizeof(io->file_data[0]));
         io->file_data[i].server_nr = io->datafile_index_array[i];
-        gossip_err("io->file_data[i].server_nr: %d\n", io->file_data[i].server_nr);
 
         /* invariants */
         sm_p->msgarray_op.msgarray[i].fs_id = sm_p->object_ref.fs_id;
         sm_p->msgarray_op.msgarray[i].handle = sm_p->object_ref.handle;
-        gossip_err("sm_p->object_ref.handle: %d\n", sm_p->object_ref.handle);
         sm_p->msgarray_op.msgarray[i].retry_flag = PVFS_MSGPAIR_RETRY;
         sm_p->msgarray_op.msgarray[i].comp_fn = osd_io_completion_fn;
     }
@@ -216,7 +174,6 @@ out:
 
 static int osd_io_setup_msgpairs(struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("osd_io_setup_msgpairs\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     struct PINT_client_io_sm *io = &sm_p->u.io;
     PVFS_object_attr *attr = &sm_p->getattr.attr;
@@ -424,10 +381,18 @@ static int osd_io_setup_msgpairs(struct PINT_smcb *smcb, job_status_s *js_p)
     } else if (io->io_type == PVFS_IO_WRITE) {
         if (server_config->post_create && !target_offset[0]) {
-            gossip_err("first\n");
+            uint64_t attrval;
+            struct attribute_list attr = {ATTR_SET, USER_COLL_PG, 1, &attrval, 8};
+
+            if(!sm_p->getattr.attr.cid) {
+                sm_p->getattr.attr.cid = COLLECTION_OID_LB; /* root directory */
+            }
+            set_htonll(&attrval, sm_p->getattr.attr.cid);
+
             osd_command_set_create_and_write(command, PVFS_OSD_DATA_PID,
                                              datafile_handle, len, target_offset[0]);
+
+            osd_command_attr_build(command, &attr, 1);
         } else {
-            gossip_err("second\n");
             osd_command_set_write(command, PVFS_OSD_DATA_PID, datafile_handle,
                                   len, target_offset[0]);
         }
@@ -657,259 +622,6 @@ out:
     return 1;
 }
 
-static PINT_sm_action create_and_write_setup_msgpair(
-    struct PINT_smcb *smcb, job_status_s *js_p)
-{
-    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
-    int ret = -PVFS_EINVAL;
-    struct server_configuration_s *server_config;
-    int is_osd;
-    uint32_t len;
-    PINT_sm_msgpair_state *msg_p = NULL;
-
-    server_config = PINT_get_server_config_struct(
-        sm_p->object_ref.fs_id);
-    PINT_put_server_config_struct(server_config);
-    is_osd = (server_config->osd_type != OSD_NONE);
-
-    js_p->error_code = 0;
-
-    if(sm_p->u.io.mem_req->aggregate_size <= KERNEL_BUFSIZE) {
-        /* then we can complete the write process in a single run */
-        len = sm_p->u.io.mem_req->aggregate_size;
-    } else {
-        /* we need to make multiple calls to complete write */
-        if((sm_p->u.io.mem_req->aggregate_size - sm_p->u.io.total_size) > KERNEL_BUFSIZE) {
-            len = KERNEL_BUFSIZE;
-        } else {
-            len = sm_p->u.io.mem_req->aggregate_size - sm_p->u.io.total_size;
-        }
-    }
-
-    sm_p->u.io.datafile_count = 1;
-    sm_p->u.create.layout.algorithm = PVFS_SYS_LAYOUT_ROUND_ROBIN;
-
-    /* allocate handle extent array objects */
-    if (sm_p->u.create.io_handle_extent_array == NULL)
-    {
-        sm_p->u.create.io_handle_extent_array = (PVFS_handle_extent_array *)
-            malloc(sm_p->u.create.num_data_files *
-                   sizeof(PVFS_handle_extent_array));
-    }
-    if (!sm_p->u.create.io_handle_extent_array)
-    {
-        gossip_err("create: failed to allocate handle_extent_array\n");
-        js_p->error_code = -PVFS_ENOMEM;
-        return SM_ACTION_COMPLETE;
-    }
-
-    /* allocate data server bmi address array */
-    if (sm_p->u.create.data_server_addrs == NULL)
-    {
-        sm_p->u.create.data_server_addrs = (PVFS_BMI_addr_t *)malloc(
-            sm_p->u.create.num_data_files * sizeof(PVFS_BMI_addr_t));
-    }
-    if (!sm_p->u.create.data_server_addrs)
-    {
-        gossip_err("create: failed to allocate data server addrs\n");
-        js_p->error_code = -PVFS_ENOMEM;
-        return SM_ACTION_COMPLETE;
-    }
-
-    ret = PINT_cached_config_map_servers(
-        sm_p->object_ref.fs_id,
-        &sm_p->u.io.datafile_count,
-        &sm_p->u.create.layout,
-        sm_p->u.create.data_server_addrs,
-        sm_p->u.create.io_handle_extent_array);
-
-    if(ret < 0)
-    {
-        gossip_err("create: failed to map the layout to a set of IO servers\n");
-        js_p->error_code = ret;
-        return 1;
-    }
-
-    if (ret)
-    {
-        gossip_err("Failed to retrieve data server addresses\n");
-        js_p->error_code = ret;
-        return SM_ACTION_COMPLETE;
-    }
-
-    PINT_msgpair_init(&sm_p->msgarray_op);
-    msg_p = &sm_p->msgarray_op.msgpair;
-
-#define CURRENT_COMMAND_PAGE 0xfffffffeUL
-#define CURRENT_COMMAND_PAGE_OID 4
-
-    if (is_osd) {
-        struct osd_command *command = &sm_p->msgarray_op.msgarray[0].osd_command;
-        uint64_t attrval;
-        struct attribute_list attrs[] = {{ ATTR_GET, CUR_CMD_ATTR_PG, CCAP_OID, NULL, CCAP_OID_LEN },
-                                         { ATTR_SET, USER_COLL_PG, 1, &attrval, 8}};
-
-        if(!sm_p->object_ref.cid) {
-            sm_p->object_ref.cid = COLLECTION_OID_LB; /* root directory */
-        }
-        set_htonll(&attrval, sm_p->object_ref.cid);
-
-        if(sm_p->u.io.mem_req->aggregate_size > KERNEL_BUFSIZE && sm_p->u.io.total_size) {
-            /* this means we are completing the write operation in multiple steps
-             * and we have already gone through the initial create_and_write step
-             * thus we just need to write the remaining data in the buffer
-             */
-            ret = osd_command_set_write(command, PVFS_OSD_DATA_PID, sm_p->u.create.datafile_handles[0], len, sm_p->u.io.total_size);
-            command->outdata = sm_p->u.io.buffer + sm_p->u.io.total_size;
-            command->outlen = len;
-        } else {
-            ret = osd_command_set_create_and_write(command, PVFS_OSD_DATA_PID, 0, len, 0);
-            command->outdata = sm_p->u.io.buffer;
-            command->outlen = len;
-
-            if (ret) {
-                osd_error_xerrno(ret, "%s: osd_command_set_create failed",
-                                 __func__);
-                js_p->error_code = ret;
-                return 1;
-            }
-
-            ret = osd_command_attr_build(command, attrs, 2);
-            if (ret) {
-                osd_error_xerrno(ret, "%s: osd_command_attr_build failed",
-                                 __func__);
-                js_p->error_code = ret;
-                return 1;
-            }
-        }
-    }
-
-    msg_p->fs_id = sm_p->object_ref.fs_id;
-    msg_p->handle = sm_p->u.create.io_handle_extent_array[0].
-        extent_array[0].first;
-    msg_p->comp_fn = create_and_write_comp_fn;
-    msg_p->retry_flag = PVFS_MSGPAIR_NO_RETRY;
-    msg_p->svr_addr = sm_p->u.create.data_server_addrs[0];
-
-    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
-    return SM_ACTION_COMPLETE;
-}
-
-static int create_and_write_comp_fn(void *v_p,
-                                    struct PVFS_server_resp *resp_p,
-                                    int index)
-{
-    PVFS_error status;
-    PINT_smcb *smcb = v_p;
-    PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_MSGPAIR_PARENT_SM);
-    int ret;
-    uint64_t oid;
-
-    status = osd_errno_from_status(
-        sm_p->msgarray_op.msgarray[index].osd_command.status);
-
-    if (status != 0)
-    {
-        PVFS_perror_gossip("osd io failure", status);
-        return status;
-    }
-
-    if(!(sm_p->u.io.mem_req->aggregate_size > KERNEL_BUFSIZE && sm_p->u.io.total_size)) {
-        ret = osd_command_attr_resolve(&sm_p->msgarray_op.msgpair.osd_command);
-        if (ret) {
-            osd_error_xerrno(ret, "%s: attr_resolve failed", __func__);
-        }
-
-        oid = get_ntohll(sm_p->msgarray_op.msgarray[index].osd_command.attr[0].val);
-
-        /* allocate memory for the data handles if we haven't already */
-        if (sm_p->u.create.datafile_handles == NULL)
-        {
-            sm_p->u.create.datafile_handles = (PVFS_handle *)malloc(
-                sm_p->u.io.datafile_count * sizeof(PVFS_handle));
-
-            if (sm_p->u.create.datafile_handles == NULL)
-            {
-                gossip_err("create: Failed to allocate data handle array\n");
-                return -PVFS_ENOMEM;
-            }
-            memset(sm_p->u.create.datafile_handles, 0,
-                   sm_p->u.io.datafile_count * sizeof(PVFS_handle));
-        }
-        sm_p->u.create.datafile_handles[index] = oid;
-    }
-
-    sm_p->u.io.total_size += sm_p->msgarray_op.msgpair.osd_command.outlen;
-
-    return 0;
-}
-
-static PINT_sm_action setattr_setup_msgpair(
-    struct PINT_smcb *smcb, job_status_s *js_p)
-{
-    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
-    int ret = -PVFS_EINVAL;
-    PINT_sm_msgpair_state *msg_p = NULL;
-
-    js_p->error_code = 0;
-
-    PINT_msgpair_init(&sm_p->msgarray_op);
-    msg_p = &sm_p->msgarray_op.msgpair;
-
-    PINT_SERVREQ_SETATTR_FILL(
-        msg_p->req,
-        *sm_p->cred_p,
-        sm_p->object_ref.fs_id,
-        sm_p->object_ref.handle,
-        PVFS_TYPE_METAFILE,
-        sm_p->u.create.attr,
-        PVFS_ATTR_META_ALL,
-        sm_p->hints);
-
-    msg_p->req.u.setattr.attr.u.meta.dfile_array =
-        sm_p->u.create.datafile_handles;
-    msg_p->req.u.setattr.attr.u.meta.dfile_count =
-        sm_p->u.io.datafile_count;
-    msg_p->req.u.setattr.attr.u.meta.dist =
-        sm_p->getattr.attr.u.meta.dist;
-    msg_p->req.u.setattr.attr.u.meta.dist_size =
-        PINT_DIST_PACK_SIZE(sm_p->getattr.attr.u.meta.dist);
-    msg_p->req.u.setattr.attr.owner = sm_p->getattr.attr.owner;
-    msg_p->req.u.setattr.attr.perms = sm_p->getattr.attr.perms;
-    msg_p->req.u.setattr.attr.mask = sm_p->getattr.attr.mask;
-
-    msg_p->fs_id = sm_p->object_ref.fs_id;
-    msg_p->handle = sm_p->object_ref.handle;
-    msg_p->retry_flag = PVFS_MSGPAIR_NO_RETRY;
-    msg_p->comp_fn = setattr_comp_fn;
-
-    ret = PINT_cached_config_map_to_server(
-        &msg_p->svr_addr, msg_p->handle, msg_p->fs_id);
-
-    if (ret)
-    {
-        gossip_err("Failed to map meta server address\n");
-        js_p->error_code = ret;
-    }
-
-    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
-    return SM_ACTION_COMPLETE;
-}
-
-static int setattr_comp_fn(void *v_p,
-                           struct PVFS_server_resp *resp_p,
-                           int index)
-{
-    int status;
-
-    gossip_debug(GOSSIP_CLIENT_DEBUG, "create_setattr_comp_fn\n");
-
-    assert(resp_p->op == PVFS_SERV_SETATTR);
-    status = resp_p->status;
-
-    return status;
-}
-
 /**
  * We assume that the response buffer hasn't been freed yet (before the
  * completion function is called.  The msgpairarray.sm doesn't free the
@@ -920,7 +632,6 @@ static int osd_io_completion_fn(
     struct PVFS_server_resp *resp_p __attribute__((unused)),
     int index)
 {
-    gossip_err("osd_io_completion_fn\n");
     struct PINT_smcb *smcb = user_args;
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_MSGPAIR_PARENT_SM);
     struct PINT_client_io_sm *io = &sm_p->u.io;
@@ -1014,7 +725,6 @@ static int osd_io_completion_fn(
 
 static int osd_io_maybe_xfer_more(struct PINT_smcb *smcb, job_status_s *js_p)
 {
-    gossip_err("osd_io_maybe_xfer_more\n");
     struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     struct PINT_client_io_sm *io = &sm_p->u.io;
     int i, more = 1;
@@ -1022,8 +732,6 @@ static int osd_io_maybe_xfer_more(struct PINT_smcb *smcb, job_status_s *js_p)
 
     gossip_debug(GOSSIP_IO_DEBUG, "%s: total %lld want %lld.\n", __func__,
                  lld(io->total_size), lld(io->mem_req->aggregate_size));
-    gossip_err("io->total_size: %d\n", io->total_size);
-    gossip_err("io->mem_req->aggregate_size: %d\n", io->mem_req->aggregate_size);
 
     if (io->total_size == io->mem_req->aggregate_size)
         more = 0;
diff --git a/src/client/sysint/sys-readdirplus.sm b/src/client/sysint/sys-readdirplus.sm
index 3c7f772..1fee7e0 100644
--- a/src/client/sysint/sys-readdirplus.sm
+++ b/src/client/sysint/sys-readdirplus.sm
@@ -595,7 +595,7 @@ static int readdirplus_fetch_attrs_comp_fn(void *v_p,
     gossip_debug(GOSSIP_LISTATTR_DEBUG, "readdirplus_fetch_attrs_comp_fn called\n");
 
     assert(resp_p->op == PVFS_SERV_LISTATTR);
-    
+
     /* Mark all handles in this server range as having failed a stat */
     if (sm_p->msgarray_op.msgarray[index].op_status != 0)
     {
@@ -636,7 +636,7 @@ static int readdirplus_fetch_attrs_comp_fn(void *v_p,
             }
         }
     }
-    
+
     /* if this is the last response, check all the status values
      * and return error codes if any requests failed
      */
@@ -760,7 +760,7 @@ static PINT_sm_action readdirplus_fetch_sizes_setup_msgpair(
     PINT_put_server_config_struct(server_config);
 
     PINT_msgpairarray_destroy(&sm_p->msgarray_op);
-    
+
     /* don't need sizes */
     if (!(sm_p->u.readdirplus.attrmask & PVFS_ATTR_META_ALL) &&
         !(sm_p->u.readdirplus.attrmask & PVFS_ATTR_DATA_SIZE)) {
diff --git a/src/proto/PINT-le-bytefield.c b/src/proto/PINT-le-bytefield.c
index c12ff06..ede00e3 100644
--- a/src/proto/PINT-le-bytefield.c
+++ b/src/proto/PINT-le-bytefield.c
@@ -519,9 +519,9 @@ static int lebf_encode_resp(
     /** we stand a good chance of segfaulting if we try to encode the response
      * after something bad happened reading data from disk.
      */
+
     if (resp->status == 0)
     {
-        
         /** extra encoding rules for particular responses */
         switch (resp->op)
         {
diff --git a/src/proto/PINT-reqproto-encode.c b/src/proto/PINT-reqproto-encode.c
index 3e9d259..a3e4386 100644
--- a/src/proto/PINT-reqproto-encode.c
+++ b/src/proto/PINT-reqproto-encode.c
@@ -108,7 +108,7 @@ int PINT_encode(void* input_buffer,
     int ret = -PVFS_EINVAL;
     target_msg->dest = target_addr;
     target_msg->enc_type = enc_type;
-    
+
     gossip_debug(GOSSIP_ENDECODE_DEBUG,"PINT_encode\n");
     switch(enc_type)
     {
diff --git a/src/server/create.sm b/src/server/create.sm
index a42aa56..07a7428 100644
--- a/src/server/create.sm
+++ b/src/server/create.sm
@@ -36,7 +36,6 @@ machine pvfs2_create_sm
     state create_metafile
     {
         run create_metafile;
-        OSD_MSGPAIR => setup_local_datafile_handles;
         success => check_stuffed;
         default => setup_final_response;
     }
@@ -58,7 +57,6 @@ machine pvfs2_create_sm
     state setup_local_datafile_handles
     {
         run setup_local_datafile_handles;
-        OSD_MSGPAIR => setup_resp;
         success => request_datafiles;
         default => remove_local_datafile_handles;
     }
@@ -191,11 +189,6 @@ static int create_metafile(
         &i,
         server_job_context, s_op->req->hints);
 
-    if(config->osd_type == OSD_DATAFILE)
-    {
-        js_p->error_code = OSD_MSGPAIR;
-    }
-
     return(ret);
 }
@@ -218,9 +211,15 @@ static int check_stuffed(
         llu(js_p->handle));
 
     assert(config);
-    
+
+    if(config->osd_type == OSD_DATAFILE)
+    {
+        js_p->error_code = 0;
+        return SM_ACTION_COMPLETE;
+    }
+
     layout = &s_op->req->u.create.layout;
-    
+
     if(layout->algorithm == PVFS_SYS_LAYOUT_LIST)
     {
         for(i=0; i<layout->server_list.count; i++)
@@ -249,7 +248,6 @@ static int check_stuffed(
     /* is this metadata server also IO? */
     svr_name = PINT_cached_config_map_addr(s_op->req->u.create.fs_id,
         myaddr, &server_type);
-
     if(!svr_name)
     {
         js_p->error_code = ret;
@@ -616,10 +614,12 @@ static PINT_sm_action setup_local_datafile_handles(
             cur = PINT_llist_next(cur);
         }
 
-        s_op->resp.u.create.datafile_count = 1;
+        s_op->u.create.num_io_servers = s_op->req->u.create.num_dfiles_req;
+        s_op->req->u.create.attr.u.meta.dfile_count = s_op->u.create.num_io_servers;
+        s_op->resp.u.create.datafile_count = s_op->u.create.num_io_servers;
+
         s_op->resp.u.create.datafile_handles = malloc(sizeof(PVFS_handle));
         s_op->resp.u.create.datafile_handles[0] = trove_handle_alloc_from_range(s_op->req->u.create.fs_id, &data_handle_ext_array);
-        js_p->error_code = OSD_MSGPAIR;
     }
     else
     {
@@ -643,6 +643,7 @@ static PINT_sm_action setup_local_datafile_handles(
         }
     }
 
+    js_p->error_code = 0;
     return SM_ACTION_COMPLETE;
 }
@@ -769,7 +770,7 @@ static PINT_sm_action setattr_setobj_attribs(
     dspace_a_p = &s_op->attr;
     a_p = &s_op->req->u.create.attr;
-    
+
     /*
      * Remember that mtime is versioned on disk! so convert it here..
      * It is better to do it here than change the PVFS_object_attr_overwrite_setable
@@ -790,6 +791,7 @@ static PINT_sm_action setattr_setobj_attribs(
        and specified by the mask value in the request; macro defined in
        pvfs2-storage.h
     */
+
     PVFS_object_attr_overwrite_setable(dspace_a_p, a_p);
 
     gossip_debug(
@@ -802,7 +804,7 @@ static PINT_sm_action setattr_setobj_attribs(
         llu(PINT_util_mkversion_time(dspace_a_p->mtime)),
         llu(dspace_a_p->ctime),
         (int)dspace_a_p->u.meta.dfile_count, (int)dspace_a_p->u.meta.dist_size);
-    
+
     /* translate attrs to storage attr format */
     ds_attr = &(s_op->ds_attr);
     PVFS_object_attr_to_ds_attr(dspace_a_p, ds_attr);
@@ -822,10 +824,6 @@ static int setup_resp(
 {
     struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
 
-    if (js_p->error_code == OSD_MSGPAIR) {
-        js_p->error_code = 0;
-    }
-
     if (js_p->error_code == 0)
     {
         s_op->resp.u.create.metafile_handle = js_p->handle;
diff --git a/src/server/final-response.sm b/src/server/final-response.sm
index ea48bf2..e882892 100644
--- a/src/server/final-response.sm
+++ b/src/server/final-response.sm
@@ -119,9 +119,10 @@ static PINT_sm_action final_response_send_resp(
     {
         gossip_lerr("Error: req_sched_release() failure; continuing...\n");
     }
-    
+
     ret = PINT_encode(&s_op->resp, PINT_ENCODE_RESP, &(s_op->encoded),
         s_op->addr, s_op->decoded.enc_type);
+
     if (ret < 0)
     {
         gossip_lerr("Error: PINT_encode() failure.\n");
diff --git a/src/server/list-attr.sm b/src/server/list-attr.sm
index 684f895..27ef8b2 100644
--- a/src/server/list-attr.sm
+++ b/src/server/list-attr.sm
@@ -83,7 +83,7 @@ static PINT_sm_action listattr_read_basic_attrs(
     int ret;
     struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
     job_id_t tmp_id;
-    
+
     s_op->u.listattr.ds_attr_a = (PVFS_ds_attributes *)
         calloc(s_op->req->u.listattr.nhandles * sizeof(PVFS_ds_attributes), 1);
     if (s_op->u.listattr.ds_attr_a == NULL)
@@ -133,7 +133,7 @@ static PINT_sm_action listattr_setup_getattr(
     s_op->u.listattr.parallel_sms = 0;
     js_p->error_code = 0;
-    
+
     for(i=0; i<s_op->req->u.listattr.nhandles; i++)
     {
         if(s_op->u.listattr.errors[i])
@@ -221,7 +221,7 @@ static PINT_sm_action listattr_interpret_getattrs(struct PINT_smcb *smcb,
     assert(s_op);
     assert(s_op->op == PVFS_SERV_LISTATTR);
-    
+
     gossip_debug(GOSSIP_SERVER_DEBUG, "listattr: trying to interpret "
         "results from %d nested parallel getattr machines.\n",
         s_op->u.listattr.parallel_sms);
@@ -246,7 +246,7 @@ static PINT_sm_action listattr_interpret_getattrs(struct PINT_smcb *smcb,
             }
         }
     }
-    
+
     /* if we reached this point, then we have a successful ack to send back;
      * set remaining response fields
      */
diff --git a/src/server/prelude.sm b/src/server/prelude.sm
index a72afac..36f05ab 100644
--- a/src/server/prelude.sm
+++ b/src/server/prelude.sm
@@ -89,7 +89,7 @@ static PINT_sm_action prelude_setup(
         ,smcb->base_frame,smcb->frame_count);
     int ret;
     struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
-    
+
     ret = PINT_server_req_get_object_ref(
         s_op->req, &s_op->target_fs_id, &s_op->target_handle);
     if( ret != 0 )