| Author | Date | QQ Tech Discussion Group |
|---|---|---|
| perrynzhou@gmail.com | 2024/11/11 | 672152841 |

Background
Before any request can be issued from a client, the client-to-server connection must be initialized. This article analyzes the osc->ost connection path. Before the detailed analysis, we captured the dump debug trace on both the client and the OST and extracted the state-machine information for a complete request; from the logs, the osc->ost connection goes through three phases:
[perrynzhou@Mac:~/Desktop/lustre-logs/normal-logs]$ cat normal_client.log |grep 00000000eeecae5c|grep ptlrpc_rqphase_move |grep OST0001
00000100:00000040:0.0:1730281611.387123:2208:5213:0:(lustre_net.h:2401:ptlrpc_rqphase_move()) @@@ move request phase from New to Rpc req@00000000eeecae5c x1814197796865216/t0(0) o8->bigfs-OST0001-osc-ffff0000f8945000@10.211.55.4@tcp:28/4 lens 520/544 e 0 to 0 dl 0 ref 1 fl New:NQU/0/ffffffff rc 0/-1 job:''
00000100:00000040:0.0:1730281611.390364:2032:5213:0:(lustre_net.h:2401:ptlrpc_rqphase_move()) @@@ move request phase from Rpc to Interpret req@00000000eeecae5c x1814197796865216/t0(0) o8->bigfs-OST0001-osc-ffff0000f8945000@10.211.55.4@tcp:28/4 lens 520/416 e 0 to 0 dl 1730281616 ref 1 fl Rpc:RNQU/0/0 rc 0/0 job:''
00000100:00000040:0.0:1730281611.390396:2032:5213:0:(lustre_net.h:2401:ptlrpc_rqphase_move()) @@@ move request phase from Interpret to Complete req@00000000eeecae5c x1814197796865216/t0(0) o8->bigfs-OST0001-osc-ffff0000f8945000@10.211.55.4@tcp:28/4 lens 520/416 e 0 to 0 dl 1730281616 ref 1 fl Interpret:RNQU/0/0 rc 0/0 job:''
Basic Concepts
Request handling between client and server is driven by a state machine; every request has a corresponding processing phase. The phases are defined as follows:
// [perrynzhou] - definition of the request-processing phases in ptlrpc
enum rq_phase
{
	// (perrynzhou) - request resources have been allocated (new request)
	RQ_PHASE_NEW = 0xebc0de00,
	// (perrynzhou) - request is being prepared and sent
	RQ_PHASE_RPC = 0xebc0de01,
	RQ_PHASE_BULK = 0xebc0de02,
	// (perrynzhou) - request has been sent; the reply is being interpreted
	RQ_PHASE_INTERPRET = 0xebc0de03,
	// (perrynzhou) - request processing is complete
	RQ_PHASE_COMPLETE = 0xebc0de04,
	RQ_PHASE_UNREG_RPC = 0xebc0de05,
	RQ_PHASE_UNREG_BULK = 0xebc0de06,
	RQ_PHASE_UNDEFINED = 0xebc0de07
};
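To make the state machine concrete, here is a minimal user-space sketch (not the actual Lustre code) that maps the phase magic numbers above to the short names that appear in trace lines such as "move request phase from New to Rpc". The helper phase_name() is purely illustrative.

#include <stdio.h>

enum rq_phase {
	RQ_PHASE_NEW        = 0xebc0de00,
	RQ_PHASE_RPC        = 0xebc0de01,
	RQ_PHASE_BULK       = 0xebc0de02,
	RQ_PHASE_INTERPRET  = 0xebc0de03,
	RQ_PHASE_COMPLETE   = 0xebc0de04,
	RQ_PHASE_UNREG_RPC  = 0xebc0de05,
	RQ_PHASE_UNREG_BULK = 0xebc0de06,
	RQ_PHASE_UNDEFINED  = 0xebc0de07,
};

/* hypothetical helper: map a phase value to the short name seen in the logs */
static const char *phase_name(enum rq_phase p)
{
	switch (p) {
	case RQ_PHASE_NEW:        return "New";
	case RQ_PHASE_RPC:        return "Rpc";
	case RQ_PHASE_BULK:       return "Bulk";
	case RQ_PHASE_INTERPRET:  return "Interpret";
	case RQ_PHASE_COMPLETE:   return "Complete";
	case RQ_PHASE_UNREG_RPC:  return "UnregRPC";
	case RQ_PHASE_UNREG_BULK: return "UnregBULK";
	default:                  return "?";
	}
}

int main(void)
{
	/* the three transitions observed in the client log above */
	enum rq_phase chain[] = { RQ_PHASE_NEW, RQ_PHASE_RPC,
				  RQ_PHASE_INTERPRET, RQ_PHASE_COMPLETE };

	for (int i = 0; i + 1 < 4; i++)
		printf("move request phase from %s to %s\n",
		       phase_name(chain[i]), phase_name(chain[i + 1]));
	return 0;
}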
Whenever the client sends a request or receives a reply, and whenever the server receives a request or sends a reply, the Lustre lower layers run a corresponding callback function:
// [perrynzhou] - callback dispatch at the ptlrpc layer
static void ptlrpc_master_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
	void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;
	// (perrynzhou) - callbacks run on the sending side: request_out_callback - after the request has been sent; reply_in_callback - when the peer's reply arrives
	// (perrynzhou) - callbacks run on the receiving side: request_in_callback - when a request arrives; reply_out_callback - after the reply has been sent
	// (perrynzhou) - request_out_callback is installed via: ptlrpc_request_bufs_pack->request_out_callback
	// (perrynzhou) - reply_in_callback is installed via: ptlrpc_request_bufs_pack->reply_in_callback
	// (perrynzhou) - request_in_callback is installed via: ost_setup->ptlrpc_register_service (initializes ost->ost_service)->ptlrpc_service_part_init->ptlrpc_grow_req_bufs->ptlrpc_alloc_rqbd->request_in_callback
	// (perrynzhou) - reply_out_callback is installed via: tgt_request_handle->tgt_handle_request0->req_capsule_server_pack->lustre_pack_reply->lustre_pack_reply_flags->lustre_pack_reply_v2->reply_out_callback
	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
		|| callback == server_bulk_callback
#endif
		);
	// (perrynzhou) - invoke the callback
	callback(ev);
	if (ev->unlinked)
		percpu_ref_put(&ptlrpc_pending);
}
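The pattern above is a plain callback-id dispatch: each LNet memory descriptor carries a struct ptlrpc_cb_id, and one shared handler later invokes whatever cbid_fn was recorded in it. A minimal user-space model of that pattern, with illustrative types and names rather than the real Lustre/LNet definitions:

#include <stdio.h>

/* illustrative stand-ins for the LNet/ptlrpc types */
struct event {
	void *user_ptr;   /* plays the role of lnet_event::md_user_ptr */
	int   status;
};

struct cb_id {
	void (*fn)(struct event *ev);  /* like ptlrpc_cb_id::cbid_fn  */
	void *arg;                     /* like ptlrpc_cb_id::cbid_arg */
};

static void request_out(struct event *ev)
{
	struct cb_id *cbid = ev->user_ptr;

	printf("request sent, status=%d, req=%p\n", ev->status, cbid->arg);
}

/* one master handler, like ptlrpc_master_callback(): fan out to the
 * callback recorded in the cb_id attached to the event */
static void master_callback(struct event *ev)
{
	struct cb_id *cbid = ev->user_ptr;

	cbid->fn(ev);
}

int main(void)
{
	int fake_req = 0;
	struct cb_id cbid = { .fn = request_out, .arg = &fake_req };
	struct event ev = { .user_ptr = &cbid, .status = 0 };

	master_callback(&ev);   /* prints via request_out() */
	return 0;
}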
Client requests are sent by the ptlrpcd kernel threads on the client (or server) side: a request is first queued onto a ptlrpcd request queue, and a ptlrpcd thread then takes it off the queue and sends it to the peer.
/************** Queue the request onto ptlrpcd **************/
// (perrynzhou) - the request is added to the ptlrpcd queue; ptlrpcd then sends it
ptlrpcd_add_req(request);
// [perrynzhou] - the ptlrpcd kernel thread is responsible for sending RPCs
static int ptlrpcd(void *arg)
{
	do {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		ptlrpcd_check(&env, pc)
		{
			// update and handle the processing state of each request
			ptlrpc_check_set(env, set)
			{
				// (perrynzhou) - a new RPC is sent from here
				/*
				if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
				{
					force_timer_recalc = 1;
				}
				*/
				// when req->rq_phase == RQ_PHASE_NEW the request is sent
				// (perrynzhou) - a new RPC is sent from here
				ptlrpc_send_new_req(req)
				{
					ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
					// (perrynzhou) - send the RPC request
					rc = ptl_send_rpc(req, 0)
					{
						struct lnet_md md;
						// (perrynzhou) - install the ptlrpc_handler callback
						md.handler = ptlrpc_handler;
						// (perrynzhou) - send the packet
						rc = ptl_send_buf(&request->rq_req_md_h,
							request->rq_reqbuf, request->rq_reqdata_len,
							LNET_NOACK_REQ, &request->rq_req_cbid,
							LNET_NID_ANY,
							lnet_pid_to_pid4(&connection->c_peer),
							request->rq_request_portal,
							request->rq_xid, 0, &bulk_cookie);
						// (perrynzhou) - callback run after the request is sent; the event type is LNET_EVENT_SEND
						request_out_callback(...)
						{
							req->rq_req_unlinked = 1;
							if (wakeup)
								ptlrpc_client_wake_req(req);
						}
						request->rq_req_unlinked = 1;
						ptlrpc_req_finished(request);
					}
				}
				// [perrynzhou] - callback run on the client when the reply arrives
				reply_in_callback(struct lnet_event *ev)
				{
					// (perrynzhou) - incoming reply; the event type is LNET_EVENT_PUT, status == 0 means it was received successfully
					DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
					// (perrynzhou) - update the request state once the reply has arrived
					req->rq_rep_swab_mask = 0;
					req->rq_replied = 1;
					req->rq_resend = 0;
					req->rq_reply_off = ev->offset;
					req->rq_nob_received = ev->mlength;
					DEBUG_REQ(D_INFO, req,
						  "reply in flags=%x mlen=%u offset=%d replen=%d",
						  lustre_msg_get_flags(req->rq_reqmsg),
						  ev->mlength, ev->offset, req->rq_replen);
				}
			}
		}
	} while (/* thread not stopping */);
}
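As a mental model of what ptlrpcd()/ptlrpc_check_set() do above, the following self-contained user-space sketch (purely illustrative types, not the Lustre ones) drains a set of requests and walks each one through the New -> Rpc -> Interpret -> Complete phases seen in the trace:

#include <stdio.h>

enum phase { NEW, RPC, INTERPRET, COMPLETE };

struct request {
	int        id;
	enum phase phase;
	int        replied;   /* set to 1 once the (simulated) reply arrives */
};

/* simulate ptlrpc_send_new_req(): New -> Rpc, request goes on the wire */
static void send_new_req(struct request *req)
{
	req->phase = RPC;
	printf("req %d: New -> Rpc (sent)\n", req->id);
	req->replied = 1;   /* pretend the reply comes back immediately */
}

/* simulate one pass of ptlrpc_check_set() over the request set */
static int check_set(struct request *set, int n)
{
	int done = 0;

	for (int i = 0; i < n; i++) {
		struct request *req = &set[i];

		if (req->phase == NEW)
			send_new_req(req);
		if (req->phase == RPC && req->replied) {
			req->phase = INTERPRET;
			printf("req %d: Rpc -> Interpret\n", req->id);
		}
		if (req->phase == INTERPRET) {
			req->phase = COMPLETE;
			printf("req %d: Interpret -> Complete\n", req->id);
		}
		if (req->phase == COMPLETE)
			done++;
	}
	return done == n;
}

int main(void)
{
	struct request set[2] = { { .id = 1, .phase = NEW },
				  { .id = 2, .phase = NEW } };

	/* the ptlrpcd-style loop: keep checking the set until all are done */
	while (!check_set(set, 2))
		;
	return 0;
}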
osc->ost Connection Initialization Analysis
Client Initiates Connection Initialization
The client issues the connect request to the ost at mount time. The call chain below traces the whole path from mount down to the osc-layer functions.
static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
				   const char *devname, void *data)
{
	// during mount -t lustre the real work is done by lustre_fill_super
	return mount_nodev(fs_type, flags, data, lustre_fill_super)
	{
		// (perrynzhou) - connect to the MGS to fetch the Lustre filesystem configuration
		rc = lustre_start_mgc(sb);
		// (perrynzhou) - initialize the client-side services (connect mdc, osc, ...)
		rc = ll_fill_super(sb)
		{
			// (perrynzhou) - Lustre config llog handling
			cfg->cfg_instance = cfg_instance;
			cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
			// (perrynzhou) - callback that processes the Lustre configuration
			cfg->cfg_callback = class_config_llog_handler;
			cfg->cfg_sub_clds = CONFIG_SUB_CLIENT;
			/* set up client obds */
			err = lustre_process_log(sb, profilenm, cfg)
			{
				// (perrynzhou) - lcfg processing
				rc = obd_process_config(mgc, sizeof(*lcfg), lcfg)
				{
					// obd_process_config actually calls mgc_obd_ops.o_process_config; the call stack is:
					mgc_process_config
					mgc_process_log
					mgc_process_cfg_log
					class_config_parse_llog
					class_config_llog_handler
					class_process_config
					class_setup
					obd_setup
					osc_setup
					osc_setup_common
					client_obd_setup -> (connect_op = OST_CONNECT;)
				}
			}
			// (perrynzhou) - initialize mdc and osc
			err = client_common_fill_super(sb, md, dt)
			{
				// (perrynzhou) - osc devices are set up through the lov layer; the obd_connect macro resolves to lov_obd_ops.o_connect, i.e. lov_connect
				// err = obd_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd, &sbi->ll_sb_uuid, data, sbi->ll_cache);
				err = lov_connect(NULL, &sbi->ll_dt_exp, sbi->ll_dt_obd, &sbi->ll_sb_uuid, data, sbi->ll_cache)
				{
					// (perrynzhou) - initialize the export
					rc = class_connect(&conn, obd, cluuid);
					*exp = class_conn2export(&conn);
					// (perrynzhou) - set up one osc per OST
					for (i = 0; i < lov->desc.ld_tgt_count; i++)
					{
						tgt = lov->lov_tgts[i];
						// (perrynzhou) - osc initialization
						rc = lov_connect_osc(obd, i, tgt->ltd_activate, &lov->lov_ocd)
						{
							// (perrynzhou) - at the osc layer, osc_obd_ops.o_connect = client_connect_import
							// rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd, &lov_osc_uuid, data, lov->lov_cache)
							rc = client_connect_import(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd, &lov_osc_uuid, data, ...)
						}
					}
				}
			}
		}
	}
}
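The recurring pattern in this chain is that obd_connect() is simply a dispatch through each layer's obd_ops function table (lov_obd_ops.o_connect, osc_obd_ops.o_connect, and later ofd_obd_ops.o_connect on the server). A minimal user-space model of that dispatch, with illustrative names only:

#include <stdio.h>

/* illustrative stand-in for struct obd_ops: each layer fills in o_connect */
struct obd_ops_model {
	const char *name;
	int (*o_connect)(const char *target);
};

static int lov_connect_model(const char *target)
{
	printf("lov layer: fan out connect to %s\n", target);
	return 0;
}

static int osc_connect_model(const char *target)
{
	printf("osc layer: build OST_CONNECT request for %s\n", target);
	return 0;
}

/* plays the role of the obd_connect() macro: call through the ops table */
static int obd_connect_model(struct obd_ops_model *ops, const char *target)
{
	return ops->o_connect(target);
}

int main(void)
{
	struct obd_ops_model lov_ops = { .name = "lov", .o_connect = lov_connect_model };
	struct obd_ops_model osc_ops = { .name = "osc", .o_connect = osc_connect_model };

	obd_connect_model(&lov_ops, "bigfs-OST0001");  /* lov_connect-style path */
	obd_connect_model(&osc_ops, "bigfs-OST0001");  /* client_connect_import-style path */
	return 0;
}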
Client Sends the Connect Request
Starting at the lov layer, the connect request is built and added to the work queue of the ptlrpcd kernel thread.
rc = client_connect_import(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd, &lov_osc_uuid, data, lov->lov_cache)
{
	// (perrynzhou) - initialize the export for a single OST
	rc = class_connect(&conn, obd, cluuid);
	*exp = class_conn2export(&conn);
	// (perrynzhou) - set the import state to imp->imp_state = LUSTRE_IMP_NEW
	rc = ptlrpc_init_import(imp);
	// (perrynzhou) - connect the client-side import to the server; the import is the entry point used to send messages
	rc = ptlrpc_connect_import(imp)
	{
		return ptlrpc_connect_import_locked(imp)
		{
			// (perrynzhou) - pack the request and install the "request sent" and "reply received" callbacks
			rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION, ...)
			{
				request->rq_send_state = LUSTRE_IMP_FULL;
				request->rq_type = PTL_RPC_MSG_REQUEST;
				// (perrynzhou) - callback run after the request has been sent
				request->rq_req_cbid.cbid_fn = request_out_callback;
				request->rq_req_cbid.cbid_arg = request;
				// (perrynzhou) - callback run when the reply arrives
				request->rq_reply_cbid.cbid_fn = reply_in_callback;
				request->rq_reply_cbid.cbid_arg = request;
				request->rq_reply_deadline = 0;
				request->rq_bulk_deadline = 0;
				request->rq_req_deadline = 0;
				request->rq_phase = RQ_PHASE_NEW;
				request->rq_next_phase = RQ_PHASE_UNDEFINED;
				request->rq_request_portal = imp->imp_client->cli_request_portal;
				request->rq_reply_portal = imp->imp_client->cli_reply_portal;
				ptlrpc_at_set_req_timeout(request);
				// (perrynzhou) - the import opcode was set during osc setup: osc_device_alloc->osc_setup->osc_setup_common->client_obd_setup sets connect_op = OST_CONNECT
				lustre_msg_set_opc(request->rq_reqmsg, opcode);
			}
			// (perrynzhou) - add the request to the ptlrpcd queue; ptlrpcd then sends it
			ptlrpcd_add_req(request);
		}
	}
}
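ptlrpc_request_bufs_pack() above wires two callback ids onto the same request: rq_req_cbid fires when the send completes, and rq_reply_cbid fires when the reply arrives. The sketch below models one request carrying two callback ids triggered by two separate events; the types and names are illustrative only:

#include <stdio.h>

struct cb_id {
	void (*fn)(void *arg);
	void *arg;
};

/* illustrative request: one descriptor, two callback ids, as wired in
 * ptlrpc_request_bufs_pack() */
struct request_model {
	const char  *name;
	struct cb_id req_cbid;     /* like rq_req_cbid   -> request_out_callback */
	struct cb_id reply_cbid;   /* like rq_reply_cbid -> reply_in_callback    */
};

static void request_out_model(void *arg)
{
	printf("%s: request has left the node\n", ((struct request_model *)arg)->name);
}

static void reply_in_model(void *arg)
{
	printf("%s: reply has arrived\n", ((struct request_model *)arg)->name);
}

int main(void)
{
	struct request_model req = { .name = "OST_CONNECT" };

	/* wiring is done once, at pack time */
	req.req_cbid.fn    = request_out_model;
	req.req_cbid.arg   = &req;
	req.reply_cbid.fn  = reply_in_model;
	req.reply_cbid.arg = &req;

	/* later, two separate network events fire the two callbacks */
	req.req_cbid.fn(req.req_cbid.arg);       /* send completion */
	req.reply_cbid.fn(req.reply_cbid.arg);   /* reply arrival   */
	return 0;
}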
The ptlrpcd kernel thread takes the request off the work queue and sends it to the peer.
// [perrynzhou] - the ptlrpcd kernel thread is responsible for sending RPCs
static int ptlrpcd(void *arg)
{
	do {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		ptlrpcd_check(&env, pc)
		{
			// update and handle the processing state of each request
			ptlrpc_check_set(env, set)
			{
				// (perrynzhou) - a new RPC is sent from here
				/*
				if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_send_new_req(req))
				{
					force_timer_recalc = 1;
				}
				*/
				// when req->rq_phase == RQ_PHASE_NEW the request is sent
				// (perrynzhou) - a new RPC is sent from here
				ptlrpc_send_new_req(req)
				{
					ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
					// (perrynzhou) - send the RPC request
					rc = ptl_send_rpc(req, 0)
					{
						struct lnet_md md;
						// (perrynzhou) - install the ptlrpc_handler callback
						md.handler = ptlrpc_handler;
						// (perrynzhou) - send the packet
						rc = ptl_send_buf(&request->rq_req_md_h,
							request->rq_reqbuf, request->rq_reqdata_len,
							LNET_NOACK_REQ, &request->rq_req_cbid,
							LNET_NID_ANY,
							lnet_pid_to_pid4(&connection->c_peer),
							request->rq_request_portal,
							request->rq_xid, 0, &bulk_cookie);
						// (perrynzhou) - callback run after the request is sent; the event type is LNET_EVENT_SEND
						request_out_callback(...)
						{
							req->rq_req_unlinked = 1;
							if (wakeup)
								ptlrpc_client_wake_req(req);
						}
						request->rq_req_unlinked = 1;
						ptlrpc_req_finished(request);
					}
				}
				// [perrynzhou] - callback run on the client when the reply arrives
				reply_in_callback(struct lnet_event *ev)
				{
					// (perrynzhou) - incoming reply; the event type is LNET_EVENT_PUT, status == 0 means it was received successfully
					DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
					// (perrynzhou) - update the request state once the reply has arrived
					req->rq_rep_swab_mask = 0;
					req->rq_replied = 1;
					req->rq_resend = 0;
					req->rq_reply_off = ev->offset;
					req->rq_nob_received = ev->mlength;
					DEBUG_REQ(D_INFO, req,
						  "reply in flags=%x mlen=%u offset=%d replen=%d",
						  lustre_msg_get_flags(req->rq_reqmsg),
						  ev->mlength, ev->offset, req->rq_replen);
				}
			}
		}
	} while (/* thread not stopping */);
}
Server Handles the Request
Below is the initialization path of the request-listening service on the ost side. Once the ost has finished initializing, it waits for requests to arrive and handles them through so_req_handler.
/******************* step-2.1: the server listens for and handles requests *******************/
// [perrynzhou] - ost-side service initialization
static int ost_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
	// (perrynzhou) - svc_conf, the configuration of the ost service threads
	svc_conf = (typeof(svc_conf)){
		.psc_name = LUSTRE_OSS_NAME,
		.psc_ops = {
			.so_req_handler = tgt_request_handle,
			.so_req_printer = target_print_req,
			.so_hpreq_handler = ptlrpc_hpreq_handler,
		},
	};
	// (perrynzhou) - start the ost service threads
	ost->ost_service = ptlrpc_register_service(&svc_conf, &obd->obd_kset, obd->obd_debugfs_entry)
	{
		// (perrynzhou) - start and manage the kernel service threads
		rc = ptlrpc_start_threads(service)
		{
			// (perrynzhou) - start a kernel thread
			rc = ptlrpc_start_thread(svc->srv_parts[i], ...)
			{
				// (perrynzhou) - start the Lustre kernel thread that waits for requests to arrive
				task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name)
				{
					static int ptlrpc_main(void *arg)
					{
						// (perrynzhou) - receive requests, pre-process and process them
						while (!ptlrpc_thread_stopping(thread))
						{
							// (perrynzhou) - sleep on the wait queue until a request arrives
							//if (ptlrpc_wait_event(svcpt, thread))
							//{
							//	break;
							//}
							// (perrynzhou) - sleep on the wait queue until a request arrives
							ptlrpc_wait_event(svcpt, thread)
							{
								wait_event_idle_exclusive_lifo(svcpt->scp_waitq, ...);
							}
							// (perrynzhou) - request pre-processing
							if (ptlrpc_server_request_incoming(svcpt))
							{
								// (perrynzhou) - pre-processing stage
								ptlrpc_server_handle_req_in(svcpt, thread);
							}
							// (perrynzhou) - request processing
							if (ptlrpc_server_request_pending(svcpt, false))
							{
								// (perrynzhou) - handle the RPC request
								ptlrpc_server_handle_request(svcpt, thread)
								{
									// (perrynzhou) - handler invoked when an RPC request arrives
									// (perrynzhou) - on the Lustre server so_req_handler is normally tgt_request_handle
									// (perrynzhou) - for opc=OST_CONNECT, so_req_handler=tgt_request_handle
									svc->srv_ops.so_req_handler(request);
									// move the RPC to the Complete phase
									ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
								}
							}
						}
					}
				}
			}
		}
	}
}
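A side note on the svc_conf assignment above: it uses a GNU C typeof() compound literal with designated initializers, so the whole service configuration, including the nested ops table, is filled in one expression and every omitted field is zeroed. A standalone illustration of the same idiom with made-up field names:

#include <stdio.h>

struct ops_model {
	void (*req_handler)(void);
	void (*req_printer)(void);
};

struct svc_conf_model {
	const char *name;
	struct ops_model ops;
	int nthreads;              /* left out below, so it becomes 0 */
};

static void handle(void) { printf("handle one request\n"); }

int main(void)
{
	struct svc_conf_model svc_conf;

	/* same idiom as ost_setup(): assign a compound literal; unnamed
	 * fields (nthreads, ops.req_printer) are zero-initialized */
	svc_conf = (typeof(svc_conf)){
		.name = "oss",
		.ops  = {
			.req_handler = handle,
		},
	};

	printf("%s nthreads=%d\n", svc_conf.name, svc_conf.nthreads);
	svc_conf.ops.req_handler();
	return 0;
}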
When a request arrives, request_in_callback is executed:
// [perrynzhou] - callback run on the server when a request comes in; it is bound when the request-handling service is started
// [perrynzhou] - request_in_callback is installed via: ost_setup->ptlrpc_register_service (initializes ost->ost_service)->ptlrpc_service_part_init->ptlrpc_grow_req_bufs->ptlrpc_alloc_rqbd->request_in_callback
//
// when a request arrives at the LNet layer, request_in_callback is called before ptlrpc_server_handle_req_in
void request_in_callback(struct lnet_event *ev)
{
	struct ptlrpc_request *req;
	if (ev->type == LNET_EVENT_PUT) {
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n", req, req->rq_xid, ev->mlength);
	}
	req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md_start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	ktime_get_real_ts64(&req->rq_arrival_time);
	/* Multi-Rail: keep track of both initiator and source NID. */
	req->rq_peer = lnet_pid_to_pid4(&ev->initiator);
	req->rq_source = lnet_pid_to_pid4(&ev->source);
	req->rq_self = lnet_nid_to_nid4(&ev->target.nid);
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	// (perrynzhou) - queue the request onto svcpt->scp_req_incoming
	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;
	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	// (perrynzhou) - wake up ptlrpc_main, the kernel thread waiting in ptlrpc_wait_event
	wake_up(&svcpt->scp_waitq);
}
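request_in_callback() is the producer half of a classic queue-and-wake pattern: it appends the request to scp_req_incoming and wakes the ptlrpc_main thread sleeping in ptlrpc_wait_event(). The user-space sketch below models that hand-off with a pthread mutex and condition variable instead of the kernel wait queue; everything here is illustrative:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* illustrative model of scp_req_incoming plus wake_up()/wait_event():
 * request_in_callback() is the producer, the ptlrpc_main-like worker
 * is the consumer */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int nreqs_incoming;   /* like svcpt->scp_nreqs_incoming */
static int stopping;

/* producer: plays the role of request_in_callback() */
static void request_in(void)
{
	pthread_mutex_lock(&lock);
	nreqs_incoming++;               /* "list_add_tail" stand-in          */
	pthread_cond_signal(&waitq);    /* wake_up(&svcpt->scp_waitq)        */
	pthread_mutex_unlock(&lock);
}

/* consumer: plays the role of ptlrpc_main() waiting in ptlrpc_wait_event() */
static void *service_main(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!stopping) {
		while (nreqs_incoming == 0 && !stopping)
			pthread_cond_wait(&waitq, &lock);   /* wait_event... */
		while (nreqs_incoming > 0) {
			nreqs_incoming--;
			pthread_mutex_unlock(&lock);
			printf("handling one incoming request\n");
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, service_main, NULL);
	request_in();                    /* a request "arrives"              */
	sleep(1);                        /* give the worker time to drain it */

	pthread_mutex_lock(&lock);
	stopping = 1;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}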
Server-side handling of the OST_CONNECT request:
// [perrynzhou] - the tgt_handler table used on the ost to dispatch requests
static struct tgt_handler ofd_tgt_handlers[] = {
	TGT_RPC_HANDLER(OST_FIRST_OPC, 0, OST_CONNECT, tgt_connect, &RQF_CONNECT, LUSTRE_OBD_VERSION),
}
// [perrynzhou] - the ofd obd operation table
const struct obd_ops ofd_obd_ops = {
	.o_owner = THIS_MODULE,
	.o_connect = ofd_obd_connect,
	.o_reconnect = ofd_obd_reconnect,
};
// [perrynzhou] - tgt_request_handle is the entry point for request handling on the OST side; it receives the incoming RPCs
int tgt_request_handle(struct ptlrpc_request *req)
{
	req_capsule_set(&req->rq_pill, &RQF_CONNECT);
	// (perrynzhou) - back-end handling of XXX_CONNECT
	rc = target_handle_connect(req)
	{
		// (perrynzhou) - parse the data sent by the client
		struct obd_connect_data *data = req_capsule_client_get(&req->rq_pill, &RMF_CONNECT_DATA);
		struct obd_export *export = obd_uuid_lookup(target, &cluuid);
		// (perrynzhou) - export initialization
		// OBP(obd, connect) actually calls ofd_obd_ops.o_connect
		rc = obd_connect(req->rq_svc_thread->t_env,
				 &export, target, &cluuid, data,
				 client_nid)
		{
			// (perrynzhou) - get the ofd device
			ofd = ofd_dev(obd->obd_lu_dev);
			// (perrynzhou) - export/obd initialization
			rc = class_connect(&conn, obd, cluuid)
			{
				// (perrynzhou) - export initialization
				struct obd_export *export = class_new_export(obd, cluuid);
			}
			// (perrynzhou) - bind the connection to the client export
			exp = class_conn2export(&conn);
			// (perrynzhou) - parse the connect data sent along with the client's NID
			rc = ofd_parse_connect_data(env, exp, data, true);
		}
		// (perrynzhou) - initialize the server-side reverse import that belongs to this export
		rc = rev_import_init(export);
		// (perrynzhou) - update the state of the export's reverse import
		rc = rev_import_reconnect(export, req);
	}
	// (perrynzhou) - server-side request-processing entry point
	rc = tgt_handle_request0(tsi, h, req)
	{
		// (perrynzhou) - the core of back-end request handling is building the reply
		// rc = h->th_act(tsi); for opc=OST_CONNECT, h->th_act points to tgt_connect
		rc = tgt_connect(tsi);
		// (perrynzhou) - record the committed transaction state in the reply
		target_committed_to_req(req);
		// (perrynzhou) - the request is done, send the reply
		target_send_reply(req, rc, tsi->tsi_reply_fail_id)
		{
			// (perrynzhou) - send the reply message
			target_send_reply_msg(req, rc, fail_id)
			{
				return ptlrpc_send_reply(req, PTLRPC_REPLY_MAYBE_DIFFICULT)
				{
					LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
					LASSERT(rs->rs_cb_id.cbid_arg == rs);
					// (perrynzhou) - send the reply; reply_out_callback runs once the send completes
					rc = ptl_send_buf(&rs->rs_md_h, ..., &rs->rs_cb_id, ...)
				}
			}
		}
	}
}
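The step from opc=OST_CONNECT to tgt_connect() above is a table lookup in ofd_tgt_handlers[] followed by an indirect call through th_act. A minimal sketch of that lookup pattern (the struct and helper below are illustrative, not the Lustre definitions):

#include <stdio.h>

#define OST_CONNECT 8   /* the opcode that appears as "o8" in the client log */

/* illustrative model of struct tgt_handler: opcode -> action */
struct handler_model {
	int opc;
	int (*th_act)(void);
	const char *name;
};

static int tgt_connect_model(void)
{
	printf("tgt_connect: build the connect reply\n");
	return 0;
}

static struct handler_model handlers[] = {
	{ .opc = OST_CONNECT, .th_act = tgt_connect_model, .name = "tgt_connect" },
};

/* plays the role of the lookup done before tgt_handle_request0() calls h->th_act(tsi) */
static int handle_opcode(int opc)
{
	for (unsigned int i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].opc == opc)
			return handlers[i].th_act();
	return -1;   /* unknown opcode */
}

int main(void)
{
	return handle_opcode(OST_CONNECT);
}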
Client Receives the Reply
lnet_handler_t ptlrpc_handler;
// [perrynzhou] - initialization of the ptlrpc-layer callback handle ptlrpc_handler
int ptlrpc_ni_init(void)
{
	// [perrynzhou] - bind the network-event callback
	ptlrpc_handler = ptlrpc_master_callback
	{
		// [perrynzhou] - callback dispatch at the ptlrpc layer
		static void ptlrpc_master_callback(struct lnet_event *ev)
		{
			struct ptlrpc_cb_id *cbid = ev->md_user_ptr;
			void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;
			LASSERT(callback == request_out_callback ||
				callback == reply_in_callback ||
				callback == client_bulk_callback ||
				callback == request_in_callback ||
				callback == reply_out_callback
#ifdef HAVE_SERVER_SUPPORT
				|| callback == server_bulk_callback
#endif
				);
			// (perrynzhou) - invoke the callback
			callback(ev);
		}
	}
	return 0;
}
// [perrynzhou] - callback run on the client when the reply arrives
void reply_in_callback(struct lnet_event *ev)
{
	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	/* Real reply */
	req->rq_rep_swab_mask = 0;
	req->rq_replied = 1;
	/* Got reply, no resend required */
	req->rq_resend = 0;
	req->rq_reply_off = ev->offset;
	req->rq_nob_received = ev->mlength;
	// (perrynzhou) - wake up the ptlrpcd thread that sent the request and is waiting for the reply
	ptlrpc_client_wake_req(req);
}
// [perrynzhou] - the ptlrpcd kernel thread is responsible for sending RPCs
static int ptlrpcd(void *arg)
{
	do {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		ptlrpcd_check(&env, pc)
		{
			// update and handle the processing state of each request
			ptlrpc_check_set(env, set)
			{
				// [perrynzhou] - the ptlrpc layer sends the request
				rc = ptl_send_rpc(req, 0);
				if (req->rq_phase == RQ_PHASE_RPC)
				{
					// when the LNet layer receives the reply, ptlrpc_master_callback triggers reply_in_callback
					reply_in_callback(...);
					// [perrynzhou] - process the reply
					req->rq_status = after_reply(req);
					// here req->rq_bulk is NULL, so the condition holds
					if (!req->rq_bulk || req->rq_status < 0)
					{
						ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
						GOTO(interpret, req->rq_status);
					}
				}
			}
		}
	} while (/* thread not stopping */);
	if (!list_empty(&set->set_requests))
		// [perrynzhou] - the core of ptlrpc_set_wait is sending the RPCs in the set and waiting for them
		// ptlrpc_client_wake_req wakes this wait so execution continues here
		ptlrpc_set_wait(&env, set);
	// the requests are complete
	complete(&pc->pc_finishing);
}




