* [PATCH 0/2] libceph fixes
@ 2012-06-26 21:45 Alex Elder
2012-06-26 21:46 ` [PATCH 1/2] libceph: set peer name on con_open, not init Alex Elder
2012-06-26 21:46 ` [PATCH 2/2] libceph: close socket directly during ceph_con_close() Alex Elder
0 siblings, 2 replies; 3+ messages in thread
From: Alex Elder @ 2012-06-26 21:45 UTC (permalink / raw)
To: ceph-devel
These two patches from Sage fix two problems that have arisen
since the recent changes in the messenger code. I have
already reviewed them both.
[PATCH 1/2] libceph: set peer name on con_open, not init
[PATCH 2/2] libceph: close socket directly during ceph_con_close()
-Alex
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH 1/2] libceph: set peer name on con_open, not init
2012-06-26 21:45 [PATCH 0/2] libceph fixes Alex Elder
@ 2012-06-26 21:46 ` Alex Elder
2012-06-26 21:46 ` [PATCH 2/2] libceph: close socket directly during ceph_con_close() Alex Elder
1 sibling, 0 replies; 3+ messages in thread
From: Alex Elder @ 2012-06-26 21:46 UTC (permalink / raw)
To: ceph-devel
The peer name may change on each open attempt, even when the
connection is reused.
Signed-off-by: Sage Weil <sage@inktank.com>
Reviewed-by: Alex Elder <elder@inktank.com>
---
fs/ceph/mds_client.c | 7 ++++---
include/linux/ceph/messenger.h | 4 ++--
net/ceph/messenger.c | 12 +++++++-----
net/ceph/mon_client.c | 8 ++++----
net/ceph/osd_client.c | 10 ++++++----
5 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index ecd7f15..5ac6434 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -394,8 +394,7 @@ static struct ceph_mds_session
*register_session(struct ceph_mds_client *mdsc,
s->s_seq = 0;
mutex_init(&s->s_mutex);
- ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr,
- CEPH_ENTITY_TYPE_MDS, mds);
+ ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
spin_lock_init(&s->s_gen_ttl_lock);
s->s_cap_gen = 0;
@@ -437,7 +436,8 @@ static struct ceph_mds_session
*register_session(struct ceph_mds_client *mdsc,
mdsc->sessions[mds] = s;
atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
- ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+ ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
+ ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
return s;
@@ -2529,6 +2529,7 @@ static void send_mds_reconnect(struct
ceph_mds_client *mdsc,
session->s_seq = 0;
ceph_con_open(&session->s_con,
+ CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
/* replay unsafe requests */
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 002d504..cb9d38f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -232,9 +232,9 @@ extern void ceph_messenger_init(struct
ceph_messenger *msgr,
extern void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
- struct ceph_messenger *msgr, __u8 entity_type,
- __u64 entity_num);
+ struct ceph_messenger *msgr);
extern void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
struct ceph_entity_addr *addr);
extern bool ceph_con_opened(struct ceph_connection *con);
extern void ceph_con_close(struct ceph_connection *con);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 3eef039..fe42a36 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -523,12 +523,17 @@ EXPORT_SYMBOL(ceph_con_close);
/*
* Reopen a closed connection, with a new peer address.
*/
-void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr
*addr)
+void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
+ struct ceph_entity_addr *addr)
{
dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
set_bit(OPENING, &con->state);
WARN_ON(!test_and_clear_bit(CLOSED, &con->state));
+ con->peer_name.type = (__u8) entity_type;
+ con->peer_name.num = cpu_to_le64(entity_num);
+
memcpy(&con->peer_addr, addr, sizeof(*addr));
con->delay = 0; /* reset backoff memory */
queue_con(con);
@@ -548,7 +553,7 @@ bool ceph_con_opened(struct ceph_connection *con)
*/
void ceph_con_init(struct ceph_connection *con, void *private,
const struct ceph_connection_operations *ops,
- struct ceph_messenger *msgr, __u8 entity_type, __u64 entity_num)
+ struct ceph_messenger *msgr)
{
dout("con_init %p\n", con);
memset(con, 0, sizeof(*con));
@@ -558,9 +563,6 @@ void ceph_con_init(struct ceph_connection *con, void
*private,
con_sock_state_init(con);
- con->peer_name.type = (__u8) entity_type;
- con->peer_name.num = cpu_to_le64(entity_num);
-
mutex_init(&con->mutex);
INIT_LIST_HEAD(&con->out_queue);
INIT_LIST_HEAD(&con->out_sent);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index e9db3de..44b8526 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -142,12 +142,9 @@ static int __open_session(struct ceph_mon_client *monc)
monc->sub_renew_after = jiffies; /* i.e., expired */
monc->want_next_osdmap = !!monc->want_next_osdmap;
- ceph_con_init(&monc->con, monc, &mon_con_ops,
- &monc->client->msgr,
- CEPH_ENTITY_TYPE_MON, monc->cur_mon);
-
dout("open_session mon%d opening\n", monc->cur_mon);
ceph_con_open(&monc->con,
+ CEPH_ENTITY_TYPE_MON, monc->cur_mon,
&monc->monmap->mon_inst[monc->cur_mon].addr);
/* initiatiate authentication handshake */
@@ -798,6 +795,9 @@ int ceph_monc_init(struct ceph_mon_client *monc,
struct ceph_client *cl)
if (!monc->m_auth)
goto out_auth_reply;
+ ceph_con_init(&monc->con, monc, &mon_con_ops,
+ &monc->client->msgr);
+
monc->cur_mon = -1;
monc->hunting = true;
monc->sub_renew_after = jiffies;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index db2da54..c252711 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -639,8 +639,7 @@ static struct ceph_osd *create_osd(struct
ceph_osd_client *osdc, int onum)
INIT_LIST_HEAD(&osd->o_osd_lru);
osd->o_incarnation = 1;
- ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr,
- CEPH_ENTITY_TYPE_OSD, onum);
+ ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
INIT_LIST_HEAD(&osd->o_keepalive_item);
return osd;
@@ -750,7 +749,8 @@ static int __reset_osd(struct ceph_osd_client *osdc,
struct ceph_osd *osd)
ret = -EAGAIN;
} else {
ceph_con_close(&osd->o_con);
- ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+ ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+ &osdc->osdmap->osd_addr[osd->o_osd]);
osd->o_incarnation++;
}
return ret;
@@ -1005,7 +1005,9 @@ static int __map_request(struct ceph_osd_client *osdc,
dout("map_request osd %p is osd%d\n", req->r_osd, o);
__insert_osd(osdc, req->r_osd);
- ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+ ceph_con_open(&req->r_osd->o_con,
+ CEPH_ENTITY_TYPE_OSD, o,
+ &osdc->osdmap->osd_addr[o]);
}
if (req->r_osd) {
--
1.7.9.5
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH 2/2] libceph: close socket directly during ceph_con_close()
2012-06-26 21:45 [PATCH 0/2] libceph fixes Alex Elder
2012-06-26 21:46 ` [PATCH 1/2] libceph: set peer name on con_open, not init Alex Elder
@ 2012-06-26 21:46 ` Alex Elder
1 sibling, 0 replies; 3+ messages in thread
From: Alex Elder @ 2012-06-26 21:46 UTC (permalink / raw)
To: ceph-devel
When we close the connection, immediately shut down the socket
instead of queuing work and letting the worker thread do it. This
is simpler.
Signed-off-by: Sage Weil <sage@inktank.com>
Reviewed-by: Alex Elder <elder@inktank.com>
---
net/ceph/messenger.c | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index fe42a36..2f574d2 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -47,18 +47,18 @@
* | \ con_sock_state_connecting()
* | ----------------------
* | \
- * + con_sock_state_closed() \
- * |\ \
- * | \ \
- * | ----------- \
- * | | CLOSING | socket event; \
- * | ----------- await close \
- * | ^ |
- * | | |
- * | + con_sock_state_closing() |
- * | / \ |
- * | / --------------- |
- * | / \ v
+ * +--------------------------- \
+ * |\ con_sock_state_closed() \ \
+ * | \ \ \
+ * | ----------- \ \
+ * | | CLOSING | socket event; \ \
+ * | ----------- await close \ \
+ * | ^ \ |
+ * | | \ |
+ * | + con_sock_state_closing() \ |
+ * | / \ | |
+ * | / --------------- | |
+ * | / \ | v
* | / --------------
* | / -----------------| CONNECTING | socket created, TCP
* | | / -------------- connect initiated
@@ -241,7 +241,8 @@ static void con_sock_state_closed(struct
ceph_connection *con)
old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
- old_state != CON_SOCK_STATE_CLOSING))
+ old_state != CON_SOCK_STATE_CLOSING &&
+ old_state != CON_SOCK_STATE_CONNECTING))
printk("%s: unexpected old state %d\n", __func__, old_state);
}
@@ -514,9 +515,9 @@ void ceph_con_close(struct ceph_connection *con)
mutex_lock(&con->mutex);
reset_connection(con);
con->peer_global_seq = 0;
+ con_close_socket(con);
cancel_delayed_work(&con->work);
mutex_unlock(&con->mutex);
- queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
--
1.7.9.5
^ permalink raw reply related [flat|nested] 3+ messages in thread
end of thread, other threads:[~2012-06-26 21:46 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-06-26 21:45 [PATCH 0/2] libceph fixes Alex Elder
2012-06-26 21:46 ` [PATCH 1/2] libceph: set peer name on con_open, not init Alex Elder
2012-06-26 21:46 ` [PATCH 2/2] libceph: close socket directly during ceph_con_close() Alex Elder
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.