package com.wpcertification.spi;
import java.io.IOException;
import java.util.Iterator;
import java.util.Locale;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import javax.portlet.GenericPortlet;
import javax.portlet.PortletException;
import javax.portlet.RenderRequest;
import javax.portlet.RenderResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.ibm.portal.ModelException;
import com.ibm.portal.model.PortletModelHome;
import com.ibm.portal.portletmodel.PortletDefinition;
import com.ibm.portal.portletmodel.WebApplication;
import com.ibm.portal.portletmodel.admin.AdminPortletModel;
import com.ibm.portal.portletmodel.admin.PortletDefinitionList;
import com.ibm.portal.portletmodel.admin.WebApplicationList;
public class PortletDeflistPortlet extends GenericPortlet{
PortletModelHome portletModelHome;
public void init() throws PortletException {
System.out.println("Entering PortalPOCPortlet.init()");
try {
InitialContext context = new InitialContext();
portletModelHome =(PortletModelHome) context.lookup("portal:service/model/PortletModel");
System.out.println("PortletModelHome " + portletModelHome);
} catch (NamingException e) {
e.printStackTrace(System.out);
}
System.out.println("Entering PortalPOCPortlet.init()");
}
protected void doView(RenderRequest request, RenderResponse response)
throws PortletException, IOException {
response.setContentType("text/html");
response.getWriter().println("**************** Portlet Web Applications ************
");
printPortletApplicationList(request,response);
response.getWriter().println("**************** Portlet Definitions ************
");
printPortletDefinitionList(request,response);
response.getWriter().println("**********************************************************************
");
}
public void printPortletApplicationList(RenderRequest request, RenderResponse response){
System.out.println("Entering PortalPOCPortlet.getObjectIdOfPortlet()");
try {
AdminPortletModel adminModel = portletModelHome.getPortletModelProvider().getAdminPortletModel((HttpServletRequest)request, (HttpServletResponse)response);
WebApplicationList webApplicationList = adminModel.getWebApplicationList();
Iterator webAppIt = webApplicationList.iterator();
while(webAppIt.hasNext()){
WebApplication webApplication = (WebApplication)webAppIt.next();
response.getWriter().println(webApplication.getObjectID() + " " + webApplication.getContextRoot() + "<br/>");
}
} catch (ModelException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
System.out.println("Exiting PortalPOCPortlet.getObjectIdOfPortlet()");
}
public void printPortletDefinitionList(RenderRequest request, RenderResponse response){
System.out.println("Entering PortalPOCPortlet.getObjectIdOfPortlet()");
try {
AdminPortletModel adminModel = portletModelHome.getPortletModelProvider().getAdminPortletModel((HttpServletRequest)request, (HttpServletResponse)response);
PortletDefinitionList portletDefinitionList = adminModel.getPortletDefinitionList();
Iterator portletDefinitionIt = portletDefinitionList.iterator();
while(portletDefinitionIt.hasNext()){
PortletDefinition portletDefinition = (PortletDefinition) portletDefinitionIt.next();
response.getWriter().println(portletDefinition.getObjectID().toString() + " " + portletDefinition.getObjectID().getUniqueName() + " " + portletDefinition.getTitle(new Locale("en","US")) + " " + portletDefinition.getDescription(new Locale("en","US")) + "<br/>");
}
} catch (ModelException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
System.out.println("Exiting PortalPOCPortlet.getObjectIdOfPortlet()");
}
}
Querying data about your environment
You can use the AdminPortletModel interface to find out more information about your portal installation, such as which web applications are installed on the portal; the PortletDefinitions represent all the portlets installed on the portal. I built the sample portlet above to demonstrate this.
Finding uniqueName from ObjectId of the portlet and other way round
When you're working with the Portal Model, you might need a way to figure out the unique name of a portlet from its ObjectID, and the other way around. I built this POC to do that.
import java.io.IOException;
import java.util.Iterator;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import javax.portlet.GenericPortlet;
import javax.portlet.PortletException;
import javax.portlet.PortletRequest;
import javax.portlet.PortletResponse;
import javax.portlet.RenderRequest;
import javax.portlet.RenderResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.ibm.portal.ModelException;
import com.ibm.portal.ObjectID;
import com.ibm.portal.model.PortletModelHome;
import com.ibm.portal.portletmodel.PortletDefinition;
import com.ibm.portal.portletmodel.admin.AdminPortletModel;
import com.ibm.portal.portletmodel.admin.PortletDefinitionList;
public class PortletUniqueNamePortlet extends GenericPortlet{
PortletModelHome portletModelHome;
public void init() throws PortletException {
System.out.println("Entering PortalPOCPortlet.init()");
try {
InitialContext context = new InitialContext();
portletModelHome =(PortletModelHome) context.lookup("portal:service/model/PortletModel");
System.out.println("PortletModelHome " + portletModelHome);
} catch (NamingException e) {
e.printStackTrace(System.out);
}
System.out.println("Entering PortalPOCPortlet.init()");
}
protected void doView(RenderRequest request, RenderResponse response)
throws PortletException, IOException {
response.setContentType("text/html");
response.getWriter().println("Object Id of the sitemap portlet " + getObjectIdOfPortlet(request, response, "wps.p.Sitemap"));
response.getWriter().println("
Unique Name of the sitemap portlet " + getUniqueNameOfPortlet(request, response, getObjectIDStr(getObjectIdOfPortlet(request, response, "wps.p.Sitemap"))));
}
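// ObjectID.toString() contains the serialized ID wrapped in single quotes;
// this helper extracts the portion between the first pair of quotes
// (an observed format, not a documented API contract)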
public static String getObjectIDStr(ObjectID objectID){
String temp = objectID.toString();
int firstInd = temp.indexOf("'");
String result = temp.substring(firstInd+1, temp.indexOf("'", firstInd+1 ));
return result;
}
public ObjectID getObjectIdOfPortlet(PortletRequest request, PortletResponse response, String portletUniqueName){
System.out.println("Entering PortalPOCPortlet.getObjectIdOfPortlet()");
try {
AdminPortletModel adminModel = portletModelHome.getPortletModelProvider().getAdminPortletModel((HttpServletRequest)request, (HttpServletResponse)response);
PortletDefinition portletDef = adminModel.getPortletDefinitionList().getLocator().findByUniqueName(portletUniqueName);
return portletDef.getObjectID();
} catch (ModelException e) {
e.printStackTrace();
}
System.out.println("Exiting PortalPOCPortlet.getObjectIdOfPortlet()");
return null;
}
public String getUniqueNameOfPortlet(PortletRequest request, PortletResponse response, String portletObjectId){
System.out.println("Entering PortalPOCPortlet.getUniqueNameOfPortlet()");
try {
AdminPortletModel adminModel = portletModelHome.getPortletModelProvider().getAdminPortletModel((HttpServletRequest)request, (HttpServletResponse)response);
PortletDefinitionList portletDefinitionList = adminModel.getPortletDefinitionList();
Iterator portletDefinitionIterator = portletDefinitionList.iterator();
while(portletDefinitionIterator.hasNext()){
PortletDefinition portletDefinition = (PortletDefinition) portletDefinitionIterator.next();
String currentObjectIdStr = getObjectIDStr(portletDefinition.getObjectID());
if(currentObjectIdStr.equals(portletObjectId)){
return portletDefinition.getObjectID().getUniqueName();
}
}
} catch (ModelException e) {
e.printStackTrace();
}
System.out.println("Not able to find portlet for given portletObjectId");
return null;
}
}
Redirecting user on login
One of the readers posted a comment asking how to redirect a user as soon as they log in, based on some condition, so I changed my SampleExplicitLoginFilter like this:
public class SampleExplicitLoginFilter implements ExplicitLoginFilter{
public void login(HttpServletRequest request, HttpServletResponse response,
String userId, char[] password, FilterChainContext portalLoginContext, Subject subject,
String realm, ExplicitLoginFilterChain chain) throws LoginException,
WSSecurityException, PasswordInvalidException,
UserIDInvalidException, AuthenticationFailedException,
AuthenticationException, SystemLoginException,
com.ibm.portal.auth.exceptions.LoginException {
System.out.println("Entering SamplExplicitLoginFilter.login()");
System.out.println("User Id " + userId);
System.out.println("Password " + String.valueOf(password));
System.out.println("Realm" + realm);
chain.login(request, response, userId, password, portalLoginContext, subject, realm);
if(request.getRemoteUser().equals("wasadmin"))
portalLoginContext.setRedirectURL("/wps/myportal/Administration");
System.out.println("Exiting SamplExplicitLoginFilter.login()");
}
public void destroy() {
}
public void init(SecurityFilterConfig arg0)
throws SecurityFilterInitException {
}
}
I am checking if the remote user is wasadmin; if yes, I redirect him to the /wps/myportal/Administration page.
Creating loginfilter for WebSphere Portal
The portal authentication filters are a set of plug-in points. You can use them to intercept or extend the portal login, logout, session timeout, and request processing by custom code, for example to redirect users to a specific URL.
The New Security API in WebSphere Portal talks about various ways to extend the login process. I wanted to play with the login filters, so I followed a few simple steps to build this solution.
First I created a SampleExplicitLoginFilter Java class like this:
public class SampleExplicitLoginFilter implements ExplicitLoginFilter{
public void login(HttpServletRequest request, HttpServletResponse response,
String userId, char[] password, FilterChainContext portalLoginContext, Subject subject,
String realm, ExplicitLoginFilterChain chain) throws LoginException,
WSSecurityException, PasswordInvalidException,
UserIDInvalidException, AuthenticationFailedException,
AuthenticationException, SystemLoginException,
com.ibm.portal.auth.exceptions.LoginException {
System.out.println("Entering SamplExplicitLoginFilter.login()");
System.out.println("User Id " + userId);
System.out.println("Password " + String.valueOf(password));
System.out.println("Realm" + realm);
chain.login(request, response, userId, password, portalLoginContext, subject, realm);
System.out.println("Exiting SamplExplicitLoginFilter.login()");
}
public void destroy() {
}
public void init(SecurityFilterConfig arg0)
throws SecurityFilterInitException {
}
}
This class only reads the userId and password, prints them to System.out, and lets control go to the next step in the chain.
Similarly, I created a sample filter for each of the other interfaces; you can download the sample application from here.
Then I built that project and copied it into the PortalServer/shared/app directory. I went to the WAS Admin Console and configured all my sample login filters like this.
After that I had to restart my server, but after the restart, when I tried logging into the portal, I could see that I got control in the LoginFilter and could write the userId and password used during login to System.out like this:
[12/17/09 10:52:51:198 PST] 0000002e SystemOut O Entering SamplExplicitLoginFilter.login()
[12/17/09 10:52:51:198 PST] 0000002e SystemOut O User Id wasadmin
[12/17/09 10:52:51:198 PST] 0000002e SystemOut O Password password
[12/17/09 10:52:51:198 PST] 0000002e SystemOut O Realmnull
[12/17/09 10:52:51:245 PST] 0000002e SystemOut O Exiting SamplExplicitLoginFilter.login()
WebSphere Portal 6.1.5 ships with Dojo 1.3.2
Starting from version 6.1.5, WebSphere Portal ships with Dojo 1.3.2 in addition to Dojo 1.1.1. Starting from Portal 6.1, Dojo 1.1.1 is shipped as part of wps.ear; to be precise, it is in the wp_profile\installedApps\sunpa\wps.ear\wps.war\themes\dojo\portal_dojo folder, and IBM kept it as is.
In order to include Dojo 1.3.2, IBM created a Dojo_Resources.ear file which contains the 1.3.2 version of Dojo.
The Dojo_Resources.ear has the new version of Dojo plus a few additional Dojo classes created by IBM; these classes implement some of IBM's client-side logic. This enterprise application does not have any Java classes, so it is used only to make the Dojo resources accessible.
As you can see, the Dojo_Resources.war is available at the /portal_dojo path, and the Dojo client-side theme loads Dojo from this path.
As you can see, the value of the baseUrl property in djConfig is /portal_dojo/dojo/, which means Dojo is loaded from this location. You can see that even tundra.css and the other Dijit-related resources are loaded from Dojo_Resources.war.
You can verify the Dojo version by looking at the dojo.version property. As you can see, we are using version 1.3.2.
Implementation of GenericPortlet
My first impression was that the specification document would be very complicated and that only a few people, or someone developing a portlet engine, would be able to understand it, but surprisingly both Portlet Specification 2.0 and Portlet Specification 1.0 are very well-written documents.
What I like to do is download the portlet specification, which has a .pdf document describing the specification and a src.zip file. That file has the source code for the javax.portlet files, i.e. the Java classes/interfaces defined by the specification. Reading through these gives a very good understanding of the specification.
Take a look at GenericPortlet.java, which is part of src.zip; it gives a very good understanding of the default GenericPortlet implementation, e.g. doDispatch() and how resource serving and action processing work:
package javax.portlet;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import javax.xml.namespace.QName;
public abstract class GenericPortlet implements Portlet, PortletConfig, EventPortlet, ResourceServingPortlet {
private transient PortletConfig config;
private transient Map<String, Method> processActionHandlingMethodsMap = new HashMap<String, Method>();
private transient Map<String, Method> processEventHandlingMethodsMap = new HashMap<String, Method>();
private transient Map<String, Method> renderModeHandlingMethodsMap = new HashMap<String, Method>();
public GenericPortlet() {
}
public void init(PortletConfig config) throws PortletException {
this.config = config;
cacheAnnotatedMethods();
this.init();
}
public void init() throws PortletException {
}
public void processAction(ActionRequest request, ActionResponse response) throws PortletException,
java.io.IOException {
String action = request.getParameter(ActionRequest.ACTION_NAME);
try {
// check if action is cached
Method actionMethod = processActionHandlingMethodsMap.get(action);
if (actionMethod != null) {
actionMethod.invoke(this, request, response);
return;
}
} catch (Exception e) {
throw new PortletException(e);
}
// if no action processing method was found throw exc
throw new PortletException("processAction method not implemented");
}
public void render(RenderRequest request, RenderResponse response) throws PortletException, java.io.IOException {
Object renderPartAttrValue = request.getAttribute(RenderRequest.RENDER_PART);
if (renderPartAttrValue != null) {
// streaming portal calling
if (renderPartAttrValue.equals(RenderRequest.RENDER_HEADERS)) {
doHeaders(request, response);
Collection nextModes = getNextPossiblePortletModes(request);
if (nextModes != null)
response.setNextPossiblePortletModes(nextModes);
response.setTitle(getTitle(request));
} else if (renderPartAttrValue.equals(RenderRequest.RENDER_MARKUP)) {
doDispatch(request, response);
} else {
throw new PortletException("Unknown value of the 'javax.portlet.render_part' request attribute");
}
} else {
// buffered portal calling
doHeaders(request, response);
Collection nextModes = getNextPossiblePortletModes(request);
if (nextModes != null)
response.setNextPossiblePortletModes(nextModes);
response.setTitle(getTitle(request));
doDispatch(request, response);
}
}
protected java.lang.String getTitle(RenderRequest request) {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getResourceBundle(request.getLocale()).getString("javax.portlet.title");
}
protected void doDispatch(RenderRequest request, RenderResponse response) throws PortletException,
java.io.IOException {
WindowState state = request.getWindowState();
if (!state.equals(WindowState.MINIMIZED)) {
PortletMode mode = request.getPortletMode();
// first look if there are methods annotated for
// handling the rendering of this mode
try {
// check if mode is cached
Method renderMethod = renderModeHandlingMethodsMap.get(mode.toString());
if (renderMethod != null) {
renderMethod.invoke(this, request, response);
return;
}
} catch (Exception e) {
throw new PortletException(e);
}
// if not, try the default doXYZ methods
if (mode.equals(PortletMode.VIEW)) {
doView(request, response);
} else if (mode.equals(PortletMode.EDIT)) {
doEdit(request, response);
} else if (mode.equals(PortletMode.HELP)) {
doHelp(request, response);
} else {
throw new PortletException("unknown portlet mode: " + mode);
}
}
}
protected void doView(RenderRequest request, RenderResponse response) throws PortletException, java.io.IOException {
throw new PortletException("doView method not implemented");
}
protected void doEdit(RenderRequest request, RenderResponse response) throws PortletException, java.io.IOException {
throw new PortletException("doEdit method not implemented");
}
protected void doHelp(RenderRequest request, RenderResponse response) throws PortletException, java.io.IOException {
throw new PortletException("doHelp method not implemented");
}
public PortletConfig getPortletConfig() {
return config;
}
public void destroy() {
// do nothing
}
public String getPortletName() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getPortletName();
}
public PortletContext getPortletContext() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getPortletContext();
}
public java.util.ResourceBundle getResourceBundle(java.util.Locale locale) {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getResourceBundle(locale);
}
public String getInitParameter(java.lang.String name) {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getInitParameter(name);
}
public java.util.Enumeration getInitParameterNames() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getInitParameterNames();
}
public Enumeration getProcessingEventQNames() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getProcessingEventQNames();
}
public Enumeration getPublishingEventQNames() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getPublishingEventQNames();
}
public Enumeration getSupportedLocales() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getSupportedLocales();
}
public Map getContainerRuntimeOptions() {
return config.getContainerRuntimeOptions();
}
public void serveResource(ResourceRequest request, ResourceResponse response) throws PortletException, IOException {
if (request.getResourceID() != null) {
PortletRequestDispatcher rd = getPortletConfig().getPortletContext().getRequestDispatcher(
request.getResourceID());
if (rd != null)
rd.forward(request, response);
}
}
public void processEvent(EventRequest request, EventResponse response) throws PortletException, IOException {
String eventName = request.getEvent().getQName().toString();
try {
// check for exact match
Method eventMethod = processEventHandlingMethodsMap.get(eventName);
if (eventMethod != null) {
eventMethod.invoke(this, request, response);
return;
} else {
// Search for the longest possible matching wildcard annotation
int endPos = eventName.indexOf('}');
int dotPos = eventName.lastIndexOf('.');
while (dotPos > endPos) {
String wildcardLookup = eventName.substring(0, dotPos + 1);
eventMethod = processEventHandlingMethodsMap.get(wildcardLookup);
if (eventMethod != null) {
eventMethod.invoke(this, request, response);
return;
}
if (dotPos == 0) {
break;
}
dotPos = eventName.lastIndexOf('.', dotPos - 1);
}
}
} catch (Exception e) {
throw new PortletException(e);
}
// if no event processing method was found just keep render params
response.setRenderParameters(request);
}
protected void doHeaders(RenderRequest request, RenderResponse response) {
return;
}
protected java.util.Collection getNextPossiblePortletModes(RenderRequest request) {
return null;
}
public Enumeration getPublicRenderParameterNames() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getPublicRenderParameterNames();
}
public String getDefaultNamespace() {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
return config.getDefaultNamespace();
}
private void cacheAnnotatedMethods() {
// cache all annotated and visible public methods
for (Method method : this.getClass().getMethods()) {
Annotation[] annotations = method.getAnnotations();
if (annotations != null) {
for (Annotation annotation : annotations) {
Class annotationType = annotation.annotationType();
if (ProcessAction.class.equals(annotationType)) {
String name = ((ProcessAction) annotation).name();
if (name != null && name.length() > 0)
processActionHandlingMethodsMap.put(name, method);
} else if (ProcessEvent.class.equals(annotationType)) {
String qname = ((ProcessEvent) annotation).qname();
if (qname == null || qname.length() <= 0) {
if (config == null)
throw new java.lang.IllegalStateException(
"Config is null, please ensure that your init(config) method calls super.init(config)");
String name = ((ProcessEvent) annotation).name();
if (name != null && name.length() > 0) {
qname = new QName(config.getDefaultNamespace(), name).toString();
processEventHandlingMethodsMap.put(qname, method);
}
} else
processEventHandlingMethodsMap.put(qname, method);
} else if (RenderMode.class.equals(annotationType)) {
String name = ((RenderMode) annotation).name();
if (name != null && name.length() > 0)
renderModeHandlingMethodsMap.put(name.toLowerCase(), method);
}
}
}
}
}
}
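To see how the annotation-based dispatching in processAction() and doDispatch() plays out, here is a minimal sketch of a JSR 286 portlet that relies on it; the class, action name, and markup are made up for illustration.
package com.wpcertification.spi;
import java.io.IOException;
import javax.portlet.ActionRequest;
import javax.portlet.ActionResponse;
import javax.portlet.GenericPortlet;
import javax.portlet.PortletException;
import javax.portlet.ProcessAction;
import javax.portlet.RenderMode;
import javax.portlet.RenderRequest;
import javax.portlet.RenderResponse;
public class AnnotatedPortlet extends GenericPortlet {
// cacheAnnotatedMethods() stores this method under the key "save";
// processAction() invokes it when the javax.portlet.action parameter equals "save"
@ProcessAction(name = "save")
public void save(ActionRequest request, ActionResponse response) throws PortletException, IOException {
response.setRenderParameter("saved", "true");
}
// stored under "view" in renderModeHandlingMethodsMap;
// doDispatch() invokes it instead of falling back to doView()
@RenderMode(name = "view")
public void renderView(RenderRequest request, RenderResponse response) throws PortletException, IOException {
response.setContentType("text/html");
response.getWriter().println("Saved: " + request.getParameter("saved"));
}
}
Note that the handler methods must be public, because cacheAnnotatedMethods() scans this.getClass().getMethods().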
Spring Web Application, loading classes from classpath
Recently I had to break my big Spring Portlet application into smaller pieces and move some of the code to a shared library so that it could be used by multiple web applications. In my case the portlet skin, which is part of wps.war, needed access to the same beans as my portlet.
What I did to solve this problem was create a new Java project and move my DAO classes, along with the Spring configuration for the DAO classes, into that project; the JAR file built from it goes into the shared library. Since the Spring configuration files were copied to the root of the shared library, my skin could load those classes using ClassPathXmlApplicationContext like this:
new ClassPathXmlApplicationContext(new String[]{"dao.xml","dao1.xml","dao2.xml"})
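The skin code can then pull a bean out of that context; CustomerDao and the bean name customerDao below are hypothetical placeholders for whatever your dao.xml actually defines:
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
// the configuration files sit at the root of the shared-library jar,
// so the classpath-based lookup can see them from the skin's class loader
ApplicationContext context = new ClassPathXmlApplicationContext(new String[]{"dao.xml","dao1.xml","dao2.xml"});
CustomerDao customerDao = (CustomerDao) context.getBean("customerDao"); // hypothetical bean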
But how do I include the dao.xml files when the WebApplicationContext is created? In the Spring Portlet MVC framework, org.springframework.web.context.ContextLoaderListener reads the value of the contextConfigLocation context parameter and loads all of those Spring configuration files into the WebApplicationContext. Normally the WebApplicationContext looks for the configuration files in the WEB-INF directory; in my case dao.xml is not part of the web application, so it was throwing a file-not-found error.
I was able to solve that problem by prepending classpath: to the file name. When I do that, the Spring framework uses a classpath-based resource loader instead of the web-application resource loader, and that loader is capable of reading files from the classpath, which includes the shared library:
<context-param>
<param-name>contextConfigLocation</param-name>
<param-value>classpath:dao.xml,classpath:dao1.xml,classpath:dao2.xml</param-value>
</context-param>
Sharing ApplicationContext in Spring MVC and Non Spring MVC Portlet
In my project I had an interesting problem: one portlet application with 4 portlets, where 3 of the portlets use the Spring Portlet MVC framework but the fourth one does not; instead it extends GenericPortlet directly. All 4 portlets use the same DAO and other classes, and I am using Spring for wiring my DAOs.
The problem was how to obtain the ApplicationContext in the portlet that does not use Spring Portlet MVC:
PortletApplicationContextUtils.getWebApplicationContext(getPortletContext())
You can call the static PortletApplicationContextUtils.getWebApplicationContext() method anywhere in your web application code to get access to the ApplicationContext created by the ContextLoaderListener.
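For example, here is a minimal sketch of what the GenericPortlet-based portlet can do in its init() method; CustomerDao and the bean name customerDao are hypothetical placeholders:
import javax.portlet.GenericPortlet;
import javax.portlet.PortletException;
import org.springframework.context.ApplicationContext;
import org.springframework.web.portlet.context.PortletApplicationContextUtils;
public class PlainPortlet extends GenericPortlet {
private CustomerDao customerDao; // hypothetical DAO wired in the shared Spring configuration
public void init() throws PortletException {
// returns the ApplicationContext that ContextLoaderListener bound to the servlet context
ApplicationContext context = PortletApplicationContextUtils.getWebApplicationContext(getPortletContext());
customerDao = (CustomerDao) context.getBean("customerDao");
}
}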
Improved version of enable-develop-mode-startup-performance
WebSphere Portal has had the concept of a development mode for some time; the basic idea is to improve the startup time of the portal by delaying the startup of applications: an application is started when it is accessed for the first time instead of at server startup. According to Marshal Lamb, there are close to 75 portlets for administering the portal, and some companies don't use the Portal Admin Console in certain environments, such as production.
When working in a portlet developer role I don't use the Portal Admin Console much; most of my work revolves around updating the portlet application, which I can do easily using RAD, and RAD makes use of XMLAccess and wsadmin scripts, so I don't need any of the admin portlets.
WebSphere Portal provides the enable-develop-mode-startup-performance task to enable development mode and the disable-develop-mode-startup-performance task to disable it. This part is the same as in WPS 6.1; what has changed is that the task now works more gracefully. It used to disable Portal Help, the portlet palette, and Personalization. As part of WPS 6.1.5, IBM tested this feature to make sure everything works properly.
I tried using development mode on WPS 6.1.5 and now it works really well. I haven't run into any problems so far.
Assigning a UniqueName to a Skin
I had a business requirement in which I had to assign a unique name to my skin. In order to do that I followed these steps:
- First, copy the skin folder, say Test, into your wp_profile\installedApps\sunpa\wps.ear\wps.war\skins\html folder.
- Next, define the skin using the Themes and Skins admin portlet.
- Export the full portal and find the skin element for Test; you will notice that the portal has assigned an objectid to the skin. Copy that element into a separate file, add a uniquename attribute to it, and import it into the portal using XMLAccess:
<?xml version="1.0" encoding="UTF-8"?>
<request xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" build="wp6103_201_01" type="update" version="6.1.0.3" xsi:noNamespaceSchemaLocation="PortalConfig_6.1.0.2.xsd">
<portal action="locate">
<skin action="update" active="true" default="false" domain="rel" objectid="K_8000CB1A08N4C0IKUIQTT60000" resourceroot="Test" type="default" uniquename="wps.skin.TestSkin">
<localedata locale="en">
<title>Test</title>
</localedata>
</skin>
</portal>
</request>
Jar Class finder Eclipse Plugin
The Jar Class Finder is a must-have Eclipse plug-in for every WebSphere developer. Every now and then we run into a NoClassDefFoundError, and the Jar Class Finder utility helps in those scenarios by finding the class.
First, download and install the Jar Class Finder Eclipse plug-in in your IDE by following the instructions on the download page.
Once it is installed, an eye icon will appear in your Eclipse IDE toolbar; click on that icon to get the Jar Class Finder dialog box. Enter the name of the class and the classpath where you want to search for it, like this.
Click on Find; after a few minutes it will locate the JAR file that contains the class you're looking for and list all the .jar files that contain the class, like this.
Accessing Database using Eclipse/ Rational Application Developer
If you're someone like me who uses Eclipse/RAD for most development work and you want to connect to a database, then you might want to consider using the Database Development perspective in Eclipse/RAD. The database perspective allows you to work with any database using JDBC, and it is almost the same in both Eclipse and RAD; I took these screenshots in Eclipse Galileo, but the basics remain the same in RAD.
In my case I am using the Apache Derby database and wanted to work with it, so first I had to configure the Derby driver. These are the steps you can follow to configure and use your database:
- In your IDE click on Window -> Preferences to open the Preferences window. In the Preferences window click on Data -> Connectivity -> Driver Definitions.
- Click on Add and it will open the New Driver Definition dialog box like this. Since I want to connect to the network version of Apache Derby 10.2, I select it like this.
- As you can see, when we select the Derby Client JDBC Driver it gives the error message "Unable to locate JAR/zip in file system as specified by the driver definition: derbyclient.jar.", because Eclipse is not able to find the .jar file containing the database driver for Apache Derby.
- Switch to the Jar List tab like this; on this tab you will notice that derbyclient.jar is already added, but it does not point to an actual file on your machine, so remove the empty derbyclient.jar entry and add the derbyclient.jar from your machine like this.
- Save your changes and it will take you back to the driver definitions list. Since I have configured only Apache Derby so far, it is the only entry listed here. If you want to connect to additional databases, configure them here as well.
Now that the database driver is configured, the next step is to configure the database connection; follow these steps for that.
- Switch to the Database Development perspective, and in the Data Source Explorer view click on New Connection Profile to get the Connection Profile dialog like this.
- On the next page, configure your database connection properties such as the DB name, user ID, and password, the same properties you would use to connect to that database using JDBC.
- After setting the properties, click on the Test Connection button to verify that you're actually able to connect to the database. If everything works fine, click on Finish.
- Once the connection is open you will get a view like this; you can use the SQL scrapbook to write one or more queries and execute them against the database, or you can use the Data Source Explorer view to explore the database structure.
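Once the connection profile works, the same properties can be used straight from Java code as well; here is a minimal JDBC sketch for the Derby network server (the database name, port, and credentials are assumptions, use whatever you entered in the connection profile):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
public class DerbyConnectTest {
public static void main(String[] args) throws Exception {
// loads the client driver from the same derbyclient.jar the driver definition points to
Class.forName("org.apache.derby.jdbc.ClientDriver");
Connection con = DriverManager.getConnection("jdbc:derby://localhost:1527/sample", "user", "password");
Statement stmt = con.createStatement();
ResultSet rs = stmt.executeQuery("SELECT CURRENT_DATE FROM SYSIBM.SYSDUMMY1");
while (rs.next()) {
System.out.println(rs.getString(1));
}
rs.close();
stmt.close();
con.close();
}
}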
What's new in WebSphere Portal 6.1.5
This morning I attended the What's New in WebSphere Portal 6.1.5 call. It was a bit early for the west coast time zone (7:00 AM), but it was really good.
Marshal Lamb, who is a Senior Technical Staff Member, was the main speaker on this call. He talked about how IBM WebSphere Portal is used by lots of user-facing internet sites (search for /wps/portal in Google and you will find quite a few) and how IBM is planning to make it easier to build internet-facing sites using WebSphere Portal.
It seems that IBM has invested a lot in improving IBM's Workplace web content management solution from the perspective of end users and content authors, and also for companies, by providing many pre-built templates. They added social computing by supporting blogs and wikis built on top of IBM's WWCM. The integration between WWCM and the portal is much tighter now. They are also investing in newer technologies such as widgets; widgets are now first-class citizens in the WebSphere Portal world.
All in all, it seems that WPS 6.1.5 is really cool and has a lot of the features we were waiting for. Marshal also indicated that there might be a new version of the portal sometime in 2010.
I installed WPS 6.1.5 on my machine yesterday and now I am planning to try out some of the newer features.
The client-side aggregation theme works in IE 8 and Firefox 3.5
One of the major problems with WPS 6.1 was that it shipped with Dojo 1.1, and since Dojo 1.1 does not work with Internet Explorer 8 or Mozilla Firefox 3.5, if you tried using the client-side aggregation (CSA) theme it used to throw a "this browser is not supported" error and automatically switch to a server-side aggregation (SSA) theme.
Starting with WPS 6.1.5, the portal ships with Dojo 1.3.2, and as a result the CSA theme works in both Internet Explorer 8.0 and Mozilla Firefox 3.5.
As you can see, I am using Firefox 3.5.5 (3.6 beta) with a CSA theme, and the portal is making calls to the Atom service to get parts of the theme.
This is my screenshot of the client-side aggregation theme being used in Internet Explorer 8, and as you can see I don't have to switch to compatibility mode (IE 8 has the concept of a compatibility mode: if the website you're accessing is not compatible with IE 8, you can switch into compatibility mode and IE 8 will behave like IE 7).
Describe the purpose of log files created during installation, maintenance, and operation of the portal
The installation logs are located in the /log directory:
- wpinstalllog.txt
- installmessages.txt
- LocalizeTrace.archive[1..5].log
The configuration logs are located in the /ConfigEngine/log directory:
- ConfigTrace.log
- ConfigMessages.log
The runtime logs are located in the /logs/WebSphere_Portal directory:
- SystemOut.log
- SystemErr.log
Configure Login / Logout / Session Filter
In Portal 6.1 you can customize the behavior of the portal in specific authentication situations through authentication filters. The authentication filters use the same pattern as the J2EE servlet filter facility and make use of filter chains.
- Explicit login: a login by user name and password, represented by the interface com.ibm.portal.auth.ExplicitLoginFilter. For example, this can be a login using the login portlet or the login URL.
- Implicit login: for example, when a user is already authenticated by WAS but not yet to Portal. This is represented by the interface com.ibm.portal.auth.ImplicitLoginFilter.
- Explicit logout: the user triggers a logout action directly, for example by clicking the Logout button in the user interface. This is represented by the interface com.ibm.portal.auth.ExplicitLogoutFilter.
- Implicit logout: for example, after a session timeout, or if an authenticated user accesses a public page, or if the user navigates to a virtual portal without being a member of the associated user realm. This is represented by the interface com.ibm.portal.auth.ImplicitLogoutFilter.
- Session timeout: called immediately after an idle timeout of the user session occurs. This is represented by the interface com.ibm.portal.auth.SessionTimeoutFilter.
- Session validation: called for every request before actions are triggered and the page is rendered. This is represented by the interface com.ibm.portal.auth.SessionValidationFilter.
The following authentication filter chains are available for the developer:
You can configure them through the portal configuration services. You can no longer set these properties by simply changing the property value in a properties file and restarting the portal; the configuration for each service is stored in, and accessible through, the IBM WebSphere Application Server administrative console.
Use the following properties to define the custom filters in the various authentication filter chains in the portal. Each of these properties takes a comma-separated list of the fully qualified class names of the custom filter implementations.
- login.explicit.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered for an explicit login by user name and password. The classes listed in this property must implement the interface com.ibm.portal.auth.ExplicitLoginFilter.
- login.implicit.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered for an implicit login, that is if the user is already authenticated to WebSphere Application Server but has no portal session yet. The classes listed in this property must implement the interface com.ibm.portal.auth.ImplicitLoginFilter.
- logout.explicit.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered for an explicit logout. The classes listed in this property must implement the interface com.ibm.portal.auth.ExplicitLogoutFilter.
- logout.implicit.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered for an implicit logout, that is if the user got a session timeout. The classes listed in this property must implement the interface com.ibm.portal.auth.ImplicitLogoutFilter.
- sessiontimeout.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered directly after an idle timeout of the session occurs. The classes listed in this property must implement the interface com.ibm.portal.auth.SessionTimeoutFilter.
- sessionvalidation.filterchain =
- Use this property to specify the custom filters for the filter chain that is triggered for every request before the action handling and rendering is processed. The classes listed in this property must implement the interface com.ibm.portal.auth.SessionValidationFilter.
- filterchain.properties.<FilterClassName>.<PropertyName> =
- Use an arbitrary set of properties according to the above pattern to specify properties for any of your custom filters. The property value is then available to the specified filter class in the SecurityFilterConfig object passed to its init method.
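For example, to register the SampleExplicitLoginFilter from the earlier post in the explicit-login chain, the property would look something like this (the package name is illustrative, use the fully qualified name of your own filter class; in Portal 6.1 this is set as a custom property of the WP AuthenticationService resource environment provider in the WAS admin console):
login.explicit.filterchain = com.wpcertification.filter.SampleExplicitLoginFilter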
WebSphere Portal 6.1.5
A new version of WebSphere Portal, 6.1.5, was released before Thanksgiving. It has quite a few nice features, such as integrated site analysis support, mashup integration support, Page Builder, and a new version of Dojo.
This is the link to the Info Center for 6.1.5.
IBM also released 6.1.0.3 along with 6.1.5.
Dojo and WebSphere portal 6.1.5
One of the most common questions about WebSphere Portal is which version of Dojo it uses, and what you should do if you want to use a newer version of Dojo.
WebSphere Portal 6.1 uses Dojo 1.1.1, and replacing the Dojo version is not supported by IBM (i.e. if you change it, you're on your own). The CSA theme depends heavily on Dojo; in fact, almost all of the client-side functionality is built using Dojo, so even if you're using a server-side aggregation theme, features like client-side wires, setting preferences, or reading PUMA properties might break.
Starting with WebSphere Portal 6.1.5, IBM ships two versions of Dojo:
- Dojo V1.3.2. This is packaged in its own web application named Dojo_Resources. You can manage it in the WebSphere Application Server administration console. By default it is deployed at the context root /portal_dojo. The path for the Dojo V1.3.2 files is wp_profile_root/installedApps/node_name/Dojo_Resources.ear/dojo.war.
- Dojo V1.1.1. This is packaged in the directory wp_profile_root/installedApps/node_name/wps.ear/wps.war/themes/dojo/portal_dojo.
The Portal and PortalWeb2 themes use version 1.3.2 by default in WebSphere Portal Version 6.1.5. The Page Builder features and widgets are currently supported on Dojo 1.3.2 only.
IBM's support policy remains the same: the version of Dojo that ships with the portal must not be replaced.
Understand how to leverage feeds using WebSphere Portal
There is a very nice portlet in the Business Catalog called the Syndicated Feed Portlet, sometimes called the Feed Reader Portlet after its previous version. It has a sweet AJAX-enabled configuration interface where users themselves can add RSS or Atom feeds at their leisure, create categories of feeds, and drag and drop feeds between these categories.
Here are a few screenshots:
Rational Application Framework for WebSphere
For the last few days I have been spending time learning about the Rational Application Framework for WebSphere (RAFW), which is an optional feature of Rational Build Forge. We can use it to automate the installation and configuration of enterprise-level WebSphere Application Server, WebSphere Portal Server, and WebSphere Process Server environments, or use it to deploy applications and other artifacts to a WebSphere environment.
Most customers have a customized WebSphere installation. By that I mean they have a multi-cell WebSphere ND environment; they integrate it with some type of TAI, such as Tivoli Access Manager or Netegrity SiteMinder; in the case of WebSphere Portal they move data to another database, such as Oracle or DB2; they apply fixes and fix packs; and they customize the environment by adding custom attributes in WMM. In the end, the installation guide for the client will be at least a couple of hundred pages. The other problem is that when you try to create a new environment manually from that big document, it takes a long time and mistakes creep in. RAFW helps with these issues by automating the installation process.
The second issue is how you allow your development team to deploy their applications, create data sources, queues, and shared libraries, and restart the environment. RAFW allows us to automate deployment. You can combine it with Build Forge to do end-to-end deployment, which might start by checking out code from CVS, then building and deploying it on WAS, testing it, and sending emails to everyone if the tests fail.
You can also use RAFW in the performance tuning phase. Most customers have a dedicated performance environment; during performance testing we tune that environment to find the optimal connection pool size, connection timeout, and so on. Once the performance environment is tuned, we need to apply the same changes to the other environments. To do that, you would have to keep noting the changes you made in the performance environment and apply them everywhere else, which is not easy. RAFW simplifies that because it can import all the settings from your performance environment, compare them with your production or other environments, and apply the differences to all environments.
I am planning to post my RAFW notes on this blog.
Recommended reading list for the WAS Core Admin test
I used these resources to prepare for my WAS 6.1 core admin test.
IBM WebSphere Deployment and Advanced Configuration is a very good book, which covers most of the topics.
In addition to that I used the following Redbook:
Good news
Today I cleared my IBM Certified System Administrator - WebSphere Application Server, Network Deployment, V6.1 exam with 90% marks :)
Now I am thinking about the IBM Certified Advanced System Administrator - WebSphere Application Server Network Deployment V6.1 exam.
J2C activation specification
This topic provides an overview of the configuration and use of J2C activation specifications, which are used in the deployment of message-driven beans for JCA 1.5 resources.
J2C activation specifications are part of the configuration of inbound messaging support that can be part of a JCA 1.5 resource adapter. Each JCA 1.5 resource adapter that supports inbound messaging defines one or more types of message listener in its deployment descriptor (messagelistener in the ra.xml). The message listener is the interface that the resource adapter uses to communicate inbound messages to the message endpoint. A message-driven bean (MDB) is a message endpoint and implements one of the message listener interfaces provided by the resource adapter. By allowing multiple types of message listener, a resource adapter can support a variety of different protocols. For example, the interface javax.jms.MessageListener, is a type of message listener that supports JMS messaging. For each type of message listener that a resource adapter implements, the resource adapter defines an associated activation specification (activationspec in the ra.xml). The activation specification is used to set configuration properties for a particular use of the inbound support for the receiving endpoint.
When an application containing a message-driven bean is deployed, the deployer must select a resource adapter that supports the same type of message listener that the message-driven bean implements. As part of the message-driven bean deployment, the deployer needs to specify the properties to set on the J2C activation specification. Later, during application startup, a J2C activation specification instance is created, and these properties are set and used to activate the endpoint (that is, to configure the resource adapter’s inbound support for the specific message-driven bean).
Applications with message-driven beans can also specify all, some, or none of the configuration properties needed by the ActivationSpec class, to override those defined by the resource adapter-scoped definition. These properties, specified as activation-config properties in the application's deployment descriptor, are configured when the application is assembled. Changing any of these properties requires redeploying the application. These properties are unique to this application's use and are not shared with other message-driven beans. Any properties defined in the application's deployment descriptor take precedence over those defined by the resource adapter-scoped definition. This allows application developers to choose the best defaults for their applications.
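To make this concrete, here is a minimal sketch of activation-config properties in an EJB 2.1 ejb-jar.xml. The bean name, class, and values are hypothetical; destinationType and acknowledgeMode are standard JMS activation properties.
<message-driven>
  <ejb-name>OrderMDB</ejb-name>
  <ejb-class>com.example.OrderMessageBean</ejb-class>
  <messaging-type>javax.jms.MessageListener</messaging-type>
  <transaction-type>Container</transaction-type>
  <activation-config>
    <!-- These values override the resource adapter-scoped defaults -->
    <activation-config-property>
      <activation-config-property-name>destinationType</activation-config-property-name>
      <activation-config-property-value>javax.jms.Queue</activation-config-property-value>
    </activation-config-property>
    <activation-config-property>
      <activation-config-property-name>acknowledgeMode</activation-config-property-name>
      <activation-config-property-value>Auto-acknowledge</activation-config-property-value>
    </activation-config-property>
  </activation-config>
</message-driven>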
Message-driven beans
WebSphere Application Server supports the use of message-driven beans as asynchronous message consumers.
A client sends messages to the destination (or endpoint) for which the message-driven bean is deployed as the message listener. When a message arrives at the destination, the EJB container invokes the message-driven bean automatically without an application having to explicitly poll the destination. The message-driven bean implements some business logic to process incoming messages on the destination.
Message-driven beans can be configured as listeners on a Java EE Connector Architecture (JCA) 1.5 resource adapter or against a listener port (as for WebSphere Application Server Version 5). With a JCA 1.5 resource adapter, message-driven beans can handle generic message types, not just JMS messages. This makes message-driven beans suitable for handling generic requests inbound to WebSphere Application Server from enterprise information systems through the resource adapter. In the JCA 1.5 specification, such message-driven beans are commonly called message endpoints or simply endpoints.
All message-driven beans must implement the MessageDrivenBean interface. For JMS messaging, a message-driven bean must also implement the message listener interface, javax.jms.MessageListener.
A message-driven bean can be registered with the EJB timer service for time-based event notifications if it implements the javax.ejb.TimedObject interface in addition to the message listener interface.
It is recommended that you design a message-driven bean to delegate the business processing of incoming messages to another enterprise bean, to provide a clear separation of message handling and business processing. This also enables the business processing to be invoked either by the arrival of incoming messages or, for example, from a WebSphere J2EE client.
Messages arriving at a destination being processed by a message-driven bean have no client credentials associated with them; the messages are anonymous. Security depends on the role specified by the RunAs Identity for the message-driven bean as an EJB component. For more information about EJB security, see Securing enterprise bean applications.
For JMS messaging, message-driven beans can use a JMS provider that has a JCA 1.5 resource adapter, for example the default messaging provider that is part of WebSphere Application Server. With a JCA 1.5 resource adapter, you deploy EJB 2.1 message-driven beans as JCA 1.5-compliant resources, to use a J2C activation specification. If the JMS provider does not have a JCA 1.5 resource adapter, for example the V5 Default Messaging provider and the WebSphere MQ messaging provider, you must configure JMS message-driven beans against a listener port.
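To make these contracts concrete, here is a minimal sketch of a JMS message-driven bean. The class name and message handling are hypothetical; the interfaces are the ones named above.
import javax.ejb.MessageDrivenBean;
import javax.ejb.MessageDrivenContext;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageListener;
import javax.jms.TextMessage;

public class OrderMessageBean implements MessageDrivenBean, MessageListener {
    private MessageDrivenContext context;

    // Called by the EJB container to associate the bean with its context
    public void setMessageDrivenContext(MessageDrivenContext ctx) {
        this.context = ctx;
    }

    // Required no-argument create method for EJB 2.x message-driven beans
    public void ejbCreate() {
    }

    public void ejbRemove() {
    }

    // Invoked automatically by the container for each message that arrives
    // at the destination; the application never polls the destination itself.
    public void onMessage(Message message) {
        try {
            if (message instanceof TextMessage) {
                String body = ((TextMessage) message).getText();
                // Recommended practice: delegate the real business
                // processing to another enterprise bean from here.
                System.out.println("Received: " + body);
            }
        } catch (JMSException e) {
            // Mark the transaction for rollback so the message is redelivered
            context.setRollbackOnly();
        }
    }
}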
When to use activation specification vs listener port
Guidelines, related to versions of WebSphere® Application Server, to help you choose when to configure your message-driven beans to work with listener ports rather than activation specifications.
You can configure the following resources for message-driven beans:
* Activation specifications for message-driven beans that comply with Java™ EE Connector Architecture (JCA) Version 1.5.
* The message listener service, listener ports, and listeners for any message-driven beans that you want to deploy against listener ports.
If you want to use message-driven beans with a messaging provider that does not have a JCA 1.5 resource adapter (for example the WebSphere MQ messaging provider or the V5 Default Messaging provider), you cannot use activation specifications and therefore you must configure your beans against a listener port. There are also a few scenarios in which, although you could use activation specifications, you might still choose to use listener ports, for example for compatibility with existing message-driven bean applications. Here are some guidelines, related to versions of WebSphere Application Server, to help you choose when to use listener ports rather than activation specifications:
* WebSphere Application Server Version 4 does not support message-driven beans, so listener ports and activation specifications are not applicable. WebSphere Application Server Version 4 does support message beans, but these are not message-driven beans.
* WebSphere Application Server Version 5 supports EJB 2.0 (JMS only) message-driven beans that are deployed using listener ports. This deployment technology is sometimes called application server facility (ASF).
* WebSphere Application Server Version 6 continues to support message-driven beans that are deployed using listener ports, and also supports JCA, which you can use to deploy message-driven beans using activation specifications. This gives you the following options for deploying message-driven beans on WebSphere Application Server Version 6:
o You must deploy default messaging (service integration bus) message-driven beans using activation specifications.
o You must deploy WebSphere MQ message-driven beans using listener ports.
o You can deploy third-party messaging message-driven beans using either listener ports or activation specifications, depending on the facilities available from your third-party messaging provider.
Entity bean extended deployment descriptor file
J2C ConnectionFactory security
EJB Container tuning
If you use applications that affect the size of the EJB Container Cache, an incorrect cache size setting can hurt application performance. Monitoring with Tivoli Performance Viewer (TPV) is a great way to diagnose whether the EJB Container Cache size is tuned correctly for your application. If the application has filled the cache, causing evictions to occur, TPV will show a very high rate of ejbStores() being called and probably a lower than expected CPU utilization on the application server machine.
External cache groups
The dynamic cache can control caches outside of the application server, such as the Edge server, an IBM HTTP Server, or an HTTP Server ESI Fragment Processor plug-in.
When external cache groups are defined, the dynamic cache matches externally cacheable cache entries with those groups, and pushes cache entries and invalidations out to those groups. This allows WebSphere Application Server to manage dynamic content beyond the application server. The content can then be served from the external cache instead of the application server, significantly improving performance.
Edge side caching
The Web server plug-in contains a built-in ESI processor. The ESI processor can cache whole pages, as well as fragments, providing a higher cache hit ratio. The cache implemented by the ESI processor is an in-memory cache, not a disk cache, therefore, the cache entries are not saved when the Web server is restarted.
When a request is received by the Web server plug-in, it is sent to the ESI processor, unless the ESI processor is disabled. It is enabled by default. If a cache miss occurs, a Surrogate-Capabilities header is added to the request and the request is forwarded to the WebSphere Application Server. If servlet caching is enabled in the application server, and the response is edge cacheable, the application server returns a Surrogate-Control header in response to the WebSphere Application Server plug-in.
The value of the Surrogate-Control response header contains the list of rules that are used by the ESI processor to generate the cache ID. The response is then stored in the ESI cache, using the cache ID as the key. For each ESI include tag in the body of the response, a new request is processed so that each nested include results in either a cache hit or another request that forwards to the application server. When all nested includes have been processed, the page is assembled and returned to the client.
The ESI processor is configurable through the WebSphere Web server plug-in configuration file plugin-cfg.xml. The following is an example of the beginning of this file, which illustrates the ESI configuration options.
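(The exact contents vary by installation; this is only a rough sketch, and the property values shown are illustrative defaults rather than a definitive listing.)
<?xml version="1.0" encoding="ISO-8859-1"?>
<Config>
  <!-- ESI processor settings near the top of plugin-cfg.xml -->
  <Property Name="ESIEnable" Value="true"/>
  <Property Name="ESIMaxCacheSize" Value="1024"/>
  <Property Name="ESIInvalidationMonitor" Value="false"/>
  <!-- remainder of the plug-in configuration omitted -->
</Config>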
Speed WebSphere Apps with Edge Side Includes has some information on WebSphere APIs for Edge Side Includes (WESI), a set of Java application programming interfaces (APIs) and JavaServer Pages (JSP) custom tags for accelerating Web application delivery through distributed fragment caching and assembly with Edge Side Includes (ESI).
Profile Management Tool used by Installation factory
When you add any profile customization assets to your CIP, Installation Factory will generate what's called a profile template. The CIP will install this template when it installs WebSphere Application Server. These templates are what WebSphere Application Server uses to create profiles. They are also used by other WebSphere products to augment a WebSphere Application Server profile with the configuration for that product. Installation Factory uses profile templates to execute your profile customization actions as an integrated part of profile creation and augmentation.
A template contains a series of actions that can perform required configuration and application deployment steps during profile creation or augmentation. The template that is generated by Installation Factory contains actions that will process your profile customization assets in the specified order (for example, import a CAR, deploy an EAR, run a script, and so on). You can simply choose this template within the Profile Management Tool (PMT), or pass the template to the manageprofiles command, just as you would to create a standard WebSphere Application Server profile. The profile creation mechanism within WebSphere Application Server looks at the actions in the template and invokes them one after the other.
Using a profile template to execute configuration actions leverages WebSphere Application Server’s rich infrastructure, and provides a user experience that is tightly integrated with the standard profile creation tools.
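For example (the profile name and paths here are hypothetical, and the template shown is the standard default one), passing a template to the manageprofiles command looks like this:
manageprofiles.sh -create -profileName AppSrv01 -profilePath /opt/IBM/WebSphere/AppServer/profiles/AppSrv01 -templatePath /opt/IBM/WebSphere/AppServer/profileTemplates/default
A CIP-generated template would be selected the same way, by pointing -templatePath at the template that Installation Factory generated.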
Log levels
WebSphere Application Server lets you define which logging messages are written to the trace.log file.
These are the log levels defined by WAS:
- Off: No events are logged.
- Fatal: Task cannot continue and component cannot function.
- Severe: Task cannot continue, but component can still function
- Warning: Potential error or impending error
- Audit: Significant event affecting server state or resources
- Info: General information outlining overall task progress
- Config: Configuration change or status
- Detail: General information detailing subtask progress
- Fine: Trace information - General trace + method entry / exit / return values
- Finer: Trace information - Detailed trace
- Finest: Trace information - A more detailed trace - Includes all the detail that is needed to debug problems
- All: All events are logged. If you create custom levels, All includes your custom levels, and can provide a more detailed trace than Finest.
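WAS trace levels build on the standard java.util.logging hierarchy, so a plain JUL sketch shows how most of these levels are emitted from code. Fatal, Audit, and Detail come from WebSphere's own Level extensions and are not part of standard JUL, so they are omitted here.
import java.util.logging.Logger;

public class LogLevelDemo {

    private static final Logger logger = Logger.getLogger(LogLevelDemo.class.getName());

    public static void main(String[] args) {
        // With the default JUL console handler, only Info and above are printed;
        // Fine/Finer/Finest require lowering the handler and logger levels
        // (in WAS this is done through a trace specification).
        logger.severe("Task cannot continue, but component can still function");
        logger.warning("Potential error or impending error");
        logger.info("General information outlining overall task progress");
        logger.config("Configuration change or status");
        logger.fine("General trace: method entry/exit and return values");
        logger.finer("Detailed trace");
        logger.finest("Most detailed trace, for debugging problems");
    }
}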
User Management using Federated repository
If your WAS server is configured to use the federated repository, you can add or delete users and groups. You have the following three options for doing that:
- Programmatic User management APIs
- The administrative console
- wsadmin commands
Important note: The federated user repository is the only option if you want the ability to add and delete users and groups. The standalone LDAP registry, local operating system registry, and custom user registry are read-only.
What is a proxy server
A proxy server sits between the client browser and the servers behind it. It intercepts the client request, connects to the backend server using its own connection pool, forwards the client's request to the server, gets the response back from the backend server, and then returns that response to the client. A proxy server performs the same basic duties as a web server and, in addition, performs the following functions:
- Proxy: Accepts requests from the client, forwards each request to the backend, gets the response back, and forwards that response to the client. In doing so it hides the actual backend server information from the client.
- Content filtering: Proxies are often used to filter access to the internet. The proxy can be configured with rules to reject access to sites that contain objectionable material, preventing users from gaining access to those sites.
- Caching: Proxy servers can cache the results of common user requests; the next time they get a request for that resource, they can serve the response from the cache instead of sending the request to the actual backend server.
- Security: Proxies terminate the user connection inside the DMZ and hide server identities by exposing their own domain names in the URL instead of the backend server names. Combined with an authentication proxy such as TAM, proxies can authenticate the user before forwarding the request to the backend.
What is the ws_ant command line tool
To support using Apache Ant with Java 2 Platform, Enterprise Edition (J2EE) applications running on the application server, the product provides a copy of the Ant tool and a set of Ant tasks that extend the capabilities of Ant to include product-specific functions.
ws_ant.sh/bat (or ws_ant on i5/OS) is a wrapper around Apache Ant that sets up a WebSphere specific environment and includes classpath, WebSphere Ant tasks, and environment variables. It allows the same command line options as Apache Ant. For example, a simple invocation of a build file named myBuildFile.xml could be called as: ws_ant.sh -f myBuildFile.xml. The following example uses a build file named build.xml by default: ./ws_ant.sh
By combining the following tasks with those provided by Ant, you can create build scripts that compile, package, install, and test your application on the application server:
- Install and uninstall applications
- Start and stop servers in a base configuration
- Run administrative scripts or commands
- Run the Enterprise JavaBeans (EJB) deployment tool
- Run the JavaServer Pages (JSP) file precompilation tool
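For example, here is a minimal sketch of a build file that installs an application using the wsInstallApp task. The EAR name is hypothetical, and the taskdef class name is the one documented for the WebSphere Ant tasks; treat it as an assumption to verify against your WAS version.
<?xml version="1.0"?>
<project name="deploy" default="install" basedir=".">
  <!-- Make the WebSphere task available to Ant (ws_ant sets up the classpath) -->
  <taskdef name="wsInstallApp" classname="com.ibm.websphere.ant.tasks.InstallApplication"/>
  <target name="install">
    <!-- myApp.ear is a hypothetical application archive -->
    <wsInstallApp ear="myApp.ear"/>
  </target>
</project>
Saved as build.xml, this runs with a plain ./ws_ant.sh invocation, as described above, so that the WebSphere environment is set up for you.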
J2C notes
The J2EE Connector Architecture (J2C) defines a standard for connecting to different Enterprise Information Systems (EIS) from a J2EE application.
EIS vendors write J2C resource adapters, similar to the JDBC providers written by database vendors. A resource adapter is a driver that connects at the system level to a backend EIS.
In keeping with the J2EE packaging model, resource adapters are packaged as modules, in this case as RAR files, with an extension of .rar. You would typically have a .rar file for each type of EIS that you are connecting to. A RAR file is essentially a JAR file with some specific content - basically code and an XML deployment descriptor with the filename ra.xml. The RAR file may also contain utility classes, native binary files, and documentation.
A resource adapter could also be installed as part of an application install, but this practice is discouraged in favor of installing resource adapters separately on the container.
You cannot install a resource adapter at the cell level. A WAS cell contains a heterogeneous mixture of servers, such as a combination of Windows and Unix machines. Resource adapters often contain binary shared libraries, and there is no distribution mechanism at the cell level for these. The adapters use a native path to locate these executables, which could be different on some nodes or platforms. Because of these problems, J2C resource adapters have to be installed on nodes directly, and this is enforced by both wsadmin and the admin console. When using a resource adapter in a cell environment, you should install it on the nodes and then configure it at the cell scope.
J2C resources provide connection factories, similar to the datasources provided by JDBC resources. They provide common configuration and a pool of connections for improving performance. Application code obtains a connection from the connection factory and then releases the connection when done; the container takes care of making the actual connection, connection pooling, and so on.
J2C can use both container managed and component managed security identity propagation.
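As a minimal sketch of the lookup-use-release pattern described above, using the standard CCI interfaces (the JNDI name and the adapter-specific work are hypothetical):
import javax.naming.InitialContext;
import javax.resource.cci.Connection;
import javax.resource.cci.ConnectionFactory;

public class EisClient {

    public void callEis() throws Exception {
        // Look up the connection factory configured by the administrator;
        // the JNDI name is hypothetical.
        InitialContext ctx = new InitialContext();
        ConnectionFactory cf =
            (ConnectionFactory) ctx.lookup("java:comp/env/eis/MyEisConnectionFactory");

        // Obtain a connection from the pool managed by the container
        Connection connection = cf.getConnection();
        try {
            // Adapter-specific work goes here, typically through the
            // javax.resource.cci.Interaction and Record interfaces.
        } finally {
            // Releasing the connection returns it to the pool
            connection.close();
        }
    }
}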
SIP Misc notes
Session Initiation Protocol (SIP) servlets are packaged in a SAR archive file.
SIP is used for developing the following types of applications:
- Chat instant messaging applications
- Collaborative, entertainment and gaming applications
- Video conferencing and video over IP applications
The PortletServingServlet service
A simple portal framework is provided by the PortletServingServlet servlet. The PortletServingServlet servlet registers itself for each Web application that contains portlets. You can use the PortletServingServlet servlet to directly render a portlet into a full browser page by a URL request and invoke each portlet by its context root and name. See Portlet Uniform Resource Locator (URL) addressability for additional information. If you want to aggregate multiple portlets on the page, you need to use the aggregation tag library. The PortletServingServlet servlet can be disabled in an extended portlet deployment descriptor called the ibm-portlet-ext.xmi file.
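For example (the host, port, context root, and portlet name here are hypothetical), a portlet packaged in a web application with context root /myPortletApp could be rendered into a full browser page with a URL of the form: http://localhost:9080/myPortletApp/MyPortlet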
Extensions for the portlet deployment descriptor are defined within a file called ibm-portlet-ext.xmi. This deployment descriptor is an optional descriptor that you can use to configure WebSphere extensions for the portlet application and its portlets. For example, you can disable the PortletServingServlet servlet for the portlet application in the extended portlet deployment descriptor.
The ibm-portlet-ext.xmi extension file is loaded during application startup. If there are no extension files specified with this setting, the portlet container’s default values are used.
The default for the portletServingEnabled attribute is true. The following is an example of how to configure that a PortletServingServlet servlet is not created for any portlet on the portlet application.
<?xml version="1.0" encoding="UTF-8"?>
<portletappext:PortletApplicationExtension xmi:version="1.0"
xmlns:xmi="http://www.omg.org/XMI"
xmlns:portletappext="portletapplicationext.xmi"
xmlns:portletapplication="portletapplication.xmi"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmi:id="PortletApp_ID_Ext"
portletServingEnabled="false">
<portletappext:portletApplication href="WEB-INF/portlet.xml#myPortletApp"/>
</portletappext:PortletApplicationExtension>
Transaction manager high availability
The WebSphere Application Server Transaction Manager writes to its transaction recovery logs when it handles global transactions that involve two or more resources. Transaction recovery logs are stored on disk and are used for recovering in-flight transactions from system crashes or process failures. To enable WebSphere application server transaction peer recovery, it is necessary to place the recovery logs on a highly available file system, such as IBM SAN FS or NAS, for all the application servers within the same cluster to access. All application servers must be able to read from and write to the logs.
For a peer server to recover in-flight transactions, any database locks associated with the failed transactions should be released prior to the recovery. You need to use a lease-based exclusive locking protocol, such as Common Internet File System (CIFS) or Network File System (NFS) Version 4, to access remote recovery logs from WebSphere Application Server nodes. Without lease-based locking support, if one of the nodes crashes, locks held by all the processes on that node will not automatically be released. As a result, the transactions cannot be completed, and database access can be impaired due to the unreleased locks.
In the event of a server failure, the transaction service of the failed application server is out of service. Also, the in-flight transactions that have not been committed might leave locks in the database, which blocks the peer server from gaining access to the locked records. There are only two ways to complete the transactions and release the locks: one is to restart the failed server, and the other is to start an application server process on another box that has access to the transaction logs. Using the new HAManager support, a highly available file system, and a lease-based locking protocol, a recovery process will be started in a peer member of the cluster. The recovery locks are released and in-flight transactions are committed.
What is a core group
A core group is a high availability domain within a cell. It serves as a physical grouping of JVMs in a cell that are candidates to host singleton services. It can contain stand-alone servers, cluster members, Node Agents, or the Deployment Manager.
A cell must have at least one core group. The WebSphere Application Server creates a default core group, called DefaultCoreGroup, for each cell. Each JVM process can only be a member of one core group. Naturally, cluster members must belong to the same core group. At runtime, the core group and policy configurations are matched together to form high availability groups.
A set of JVMs can work together as a group to host a highly available service. All JVMs with the potential to host the service join the group when they start. If the scope of the singleton (such as a Transaction Manager or a messaging engine) is a WebSphere cluster then all members of the cluster are part of such a group of JVMs that can host the service.
In a large-scale implementation with clusters spanning multiple geographies, you can create multiple core groups in the cell and link them together with the core group bridge to form flexible topologies. The most important thing is that every JVM in a core group must be able to open a connection to all other members of the core group.
A core group cannot extend beyond a cell, or overlap with other core groups. Core groups in the same cell or from different cells, however, can share workload management routing information using the core group bridge service.
What is HAManager
IBM WebSphere Application Server Network Deployment V6 introduces a new feature called High Availability Manager (commonly called HAManager) that enhances the availability of WebSphere singleton services such as transaction or messaging services. It provides a peer recovery mechanism for in-flight transactions or messages among clustered WebSphere application servers. HAManager enhances the availability of the following singleton services in WebSphere:
- Transaction service - Transaction log recovery
- Messaging service - Messaging engine restarting
The HAManager runs as a service within each WebSphere process (Deployment Manager, Node Agents, or application servers) and monitors the health of WebSphere singleton services. In the event of a server failure, the HAManager will fail over any singleton service that was running on the failed server to a peer server. Examples of such a failover include the recovery of any in-flight transactions or restarting any messaging engines that were running on the failed server.